From 39465d0cfc4dd44ee87f666c39195fa342230c42 Mon Sep 17 00:00:00 2001 From: PeterLin Date: Sat, 3 Nov 2018 10:39:58 +0800 Subject: [PATCH 01/20] Add Porsch project with Nephos --- .../x86_64-pegatron_porsche-r0/installer.conf | 3 + .../x86_64-pegatron_porsche-r0/minigraph.xml | 1074 +++++++++++++ .../plugins/eeprom.py | 21 + .../plugins/psuutil.py | 92 ++ .../plugins/sfputil.py | 238 +++ .../porsche/port_config.ini | 55 + .../porsche/sai.profile | 2 + .../porsche/tau-porsche.dsh | 497 ++++++ .../tau-porsche.cfg | 23 + platform/nephos/one-image.mk | 3 +- platform/nephos/platform-modules-pegatron.mk | 13 + platform/nephos/rules.mk | 1 + .../sonic-platform-modules-pegatron/LICENSE | 16 + .../sonic-platform-modules-pegatron/README.md | 1 + .../common/modules/pegatron_hwmon_mcu.c | 1374 +++++++++++++++++ .../debian/changelog | 5 + .../debian/compat | 1 + .../debian/control | 12 + .../debian/rules | 88 ++ .../porsche/modules/Makefile | 1 + .../porsche/modules/pegatron_hwmon_mcu.c | 1 + .../porsche/modules/pegatron_porsche_cpld.c | 1132 ++++++++++++++ .../porsche/modules/pegatron_porsche_sfp.c | 431 ++++++ .../porsche/scripts/sensors | 7 + .../service/porsche-platform-init.service | 13 + .../porsche/utils/pegatron_porsche_util.py | 209 +++ .../porsche/utils/porsche_sensors.py | 141 ++ 27 files changed, 5453 insertions(+), 1 deletion(-) create mode 100755 device/pegatron/x86_64-pegatron_porsche-r0/installer.conf create mode 100755 device/pegatron/x86_64-pegatron_porsche-r0/minigraph.xml create mode 100755 device/pegatron/x86_64-pegatron_porsche-r0/plugins/eeprom.py create mode 100755 device/pegatron/x86_64-pegatron_porsche-r0/plugins/psuutil.py create mode 100755 device/pegatron/x86_64-pegatron_porsche-r0/plugins/sfputil.py create mode 100755 device/pegatron/x86_64-pegatron_porsche-r0/porsche/port_config.ini create mode 100755 device/pegatron/x86_64-pegatron_porsche-r0/porsche/sai.profile create mode 100755 
device/pegatron/x86_64-pegatron_porsche-r0/porsche/tau-porsche.dsh create mode 100755 device/pegatron/x86_64-pegatron_porsche-r0/tau-porsche.cfg create mode 100755 platform/nephos/platform-modules-pegatron.mk create mode 100644 platform/nephos/sonic-platform-modules-pegatron/LICENSE create mode 100644 platform/nephos/sonic-platform-modules-pegatron/README.md create mode 100644 platform/nephos/sonic-platform-modules-pegatron/common/modules/pegatron_hwmon_mcu.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/debian/changelog create mode 100644 platform/nephos/sonic-platform-modules-pegatron/debian/compat create mode 100755 platform/nephos/sonic-platform-modules-pegatron/debian/control create mode 100755 platform/nephos/sonic-platform-modules-pegatron/debian/rules create mode 100644 platform/nephos/sonic-platform-modules-pegatron/porsche/modules/Makefile create mode 120000 platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_hwmon_mcu.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_cpld.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_sfp.c create mode 100755 platform/nephos/sonic-platform-modules-pegatron/porsche/scripts/sensors create mode 100644 platform/nephos/sonic-platform-modules-pegatron/porsche/service/porsche-platform-init.service create mode 100755 platform/nephos/sonic-platform-modules-pegatron/porsche/utils/pegatron_porsche_util.py create mode 100755 platform/nephos/sonic-platform-modules-pegatron/porsche/utils/porsche_sensors.py diff --git a/device/pegatron/x86_64-pegatron_porsche-r0/installer.conf b/device/pegatron/x86_64-pegatron_porsche-r0/installer.conf new file mode 100755 index 000000000000..14404194ef53 --- /dev/null +++ b/device/pegatron/x86_64-pegatron_porsche-r0/installer.conf @@ -0,0 +1,3 @@ +CONSOLE_PORT=0x2f8 +CONSOLE_DEV=1 +CONSOLE_SPEED=115200 diff --git 
a/device/pegatron/x86_64-pegatron_porsche-r0/minigraph.xml b/device/pegatron/x86_64-pegatron_porsche-r0/minigraph.xml new file mode 100755 index 000000000000..2a4b6414d05a --- /dev/null +++ b/device/pegatron/x86_64-pegatron_porsche-r0/minigraph.xml @@ -0,0 +1,1074 @@ + + + + + + ARISTA01T0 + 10.0.0.33 + switch1 + 10.0.0.32 + 1 + 180 + 60 + + + switch1 + 10.0.0.0 + ARISTA01T2 + 10.0.0.1 + 1 + 180 + 60 + + + ARISTA02T0 + 10.0.0.35 + switch1 + 10.0.0.34 + 1 + 180 + 60 + + + switch1 + 10.0.0.2 + ARISTA02T2 + 10.0.0.3 + 1 + 180 + 60 + + + ARISTA03T0 + 10.0.0.37 + switch1 + 10.0.0.36 + 1 + 180 + 60 + + + switch1 + 10.0.0.4 + ARISTA03T2 + 10.0.0.5 + 1 + 180 + 60 + + + ARISTA04T0 + 10.0.0.39 + switch1 + 10.0.0.38 + 1 + 180 + 60 + + + switch1 + 10.0.0.6 + ARISTA04T2 + 10.0.0.7 + 1 + 180 + 60 + + + ARISTA05T0 + 10.0.0.41 + switch1 + 10.0.0.40 + 1 + 180 + 60 + + + switch1 + 10.0.0.8 + ARISTA05T2 + 10.0.0.9 + 1 + 180 + 60 + + + ARISTA06T0 + 10.0.0.43 + switch1 + 10.0.0.42 + 1 + 180 + 60 + + + switch1 + 10.0.0.10 + ARISTA06T2 + 10.0.0.11 + 1 + 180 + 60 + + + ARISTA07T0 + 10.0.0.45 + switch1 + 10.0.0.44 + 1 + 180 + 60 + + + switch1 + 10.0.0.12 + ARISTA07T2 + 10.0.0.13 + 1 + 180 + 60 + + + ARISTA08T0 + 10.0.0.47 + switch1 + 10.0.0.46 + 1 + 180 + 60 + + + switch1 + 10.0.0.14 + ARISTA08T2 + 10.0.0.15 + 1 + 180 + 60 + + + ARISTA09T0 + 10.0.0.49 + switch1 + 10.0.0.48 + 1 + 180 + 60 + + + switch1 + 10.0.0.16 + ARISTA09T2 + 10.0.0.17 + 1 + 180 + 60 + + + ARISTA10T0 + 10.0.0.51 + switch1 + 10.0.0.50 + 1 + 180 + 60 + + + switch1 + 10.0.0.18 + ARISTA10T2 + 10.0.0.19 + 1 + 180 + 60 + + + ARISTA11T0 + 10.0.0.53 + switch1 + 10.0.0.52 + 1 + 180 + 60 + + + switch1 + 10.0.0.20 + ARISTA11T2 + 10.0.0.21 + 1 + 180 + 60 + + + ARISTA12T0 + 10.0.0.55 + switch1 + 10.0.0.54 + 1 + 180 + 60 + + + switch1 + 10.0.0.22 + ARISTA12T2 + 10.0.0.23 + 1 + 180 + 60 + + + ARISTA13T0 + 10.0.0.57 + switch1 + 10.0.0.56 + 1 + 180 + 60 + + + switch1 + 10.0.0.24 + ARISTA13T2 + 10.0.0.25 + 1 + 180 + 60 + + + ARISTA14T0 + 
10.0.0.59 + switch1 + 10.0.0.58 + 1 + 180 + 60 + + + switch1 + 10.0.0.26 + ARISTA14T2 + 10.0.0.27 + 1 + 180 + 60 + + + ARISTA15T0 + 10.0.0.61 + switch1 + 10.0.0.60 + 1 + 180 + 60 + + + switch1 + 10.0.0.28 + ARISTA15T2 + 10.0.0.29 + 1 + 180 + 60 + + + ARISTA16T0 + 10.0.0.63 + switch1 + 10.0.0.62 + 1 + 180 + 60 + + + switch1 + 10.0.0.30 + ARISTA16T2 + 10.0.0.31 + 1 + 180 + 60 + + + + + 65100 + switch + + +
10.0.0.33
+ + +
+ +
10.0.0.1
+ + +
+ +
10.0.0.35
+ + +
+ +
10.0.0.3
+ + +
+ +
10.0.0.37
+ + +
+ +
10.0.0.5
+ + +
+ +
10.0.0.39
+ + +
+ +
10.0.0.7
+ + +
+ +
10.0.0.41
+ + +
+ +
10.0.0.9
+ + +
+ +
10.0.0.43
+ + +
+ +
10.0.0.11
+ + +
+ +
10.0.0.45
+ + +
+ +
10.0.0.13
+ + +
+ +
10.0.0.47
+ + +
+ +
10.0.0.15
+ + +
+ +
10.0.0.49
+ + +
+ +
10.0.0.17
+ + +
+ +
10.0.0.51
+ + +
+ +
10.0.0.19
+ + +
+ +
10.0.0.53
+ + +
+ +
10.0.0.21
+ + +
+ +
10.0.0.55
+ + +
+ +
10.0.0.23
+ + +
+ +
10.0.0.57
+ + +
+ +
10.0.0.25
+ + +
+ +
10.0.0.59
+ + +
+ +
10.0.0.27
+ + +
+ +
10.0.0.61
+ + +
+ +
10.0.0.29
+ + +
+ +
10.0.0.63
+ + +
+ +
10.0.0.31
+ + +
+
+ +
+ + 64001 + ARISTA01T0 + + + + 65200 + ARISTA01T2 + + + + 64002 + ARISTA02T0 + + + + 65200 + ARISTA02T2 + + + + 64003 + ARISTA03T0 + + + + 65200 + ARISTA03T2 + + + + 64004 + ARISTA04T0 + + + + 65200 + ARISTA04T2 + + + + 64005 + ARISTA05T0 + + + + 65200 + ARISTA05T2 + + + + 64006 + ARISTA06T0 + + + + 65200 + ARISTA06T2 + + + + 64007 + ARISTA07T0 + + + + 65200 + ARISTA07T2 + + + + 64008 + ARISTA08T0 + + + + 65200 + ARISTA08T2 + + + + 64009 + ARISTA09T0 + + + + 65200 + ARISTA09T2 + + + + 64010 + ARISTA10T0 + + + + 65200 + ARISTA10T2 + + + + 64011 + ARISTA11T0 + + + + 65200 + ARISTA11T2 + + + + 64012 + ARISTA12T0 + + + + 65200 + ARISTA12T2 + + + + 64013 + ARISTA13T0 + + + + 65200 + ARISTA13T2 + + + + 64014 + ARISTA14T0 + + + + 65200 + ARISTA14T2 + + + + 64015 + ARISTA15T0 + + + + 65200 + ARISTA15T2 + + + + 64016 + ARISTA16T0 + + + + 65200 + ARISTA16T2 + + +
+
+ + + + + + HostIP + Loopback0 + + 10.1.0.32/32 + + 10.1.0.32/32 + + + + + + + + switch + + + + + + Ethernet0 + 10.0.0.0/31 + + + + Ethernet4 + 10.0.0.2/31 + + + + Ethernet8 + 10.0.0.4/31 + + + + Ethernet12 + 10.0.0.6/31 + + + + Ethernet16 + 10.0.0.8/31 + + + + Ethernet20 + 10.0.0.10/31 + + + + Ethernet24 + 10.0.0.12/31 + + + + Ethernet28 + 10.0.0.14/31 + + + + Ethernet32 + 10.0.0.16/31 + + + + Ethernet36 + 10.0.0.18/31 + + + + Ethernet40 + 10.0.0.20/31 + + + + Ethernet44 + 10.0.0.22/31 + + + + Ethernet48 + 10.0.0.24/31 + + + + Ethernet52 + 10.0.0.26/31 + + + + Ethernet56 + 10.0.0.28/31 + + + + Ethernet60 + 10.0.0.30/31 + + + + Ethernet64 + 10.0.0.32/31 + + + + Ethernet68 + 10.0.0.34/31 + + + + Ethernet72 + 10.0.0.36/31 + + + + Ethernet76 + 10.0.0.38/31 + + + + Ethernet80 + 10.0.0.40/31 + + + + Ethernet84 + 10.0.0.42/31 + + + + Ethernet88 + 10.0.0.44/31 + + + + Ethernet92 + 10.0.0.46/31 + + + + Ethernet96 + 10.0.0.48/31 + + + + Ethernet100 + 10.0.0.50/31 + + + + Ethernet104 + 10.0.0.52/31 + + + + Ethernet108 + 10.0.0.54/31 + + + + Ethernet112 + 10.0.0.56/31 + + + + Ethernet116 + 10.0.0.58/31 + + + + Ethernet120 + 10.0.0.60/31 + + + + Ethernet124 + 10.0.0.62/31 + + + + + + + + + + + + DeviceInterfaceLink + switch1 + Ethernet0 + ARISTA01T2 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet4 + ARISTA02T2 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet8 + ARISTA03T2 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet12 + ARISTA04T2 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet16 + ARISTA05T2 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet20 + ARISTA06T2 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet24 + ARISTA07T2 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet28 + ARISTA08T2 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet32 + ARISTA09T2 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet36 + ARISTA10T2 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet40 + ARISTA11T2 + 
Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet44 + ARISTA12T2 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet48 + ARISTA13T2 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet52 + ARISTA14T2 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet56 + ARISTA15T2 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet60 + ARISTA16T2 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet64 + ARISTA01T0 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet68 + ARISTA02T0 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet72 + ARISTA03T0 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet76 + ARISTA04T0 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet80 + ARISTA05T0 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet84 + ARISTA06T0 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet88 + ARISTA07T0 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet92 + ARISTA08T0 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet96 + ARISTA09T0 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet100 + ARISTA10T0 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet104 + ARISTA11T0 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet108 + ARISTA12T0 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet112 + ARISTA13T0 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet116 + ARISTA14T0 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet120 + ARISTA15T0 + Ethernet1 + + + DeviceInterfaceLink + switch1 + Ethernet124 + ARISTA16T0 + Ethernet1 + + + + + switch + porsche + + + + + + + switch1 + + + DhcpResources + + + + + NtpResources + + 0.debian.pool.ntp.org;1.debian.pool.ntp.org;2.debian.pool.ntp.org;3.debian.pool.ntp.org + + + SyslogResources + + + + + + + + + switch + porsche +
diff --git a/device/pegatron/x86_64-pegatron_porsche-r0/plugins/eeprom.py b/device/pegatron/x86_64-pegatron_porsche-r0/plugins/eeprom.py new file mode 100755 index 000000000000..6964c6bade4f --- /dev/null +++ b/device/pegatron/x86_64-pegatron_porsche-r0/plugins/eeprom.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python + +try: + import exceptions + import binascii + import time + import optparse + import warnings + import os + import sys + from sonic_eeprom import eeprom_base + from sonic_eeprom import eeprom_tlvinfo + import subprocess +except ImportError, e: + raise ImportError (str(e) + "- required module not found") + +class board(eeprom_tlvinfo.TlvInfoDecoder): + _TLV_INFO_MAX_LEN = 256 + def __init__(self, name, path, cpld_root, ro): + self.eeprom_path = "/sys/bus/i2c/devices/4-0054/eeprom" + super(board, self).__init__(self.eeprom_path, 0, '', True) diff --git a/device/pegatron/x86_64-pegatron_porsche-r0/plugins/psuutil.py b/device/pegatron/x86_64-pegatron_porsche-r0/plugins/psuutil.py new file mode 100755 index 000000000000..a23a7b7fe73e --- /dev/null +++ b/device/pegatron/x86_64-pegatron_porsche-r0/plugins/psuutil.py @@ -0,0 +1,92 @@ +# +# psuutil.py +# Platform-specific PSU status interface for SONiC +# + + +import os.path + +try: + from sonic_psu.psu_base import PsuBase +except ImportError as e: + raise ImportError(str(e) + "- required module not found") + + +class PsuUtil(PsuBase): + """Platform-specific PSUutil class""" + + SYSFS_PSU_DIR = "/sys/bus/i2c/devices/7-0075" + + def __init__(self): + PsuBase.__init__(self) + + + # Get sysfs attribute + def get_attr_value(self, attr_path): + + retval = 'ERR' + if (not os.path.isfile(attr_path)): + return retval + + try: + with open(attr_path, 'r') as fd: + retval = fd.read() + except Exception as error: + logging.error("Unable to open ", attr_path, " file !") + + retval = retval.rstrip('\r\n') + + fd.close() + return retval + + def get_num_psus(self): + """ + Retrieves the number of PSUs available on the device + 
:return: An integer, the number of PSUs available on the device + """ + MAX_PSUS = 2 + return MAX_PSUS + + def get_psu_status(self, index): + """ + Retrieves the oprational status of power supply unit (PSU) defined + by index + :param index: An integer, index of the PSU of which to query status + :return: Boolean, True if PSU is operating properly, False if PSU is\ + faulty + """ + status = 0 + attr_file = 'psu_'+str(index)+'_status' + attr_path = self.SYSFS_PSU_DIR +'/' + attr_file + + attr_value = self.get_attr_value(attr_path) + + if (attr_value != 'ERR'): + attr_value = int(attr_value, 16) + # Check for PSU status + if (attr_value == 1): + status = 1 + + return status + + def get_psu_presence(self, index): + """ + Retrieves the presence status of power supply unit (PSU) defined + by index + :param index: An integer, index of the PSU of which to query status + :return: Boolean, True if PSU is plugged, False if not + """ + status = 0 + attr_file = 'psu_'+str(index)+'_present' + attr_path = self.SYSFS_PSU_DIR +'/' + attr_file + + attr_value = self.get_attr_value(attr_path) + + if (attr_value != 'ERR'): + attr_value = int(attr_value, 16) + # Check for PSU presence + if (attr_value == 0): + status = 1 + + return status + diff --git a/device/pegatron/x86_64-pegatron_porsche-r0/plugins/sfputil.py b/device/pegatron/x86_64-pegatron_porsche-r0/plugins/sfputil.py new file mode 100755 index 000000000000..28909f00110c --- /dev/null +++ b/device/pegatron/x86_64-pegatron_porsche-r0/plugins/sfputil.py @@ -0,0 +1,238 @@ +#!/usr/bin/env python + +try: + import os + import re + import time + from sonic_sfp.sfputilbase import SfpUtilBase +except ImportError, e: + raise ImportError (str(e) + "- required module not found") + + +class SfpUtil(SfpUtilBase): + """Platform specific sfputil class""" + + port_start = 0 + port_end = 53 + ports_in_block = 54 + cplda_sfp_num = 24 + cpldb_sfp_num = 12 + cpldc_sfp_num = 18 + + port_to_eeprom_mapping = {} + port_to_i2c_mapping = {} + sfp_ports 
= range(0, ports_in_block) + qsfp_ports = range(ports_in_block - 6, ports_in_block) + + + def __init__(self): + for x in range(self.port_start, self.port_end + 1): + if x < self.cpldb_sfp_num: + self.port_to_i2c_mapping.update({x:7}) + elif x < self.cplda_sfp_num + self.cpldb_sfp_num: + self.port_to_i2c_mapping.update({x:6}) + else: + self.port_to_i2c_mapping.update({x:8}) + + for x in range(self.port_start, self.port_end+1): + eeprom_path = '/sys/bus/i2c/devices/{0}-0050/sfp'+str(x+1)+'_eeprom' + port_eeprom_path = eeprom_path.format(self.port_to_i2c_mapping[x]) + self.port_to_eeprom_mapping[x] = port_eeprom_path + SfpUtilBase.__init__(self) + + + def get_presence(self, port_num): + if port_num < self.port_start or port_num > self.port_end: + return False + + if port_num < self.cpldb_sfp_num: + presence_path = '/sys/bus/i2c/devices/7-0075/sfp'+str(port_num+1)+'_present' + elif port_num < self.cpldb_sfp_num + self.cplda_sfp_num: + presence_path = '/sys/bus/i2c/devices/6-0074/sfp'+str(port_num+1)+'_present' + else: + presence_path = '/sys/bus/i2c/devices/8-0076/sfp'+str(port_num+1)+'_present' + + try: + file = open(presence_path) + except IOError as e: + print "Error: unable to open file: %s" % str(e) + return False + + value = int(file.readline().rstrip()) + + file.close() + if value == 0: + return True + + return False + + def get_low_power_mode(self, port_num): + if port_num not in self.qsfp_ports: + return False + + lowpower_path = '/sys/bus/i2c/devices/8-0076/sfp'+str(port_num+1)+'_lowpower' + + try: + file = open(lowpower_path) + except IOError as e: + print "Error: unable to open file: %s" % str(e) + return False + + value = int(file.readline().rstrip()) + + file.close() + if value == 1: + return True + + return False + + def set_low_power_mode(self, port_num, lpmode): + if port_num not in self.qsfp_ports: + return False + + lowpower_path = '/sys/bus/i2c/devices/8-0076/sfp'+str(port_num+1)+'_lowpower' + + # LPMode is active high; set or clear the bit 
accordingly + if lpmode is True: + value = 1 + else: + value = 0 + + try: + file = open(lowpower_path, "r+") + except IOError as e: + print "Error: unable to open file: %s" % str(e) + return False + + file.seek(0) + file.write(str(value)) + file.close() + + return True + + def reset(self, port_num): + if port_num not in self.qsfp_ports: + return False + reset_path = '/sys/bus/i2c/devices/8-0076/sfp'+str(port_num+1)+'_reset' + + try: + file = open(reset_path, "r+") + except IOError as e: + print "Error: unable to open file: %s" % str(e) + return False + + file.seek(0) + file.write(str(2)) + file.close() + + # Sleep 1 second to allow it to settle + time.sleep(1) + + try: + file = open(reset_path, "r+") + except IOError as e: + print "Error: unable to open file: %s" % str(e) + return False + + file.seek(0) + file.write(str(1)) + file.close() + + return True + + def read_porttab_mappings(self, porttabfile): + logical = [] + logical_to_bcm = {} + logical_to_physical = {} + physical_to_logical = {} + last_fp_port_index = 0 + last_portname = "" + first = 1 + port_pos_in_file = 0 + parse_fmt_port_config_ini = False + + try: + f = open(porttabfile) + except: + raise + + parse_fmt_port_config_ini = (os.path.basename(porttabfile) == "port_config.ini") + + # Read the porttab file and generate dicts + # with mapping for future reference. 
+ # XXX: move the porttab + # parsing stuff to a separate module, or reuse + # if something already exists + for line in f: + line.strip() + if re.search("^#", line) is not None: + continue + + # Parsing logic for 'port_config.ini' file + if (parse_fmt_port_config_ini): + # bcm_port is not explicitly listed in port_config.ini format + # Currently we assume ports are listed in numerical order according to bcm_port + # so we use the port's position in the file (zero-based) as bcm_port + portname = line.split()[0] + + bcm_port = str(port_pos_in_file) + + if len(line.split()) >= 4: + fp_port_index = int(line.split()[3]) + else: + fp_port_index = portname.split("Ethernet").pop() + fp_port_index = int(fp_port_index.split("s").pop(0))/4 + else: # Parsing logic for older 'portmap.ini' file + (portname, bcm_port) = line.split("=")[1].split(",")[:2] + + fp_port_index = portname.split("Ethernet").pop() + fp_port_index = int(fp_port_index.split("s").pop(0))/4 + + if ((len(self.sfp_ports) > 0) and (fp_port_index not in self.sfp_ports)): + continue + + if first == 1: + # Initialize last_[physical|logical]_port + # to the first valid port + last_fp_port_index = fp_port_index + last_portname = portname + first = 0 + + logical.append(portname) + + logical_to_bcm[portname] = "xe" + bcm_port + logical_to_physical[portname] = [fp_port_index] + if physical_to_logical.get(fp_port_index) is None: + physical_to_logical[fp_port_index] = [portname] + else: + physical_to_logical[fp_port_index].append( + portname) + + if (fp_port_index - last_fp_port_index) > 1: + # last port was a gang port + for p in range(last_fp_port_index+1, fp_port_index): + logical_to_physical[last_portname].append(p) + if physical_to_logical.get(p) is None: + physical_to_logical[p] = [last_portname] + else: + physical_to_logical[p].append(last_portname) + + last_fp_port_index = fp_port_index + last_portname = portname + + port_pos_in_file += 1 + + self.logical = logical + self.logical_to_bcm = logical_to_bcm + 
self.logical_to_physical = logical_to_physical + self.physical_to_logical = physical_to_logical + + """ + print "logical: " + self.logical + print "logical to bcm: " + self.logical_to_bcm + print "logical to physical: " + self.logical_to_physical + print "physical to logical: " + self.physical_to_logical + """ + + + diff --git a/device/pegatron/x86_64-pegatron_porsche-r0/porsche/port_config.ini b/device/pegatron/x86_64-pegatron_porsche-r0/porsche/port_config.ini new file mode 100755 index 000000000000..cc4cf6d44388 --- /dev/null +++ b/device/pegatron/x86_64-pegatron_porsche-r0/porsche/port_config.ini @@ -0,0 +1,55 @@ +# name lanes alias index speed +Ethernet0 8 Ethernet1/1 0 10000 +Ethernet1 9 Ethernet2/1 1 10000 +Ethernet2 10 Ethernet3/1 2 10000 +Ethernet3 11 Ethernet4/1 3 10000 +Ethernet4 12 Ethernet5/1 4 10000 +Ethernet5 13 Ethernet6/1 5 10000 +Ethernet6 14 Ethernet7/1 6 10000 +Ethernet7 15 Ethernet8/1 7 10000 +Ethernet8 16 Ethernet9/1 8 10000 +Ethernet9 17 Ethernet10/1 9 10000 +Ethernet10 18 Ethernet11/1 10 10000 +Ethernet11 19 Ethernet12/1 11 10000 +Ethernet12 20 Ethernet13/1 12 10000 +Ethernet13 21 Ethernet14/1 13 10000 +Ethernet14 22 Ethernet15/1 14 10000 +Ethernet15 23 Ethernet16/1 15 10000 +Ethernet16 32 Ethernet17/1 16 10000 +Ethernet17 33 Ethernet18/1 17 10000 +Ethernet18 34 Ethernet19/1 18 10000 +Ethernet19 35 Ethernet20/1 19 10000 +Ethernet20 40 Ethernet21/1 20 10000 +Ethernet21 41 Ethernet22/1 21 10000 +Ethernet22 42 Ethernet23/1 22 10000 +Ethernet23 43 Ethernet24/1 23 10000 +Ethernet24 48 Ethernet25/1 24 10000 +Ethernet25 49 Ethernet26/1 25 10000 +Ethernet26 50 Ethernet27/1 26 10000 +Ethernet27 51 Ethernet28/1 27 10000 +Ethernet28 56 Ethernet29/1 28 10000 +Ethernet29 57 Ethernet30/1 29 10000 +Ethernet30 58 Ethernet31/1 30 10000 +Ethernet31 59 Ethernet32/1 31 10000 +Ethernet32 64 Ethernet33/1 32 10000 +Ethernet33 65 Ethernet34/1 33 10000 +Ethernet34 66 Ethernet35/1 34 10000 +Ethernet35 67 Ethernet36/1 35 10000 +Ethernet36 68 Ethernet37/1 36 10000 
+Ethernet37 69 Ethernet38/1 37 10000 +Ethernet38 70 Ethernet39/1 38 10000 +Ethernet39 71 Ethernet40/1 39 10000 +Ethernet40 72 Ethernet41/1 40 10000 +Ethernet41 73 Ethernet42/1 41 10000 +Ethernet42 74 Ethernet43/1 42 10000 +Ethernet43 75 Ethernet44/1 43 10000 +Ethernet44 76 Ethernet45/1 44 10000 +Ethernet45 77 Ethernet46/1 45 10000 +Ethernet46 78 Ethernet47/1 46 10000 +Ethernet47 79 Ethernet48/1 47 10000 +Ethernet48 80,81,82,83 Ethernet49/1 48 100000 +Ethernet49 84,85,86,87 Ethernet50/1 49 100000 +Ethernet50 104,105,106,107 Ethernet51/1 50 100000 +Ethernet51 108,109,110,111 Ethernet52/1 51 100000 +Ethernet52 112,113,114,115 Ethernet53/1 52 100000 +Ethernet53 116,117,118,119 Ethernet54/1 53 100000 diff --git a/device/pegatron/x86_64-pegatron_porsche-r0/porsche/sai.profile b/device/pegatron/x86_64-pegatron_porsche-r0/porsche/sai.profile new file mode 100755 index 000000000000..f19a366b1cac --- /dev/null +++ b/device/pegatron/x86_64-pegatron_porsche-r0/porsche/sai.profile @@ -0,0 +1,2 @@ +SAI_INIT_CONFIG_FILE=/usr/share/sonic/platform/tau-porsche.cfg +SAI_DSH_CONFIG_FILE=/usr/share/sonic/hwsku/tau-porsche.dsh diff --git a/device/pegatron/x86_64-pegatron_porsche-r0/porsche/tau-porsche.dsh b/device/pegatron/x86_64-pegatron_porsche-r0/porsche/tau-porsche.dsh new file mode 100755 index 000000000000..b370fe83b837 --- /dev/null +++ b/device/pegatron/x86_64-pegatron_porsche-r0/porsche/tau-porsche.dsh @@ -0,0 +1,497 @@ +init start stage unit=0 low-level +init set port-map unit=0 port=0 eth-macro=2 lane=0 max-speed=10g active=true +init set port-map unit=0 port=1 eth-macro=2 lane=1 max-speed=10g active=true +init set port-map unit=0 port=2 eth-macro=2 lane=2 max-speed=10g active=true +init set port-map unit=0 port=3 eth-macro=2 lane=3 max-speed=10g active=true +init set port-map unit=0 port=4 eth-macro=3 lane=0 max-speed=10g active=true +init set port-map unit=0 port=5 eth-macro=3 lane=1 max-speed=10g active=true +init set port-map unit=0 port=6 eth-macro=3 lane=2 max-speed=10g 
active=true +init set port-map unit=0 port=7 eth-macro=3 lane=3 max-speed=10g active=true +init set port-map unit=0 port=8 eth-macro=4 lane=0 max-speed=10g active=true +init set port-map unit=0 port=9 eth-macro=4 lane=1 max-speed=10g active=true +init set port-map unit=0 port=10 eth-macro=4 lane=2 max-speed=10g active=true +init set port-map unit=0 port=11 eth-macro=4 lane=3 max-speed=10g active=true +init set port-map unit=0 port=12 eth-macro=5 lane=0 max-speed=10g active=true +init set port-map unit=0 port=13 eth-macro=5 lane=1 max-speed=10g active=true +init set port-map unit=0 port=14 eth-macro=5 lane=2 max-speed=10g active=true +init set port-map unit=0 port=15 eth-macro=5 lane=3 max-speed=10g active=true +init set port-map unit=0 port=16 eth-macro=8 lane=0 max-speed=10g active=true +init set port-map unit=0 port=17 eth-macro=8 lane=1 max-speed=10g active=true +init set port-map unit=0 port=18 eth-macro=8 lane=2 max-speed=10g active=true +init set port-map unit=0 port=19 eth-macro=8 lane=3 max-speed=10g active=true +init set port-map unit=0 port=20 eth-macro=10 lane=0 max-speed=10g active=true +init set port-map unit=0 port=21 eth-macro=10 lane=1 max-speed=10g active=true +init set port-map unit=0 port=22 eth-macro=10 lane=2 max-speed=10g active=true +init set port-map unit=0 port=23 eth-macro=10 lane=3 max-speed=10g active=true +init set port-map unit=0 port=24 eth-macro=12 lane=0 max-speed=10g active=true +init set port-map unit=0 port=25 eth-macro=12 lane=1 max-speed=10g active=true +init set port-map unit=0 port=26 eth-macro=12 lane=2 max-speed=10g active=true +init set port-map unit=0 port=27 eth-macro=12 lane=3 max-speed=10g active=true +init set port-map unit=0 port=28 eth-macro=14 lane=0 max-speed=10g active=true +init set port-map unit=0 port=29 eth-macro=14 lane=1 max-speed=10g active=true +init set port-map unit=0 port=30 eth-macro=14 lane=2 max-speed=10g active=true +init set port-map unit=0 port=31 eth-macro=14 lane=3 max-speed=10g active=true 
+init set port-map unit=0 port=32 eth-macro=16 lane=0 max-speed=10g active=true +init set port-map unit=0 port=33 eth-macro=16 lane=1 max-speed=10g active=true +init set port-map unit=0 port=34 eth-macro=16 lane=2 max-speed=10g active=true +init set port-map unit=0 port=35 eth-macro=16 lane=3 max-speed=10g active=true +init set port-map unit=0 port=36 eth-macro=17 lane=0 max-speed=10g active=true +init set port-map unit=0 port=37 eth-macro=17 lane=1 max-speed=10g active=true +init set port-map unit=0 port=38 eth-macro=17 lane=2 max-speed=10g active=true +init set port-map unit=0 port=39 eth-macro=17 lane=3 max-speed=10g active=true +init set port-map unit=0 port=40 eth-macro=18 lane=0 max-speed=10g active=true +init set port-map unit=0 port=41 eth-macro=18 lane=1 max-speed=10g active=true +init set port-map unit=0 port=42 eth-macro=18 lane=2 max-speed=10g active=true +init set port-map unit=0 port=43 eth-macro=18 lane=3 max-speed=10g active=true +init set port-map unit=0 port=44 eth-macro=19 lane=0 max-speed=10g active=true +init set port-map unit=0 port=45 eth-macro=19 lane=1 max-speed=10g active=true +init set port-map unit=0 port=46 eth-macro=19 lane=2 max-speed=10g active=true +init set port-map unit=0 port=47 eth-macro=19 lane=3 max-speed=10g active=true +init set port-map unit=0 port=48 eth-macro=20 lane=0 max-speed=100g active=true +init set port-map unit=0 port=49 eth-macro=21 lane=0 max-speed=100g active=true +init set port-map unit=0 port=50 eth-macro=26 lane=0 max-speed=100g active=true +init set port-map unit=0 port=51 eth-macro=27 lane=0 max-speed=100g active=true +init set port-map unit=0 port=52 eth-macro=28 lane=0 max-speed=100g active=true +init set port-map unit=0 port=53 eth-macro=29 lane=0 max-speed=100g active=true init-done=true +init start stage unit=0 task-rsrc +init start stage unit=0 module +init start stage unit=0 task +phy set lane-swap unit=0 portlist=0 lane-cnt=1 property=tx data=0x00 +phy set lane-swap unit=0 portlist=1 lane-cnt=1 
property=tx data=0x01 +phy set lane-swap unit=0 portlist=2 lane-cnt=1 property=tx data=0x02 +phy set lane-swap unit=0 portlist=3 lane-cnt=1 property=tx data=0x03 +phy set lane-swap unit=0 portlist=4 lane-cnt=1 property=tx data=0x02 +phy set lane-swap unit=0 portlist=5 lane-cnt=1 property=tx data=0x03 +phy set lane-swap unit=0 portlist=6 lane-cnt=1 property=tx data=0x00 +phy set lane-swap unit=0 portlist=7 lane-cnt=1 property=tx data=0x01 +phy set lane-swap unit=0 portlist=8 lane-cnt=1 property=tx data=0x00 +phy set lane-swap unit=0 portlist=9 lane-cnt=1 property=tx data=0x01 +phy set lane-swap unit=0 portlist=10 lane-cnt=1 property=tx data=0x02 +phy set lane-swap unit=0 portlist=11 lane-cnt=1 property=tx data=0x03 +phy set lane-swap unit=0 portlist=12 lane-cnt=1 property=tx data=0x00 +phy set lane-swap unit=0 portlist=13 lane-cnt=1 property=tx data=0x03 +phy set lane-swap unit=0 portlist=14 lane-cnt=1 property=tx data=0x02 +phy set lane-swap unit=0 portlist=15 lane-cnt=1 property=tx data=0x01 +phy set lane-swap unit=0 portlist=16 lane-cnt=1 property=tx data=0x03 +phy set lane-swap unit=0 portlist=17 lane-cnt=1 property=tx data=0x02 +phy set lane-swap unit=0 portlist=18 lane-cnt=1 property=tx data=0x01 +phy set lane-swap unit=0 portlist=19 lane-cnt=1 property=tx data=0x00 +phy set lane-swap unit=0 portlist=20 lane-cnt=1 property=tx data=0x02 +phy set lane-swap unit=0 portlist=21 lane-cnt=1 property=tx data=0x03 +phy set lane-swap unit=0 portlist=22 lane-cnt=1 property=tx data=0x00 +phy set lane-swap unit=0 portlist=23 lane-cnt=1 property=tx data=0x01 +phy set lane-swap unit=0 portlist=24 lane-cnt=1 property=tx data=0x02 +phy set lane-swap unit=0 portlist=25 lane-cnt=1 property=tx data=0x03 +phy set lane-swap unit=0 portlist=26 lane-cnt=1 property=tx data=0x00 +phy set lane-swap unit=0 portlist=27 lane-cnt=1 property=tx data=0x01 +phy set lane-swap unit=0 portlist=28 lane-cnt=1 property=tx data=0x02 +phy set lane-swap unit=0 portlist=29 lane-cnt=1 property=tx 
data=0x03 +phy set lane-swap unit=0 portlist=30 lane-cnt=1 property=tx data=0x00 +phy set lane-swap unit=0 portlist=31 lane-cnt=1 property=tx data=0x01 +phy set lane-swap unit=0 portlist=32 lane-cnt=1 property=tx data=0x00 +phy set lane-swap unit=0 portlist=33 lane-cnt=1 property=tx data=0x01 +phy set lane-swap unit=0 portlist=34 lane-cnt=1 property=tx data=0x02 +phy set lane-swap unit=0 portlist=35 lane-cnt=1 property=tx data=0x03 +phy set lane-swap unit=0 portlist=36 lane-cnt=1 property=tx data=0x00 +phy set lane-swap unit=0 portlist=37 lane-cnt=1 property=tx data=0x01 +phy set lane-swap unit=0 portlist=38 lane-cnt=1 property=tx data=0x02 +phy set lane-swap unit=0 portlist=39 lane-cnt=1 property=tx data=0x03 +phy set lane-swap unit=0 portlist=40 lane-cnt=1 property=tx data=0x00 +phy set lane-swap unit=0 portlist=41 lane-cnt=1 property=tx data=0x01 +phy set lane-swap unit=0 portlist=42 lane-cnt=1 property=tx data=0x02 +phy set lane-swap unit=0 portlist=43 lane-cnt=1 property=tx data=0x03 +phy set lane-swap unit=0 portlist=44 lane-cnt=1 property=tx data=0x00 +phy set lane-swap unit=0 portlist=45 lane-cnt=1 property=tx data=0x01 +phy set lane-swap unit=0 portlist=46 lane-cnt=1 property=tx data=0x02 +phy set lane-swap unit=0 portlist=47 lane-cnt=1 property=tx data=0x03 +phy set lane-swap unit=0 portlist=48 lane-cnt=4 property=tx data=0x03.02.01.00 +phy set lane-swap unit=0 portlist=49 lane-cnt=4 property=tx data=0x01.02.03.00 +phy set lane-swap unit=0 portlist=50 lane-cnt=4 property=tx data=0x01.02.03.00 +phy set lane-swap unit=0 portlist=51 lane-cnt=4 property=tx data=0x03.02.01.00 +phy set lane-swap unit=0 portlist=52 lane-cnt=4 property=tx data=0x03.02.01.00 +phy set lane-swap unit=0 portlist=53 lane-cnt=4 property=tx data=0x01.02.03.00 +phy set lane-swap unit=0 portlist=0 lane-cnt=1 property=rx data=0x00 +phy set lane-swap unit=0 portlist=1 lane-cnt=1 property=rx data=0x01 +phy set lane-swap unit=0 portlist=2 lane-cnt=1 property=rx data=0x02 +phy set lane-swap 
unit=0 portlist=3 lane-cnt=1 property=rx data=0x03 +phy set lane-swap unit=0 portlist=4 lane-cnt=1 property=rx data=0x02 +phy set lane-swap unit=0 portlist=5 lane-cnt=1 property=rx data=0x03 +phy set lane-swap unit=0 portlist=6 lane-cnt=1 property=rx data=0x00 +phy set lane-swap unit=0 portlist=7 lane-cnt=1 property=rx data=0x01 +phy set lane-swap unit=0 portlist=8 lane-cnt=1 property=rx data=0x00 +phy set lane-swap unit=0 portlist=9 lane-cnt=1 property=rx data=0x01 +phy set lane-swap unit=0 portlist=10 lane-cnt=1 property=rx data=0x02 +phy set lane-swap unit=0 portlist=11 lane-cnt=1 property=rx data=0x03 +phy set lane-swap unit=0 portlist=12 lane-cnt=1 property=rx data=0x03 +phy set lane-swap unit=0 portlist=13 lane-cnt=1 property=rx data=0x02 +phy set lane-swap unit=0 portlist=14 lane-cnt=1 property=rx data=0x01 +phy set lane-swap unit=0 portlist=15 lane-cnt=1 property=rx data=0x00 +phy set lane-swap unit=0 portlist=16 lane-cnt=1 property=rx data=0x00 +phy set lane-swap unit=0 portlist=17 lane-cnt=1 property=rx data=0x03 +phy set lane-swap unit=0 portlist=18 lane-cnt=1 property=rx data=0x02 +phy set lane-swap unit=0 portlist=19 lane-cnt=1 property=rx data=0x01 +phy set lane-swap unit=0 portlist=20 lane-cnt=1 property=rx data=0x00 +phy set lane-swap unit=0 portlist=21 lane-cnt=1 property=rx data=0x03 +phy set lane-swap unit=0 portlist=22 lane-cnt=1 property=rx data=0x02 +phy set lane-swap unit=0 portlist=23 lane-cnt=1 property=rx data=0x01 +phy set lane-swap unit=0 portlist=24 lane-cnt=1 property=rx data=0x00 +phy set lane-swap unit=0 portlist=25 lane-cnt=1 property=rx data=0x03 +phy set lane-swap unit=0 portlist=26 lane-cnt=1 property=rx data=0x02 +phy set lane-swap unit=0 portlist=27 lane-cnt=1 property=rx data=0x01 +phy set lane-swap unit=0 portlist=28 lane-cnt=1 property=rx data=0x00 +phy set lane-swap unit=0 portlist=29 lane-cnt=1 property=rx data=0x03 +phy set lane-swap unit=0 portlist=30 lane-cnt=1 property=rx data=0x02 +phy set lane-swap unit=0 portlist=31 
lane-cnt=1 property=rx data=0x01 +phy set lane-swap unit=0 portlist=32 lane-cnt=1 property=rx data=0x02 +phy set lane-swap unit=0 portlist=33 lane-cnt=1 property=rx data=0x01 +phy set lane-swap unit=0 portlist=34 lane-cnt=1 property=rx data=0x00 +phy set lane-swap unit=0 portlist=35 lane-cnt=1 property=rx data=0x03 +phy set lane-swap unit=0 portlist=36 lane-cnt=1 property=rx data=0x02 +phy set lane-swap unit=0 portlist=37 lane-cnt=1 property=rx data=0x01 +phy set lane-swap unit=0 portlist=38 lane-cnt=1 property=rx data=0x00 +phy set lane-swap unit=0 portlist=39 lane-cnt=1 property=rx data=0x03 +phy set lane-swap unit=0 portlist=40 lane-cnt=1 property=rx data=0x02 +phy set lane-swap unit=0 portlist=41 lane-cnt=1 property=rx data=0x01 +phy set lane-swap unit=0 portlist=42 lane-cnt=1 property=rx data=0x00 +phy set lane-swap unit=0 portlist=43 lane-cnt=1 property=rx data=0x03 +phy set lane-swap unit=0 portlist=44 lane-cnt=1 property=rx data=0x02 +phy set lane-swap unit=0 portlist=45 lane-cnt=1 property=rx data=0x01 +phy set lane-swap unit=0 portlist=46 lane-cnt=1 property=rx data=0x00 +phy set lane-swap unit=0 portlist=47 lane-cnt=1 property=rx data=0x03 +phy set lane-swap unit=0 portlist=48 lane-cnt=4 property=rx data=0x03.00.01.02 +phy set lane-swap unit=0 portlist=49 lane-cnt=4 property=rx data=0x03.00.01.02 +phy set lane-swap unit=0 portlist=50 lane-cnt=4 property=rx data=0x03.01.02.00 +phy set lane-swap unit=0 portlist=51 lane-cnt=4 property=rx data=0x03.02.01.00 +phy set lane-swap unit=0 portlist=52 lane-cnt=4 property=rx data=0x03.02.01.00 +phy set lane-swap unit=0 portlist=53 lane-cnt=4 property=rx data=0x00.01.02.03 +phy set polarity-rev unit=0 portlist=0 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=1 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=2 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=3 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=4 lane-cnt=1 
property=tx data=0x00 +phy set polarity-rev unit=0 portlist=5 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=6 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=7 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=8 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=9 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=10 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=11 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=12 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=13 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=14 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=15 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=16 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=17 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=18 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=19 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=20 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=21 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=22 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=23 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=24 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=25 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=26 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=27 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=28 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=29 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=30 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=31 lane-cnt=1 
property=tx data=0x00 +phy set polarity-rev unit=0 portlist=32 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=33 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=34 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=35 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=36 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=37 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=38 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=39 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=40 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=41 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=42 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=43 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=44 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev unit=0 portlist=45 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=46 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=47 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev unit=0 portlist=48 lane-cnt=4 property=tx data=0x00.01.00.00 +phy set polarity-rev unit=0 portlist=49 lane-cnt=4 property=tx data=0x00.00.01.00 +phy set polarity-rev unit=0 portlist=50 lane-cnt=4 property=tx data=0x01.00.01.01 +phy set polarity-rev unit=0 portlist=51 lane-cnt=4 property=tx data=0x01.01.01.01 +phy set polarity-rev unit=0 portlist=52 lane-cnt=4 property=tx data=0x01.00.00.00 +phy set polarity-rev unit=0 portlist=53 lane-cnt=4 property=tx data=0x00.00.01.00 +phy set polarity-rev unit=0 portlist=0 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=1 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=2 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=3 lane-cnt=1 property=rx data=0x01 
+phy set polarity-rev unit=0 portlist=4 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=5 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=6 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=7 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=8 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=9 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=10 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=11 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=12 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=13 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=14 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=15 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=16 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=17 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=18 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=19 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=20 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=21 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=22 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=23 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=24 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=25 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=26 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=27 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=28 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=29 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=30 lane-cnt=1 property=rx data=0x01 +phy 
set polarity-rev unit=0 portlist=31 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=32 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=33 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=34 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=35 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=36 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=37 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=38 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=39 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=40 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=41 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=42 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=43 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=44 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=45 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=46 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev unit=0 portlist=47 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev unit=0 portlist=48 lane-cnt=4 property=rx data=0x00.01.00.00 +phy set polarity-rev unit=0 portlist=49 lane-cnt=4 property=rx data=0x00.00.01.00 +phy set polarity-rev unit=0 portlist=50 lane-cnt=4 property=rx data=0x00.00.01.01 +phy set polarity-rev unit=0 portlist=51 lane-cnt=4 property=rx data=0x00.01.00.01 +phy set polarity-rev unit=0 portlist=52 lane-cnt=4 property=rx data=0x00.01.00.01 +phy set polarity-rev unit=0 portlist=53 lane-cnt=4 property=rx data=0x01.01.01.01 +phy set pre-emphasis unit=0 portlist=0 lane-cnt=1 property=c2 data=0x00 +phy set pre-emphasis unit=0 portlist=0 lane-cnt=1 property=cn1 data=0x04 +phy set pre-emphasis unit=0 portlist=0 lane-cnt=1 property=c0 data=0x1E +phy set pre-emphasis unit=0 
portlist=0 lane-cnt=1 property=c1 data=0x02 +phy set pre-emphasis unit=0 portlist=1 lane-cnt=1 property=c2 data=0x00 +phy set pre-emphasis unit=0 portlist=1 lane-cnt=1 property=cn1 data=0x04 +phy set pre-emphasis unit=0 portlist=1 lane-cnt=1 property=c0 data=0x1E +phy set pre-emphasis unit=0 portlist=1 lane-cnt=1 property=c1 data=0x02 +phy set pre-emphasis unit=0 portlist=2 lane-cnt=1 property=c2 data=0x00 +phy set pre-emphasis unit=0 portlist=2 lane-cnt=1 property=cn1 data=0x04 +phy set pre-emphasis unit=0 portlist=2 lane-cnt=1 property=c0 data=0x1E +phy set pre-emphasis unit=0 portlist=2 lane-cnt=1 property=c1 data=0x02 +phy set pre-emphasis unit=0 portlist=3 lane-cnt=1 property=c2 data=0x00 +phy set pre-emphasis unit=0 portlist=3 lane-cnt=1 property=cn1 data=0x04 +phy set pre-emphasis unit=0 portlist=3 lane-cnt=1 property=c0 data=0x1E +phy set pre-emphasis unit=0 portlist=3 lane-cnt=1 property=c1 data=0x02 +phy set pre-emphasis unit=0 portlist=4 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=4 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=4 lane-cnt=1 property=c0 data=0x1B +phy set pre-emphasis unit=0 portlist=4 lane-cnt=1 property=c1 data=0x07 +phy set pre-emphasis unit=0 portlist=5 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=5 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=5 lane-cnt=1 property=c0 data=0x1B +phy set pre-emphasis unit=0 portlist=5 lane-cnt=1 property=c1 data=0x07 +phy set pre-emphasis unit=0 portlist=6 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=6 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=6 lane-cnt=1 property=c0 data=0x1B +phy set pre-emphasis unit=0 portlist=6 lane-cnt=1 property=c1 data=0x07 +phy set pre-emphasis unit=0 portlist=7 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=7 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=7 lane-cnt=1 
property=c0 data=0x1B +phy set pre-emphasis unit=0 portlist=7 lane-cnt=1 property=c1 data=0x07 +phy set pre-emphasis unit=0 portlist=8 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=8 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=8 lane-cnt=1 property=c0 data=0x1B +phy set pre-emphasis unit=0 portlist=8 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=9 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=9 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=9 lane-cnt=1 property=c0 data=0x1B +phy set pre-emphasis unit=0 portlist=9 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=10 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=10 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=10 lane-cnt=1 property=c0 data=0x1B +phy set pre-emphasis unit=0 portlist=10 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=11 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=11 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=11 lane-cnt=1 property=c0 data=0x1B +phy set pre-emphasis unit=0 portlist=11 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=12 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=12 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=12 lane-cnt=1 property=c0 data=0x1B +phy set pre-emphasis unit=0 portlist=12 lane-cnt=1 property=c1 data=0x07 +phy set pre-emphasis unit=0 portlist=13 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=13 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=13 lane-cnt=1 property=c0 data=0x1B +phy set pre-emphasis unit=0 portlist=13 lane-cnt=1 property=c1 data=0x07 +phy set pre-emphasis unit=0 portlist=14 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=14 lane-cnt=1 
property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=14 lane-cnt=1 property=c0 data=0x1B +phy set pre-emphasis unit=0 portlist=14 lane-cnt=1 property=c1 data=0x07 +phy set pre-emphasis unit=0 portlist=15 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=15 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=15 lane-cnt=1 property=c0 data=0x1B +phy set pre-emphasis unit=0 portlist=15 lane-cnt=1 property=c1 data=0x07 +phy set pre-emphasis unit=0 portlist=16 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=16 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=16 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=16 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=17 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=17 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=17 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=17 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=18 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=18 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=18 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=18 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=19 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=19 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=19 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=19 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=20 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=20 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=20 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=20 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=21 
lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=21 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=21 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=21 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=22 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=22 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=22 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=22 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=23 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=23 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=23 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=23 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=24 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=24 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=24 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=24 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=25 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=25 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=25 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=25 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=26 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=26 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=26 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=26 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=27 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=27 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=27 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 
portlist=27 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=28 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=28 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=28 lane-cnt=1 property=c0 data=0x1D +phy set pre-emphasis unit=0 portlist=28 lane-cnt=1 property=c1 data=0x05 +phy set pre-emphasis unit=0 portlist=29 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=29 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=29 lane-cnt=1 property=c0 data=0x1D +phy set pre-emphasis unit=0 portlist=29 lane-cnt=1 property=c1 data=0x05 +phy set pre-emphasis unit=0 portlist=30 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=30 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=30 lane-cnt=1 property=c0 data=0x1D +phy set pre-emphasis unit=0 portlist=30 lane-cnt=1 property=c1 data=0x05 +phy set pre-emphasis unit=0 portlist=31 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=31 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=31 lane-cnt=1 property=c0 data=0x1D +phy set pre-emphasis unit=0 portlist=31 lane-cnt=1 property=c1 data=0x05 +phy set pre-emphasis unit=0 portlist=32 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=32 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=32 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=32 lane-cnt=1 property=c1 data=0x05 +phy set pre-emphasis unit=0 portlist=33 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=33 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=33 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=33 lane-cnt=1 property=c1 data=0x05 +phy set pre-emphasis unit=0 portlist=34 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=34 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis 
unit=0 portlist=34 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=34 lane-cnt=1 property=c1 data=0x05 +phy set pre-emphasis unit=0 portlist=35 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=35 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=35 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=35 lane-cnt=1 property=c1 data=0x05 +phy set pre-emphasis unit=0 portlist=36 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=36 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=36 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=36 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=37 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=37 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=37 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=37 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=38 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=38 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=38 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=38 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=39 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=39 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=39 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=39 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=40 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=40 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=40 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=40 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=41 lane-cnt=1 property=c2 data=0x02 +phy set 
pre-emphasis unit=0 portlist=41 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=41 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=41 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=42 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=42 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=42 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=42 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=43 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=43 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=43 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=43 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=44 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=44 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=44 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=44 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=45 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=45 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=45 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=45 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=46 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=46 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=46 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=46 lane-cnt=1 property=c1 data=0x06 +phy set pre-emphasis unit=0 portlist=47 lane-cnt=1 property=c2 data=0x02 +phy set pre-emphasis unit=0 portlist=47 lane-cnt=1 property=cn1 data=0x00 +phy set pre-emphasis unit=0 portlist=47 lane-cnt=1 property=c0 data=0x1C +phy set pre-emphasis unit=0 portlist=47 lane-cnt=1 property=c1 data=0x06 
+phy set pre-emphasis unit=0 portlist=48 lane-cnt=4 property=c2 data=0x02.02.02.02 +phy set pre-emphasis unit=0 portlist=48 lane-cnt=4 property=cn1 data=0x00.00.00.00 +phy set pre-emphasis unit=0 portlist=48 lane-cnt=4 property=c0 data=0x1C.1C.1C.1C +phy set pre-emphasis unit=0 portlist=48 lane-cnt=4 property=c1 data=0x06.06.06.06 +phy set pre-emphasis unit=0 portlist=49 lane-cnt=4 property=c2 data=0x02.02.02.02 +phy set pre-emphasis unit=0 portlist=49 lane-cnt=4 property=cn1 data=0x00.00.00.00 +phy set pre-emphasis unit=0 portlist=49 lane-cnt=4 property=c0 data=0x1B.1B.1B.1B +phy set pre-emphasis unit=0 portlist=49 lane-cnt=4 property=c1 data=0x06.06.06.06 +phy set pre-emphasis unit=0 portlist=50 lane-cnt=4 property=c2 data=0x02.02.02.02 +phy set pre-emphasis unit=0 portlist=50 lane-cnt=4 property=cn1 data=0x00.00.00.00 +phy set pre-emphasis unit=0 portlist=50 lane-cnt=4 property=c0 data=0x1B.1B.1B.1B +phy set pre-emphasis unit=0 portlist=50 lane-cnt=4 property=c1 data=0x06.06.06.06 +phy set pre-emphasis unit=0 portlist=51 lane-cnt=4 property=c2 data=0x02.02.02.02 +phy set pre-emphasis unit=0 portlist=51 lane-cnt=4 property=cn1 data=0x00.00.00.00 +phy set pre-emphasis unit=0 portlist=51 lane-cnt=4 property=c0 data=0x1B.1B.1B.1B +phy set pre-emphasis unit=0 portlist=51 lane-cnt=4 property=c1 data=0x06.06.06.06 +phy set pre-emphasis unit=0 portlist=52 lane-cnt=4 property=c2 data=0x02.02.02.02 +phy set pre-emphasis unit=0 portlist=52 lane-cnt=4 property=cn1 data=0x00.00.00.00 +phy set pre-emphasis unit=0 portlist=52 lane-cnt=4 property=c0 data=0x1B.1B.1B.1B +phy set pre-emphasis unit=0 portlist=52 lane-cnt=4 property=c1 data=0x07.07.07.07 +phy set pre-emphasis unit=0 portlist=53 lane-cnt=4 property=c2 data=0x02.02.02.02 +phy set pre-emphasis unit=0 portlist=53 lane-cnt=4 property=cn1 data=0x00.00.00.00 +phy set pre-emphasis unit=0 portlist=53 lane-cnt=4 property=c0 data=0x1A.1A.1A.1A +phy set pre-emphasis unit=0 portlist=53 lane-cnt=4 property=c1 data=0x07.07.07.07 
+port set property unit=0 portlist=0-47 speed=10g +port set property unit=0 portlist=0-47 medium-type=sr +port set property unit=0 portlist=48-53 speed=100g +port set property unit=0 portlist=48-53 medium-type=sr4 +port set property unit=0 portlist=0-53 fec=disable +port set property unit=0 portlist=0-53 an=disable +port set property unit=0 portlist=0-53 admin=enable \ No newline at end of file diff --git a/device/pegatron/x86_64-pegatron_porsche-r0/tau-porsche.cfg b/device/pegatron/x86_64-pegatron_porsche-r0/tau-porsche.cfg new file mode 100755 index 000000000000..bbd7c8f80ff5 --- /dev/null +++ b/device/pegatron/x86_64-pegatron_porsche-r0/tau-porsche.cfg @@ -0,0 +1,23 @@ +#This configuration file is for customer init value feature. Please refer to mtk_cfg.h/mtk_cfg.c for detail. +#1. The lines beginning with # are comment lines. The lines beginning with number are the setting lines. +#2. There are five parameters which can be set. +# 1) the first is unit. +# 2) the second is NPS_CFG_TYPE_XXX. Refer to NPS_CFG_TYPE_T. +# 3) the 3-5 are {param0, param1, value} pairs. Refer to NPS_CFG_VALUE_T. Support HEX format. +# 4) the (unit, NPS_CFG_TYPE_XXX, param0, param1) group is the key to get the correspingding value. +# There should be no same (unit, NPS_CFG_TYPE_XXX, param0, param1) group. +#3. User must follow correct format to apply the setting. Please refer to below commentted example(#0 NPS_CFG_TYPE_L2_ADDR_MODE 0 0 1); +#4. Usage under the linux shell: +# 1) ./image-path/image-name -c cfg-path/NPS_Ari_EVB_24.cfg : mamually specify directory path if they are not in current work dirctory. +# 2) ./image-name -c NPS_Ari_EVB_24.cfg : the image and the NPS_Ari_EVB_24.cfg are in the current work directory. 
+ +#unit NPS_CFG_TYPE_XXX param0 param1 value +#---- ---------------- ------ ------ ----- +0 NPS_CFG_TYPE_USE_UNIT_PORT 0 0 1 +0 NPS_CFG_TYPE_LED_CFG 0 0 3 +0 NPS_CFG_TYPE_CPI_PORT_MODE 129 0 1 +0 NPS_CFG_TYPE_CPI_PORT_MODE 130 0 1 +0 NPS_CFG_TYPE_USER_BUF_CTRL 0 0 1 +0 NPS_CFG_TYPE_HASH_L2_FDB_REGION_ENTRY_NUM 0 0 49152 +0 NPS_CFG_TYPE_HASH_L3_WITH_IPV6_PREFIX_64_REGION_ENTRY_NUM 0 0 32768 + diff --git a/platform/nephos/one-image.mk b/platform/nephos/one-image.mk index aa5cc1ff4324..c29dac5a9a18 100644 --- a/platform/nephos/one-image.mk +++ b/platform/nephos/one-image.mk @@ -6,6 +6,7 @@ $(SONIC_ONE_IMAGE)_IMAGE_TYPE = onie $(SONIC_ONE_IMAGE)_INSTALLS += $(NEPHOS_NPS_KERNEL) $(SONIC_ONE_IMAGE)_LAZY_INSTALLS += $(INGRASYS_S9130_32X_PLATFORM_MODULE) \ $(INGRASYS_S9230_64X_PLATFORM_MODULE) \ - $(ACCTON_AS7116_54X_PLATFORM_MODULE) + $(ACCTON_AS7116_54X_PLATFORM_MODULE) \ + $(PEGATRON_PORSCHE_PLATFORM_MODULE) $(SONIC_ONE_IMAGE)_DOCKERS += $(SONIC_INSTALL_DOCKER_IMAGES) SONIC_INSTALLERS += $(SONIC_ONE_IMAGE) diff --git a/platform/nephos/platform-modules-pegatron.mk b/platform/nephos/platform-modules-pegatron.mk new file mode 100755 index 000000000000..9a411763cec2 --- /dev/null +++ b/platform/nephos/platform-modules-pegatron.mk @@ -0,0 +1,13 @@ +# Pegatron Platform modules + +PEGATRON_PORSCHE_PLATFORM_MODULE_VERSION = 0.1 + +export PEGATRON_PORSCHE_PLATFORM_MODULE_VERSION + +PEGATRON_PORSCHE_PLATFORM_MODULE = sonic-platform-pegatron-porsche_$(PEGATRON_PORSCHE_PLATFORM_MODULE_VERSION)_amd64.deb +$(PEGATRON_PORSCHE_PLATFORM_MODULE)_SRC_PATH = $(PLATFORM_PATH)/sonic-platform-modules-pegatron +$(PEGATRON_PORSCHE_PLATFORM_MODULE)_DEPENDS += $(LINUX_HEADERS) $(LINUX_HEADERS_COMMON) +$(PEGATRON_PORSCHE_PLATFORM_MODULE)_PLATFORM = x86_64-pegatron_porsche-r0 +SONIC_DPKG_DEBS += $(PEGATRON_PORSCHE_PLATFORM_MODULE) + +$(eval $(call add_extra_package,$(PEGATRON_PORSCHE_PLATFORM_MODULE))) diff --git a/platform/nephos/rules.mk b/platform/nephos/rules.mk index 
bf77ad0e6edf..1950c6777cfe 100644 --- a/platform/nephos/rules.mk +++ b/platform/nephos/rules.mk @@ -2,6 +2,7 @@ include $(PLATFORM_PATH)/sdk.mk include $(PLATFORM_PATH)/sai.mk include $(PLATFORM_PATH)/platform-modules-ingrasys.mk include $(PLATFORM_PATH)/platform-modules-accton.mk +include $(PLATFORM_PATH)/platform-modules-pegatron.mk include $(PLATFORM_PATH)/docker-orchagent-nephos.mk include $(PLATFORM_PATH)/docker-syncd-nephos.mk include $(PLATFORM_PATH)/docker-syncd-nephos-rpc.mk diff --git a/platform/nephos/sonic-platform-modules-pegatron/LICENSE b/platform/nephos/sonic-platform-modules-pegatron/LICENSE new file mode 100644 index 000000000000..a23cc2b232cd --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/LICENSE @@ -0,0 +1,16 @@ +Copyright (C) 2016 Microsoft, Inc +Copyright (C) 2018 Pegatron Corporation. + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
diff --git a/platform/nephos/sonic-platform-modules-pegatron/README.md b/platform/nephos/sonic-platform-modules-pegatron/README.md new file mode 100644 index 000000000000..32444b4b8916 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/README.md @@ -0,0 +1 @@ +platform drivers of Pegatron products for the SONiC project diff --git a/platform/nephos/sonic-platform-modules-pegatron/common/modules/pegatron_hwmon_mcu.c b/platform/nephos/sonic-platform-modules-pegatron/common/modules/pegatron_hwmon_mcu.c new file mode 100644 index 000000000000..76cbd8844708 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/common/modules/pegatron_hwmon_mcu.c @@ -0,0 +1,1374 @@ +/* + * A MCU driver connect to hwmon + * + * Copyright (C) 2018 Pegatron Corporation. + * Peter5_Lin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef pega_DEBUG +/*#define pega_DEBUG*/ +#ifdef pega_DEBUG +#define DBG(x) x +#else +#define DBG(x) +#endif /* DEBUG */ + +#define FW_UPGRADE_COMMAND 0xA5 +#define FAN_DISABLE_COMMAND 0x20 +#define FAN_ENABLE_COMMAND 0x21 +#define FAN_LED_SETTO_MANUAL_COMMAND 0x30 +#define FAN_LED_SETTO_AUTO_COMMAND 0x31 +#define FAN_LED_GREENON_COMMAND 0x40 +#define FAN_LED_GREENOFF_COMMAND 0x41 +#define FAN_LED_AMBERON_COMMAND 0x50 +#define FAN_LED_AMBEROFF_COMMAND 0x51 +#define SMART_FAN_ENABLE_BIT 0 +#define SMART_FAN_SETTING_ENABLE_BIT 0 +#define SA56004X_REMOTE_TEMP_ALERT_BIT 4 +#define I2C_FANBOARD_TIMEOUT_BIT 0 +#define ALERT_MODE_BIT 0 +#define GET_BIT(data, bit, value) value = (data >> bit) & 0x1 +#define SET_BIT(data, bit) data |= (1 << bit) +#define CLEAR_BIT(data, bit) data &= ~(1 << bit) + +enum chips +{ + mercedes3 = 0, + cadillac, + porsche, +}; + +enum fan_alert +{ + FAN_OUTER_RPM_OVER_ALERT_BIT = 0, + FAN_OUTER_RPM_UNDER_ALERT_BIT, + FAN_INNER_RPM_OVER_ALERT_BIT, + FAN_INNER_RPM_UNDER_ALERT_BIT, + FAN_CONNECT_ALERT_BIT, + FAN_DISCONNECT_ALERT_BIT, +}; + +enum fan_status +{ + FAN_ALERT_BIT = 2, + FAN_LED_AMBER_BIT, + FAN_LED_GREEN_BIT, + FAN_LED_AUTO_BIT, + FAN_ENABLE_BIT, + FAN_PRESENT_BIT, +}; + +enum hwmon_mcu_register +{ + MB_FW_UG_REG = 0, + FB_FW_UG_REG, + MB_HW_VER_REG, + FB_HW_SKUVER_REG, + MB_FW_VER_REG, + FB_FW_VER_REG, + + FAN_PWM_REG = 16, + + SF_ENABLE_REG, + SF_SETTING_ENABLE_REG, + SF_DEVICE_REG, + SF_UPDATE_REG, + SF_TEMP_MAX_REG, + SF_TEMP_MID_REG, + SF_TEMP_MIN_REG, + SF_PWM_MAX_REG, + SF_PWM_MID_REG, + SF_PWM_MIN_REG, + + FAN1_INNER_RPM_REG = 32, + FAN2_INNER_RPM_REG, + FAN3_INNER_RPM_REG, + FAN4_INNER_RPM_REG, + FAN5_INNER_RPM_REG, + + FAN1_OUTER_RPM_REG = 48, + FAN2_OUTER_RPM_REG, + FAN3_OUTER_RPM_REG, + FAN4_OUTER_RPM_REG, + FAN5_OUTER_RPM_REG, + + FAN1_STATUS_REG = 64, + FAN2_STATUS_REG, + FAN3_STATUS_REG, + FAN4_STATUS_REG, + 
FAN5_STATUS_REG, + + ADC_UNDER_VOL_ALERT_REG = 80, + ADC_OVER_VOL_ALERT_REG, + TS_OVER_TEMP_ALERT_REG, + + FAN1_ALERT_REG, + FAN2_ALERT_REG, + FAN3_ALERT_REG, + FAN4_ALERT_REG, + FAN5_ALERT_REG, + + I2C_BUS_ALERT_REG, + ALERT_MODE_REG, + + MONITOR_ADC_VOLTAGE_REG = 96, + + LM_0X49_TEMP_REG = 112, + LM_0X48_TEMP_REG, + SA56004X_LOCAL_TEMP_REG, + SA56004X_REMOTE_TEMP_REG, + +}; + +static struct mutex pega_hwmon_mcu_lock; + +static int pega_hwmon_mcu_read(struct i2c_client *client, u8 reg) +{ + int data = -EPERM; + + mutex_lock(&pega_hwmon_mcu_lock); + + data = i2c_smbus_read_word_data(client, reg); + + mutex_unlock(&pega_hwmon_mcu_lock); + + return data; +} + +static int pega_hwmon_mcu_write(struct i2c_client *client, u8 reg, u8 val) +{ + int ret = -EIO; + + mutex_lock(&pega_hwmon_mcu_lock); + + ret = i2c_smbus_write_byte_data(client, reg, val); + + mutex_unlock(&pega_hwmon_mcu_lock); + + return ret; +} + +static ssize_t mainBoardUpgrade(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = MB_FW_UG_REG; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + if(val) + pega_hwmon_mcu_write(client, reg, FW_UPGRADE_COMMAND); + else + pega_hwmon_mcu_write(client, reg, 0xff); + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, FW_UPGRADE_COMMAND)); + + return count; +} + +static ssize_t fanBoardUpgrade(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = FB_FW_UG_REG; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + if(val) + pega_hwmon_mcu_write(client, reg, FW_UPGRADE_COMMAND); + else + pega_hwmon_mcu_write(client, reg, 0xff); + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, FW_UPGRADE_COMMAND)); + + return count; +} 
+ +static ssize_t get_MB_HW_version(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = MB_HW_VER_REG; + + data = pega_hwmon_mcu_read(client, reg); + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + data &= 0x1f; + + return sprintf(buf, "%02x\n", data); +} + +static ssize_t get_FB_HW_version(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = FB_HW_SKUVER_REG; + + data = pega_hwmon_mcu_read(client, reg); + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + data = (data >> 5) & 0x7; + + return sprintf(buf, "%02x\n", data); +} + +static ssize_t get_FB_boardId(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = FB_HW_SKUVER_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + data &= 0x1f; + + return sprintf(buf, "%02x\n", data); +} + +static ssize_t get_MB_FW_version(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, major_ver = 0, minor_ver = 0; + u8 reg = MB_FW_VER_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + major_ver = (data >> 4) & 0xf; + minor_ver = data & 0xf; + + return sprintf(buf, "%d.%d\n", major_ver, minor_ver); +} + +static ssize_t get_FB_FW_version(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, major_ver = 0, minor_ver = 0; + u8 reg = FB_FW_VER_REG; + + data = pega_hwmon_mcu_read(client, reg); + 
DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + major_ver = (data >> 4) & 0xf; + minor_ver = data & 0xf; + + return sprintf(buf, "%d.%d\n", major_ver, minor_ver); +} + +static ssize_t get_fan_PWM(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = FAN_PWM_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + return sprintf(buf, "%d\n", data); +} + +static ssize_t set_fan_pwm(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = FAN_PWM_REG; + long val = 0; + + if (kstrtol(buf, 10, &val)) + { + return -EINVAL; + } + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, val: %x\r\n", __func__, client->addr, reg, val)); + pega_hwmon_mcu_write(client, reg, val); + + return count; +} + +static ssize_t get_smartFan_enable(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = SF_ENABLE_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, SMART_FAN_ENABLE_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t set_smartFan_enable(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_ENABLE_REG; + long val = 0; + + if (kstrtol(buf, 10, &val)) + { + return -EINVAL; + } + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + if(val) + SET_BIT(data, SMART_FAN_ENABLE_BIT); + else + CLEAR_BIT(data, 
SMART_FAN_ENABLE_BIT); + pega_hwmon_mcu_write(client, reg, data); + + return count; +} + +static ssize_t get_smartFan_setting_enable(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = SF_SETTING_ENABLE_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, SMART_FAN_SETTING_ENABLE_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t set_smartFan_setting_enable(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_SETTING_ENABLE_REG; + long val = 0; + + if (kstrtol(buf, 10, &val)) + { + return -EINVAL; + } + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + if(val) + SET_BIT(data, SMART_FAN_SETTING_ENABLE_BIT); + else + CLEAR_BIT(data, SMART_FAN_SETTING_ENABLE_BIT); + pega_hwmon_mcu_write(client, reg, data); + + return count; +} + +static ssize_t get_smartFan_device(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_DEVICE_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + return sprintf(buf, "%x\n", data); +} + +static ssize_t set_smartFan_device(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_DEVICE_REG; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, val)); + + pega_hwmon_mcu_write(client, reg, val); + 
+ return count; +} + +static ssize_t get_smartFan_update(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_UPDATE_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + return sprintf(buf, "%d\n", data); +} + +static ssize_t set_smartFan_update(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_UPDATE_REG; + long val = 0; + + if (kstrtol(buf, 10, &val)) + { + return -EINVAL; + } + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, val)); + + pega_hwmon_mcu_write(client, reg, val); + + return count; +} + +static ssize_t get_smartFan_max_temp(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_TEMP_MAX_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + return sprintf(buf, "%d\n", data); +} + +static ssize_t set_smartFan_max_temp(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_TEMP_MAX_REG; + long val = 0; + + if (kstrtol(buf, 10, &val)) + { + return -EINVAL; + } + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, val)); + + pega_hwmon_mcu_write(client, reg, val); + + return count; +} + +static ssize_t get_smartFan_mid_temp(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_TEMP_MID_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: 
%x, data: %x\r\n", __func__, client->addr, reg, data)); + + return sprintf(buf, "%d\n", data); +} + +static ssize_t set_smartFan_mid_temp(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_TEMP_MID_REG; + long val = 0; + + if (kstrtol(buf, 10, &val)) + { + return -EINVAL; + } + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, val)); + + pega_hwmon_mcu_write(client, reg, val); + + return count; +} + +static ssize_t get_smartFan_min_temp(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_TEMP_MIN_REG; /* fix: was SF_TEMP_MID_REG, copy-paste from the mid-temp pair */ + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + return sprintf(buf, "%d\n", data); +} + +static ssize_t set_smartFan_min_temp(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_TEMP_MIN_REG; /* fix: was SF_TEMP_MID_REG, so writes clobbered the mid setpoint */ + long val = 0; + + if (kstrtol(buf, 10, &val)) + { + return -EINVAL; + } + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, val)); + + pega_hwmon_mcu_write(client, reg, val); + + return count; +} + +static ssize_t get_smartFan_max_pwm(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_PWM_MAX_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + return sprintf(buf, "%d\n", data); +} + +static ssize_t set_smartFan_max_pwm(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = 
SF_PWM_MAX_REG; + long val = 0; + + if (kstrtol(buf, 10, &val)) + { + return -EINVAL; + } + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, val)); + + pega_hwmon_mcu_write(client, reg, val); + + return count; +} + +static ssize_t get_smartFan_mid_pwm(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_PWM_MID_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + return sprintf(buf, "%d\n", data); +} + +static ssize_t set_smartFan_mid_pwm(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_PWM_MID_REG; + long val = 0; + + if (kstrtol(buf, 10, &val)) + { + return -EINVAL; + } + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, val)); + + pega_hwmon_mcu_write(client, reg, val); + + return count; +} + +static ssize_t get_smartFan_min_pwm(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_PWM_MIN_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + return sprintf(buf, "%d\n", data); +} + +static ssize_t set_smartFan_min_pwm(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = SF_PWM_MIN_REG; + long val = 0; + + if (kstrtol(buf, 10, &val)) + { + return -EINVAL; + } + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, val)); + + pega_hwmon_mcu_write(client, reg, val); + + return count; +} + +static ssize_t get_fan_inner_rpm(struct device 
*dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u16 data = 0; + u8 reg = FAN1_INNER_RPM_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + return sprintf(buf, "%d\n", data); +} + +static ssize_t get_fan_outer_rpm(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u16 data = 0; + u8 reg = FAN1_OUTER_RPM_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + return sprintf(buf, "%d\n", data); +} + +static ssize_t get_fan_present(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = FAN1_STATUS_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, FAN_PRESENT_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_fan_enable(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = FAN1_STATUS_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, FAN_ENABLE_BIT, val); + + return sprintf(buf, "%d\n", val); +} + + +static ssize_t set_fan_enable(struct device *dev, struct device_attribute *da, + const char 
*buf, size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); /* fix: need the fan index; old unused "u8 data" slot reused */ + struct i2c_client *client = to_i2c_client(dev); + u8 reg = FAN1_STATUS_REG + attr->index; /* fix: was SF_PWM_MID_REG — command went to the smart-fan PWM setpoint; per-fan status reg is the intended target (FAN_ENABLE_BIT lives there) — TODO confirm against MCU spec */ + long val = 0; + + if (kstrtol(buf, 10, &val)) + { + return -EINVAL; + } + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, val: %x\r\n", __func__, client->addr, reg, val)); + + if(val) + pega_hwmon_mcu_write(client, reg, FAN_ENABLE_COMMAND); + else + pega_hwmon_mcu_write(client, reg, FAN_DISABLE_COMMAND); + + return count; +} + +static ssize_t get_fan_led_auto(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = FAN1_STATUS_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, FAN_LED_AUTO_BIT, val); + + return sprintf(buf, "%d\n", val); +} + + +static ssize_t set_fan_led_auto(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); /* fix: need the fan index */ + struct i2c_client *client = to_i2c_client(dev); + u8 reg = FAN1_STATUS_REG + attr->index; /* fix: was SF_PWM_MID_REG — wrong register; FAN_LED_AUTO_BIT is in the per-fan status reg — TODO confirm against MCU spec */ + long val = 0; + + if (kstrtol(buf, 10, &val)) + { + return -EINVAL; + } + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, val: %x\r\n", __func__, client->addr, reg, val)); + + if(val) + pega_hwmon_mcu_write(client, reg, FAN_LED_SETTO_AUTO_COMMAND); + else + pega_hwmon_mcu_write(client, reg, FAN_LED_SETTO_MANUAL_COMMAND); + + return count; +} + +static ssize_t get_fan_led_green(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = FAN1_STATUS_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, FAN_LED_GREEN_BIT, val); + + 
return sprintf(buf, "%d\n", val); +} + + +static ssize_t set_fan_led_green(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); /* fix: need the fan index; old unused "u8 data" slot reused */ + struct i2c_client *client = to_i2c_client(dev); + u8 reg = FAN1_STATUS_REG + attr->index; /* fix: was SF_PWM_MID_REG — wrong register; FAN_LED_GREEN_BIT is in the per-fan status reg — TODO confirm against MCU spec */ + long val = 0; + + if (kstrtol(buf, 10, &val)) + { + return -EINVAL; + } + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, val: %x\r\n", __func__, client->addr, reg, val)); + + if(val) + pega_hwmon_mcu_write(client, reg, FAN_LED_GREENON_COMMAND); + else + pega_hwmon_mcu_write(client, reg, FAN_LED_GREENOFF_COMMAND); + + return count; +} + +static ssize_t get_fan_led_amber(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = FAN1_STATUS_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, FAN_LED_AMBER_BIT, val); + + return sprintf(buf, "%d\n", val); +} + + +static ssize_t set_fan_led_amber(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); /* fix: need the fan index */ + struct i2c_client *client = to_i2c_client(dev); + u8 reg = FAN1_STATUS_REG + attr->index; /* fix: was SF_PWM_MID_REG — wrong register; FAN_LED_AMBER_BIT is in the per-fan status reg — TODO confirm against MCU spec */ + long val = 0; + + if (kstrtol(buf, 10, &val)) + { + return -EINVAL; + } + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, val: %x\r\n", __func__, client->addr, reg, val)); + + if(val) + pega_hwmon_mcu_write(client, reg, FAN_LED_AMBERON_COMMAND); + else + pega_hwmon_mcu_write(client, reg, FAN_LED_AMBEROFF_COMMAND); + + return count; +} + +static ssize_t get_fan_status_alert(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = FAN1_STATUS_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + 
DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, FAN_ALERT_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_adc_under_vol_alert(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = ADC_UNDER_VOL_ALERT_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, attr->index, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_adc_over_vol_alert(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = ADC_OVER_VOL_ALERT_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, attr->index, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_temp_alert(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = TS_OVER_TEMP_ALERT_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, SA56004X_REMOTE_TEMP_ALERT_BIT + attr->index, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_fan_outerRPMOver_alert(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = FAN1_ALERT_REG + 
attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, FAN_OUTER_RPM_OVER_ALERT_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_fan_outerRPMUnder_alert(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = FAN1_ALERT_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, FAN_OUTER_RPM_UNDER_ALERT_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_fan_innerRPMOver_alert(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = FAN1_ALERT_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, FAN_INNER_RPM_OVER_ALERT_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_fan_innerRPMUnder_alert(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = FAN1_ALERT_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, FAN_INNER_RPM_UNDER_ALERT_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_fan_connect_alert(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = 
to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = FAN1_ALERT_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, FAN_CONNECT_ALERT_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_fan_disconnect_alert(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = FAN1_ALERT_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, FAN_DISCONNECT_ALERT_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_i2c_timeout(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = I2C_BUS_ALERT_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, I2C_FANBOARD_TIMEOUT_BIT + attr->index, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_alert_mode(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = ALERT_MODE_REG; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, ALERT_MODE_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t set_alert_mode(struct device *dev, struct device_attribute *da, + const 
char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = ALERT_MODE_REG; + long val = 0; + + if (kstrtol(buf, 10, &val)) + { + return -EINVAL; + } + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, val: %lx\r\n", __func__, client->addr, reg, val)); + + /* Read-modify-write so the other bits of ALERT_MODE_REG are preserved, + * matching every other set_* handler in this driver. */ + data = pega_hwmon_mcu_read(client, reg); + if(val) + SET_BIT(data, ALERT_MODE_BIT); + else + CLEAR_BIT(data, ALERT_MODE_BIT); + pega_hwmon_mcu_write(client, reg, data); + + return count; +} + +static ssize_t get_adc_vol(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u16 data = 0, reg = MONITOR_ADC_VOLTAGE_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + /* data presumably in mV -- TODO confirm against MCU spec. Print as V with + * two decimals: fractional part is (data/10)%100, not %12. */ + return sprintf(buf, "%d.%02d\n", data/1000, (data/10)%100); +} + +static ssize_t get_hwmon_temp(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0; + u8 reg = LM_0X49_TEMP_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + return sprintf(buf, "%d\n", data); +} +#define SET_FAN_ATTR(_num) \ + static SENSOR_DEVICE_ATTR(fan##_num##_inner_rpm, S_IRUGO, get_fan_inner_rpm, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(fan##_num##_outer_rpm, S_IRUGO, get_fan_outer_rpm, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(fan##_num##_present, S_IRUGO, get_fan_present, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(fan##_num##_enable, S_IRUGO | S_IWUSR, get_fan_enable, set_fan_enable, _num-1); \ + static SENSOR_DEVICE_ATTR(fan##_num##_led_auto, S_IRUGO | S_IWUSR, get_fan_led_auto, set_fan_led_auto, _num-1); \ + static SENSOR_DEVICE_ATTR(fan##_num##_led_green, S_IRUGO |
S_IWUSR, get_fan_led_green, set_fan_led_green, _num-1); \ + static SENSOR_DEVICE_ATTR(fan##_num##_led_amber, S_IRUGO | S_IWUSR, get_fan_led_amber, set_fan_led_amber, _num-1); \ + static SENSOR_DEVICE_ATTR(fan##_num##_status_alert, S_IRUGO, get_fan_status_alert, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(fan##_num##_outerRPMOver_alert, S_IRUGO, get_fan_outerRPMOver_alert, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(fan##_num##_outerRPMUnder_alert, S_IRUGO, get_fan_outerRPMUnder_alert, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(fan##_num##_innerRPMOver_alert, S_IRUGO, get_fan_innerRPMOver_alert, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(fan##_num##_innerRPMUnder_alert, S_IRUGO, get_fan_innerRPMUnder_alert, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(fan##_num##_connect_alert, S_IRUGO, get_fan_connect_alert, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(fan##_num##_disconnect_alert, S_IRUGO, get_fan_disconnect_alert, NULL, _num-1) + +SET_FAN_ATTR(1);SET_FAN_ATTR(2);SET_FAN_ATTR(3);SET_FAN_ATTR(4);SET_FAN_ATTR(5); + +#define SET_ADC_ATTR(_num) \ + static SENSOR_DEVICE_ATTR(ADC##_num##_under_alert, S_IRUGO, get_adc_under_vol_alert, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(ADC##_num##_over_alert, S_IRUGO, get_adc_over_vol_alert, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(ADC##_num##_vol, S_IRUGO, get_adc_vol, NULL, 8-_num) + +SET_ADC_ATTR(1);SET_ADC_ATTR(2);SET_ADC_ATTR(3);SET_ADC_ATTR(4);SET_ADC_ATTR(5);SET_ADC_ATTR(6);SET_ADC_ATTR(7);SET_ADC_ATTR(8); + +static SENSOR_DEVICE_ATTR(mb_fw_upgrade, S_IWUSR, NULL, mainBoardUpgrade, 0); +static SENSOR_DEVICE_ATTR(fb_fw_upgrade, S_IWUSR, NULL, fanBoardUpgrade, 0); +static SENSOR_DEVICE_ATTR(mb_hw_version, S_IRUGO, get_MB_HW_version, NULL, 0); +static SENSOR_DEVICE_ATTR(fb_hw_version, S_IRUGO, get_FB_HW_version, NULL, 0); +static SENSOR_DEVICE_ATTR(fb_board_id, S_IRUGO, get_FB_boardId, NULL, 0); +static SENSOR_DEVICE_ATTR(mb_fw_version, S_IRUGO, get_MB_FW_version, NULL, 0); +static 
SENSOR_DEVICE_ATTR(fb_fw_version, S_IRUGO, get_FB_FW_version, NULL, 0); +static SENSOR_DEVICE_ATTR(fan_pwm, S_IRUGO | S_IWUSR, get_fan_PWM, set_fan_pwm, 0); + +static SENSOR_DEVICE_ATTR(smartFan_enable, S_IRUGO | S_IWUSR, get_smartFan_enable, set_smartFan_enable, 0); +static SENSOR_DEVICE_ATTR(smartFan_setting_enable, S_IRUGO | S_IWUSR, get_smartFan_setting_enable, set_smartFan_setting_enable, 0); +static SENSOR_DEVICE_ATTR(smartFan_device, S_IRUGO | S_IWUSR, get_smartFan_device, set_smartFan_device, 0); +static SENSOR_DEVICE_ATTR(smartFan_update, S_IRUGO | S_IWUSR, get_smartFan_update, set_smartFan_update, 0); +static SENSOR_DEVICE_ATTR(smartFan_max_temp, S_IRUGO | S_IWUSR, get_smartFan_max_temp, set_smartFan_max_temp, 0); +static SENSOR_DEVICE_ATTR(smartFan_mid_temp, S_IRUGO | S_IWUSR, get_smartFan_mid_temp, set_smartFan_mid_temp, 0); +static SENSOR_DEVICE_ATTR(smartFan_min_temp, S_IRUGO | S_IWUSR, get_smartFan_min_temp, set_smartFan_min_temp, 0); +static SENSOR_DEVICE_ATTR(smartFan_max_pwm, S_IRUGO | S_IWUSR, get_smartFan_max_pwm, set_smartFan_max_pwm, 0); +static SENSOR_DEVICE_ATTR(smartFan_mid_pwm, S_IRUGO | S_IWUSR, get_smartFan_mid_pwm, set_smartFan_mid_pwm, 0); +static SENSOR_DEVICE_ATTR(smartFan_min_pwm, S_IRUGO | S_IWUSR, get_smartFan_min_pwm, set_smartFan_min_pwm, 0); + +static SENSOR_DEVICE_ATTR(lm75_49_temp_alert, S_IRUGO, get_temp_alert, NULL, 3); +static SENSOR_DEVICE_ATTR(lm75_48_temp_alert, S_IRUGO, get_temp_alert, NULL, 2); +static SENSOR_DEVICE_ATTR(SA56004X_Ltemp_alert, S_IRUGO, get_temp_alert, NULL, 1); +static SENSOR_DEVICE_ATTR(SA56004X_Rtemp_alert, S_IRUGO, get_temp_alert, NULL, 0); + +static SENSOR_DEVICE_ATTR(i2c_fb_timeout, S_IRUGO, get_i2c_timeout, NULL, 0); +static SENSOR_DEVICE_ATTR(i2c_remote_timeout, S_IRUGO, get_i2c_timeout, NULL, 1); +static SENSOR_DEVICE_ATTR(i2c_local_timeout, S_IRUGO, get_i2c_timeout, NULL, 2); +static SENSOR_DEVICE_ATTR(i2c_lm75_48_timeout, S_IRUGO, get_i2c_timeout, NULL, 3); +static 
SENSOR_DEVICE_ATTR(i2c_lm75_49_timeout, S_IRUGO, get_i2c_timeout, NULL, 4); +static SENSOR_DEVICE_ATTR(alert_mode, S_IRUGO | S_IWUSR, get_alert_mode, set_alert_mode, 0); + +static SENSOR_DEVICE_ATTR(lm75_49_temp, S_IRUGO, get_hwmon_temp, NULL, 0); +static SENSOR_DEVICE_ATTR(lm75_48_temp, S_IRUGO, get_hwmon_temp, NULL, 1); +static SENSOR_DEVICE_ATTR(SA56004_local_temp, S_IRUGO, get_hwmon_temp, NULL, 2); +static SENSOR_DEVICE_ATTR(SA56004_remote_temp, S_IRUGO, get_hwmon_temp, NULL, 3); + +static struct attribute *pega_hwmon_mcu_attributes[] = { + &sensor_dev_attr_mb_fw_upgrade.dev_attr.attr, + &sensor_dev_attr_fb_fw_upgrade.dev_attr.attr, + &sensor_dev_attr_mb_hw_version.dev_attr.attr, + &sensor_dev_attr_fb_hw_version.dev_attr.attr, + &sensor_dev_attr_fb_board_id.dev_attr.attr, + &sensor_dev_attr_mb_fw_version.dev_attr.attr, + &sensor_dev_attr_fb_fw_version.dev_attr.attr, + &sensor_dev_attr_fan_pwm.dev_attr.attr, + + &sensor_dev_attr_smartFan_enable.dev_attr.attr, + &sensor_dev_attr_smartFan_setting_enable.dev_attr.attr, + &sensor_dev_attr_smartFan_device.dev_attr.attr, + &sensor_dev_attr_smartFan_update.dev_attr.attr, + &sensor_dev_attr_smartFan_max_temp.dev_attr.attr, + &sensor_dev_attr_smartFan_mid_temp.dev_attr.attr, + &sensor_dev_attr_smartFan_min_temp.dev_attr.attr, + &sensor_dev_attr_smartFan_max_pwm.dev_attr.attr, + &sensor_dev_attr_smartFan_mid_pwm.dev_attr.attr, + &sensor_dev_attr_smartFan_min_pwm.dev_attr.attr, + + &sensor_dev_attr_fan1_inner_rpm.dev_attr.attr, + &sensor_dev_attr_fan2_inner_rpm.dev_attr.attr, + &sensor_dev_attr_fan3_inner_rpm.dev_attr.attr, + &sensor_dev_attr_fan4_inner_rpm.dev_attr.attr, + &sensor_dev_attr_fan5_inner_rpm.dev_attr.attr, + + &sensor_dev_attr_fan1_outer_rpm.dev_attr.attr, + &sensor_dev_attr_fan2_outer_rpm.dev_attr.attr, + &sensor_dev_attr_fan3_outer_rpm.dev_attr.attr, + &sensor_dev_attr_fan4_outer_rpm.dev_attr.attr, + &sensor_dev_attr_fan5_outer_rpm.dev_attr.attr, + + &sensor_dev_attr_fan1_present.dev_attr.attr, + 
&sensor_dev_attr_fan2_present.dev_attr.attr, + &sensor_dev_attr_fan3_present.dev_attr.attr, + &sensor_dev_attr_fan4_present.dev_attr.attr, + &sensor_dev_attr_fan5_present.dev_attr.attr, + + &sensor_dev_attr_fan1_enable.dev_attr.attr, + &sensor_dev_attr_fan2_enable.dev_attr.attr, + &sensor_dev_attr_fan3_enable.dev_attr.attr, + &sensor_dev_attr_fan4_enable.dev_attr.attr, + &sensor_dev_attr_fan5_enable.dev_attr.attr, + + &sensor_dev_attr_fan1_led_auto.dev_attr.attr, + &sensor_dev_attr_fan2_led_auto.dev_attr.attr, + &sensor_dev_attr_fan3_led_auto.dev_attr.attr, + &sensor_dev_attr_fan4_led_auto.dev_attr.attr, + &sensor_dev_attr_fan5_led_auto.dev_attr.attr, + + &sensor_dev_attr_fan1_led_green.dev_attr.attr, + &sensor_dev_attr_fan2_led_green.dev_attr.attr, + &sensor_dev_attr_fan3_led_green.dev_attr.attr, + &sensor_dev_attr_fan4_led_green.dev_attr.attr, + &sensor_dev_attr_fan5_led_green.dev_attr.attr, + + &sensor_dev_attr_fan1_led_amber.dev_attr.attr, + &sensor_dev_attr_fan2_led_amber.dev_attr.attr, + &sensor_dev_attr_fan3_led_amber.dev_attr.attr, + &sensor_dev_attr_fan4_led_amber.dev_attr.attr, + &sensor_dev_attr_fan5_led_amber.dev_attr.attr, + + &sensor_dev_attr_fan1_status_alert.dev_attr.attr, + &sensor_dev_attr_fan2_status_alert.dev_attr.attr, + &sensor_dev_attr_fan3_status_alert.dev_attr.attr, + &sensor_dev_attr_fan4_status_alert.dev_attr.attr, + &sensor_dev_attr_fan5_status_alert.dev_attr.attr, + + &sensor_dev_attr_ADC1_under_alert.dev_attr.attr, + &sensor_dev_attr_ADC2_under_alert.dev_attr.attr, + &sensor_dev_attr_ADC3_under_alert.dev_attr.attr, + &sensor_dev_attr_ADC4_under_alert.dev_attr.attr, + &sensor_dev_attr_ADC5_under_alert.dev_attr.attr, + &sensor_dev_attr_ADC6_under_alert.dev_attr.attr, + &sensor_dev_attr_ADC7_under_alert.dev_attr.attr, + &sensor_dev_attr_ADC8_under_alert.dev_attr.attr, + + &sensor_dev_attr_ADC1_over_alert.dev_attr.attr, + &sensor_dev_attr_ADC2_over_alert.dev_attr.attr, + &sensor_dev_attr_ADC3_over_alert.dev_attr.attr, + 
&sensor_dev_attr_ADC4_over_alert.dev_attr.attr, + &sensor_dev_attr_ADC5_over_alert.dev_attr.attr, + &sensor_dev_attr_ADC6_over_alert.dev_attr.attr, + &sensor_dev_attr_ADC7_over_alert.dev_attr.attr, + &sensor_dev_attr_ADC8_over_alert.dev_attr.attr, + + &sensor_dev_attr_lm75_48_temp_alert.dev_attr.attr, + &sensor_dev_attr_lm75_49_temp_alert.dev_attr.attr, + &sensor_dev_attr_SA56004X_Ltemp_alert.dev_attr.attr, + &sensor_dev_attr_SA56004X_Rtemp_alert.dev_attr.attr, + + &sensor_dev_attr_fan1_outerRPMOver_alert.dev_attr.attr, + &sensor_dev_attr_fan2_outerRPMOver_alert.dev_attr.attr, + &sensor_dev_attr_fan3_outerRPMOver_alert.dev_attr.attr, + &sensor_dev_attr_fan4_outerRPMOver_alert.dev_attr.attr, + &sensor_dev_attr_fan5_outerRPMOver_alert.dev_attr.attr, + + &sensor_dev_attr_fan1_outerRPMUnder_alert.dev_attr.attr, + &sensor_dev_attr_fan2_outerRPMUnder_alert.dev_attr.attr, + &sensor_dev_attr_fan3_outerRPMUnder_alert.dev_attr.attr, + &sensor_dev_attr_fan4_outerRPMUnder_alert.dev_attr.attr, + &sensor_dev_attr_fan5_outerRPMUnder_alert.dev_attr.attr, + + &sensor_dev_attr_fan1_innerRPMOver_alert.dev_attr.attr, + &sensor_dev_attr_fan2_innerRPMOver_alert.dev_attr.attr, + &sensor_dev_attr_fan3_innerRPMOver_alert.dev_attr.attr, + &sensor_dev_attr_fan4_innerRPMOver_alert.dev_attr.attr, + &sensor_dev_attr_fan5_innerRPMOver_alert.dev_attr.attr, + + &sensor_dev_attr_fan1_innerRPMUnder_alert.dev_attr.attr, + &sensor_dev_attr_fan2_innerRPMUnder_alert.dev_attr.attr, + &sensor_dev_attr_fan3_innerRPMUnder_alert.dev_attr.attr, + &sensor_dev_attr_fan4_innerRPMUnder_alert.dev_attr.attr, + &sensor_dev_attr_fan5_innerRPMUnder_alert.dev_attr.attr, + + &sensor_dev_attr_fan1_connect_alert.dev_attr.attr, + &sensor_dev_attr_fan2_connect_alert.dev_attr.attr, + &sensor_dev_attr_fan3_connect_alert.dev_attr.attr, + &sensor_dev_attr_fan4_connect_alert.dev_attr.attr, + &sensor_dev_attr_fan5_connect_alert.dev_attr.attr, + + &sensor_dev_attr_fan1_disconnect_alert.dev_attr.attr, + 
&sensor_dev_attr_fan2_disconnect_alert.dev_attr.attr, + &sensor_dev_attr_fan3_disconnect_alert.dev_attr.attr, + &sensor_dev_attr_fan4_disconnect_alert.dev_attr.attr, + &sensor_dev_attr_fan5_disconnect_alert.dev_attr.attr, + + &sensor_dev_attr_i2c_fb_timeout.dev_attr.attr, + &sensor_dev_attr_i2c_remote_timeout.dev_attr.attr, + &sensor_dev_attr_i2c_local_timeout.dev_attr.attr, + &sensor_dev_attr_i2c_lm75_48_timeout.dev_attr.attr, + &sensor_dev_attr_i2c_lm75_49_timeout.dev_attr.attr, + &sensor_dev_attr_alert_mode.dev_attr.attr, + + &sensor_dev_attr_ADC1_vol.dev_attr.attr, + &sensor_dev_attr_ADC2_vol.dev_attr.attr, + &sensor_dev_attr_ADC3_vol.dev_attr.attr, + &sensor_dev_attr_ADC4_vol.dev_attr.attr, + &sensor_dev_attr_ADC5_vol.dev_attr.attr, + &sensor_dev_attr_ADC6_vol.dev_attr.attr, + &sensor_dev_attr_ADC7_vol.dev_attr.attr, + &sensor_dev_attr_ADC8_vol.dev_attr.attr, + + &sensor_dev_attr_lm75_49_temp.dev_attr.attr, + &sensor_dev_attr_lm75_48_temp.dev_attr.attr, + &sensor_dev_attr_SA56004_local_temp.dev_attr.attr, + &sensor_dev_attr_SA56004_remote_temp.dev_attr.attr, + + NULL +}; + +static const struct attribute_group pega_hwmon_mcu_group = { .attrs = pega_hwmon_mcu_attributes}; + +static int pega_hwmon_mcu_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + int status; + + status = sysfs_create_group(&client->dev.kobj, &pega_hwmon_mcu_group); + + if (status) { + goto exit; + } + + dev_info(&client->dev, "chip found\n"); + + return 0; + +exit: + return status; +} + +static int pega_hwmon_mcu_remove(struct i2c_client *client) +{ + sysfs_remove_group(&client->dev.kobj, &pega_hwmon_mcu_group); + return 0; +} + +static const struct i2c_device_id pega_hwmon_mcu_id[] = { + { "porsche_hwmon_mcu", porsche }, + {} +}; +MODULE_DEVICE_TABLE(i2c, pega_hwmon_mcu_id); + +static struct i2c_driver pega_hwmon_mcu_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "pegatron_hwmon_mcu", + }, + .probe = pega_hwmon_mcu_probe, + .remove = 
pega_hwmon_mcu_remove, + .id_table = pega_hwmon_mcu_id, +}; + +static int __init pega_hwmon_mcu_init(void) +{ + mutex_init(&pega_hwmon_mcu_lock); + + return i2c_add_driver(&pega_hwmon_mcu_driver); +} + +static void __exit pega_hwmon_mcu_exit(void) +{ + i2c_del_driver(&pega_hwmon_mcu_driver); +} + +MODULE_AUTHOR("Peter5 Lin "); +MODULE_DESCRIPTION("pega_hwmon_mcu driver"); +MODULE_LICENSE("GPL"); + +module_init(pega_hwmon_mcu_init); +module_exit(pega_hwmon_mcu_exit); \ No newline at end of file diff --git a/platform/nephos/sonic-platform-modules-pegatron/debian/changelog b/platform/nephos/sonic-platform-modules-pegatron/debian/changelog new file mode 100644 index 000000000000..39ecd34d960c --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/debian/changelog @@ -0,0 +1,5 @@ +sonic-pegatron-platform-modules (0.1) unstable; urgency=low + + * Initial release + + -- Pegatron Mon, 12 Mar 2018 15:22:37 +0800 diff --git a/platform/nephos/sonic-platform-modules-pegatron/debian/compat b/platform/nephos/sonic-platform-modules-pegatron/debian/compat new file mode 100644 index 000000000000..ec635144f600 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/debian/compat @@ -0,0 +1 @@ +9 diff --git a/platform/nephos/sonic-platform-modules-pegatron/debian/control b/platform/nephos/sonic-platform-modules-pegatron/debian/control new file mode 100755 index 000000000000..18e74be1455d --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/debian/control @@ -0,0 +1,12 @@ +Source: sonic-pegatron-platform-modules +Section: main +Priority: extra +Maintainer: Pegatron +Build-Depends: debhelper (>= 8.0.0), bzip2 +Standards-Version: 3.9.3 + +Package: sonic-platform-pegatron-porsche +Architecture: amd64 +Depends: linux-image-3.16.0-5-amd64 +Description: kernel modules for platform devices such as fan, led, sfp + diff --git a/platform/nephos/sonic-platform-modules-pegatron/debian/rules b/platform/nephos/sonic-platform-modules-pegatron/debian/rules new 
file mode 100755 index 000000000000..472ec939a47c --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/debian/rules @@ -0,0 +1,88 @@ +#!/usr/bin/make -f +# -*- makefile -*- +# Sample debian/rules that uses debhelper. +# This file was originally written by Joey Hess and Craig Small. +# As a special exception, when this file is copied by dh-make into a +# dh-make output file, you may use that output file without restriction. +# This special exception was added by Craig Small in version 0.37 of dh-make. + +include /usr/share/dpkg/pkg-info.mk + +# Uncomment this to turn on verbose mode. +#export DH_VERBOSE=1 + +export INSTALL_MOD_DIR:=extra + +PYTHON ?= python2 + +PACKAGE_PRE_NAME := sonic-platform-pegatron +KVERSION ?= $(shell uname -r) +KERNEL_SRC := /lib/modules/$(KVERSION) +MOD_SRC_DIR:= $(shell pwd) +MODULE_DIRS:= porsche +MODULE_DIR := modules +UTILS_DIR := utils +SERVICE_DIR := service +SCRIPTS_DIR := scripts +CONF_DIR := conf + +%: + dh $@ --with systemd,python2,python3 --buildsystem=pybuild + +clean: + dh_testdir + dh_testroot + dh_clean + +build: + #make modules -C $(KERNEL_SRC)/build M=$(MODULE_SRC) + (for mod in $(MODULE_DIRS); do \ + make modules -C $(KERNEL_SRC)/build M=$(MOD_SRC_DIR)/$${mod}/modules; \ + #$(PYTHON) $${mod}/setup.py build; \ + done) + +binary: binary-arch binary-indep + # Nothing to do + +binary-arch: + # Nothing to do + +#install: build + #dh_testdir + #dh_testroot + #dh_clean -k + #dh_installdirs + +binary-indep: + dh_testdir + dh_installdirs + + # Custom package commands + (for mod in $(MODULE_DIRS); do \ + dh_installdirs -p$(PACKAGE_PRE_NAME)-$${mod}/$(KERNEL_SRC)/$(INSTALL_MOD_DIR); \ + dh_installdirs -p$(PACKAGE_PRE_NAME)-$${mod}/usr/local/bin; \ + dh_installdirs -p$(PACKAGE_PRE_NAME)-$${mod}/usr/bin; \ + dh_installdirs -p$(PACKAGE_PRE_NAME)-$${mod}/lib/systemd/system; \ + cp $(MOD_SRC_DIR)/$${mod}/$(MODULE_DIR)/*.ko debian/$(PACKAGE_PRE_NAME)-$${mod}/$(KERNEL_SRC)/$(INSTALL_MOD_DIR); \ + cp 
$(MOD_SRC_DIR)/$${mod}/$(UTILS_DIR)/* debian/$(PACKAGE_PRE_NAME)-$${mod}/usr/local/bin/; \ + cp $(MOD_SRC_DIR)/$${mod}/$(SERVICE_DIR)/*.service debian/$(PACKAGE_PRE_NAME)-$${mod}/lib/systemd/system/; \ + cp $(MOD_SRC_DIR)/$${mod}/$(SCRIPTS_DIR)/* debian/$(PACKAGE_PRE_NAME)-$${mod}/usr/bin/; \ + #$(PYTHON) $${mod}/setup.py install --root=$(MOD_SRC_DIR)/debian/$(PACKAGE_PRE_NAME)-$${mod} --install-layout=deb; \ + done) + # Resuming debhelper scripts + dh_testroot + dh_install + dh_installchangelogs + dh_installdocs + dh_systemd_enable + dh_installinit + dh_systemd_start + dh_link + dh_fixperms + dh_compress + dh_strip + dh_installdeb + dh_gencontrol + dh_md5sums + dh_builddeb +.PHONY: build binary binary-arch binary-indep clean diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/Makefile b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/Makefile new file mode 100644 index 000000000000..60e882a586d9 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/Makefile @@ -0,0 +1 @@ +obj-m:=pegatron_porsche_cpld.o pegatron_hwmon_mcu.o pegatron_porsche_sfp.o diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_hwmon_mcu.c b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_hwmon_mcu.c new file mode 120000 index 000000000000..1357104478a3 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_hwmon_mcu.c @@ -0,0 +1 @@ +../../common/modules/pegatron_hwmon_mcu.c \ No newline at end of file diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_cpld.c b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_cpld.c new file mode 100644 index 000000000000..154a68dcb836 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_cpld.c @@ -0,0 +1,1132 @@ +/* + * A CPLD driver for the porsche + * + * Copyright (C) 2018 
Pegatron Corporation. + * Peter5_Lin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef pegatron_porsche_DEBUG +/*#define pegatron_porsche_DEBUG*/ +#ifdef pegatron_porsche_DEBUG +#define DBG(x) x +#else +#define DBG(x) +#endif /* DEBUG */ + +#define CPLD_SFP_MAX_GROUP 3 +#define SFP_PORT_MAX_NUM 54 +#define SFP_EEPROM_SIZE 256 +#define QSFP_FIRST_PORT 48 +#define CPLDA_SFP_NUM 24 +#define CPLDB_SFP_NUM 12 +#define CPLDC_SFP_NUM 18 +#define CPLDA_ADDRESS 0x74 +#define CPLDB_ADDRESS 0x75 +#define CPLDC_ADDRESS 0x76 +#define CPLD_VERSION_REG 0x0 +#define SYNC_CONTROL_REG 0x1 +#define CPLD_SYS_PWR_LED_REG 0xD +#define CPLD_LOC_FAN_LED_REG 0xE +#define CPLD_EEPROM_WRITE_REG 0x12 +#define CPLD_PSU_REG 0x15 +#define SFP_13_36_SCL_BASE 0x4 +#define SFP_1_12_SCL_BASE 0x2 +#define SFP_37_54_SCL_BASE 0x5 +#define SFP_13_36_STATUS_BASE 0x8 +#define SFP_1_12_STATUS_BASE 0x5 +#define SFP_37_54_STATUS_BASE 0x9 +#define QSFP_PRESENT_ADDRESS 0xF +#define QSFP_RESET_ADDRESS_BASE 0x10 +#define QSFP_MODSELN_ADDRESS 0x17 +#define QSFP_LOW_POWER_ADDRESS 0x18 +#define CPLD_SERIAL_LED_BIT 2 +#define CPLD_EEPROM_WRITE_BIT 2 +#define SFP_PRESENT_BASE 0 +#define SFP_RXLOSS_BASE 1 +#define SFP_TXFAULT_BASE 2 +#define SFP_TXDISABLE_BASE 3 
+#define CPLD_PSU_PWOK_BASE 0 +#define CPLD_PSU_PRESENT_BASE 2 +#define GET_BIT(data, bit, value) value = (data >> bit) & 0x1 +#define SET_BIT(data, bit) data |= (1 << bit) +#define CLEAR_BIT(data, bit) data &= ~(1 << bit) + +static LIST_HEAD(cpld_client_list); +static struct mutex list_lock; +/* Addresses scanned for pegatron_porsche_cpld + */ +static const unsigned short normal_i2c[] = { CPLDA_ADDRESS, CPLDB_ADDRESS, CPLDC_ADDRESS, I2C_CLIENT_END }; + +struct cpld_client_node { + struct i2c_client *client; + struct list_head list; +}; + +int pegatron_porsche_cpld_read(unsigned short addr, u8 reg) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int data = -EPERM; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client->addr == addr) { + data = i2c_smbus_read_byte_data(cpld_node->client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, addr, reg, data)); + break; + } + } + + mutex_unlock(&list_lock); + + return data; +} +EXPORT_SYMBOL(pegatron_porsche_cpld_read); + +int pegatron_porsche_cpld_write(unsigned short addr, u8 reg, u8 val) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int ret = -EIO; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client->addr == addr) { + ret = i2c_smbus_write_byte_data(cpld_node->client, reg, val); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, addr, reg, val)); + break; + } + } + + mutex_unlock(&list_lock); + + return ret; +} +EXPORT_SYMBOL(pegatron_porsche_cpld_write); + +static ssize_t read_cpld_HWversion(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_VERSION_REG; + 
+ data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + return sprintf(buf, "%02x\n", (data >> 5) & 0x7); +} + +static ssize_t read_cpld_SWversion(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_VERSION_REG; + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + return sprintf(buf, "%02x\n", (data & 0x1f)); +} + +static ssize_t show_allled_ctrl(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = SYNC_CONTROL_REG; + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + data &= 0x3; + + return sprintf(buf, "%02x\n", data); +} + +static ssize_t set_allled_ctrl(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = SYNC_CONTROL_REG; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + data = pegatron_porsche_cpld_read(client->addr, reg); + data = val | (data & 0xfc); + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + pegatron_porsche_cpld_write(client->addr, reg, data); + + return count; +} + +static ssize_t show_serial_led(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0, reg = SYNC_CONTROL_REG; + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, CPLD_SERIAL_LED_BIT, val); + + return 
sprintf(buf, "%02x\n", val); +} + +static ssize_t set_serial_led(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = SYNC_CONTROL_REG; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + if(val) + SET_BIT(data, CPLD_SERIAL_LED_BIT); + else + CLEAR_BIT(data, CPLD_SERIAL_LED_BIT); + + pegatron_porsche_cpld_write(client->addr, reg, data); + + return count; +} + +static ssize_t show_sys_led(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_SYS_PWR_LED_REG; + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + data = (data >> 5) & 0x7; + + return sprintf(buf, "%02x\n", data); +} + +static ssize_t set_sys_led(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_SYS_PWR_LED_REG; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + data = pegatron_porsche_cpld_read(client->addr, reg); + data = (val << 5) | (data & 0x1f); + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + pegatron_porsche_cpld_write(client->addr, reg, data); + + return count; +} +static ssize_t show_pwr_led(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_SYS_PWR_LED_REG; + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + data = (data >> 2) & 
0x7; + + return sprintf(buf, "%02x\n", data); +} + +static ssize_t set_pwr_led(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_SYS_PWR_LED_REG; + long val = 0; + + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + data = pegatron_porsche_cpld_read(client->addr, reg); + data = (val << 2) | (data & 0xe3); + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + pegatron_porsche_cpld_write(client->addr, reg, data); + + return count; +} +static ssize_t show_loc_led(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_LOC_FAN_LED_REG; + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + data = (data>>4) & 0x3; + + return sprintf(buf, "%02x\n", data); +} + +static ssize_t set_loc_led(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_LOC_FAN_LED_REG; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + data = pegatron_porsche_cpld_read(client->addr, reg); + data = (val << 4) | (data & 0xf); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + pegatron_porsche_cpld_write(client->addr, reg, data); + + return count; +} + +static ssize_t show_fan_led(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_LOC_FAN_LED_REG; + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + data &= 0x7; + + return sprintf(buf, "%02x\n", data); +} + 
+static ssize_t set_fan_led(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_LOC_FAN_LED_REG; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + data = pegatron_porsche_cpld_read(client->addr, reg); + data = val | (data & 0xf8); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + pegatron_porsche_cpld_write(client->addr, reg, data); + + return count; +} + +static ssize_t show_eeprom_write_enable(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0, reg = CPLD_EEPROM_WRITE_REG; + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + /* Test the write-enable bit, not the register address: shifting a u8 by + * CPLD_EEPROM_WRITE_REG (0x12) always produced 0. */ + GET_BIT(data, CPLD_EEPROM_WRITE_BIT, val); + + return sprintf(buf, "%02x\n", val); +} + +static ssize_t set_eeprom_write_enable(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_EEPROM_WRITE_REG; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + if(val) + SET_BIT(data, CPLD_EEPROM_WRITE_BIT); + else + CLEAR_BIT(data, CPLD_EEPROM_WRITE_BIT); + + pegatron_porsche_cpld_write(client->addr, reg, data); + + return count; +} + +static ssize_t read_psu_present(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0, reg = CPLD_PSU_REG; + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data:
%x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, (CPLD_PSU_PRESENT_BASE + attr->index), val); + + return sprintf(buf, "%02x\n", val); +} + +static ssize_t read_psu_status(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val=0, reg = CPLD_PSU_REG; + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, (CPLD_PSU_PWOK_BASE + attr->index), val); + + return sprintf(buf, "%02x\n", val); +} + +#define GET_SFP_STATUS_ADDRESS(idx, reg) \ + if(idx < CPLDB_SFP_NUM) \ + reg = SFP_1_12_STATUS_BASE + (idx / 2); \ + else if(idx < CPLDA_SFP_NUM + CPLDB_SFP_NUM) \ + reg = SFP_13_36_STATUS_BASE + ((idx-CPLDB_SFP_NUM) / 2); \ + else \ + reg = SFP_37_54_STATUS_BASE + ((idx-CPLDB_SFP_NUM-CPLDA_SFP_NUM) / 2) + +static ssize_t get_sfp_present(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 reg = 0, data = 0, val = 0; + + GET_SFP_STATUS_ADDRESS(attr->index, reg); + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, SFP_PRESENT_BASE + 4*(attr->index % 2), val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_sfp_tx_disable(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 reg = 0, data = 0, val = 0; + + GET_SFP_STATUS_ADDRESS(attr->index, reg); + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + 
GET_BIT(data, SFP_TXDISABLE_BASE + 4*(attr->index % 2), val); + + return sprintf(buf, "%d\n", val); +} +static ssize_t set_sfp_tx_disable(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 reg = 0, data = 0; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + GET_SFP_STATUS_ADDRESS(attr->index, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + data = pegatron_porsche_cpld_read(client->addr, reg); + + if(val) + SET_BIT(data, SFP_TXDISABLE_BASE + 4*(attr->index % 2)); + else + CLEAR_BIT(data, SFP_TXDISABLE_BASE + 4*(attr->index % 2)); + + pegatron_porsche_cpld_write(client->addr, reg, data); + + return count; +} +static ssize_t get_sfp_rx_loss(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 reg = 0, data = 0, val = 0; + + GET_SFP_STATUS_ADDRESS(attr->index, reg); + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, SFP_RXLOSS_BASE + 4*(attr->index % 2), val); + + return sprintf(buf, "%d\n", val); +} +static ssize_t get_sfp_tx_fault(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 reg = 0, data = 0, val = 0; + + GET_SFP_STATUS_ADDRESS(attr->index, reg); + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, SFP_TXFAULT_BASE + 4*(attr->index % 2), val); + + return sprintf(buf, "%d\n",val); +} + +static ssize_t 
get_qsfp_present(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0, reg = QSFP_PRESENT_ADDRESS; + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, (attr->index % QSFP_FIRST_PORT), val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_qsfp_reset(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 reg = (QSFP_RESET_ADDRESS_BASE + attr->index % QSFP_FIRST_PORT / 4), data =0; + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + data = (data >> ((attr->index % QSFP_FIRST_PORT % 4)*2)) & 0x3; + + return sprintf(buf, "%d\n", data); +} + +static ssize_t set_qsfp_reset(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 reg = (QSFP_RESET_ADDRESS_BASE + attr->index % QSFP_FIRST_PORT / 4), data = 0; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + data = (val & 0x3) << ((attr->index % QSFP_FIRST_PORT % 4)*2); + + pegatron_porsche_cpld_write(client->addr, reg, data); + + return count; +} + +static ssize_t get_qsfp_lowpower(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, 
val = 0, reg = QSFP_LOW_POWER_ADDRESS; + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, (attr->index % QSFP_FIRST_PORT), val); + return sprintf(buf, "%02x\n", val); +} + +static ssize_t set_qsfp_lowpower(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = QSFP_LOW_POWER_ADDRESS; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + if(val) + SET_BIT(data, (attr->index % QSFP_FIRST_PORT)); + else + CLEAR_BIT(data, (attr->index % QSFP_FIRST_PORT)); + + pegatron_porsche_cpld_write(client->addr, reg, data); + + return count; +} + +static ssize_t get_qsfp_modeseln(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0, reg = QSFP_MODSELN_ADDRESS; + + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, (attr->index % QSFP_FIRST_PORT), val); + return sprintf(buf, "%02x\n", val); +} + +static ssize_t set_qsfp_modeseln(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = QSFP_MODSELN_ADDRESS; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + data = pegatron_porsche_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: 
%x\r\n", __func__, client->addr, reg, data)); + if(val) + SET_BIT(data, (attr->index % QSFP_FIRST_PORT)); + else + CLEAR_BIT(data, (attr->index % QSFP_FIRST_PORT)); + + pegatron_porsche_cpld_write(client->addr, reg, data); + + return count; +} + +static SENSOR_DEVICE_ATTR(cpld_hw_version, S_IRUGO, read_cpld_HWversion, NULL, 0); +static SENSOR_DEVICE_ATTR(cpld_sw_version, S_IRUGO, read_cpld_SWversion, NULL, 0); +static SENSOR_DEVICE_ATTR(cpld_allled_ctrl, S_IRUGO | S_IWUSR, show_allled_ctrl, set_allled_ctrl, 0); +static SENSOR_DEVICE_ATTR(serial_led_enable, S_IRUGO | S_IWUSR, show_serial_led, set_serial_led, 0); +static SENSOR_DEVICE_ATTR(sys_led, S_IRUGO | S_IWUSR, show_sys_led, set_sys_led, 0); +static SENSOR_DEVICE_ATTR(pwr_led, S_IRUGO | S_IWUSR, show_pwr_led, set_pwr_led, 0); +static SENSOR_DEVICE_ATTR(loc_led, S_IRUGO | S_IWUSR, show_loc_led, set_loc_led, 0); +static SENSOR_DEVICE_ATTR(fan_led, S_IRUGO | S_IWUSR, show_fan_led, set_fan_led, 0); +static SENSOR_DEVICE_ATTR(eeprom_write_enable, S_IRUGO | S_IWUSR, show_eeprom_write_enable, set_eeprom_write_enable, 0); +static SENSOR_DEVICE_ATTR(psu_1_present, S_IRUGO, read_psu_present, NULL, 1); +static SENSOR_DEVICE_ATTR(psu_2_present, S_IRUGO, read_psu_present, NULL, 0); +static SENSOR_DEVICE_ATTR(psu_1_status, S_IRUGO, read_psu_status, NULL, 1); +static SENSOR_DEVICE_ATTR(psu_2_status, S_IRUGO, read_psu_status, NULL, 0); + +#define SET_SFP_ATTR(_num) \ + static SENSOR_DEVICE_ATTR(sfp##_num##_present, S_IRUGO, get_sfp_present, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(sfp##_num##_tx_disable, S_IRUGO | S_IWUSR, get_sfp_tx_disable, set_sfp_tx_disable, _num-1); \ + static SENSOR_DEVICE_ATTR(sfp##_num##_rx_loss, S_IRUGO, get_sfp_rx_loss, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(sfp##_num##_tx_fault, S_IRUGO, get_sfp_tx_fault, NULL, _num-1) + +#define SET_QSFP_ATTR(_num) \ + static SENSOR_DEVICE_ATTR(sfp##_num##_present, S_IRUGO, get_qsfp_present, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(sfp##_num##_reset, 
S_IRUGO | S_IWUSR, get_qsfp_reset, set_qsfp_reset, _num-1); \ + static SENSOR_DEVICE_ATTR(sfp##_num##_lowpower, S_IRUGO | S_IWUSR, get_qsfp_lowpower, set_qsfp_lowpower, _num-1); \ + static SENSOR_DEVICE_ATTR(sfp##_num##_modeseln, S_IRUGO | S_IWUSR, get_qsfp_modeseln, set_qsfp_modeseln, _num-1) + +SET_SFP_ATTR(1);SET_SFP_ATTR(2);SET_SFP_ATTR(3);SET_SFP_ATTR(4);SET_SFP_ATTR(5);SET_SFP_ATTR(6);SET_SFP_ATTR(7);SET_SFP_ATTR(8);SET_SFP_ATTR(9); +SET_SFP_ATTR(10);SET_SFP_ATTR(11);SET_SFP_ATTR(12);SET_SFP_ATTR(13);SET_SFP_ATTR(14);SET_SFP_ATTR(15);SET_SFP_ATTR(16);SET_SFP_ATTR(17);SET_SFP_ATTR(18); +SET_SFP_ATTR(19);SET_SFP_ATTR(20);SET_SFP_ATTR(21);SET_SFP_ATTR(22);SET_SFP_ATTR(23);SET_SFP_ATTR(24);SET_SFP_ATTR(25);SET_SFP_ATTR(26);SET_SFP_ATTR(27); +SET_SFP_ATTR(28);SET_SFP_ATTR(29);SET_SFP_ATTR(30);SET_SFP_ATTR(31);SET_SFP_ATTR(32);SET_SFP_ATTR(33);SET_SFP_ATTR(34);SET_SFP_ATTR(35);SET_SFP_ATTR(36); +SET_SFP_ATTR(37);SET_SFP_ATTR(38);SET_SFP_ATTR(39);SET_SFP_ATTR(40);SET_SFP_ATTR(41);SET_SFP_ATTR(42);SET_SFP_ATTR(43);SET_SFP_ATTR(44);SET_SFP_ATTR(45); +SET_SFP_ATTR(46);SET_SFP_ATTR(47);SET_SFP_ATTR(48); +SET_QSFP_ATTR(49);SET_QSFP_ATTR(50);SET_QSFP_ATTR(51);SET_QSFP_ATTR(52);SET_QSFP_ATTR(53);SET_QSFP_ATTR(54); + +static struct attribute *pegatron_porsche_cpldA_attributes[] = { + &sensor_dev_attr_cpld_hw_version.dev_attr.attr, + &sensor_dev_attr_cpld_sw_version.dev_attr.attr, + + &sensor_dev_attr_sfp13_present.dev_attr.attr, + &sensor_dev_attr_sfp13_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp13_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp13_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp14_present.dev_attr.attr, + &sensor_dev_attr_sfp14_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp14_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp14_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp15_present.dev_attr.attr, + &sensor_dev_attr_sfp15_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp15_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp15_tx_fault.dev_attr.attr, + + + 
&sensor_dev_attr_sfp16_present.dev_attr.attr, + &sensor_dev_attr_sfp16_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp16_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp16_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp17_present.dev_attr.attr, + &sensor_dev_attr_sfp17_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp17_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp17_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp18_present.dev_attr.attr, + &sensor_dev_attr_sfp18_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp18_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp18_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp19_present.dev_attr.attr, + &sensor_dev_attr_sfp19_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp19_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp19_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp20_present.dev_attr.attr, + &sensor_dev_attr_sfp20_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp20_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp20_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp21_present.dev_attr.attr, + &sensor_dev_attr_sfp21_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp21_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp21_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp22_present.dev_attr.attr, + &sensor_dev_attr_sfp22_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp22_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp22_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp23_present.dev_attr.attr, + &sensor_dev_attr_sfp23_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp23_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp23_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp24_present.dev_attr.attr, + &sensor_dev_attr_sfp24_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp24_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp24_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp25_present.dev_attr.attr, + &sensor_dev_attr_sfp25_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp25_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp25_tx_fault.dev_attr.attr, + + 
&sensor_dev_attr_sfp26_present.dev_attr.attr, + &sensor_dev_attr_sfp26_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp26_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp26_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp27_present.dev_attr.attr, + &sensor_dev_attr_sfp27_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp27_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp27_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp28_present.dev_attr.attr, + &sensor_dev_attr_sfp28_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp28_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp28_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp29_present.dev_attr.attr, + &sensor_dev_attr_sfp29_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp29_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp29_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp30_present.dev_attr.attr, + &sensor_dev_attr_sfp30_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp30_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp30_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp31_present.dev_attr.attr, + &sensor_dev_attr_sfp31_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp31_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp31_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp32_present.dev_attr.attr, + &sensor_dev_attr_sfp32_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp32_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp32_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp33_present.dev_attr.attr, + &sensor_dev_attr_sfp33_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp33_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp33_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp34_present.dev_attr.attr, + &sensor_dev_attr_sfp34_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp34_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp34_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp35_present.dev_attr.attr, + &sensor_dev_attr_sfp35_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp35_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp35_tx_fault.dev_attr.attr, + + 
&sensor_dev_attr_sfp36_present.dev_attr.attr, + &sensor_dev_attr_sfp36_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp36_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp36_tx_fault.dev_attr.attr, + + NULL +}; + +static struct attribute *pegatron_porsche_cpldB_attributes[] = { + &sensor_dev_attr_cpld_hw_version.dev_attr.attr, + &sensor_dev_attr_cpld_sw_version.dev_attr.attr, + &sensor_dev_attr_cpld_allled_ctrl.dev_attr.attr, + &sensor_dev_attr_serial_led_enable.dev_attr.attr, + &sensor_dev_attr_sys_led.dev_attr.attr, + &sensor_dev_attr_pwr_led.dev_attr.attr, + &sensor_dev_attr_loc_led.dev_attr.attr, + &sensor_dev_attr_fan_led.dev_attr.attr, + &sensor_dev_attr_eeprom_write_enable.dev_attr.attr, + &sensor_dev_attr_psu_1_present.dev_attr.attr, + &sensor_dev_attr_psu_2_present.dev_attr.attr, + &sensor_dev_attr_psu_1_status.dev_attr.attr, + &sensor_dev_attr_psu_2_status.dev_attr.attr, + + &sensor_dev_attr_sfp1_present.dev_attr.attr, + &sensor_dev_attr_sfp1_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp1_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp1_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp2_present.dev_attr.attr, + &sensor_dev_attr_sfp2_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp2_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp2_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp3_present.dev_attr.attr, + &sensor_dev_attr_sfp3_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp3_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp3_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp4_present.dev_attr.attr, + &sensor_dev_attr_sfp4_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp4_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp4_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp5_present.dev_attr.attr, + &sensor_dev_attr_sfp5_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp5_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp5_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp6_present.dev_attr.attr, + &sensor_dev_attr_sfp6_tx_disable.dev_attr.attr, + 
&sensor_dev_attr_sfp6_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp6_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp7_present.dev_attr.attr, + &sensor_dev_attr_sfp7_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp7_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp7_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp8_present.dev_attr.attr, + &sensor_dev_attr_sfp8_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp8_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp8_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp9_present.dev_attr.attr, + &sensor_dev_attr_sfp9_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp9_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp9_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp10_present.dev_attr.attr, + &sensor_dev_attr_sfp10_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp10_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp10_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp11_present.dev_attr.attr, + &sensor_dev_attr_sfp11_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp11_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp11_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp12_present.dev_attr.attr, + &sensor_dev_attr_sfp12_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp12_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp12_tx_fault.dev_attr.attr, + NULL +}; + +static struct attribute *pegatron_porsche_cpldC_attributes[] = { + &sensor_dev_attr_cpld_hw_version.dev_attr.attr, + &sensor_dev_attr_cpld_sw_version.dev_attr.attr, + + &sensor_dev_attr_sfp37_present.dev_attr.attr, + &sensor_dev_attr_sfp37_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp37_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp37_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp38_present.dev_attr.attr, + &sensor_dev_attr_sfp38_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp38_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp38_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp39_present.dev_attr.attr, + &sensor_dev_attr_sfp39_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp39_rx_loss.dev_attr.attr, + 
&sensor_dev_attr_sfp39_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp40_present.dev_attr.attr, + &sensor_dev_attr_sfp40_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp40_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp40_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp41_present.dev_attr.attr, + &sensor_dev_attr_sfp41_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp41_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp41_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp42_present.dev_attr.attr, + &sensor_dev_attr_sfp42_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp42_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp42_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp43_present.dev_attr.attr, + &sensor_dev_attr_sfp43_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp43_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp43_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp44_present.dev_attr.attr, + &sensor_dev_attr_sfp44_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp44_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp44_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp45_present.dev_attr.attr, + &sensor_dev_attr_sfp45_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp45_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp45_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp46_present.dev_attr.attr, + &sensor_dev_attr_sfp46_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp46_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp46_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp47_present.dev_attr.attr, + &sensor_dev_attr_sfp47_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp47_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp47_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp48_present.dev_attr.attr, + &sensor_dev_attr_sfp48_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp48_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp48_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp49_present.dev_attr.attr, + &sensor_dev_attr_sfp49_lowpower.dev_attr.attr, + &sensor_dev_attr_sfp49_modeseln.dev_attr.attr, + 
&sensor_dev_attr_sfp49_reset.dev_attr.attr, + + &sensor_dev_attr_sfp50_present.dev_attr.attr, + &sensor_dev_attr_sfp50_lowpower.dev_attr.attr, + &sensor_dev_attr_sfp50_modeseln.dev_attr.attr, + &sensor_dev_attr_sfp50_reset.dev_attr.attr, + + &sensor_dev_attr_sfp51_present.dev_attr.attr, + &sensor_dev_attr_sfp51_lowpower.dev_attr.attr, + &sensor_dev_attr_sfp51_modeseln.dev_attr.attr, + &sensor_dev_attr_sfp51_reset.dev_attr.attr, + + &sensor_dev_attr_sfp52_present.dev_attr.attr, + &sensor_dev_attr_sfp52_lowpower.dev_attr.attr, + &sensor_dev_attr_sfp52_modeseln.dev_attr.attr, + &sensor_dev_attr_sfp52_reset.dev_attr.attr, + + &sensor_dev_attr_sfp53_present.dev_attr.attr, + &sensor_dev_attr_sfp53_lowpower.dev_attr.attr, + &sensor_dev_attr_sfp53_modeseln.dev_attr.attr, + &sensor_dev_attr_sfp53_reset.dev_attr.attr, + + &sensor_dev_attr_sfp54_present.dev_attr.attr, + &sensor_dev_attr_sfp54_lowpower.dev_attr.attr, + &sensor_dev_attr_sfp54_modeseln.dev_attr.attr, + &sensor_dev_attr_sfp54_reset.dev_attr.attr, + NULL +}; + +static const struct attribute_group pegatron_porsche_cpldA_group = { .attrs = pegatron_porsche_cpldA_attributes}; +static const struct attribute_group pegatron_porsche_cpldB_group = { .attrs = pegatron_porsche_cpldB_attributes}; +static const struct attribute_group pegatron_porsche_cpldC_group = { .attrs = pegatron_porsche_cpldC_attributes}; + +static void pegatron_porsche_cpld_add_client(struct i2c_client *client) +{ + struct cpld_client_node *node = kzalloc(sizeof(struct cpld_client_node), GFP_KERNEL); + + if (!node) { + dev_dbg(&client->dev, "Can't allocate cpld_client_node (0x%x)\n", client->addr); + return; + } + + node->client = client; + + mutex_lock(&list_lock); + list_add(&node->list, &cpld_client_list); + mutex_unlock(&list_lock); +} + +static void pegatron_porsche_cpld_remove_client(struct i2c_client *client) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int found = 0; + + mutex_lock(&list_lock); + + 
list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client == client) { + found = 1; + break; + } + } + + if (found) { + list_del(list_node); + kfree(cpld_node); + } + + mutex_unlock(&list_lock); +} + +static int pegatron_porsche_cpld_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { + dev_dbg(&client->dev, "i2c_check_functionality failed (0x%x)\n", client->addr); + status = -EIO; + goto exit; + } + + /* Register sysfs hooks */ + switch(client->addr) + { + case CPLDA_ADDRESS: + status = sysfs_create_group(&client->dev.kobj, &pegatron_porsche_cpldA_group); + break; + case CPLDB_ADDRESS: + status = sysfs_create_group(&client->dev.kobj, &pegatron_porsche_cpldB_group); + break; + case CPLDC_ADDRESS: + status = sysfs_create_group(&client->dev.kobj, &pegatron_porsche_cpldC_group); + break; + default: + dev_dbg(&client->dev, "i2c_check_CPLD failed (0x%x)\n", client->addr); + status = -EIO; + goto exit; + break; + } + + if (status) { + goto exit; + } + + dev_info(&client->dev, "chip found\n"); + pegatron_porsche_cpld_add_client(client); + + return 0; + +exit: + return status; +} + +static int pegatron_porsche_cpld_remove(struct i2c_client *client) +{ + switch(client->addr) + { + case CPLDA_ADDRESS: + sysfs_remove_group(&client->dev.kobj, &pegatron_porsche_cpldA_group); + break; + case CPLDB_ADDRESS: + sysfs_remove_group(&client->dev.kobj, &pegatron_porsche_cpldB_group); + break; + case CPLDC_ADDRESS: + sysfs_remove_group(&client->dev.kobj, &pegatron_porsche_cpldC_group); + break; + default: + dev_dbg(&client->dev, "i2c_remove_CPLD failed (0x%x)\n", client->addr); + break; + } + + + pegatron_porsche_cpld_remove_client(client); + return 0; +} + +static const struct i2c_device_id pegatron_porsche_cpld_id[] = { + { "porsche_cpld", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, 
pegatron_porsche_cpld_id); + +static struct i2c_driver pegatron_porsche_cpld_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "pegatron_porsche_cpld", + }, + .probe = pegatron_porsche_cpld_probe, + .remove = pegatron_porsche_cpld_remove, + .id_table = pegatron_porsche_cpld_id, + .address_list = normal_i2c, +}; + +static int __init pegatron_porsche_cpld_init(void) +{ + mutex_init(&list_lock); + + return i2c_add_driver(&pegatron_porsche_cpld_driver); +} + +static void __exit pegatron_porsche_cpld_exit(void) +{ + i2c_del_driver(&pegatron_porsche_cpld_driver); +} + +MODULE_AUTHOR("Peter5 Lin "); +MODULE_DESCRIPTION("pegatron_porsche_cpld driver"); +MODULE_LICENSE("GPL"); + +module_init(pegatron_porsche_cpld_init); +module_exit(pegatron_porsche_cpld_exit); diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_sfp.c b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_sfp.c new file mode 100644 index 000000000000..5d5d64b15e1a --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_sfp.c @@ -0,0 +1,431 @@ +/* + * A SFP driver for the porsche platform + * + * Copyright (C) 2018 Pegatron Corporation. + * Peter5_Lin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef PEGA_DEBUG +/*#define PEGA_DEBUG*/ +#ifdef PEGA_DEBUG +#define DBG(x) x +#else +#define DBG(x) +#endif /* DEBUG */ + +#define SFP_EEPROM_SIZE 256 +#define SFP_EEPROM_A0_ADDR 0x50 +#define SFP_EEPROM_A2_ADDR 0x51 +#define SFP_EEPROM_BUS_TYPE I2C_SMBUS_I2C_BLOCK_DATA +#define CPLDA_SFP_NUM 24 +#define CPLDB_SFP_NUM 12 +#define CPLDC_SFP_NUM 18 +#define CPLDA_ADDRESS 0x74 +#define CPLDB_ADDRESS 0x75 +#define CPLDC_ADDRESS 0x76 +#define SFP_13_36_SCL_BASE 0x4 +#define SFP_1_12_SCL_BASE 0x2 +#define SFP_37_54_SCL_BASE 0x5 +#define QSFP_I2C_ENABLE_BASE 0x17 +#define GET_BIT(data, bit, value) value = (data >> bit) & 0x1 +#define SET_BIT(data, bit) data |= (1 << bit) +#define CLEAR_BIT(data, bit) data &= ~(1 << bit) + +enum cpld_croups { cpld_group_a, cpld_group_b, cpld_group_c}; + +static const unsigned short normal_i2c[] = { SFP_EEPROM_A0_ADDR, SFP_EEPROM_A2_ADDR, I2C_CLIENT_END }; +static char *SFP_CPLD_GROUPA_MAPPING[CPLDA_SFP_NUM][16]={0}; +static char *SFP_CPLD_GROUPB_MAPPING[CPLDB_SFP_NUM][16]={0}; +static char *SFP_CPLD_GROUPC_MAPPING[CPLDC_SFP_NUM][16]={0}; + +/* + * This parameter is to help this driver avoid blocking other drivers out + * of I2C for potentially troublesome amounts of time. With a 100 kHz I2C + * clock, one 256 byte read takes about 1/43 second which is excessive; + * but the 1/170 second it takes at 400 kHz may be quite reasonable; and + * at 1 MHz (Fm+) a 1/430 second delay could easily be invisible. + * + * This value is forced to be a power of two so that writes align on pages. + */ +static unsigned io_limit = 128; +module_param(io_limit, uint, 0); +MODULE_PARM_DESC(io_limit, "Maximum bytes per I/O (default 128)"); + +/* + * Specs often allow 5 msec for a page write, sometimes 20 msec; + * it's important to recover from write timeouts. 
+ */ +static unsigned write_timeout = 25; +module_param(write_timeout, uint, 0); +MODULE_PARM_DESC(write_timeout, "Time (in ms) to try writes (default 25)"); + + +struct porsche_sfp_data { + struct mutex lock; + struct bin_attribute bin; + int use_smbus; + kernel_ulong_t driver_data; + + struct i2c_client *client; +}; + +extern int pegatron_porsche_cpld_read(unsigned short cpld_addr, u8 reg); +extern int pegatron_porsche_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +static ssize_t porsche_sfp_eeprom_read(struct porsche_sfp_data *data, char *buf, + unsigned offset, size_t count) +{ + struct i2c_msg msg[2]; + u8 msgbuf[2]; + struct i2c_client *client = data->client; + unsigned long timeout, read_time; + int status; + + memset(msg, 0, sizeof(msg)); + + if (count > io_limit) + count = io_limit; + + /* Smaller eeproms can work given some SMBus extension calls */ + if (count > I2C_SMBUS_BLOCK_MAX) + count = I2C_SMBUS_BLOCK_MAX; + + /* + * Reads fail if the previous write didn't complete yet. We may + * loop a few times until this one succeeds, waiting at least + * long enough for one entire page write to work. 
+ */ + timeout = jiffies + msecs_to_jiffies(write_timeout); + do { + read_time = jiffies; + switch (data->use_smbus) { + case I2C_SMBUS_I2C_BLOCK_DATA: + status = i2c_smbus_read_i2c_block_data(client, offset, + count, buf); + break; + case I2C_SMBUS_WORD_DATA: + status = i2c_smbus_read_word_data(client, offset); + if (status >= 0) { + buf[0] = status & 0xff; + if (count == 2) + buf[1] = status >> 8; + status = count; + } + break; + case I2C_SMBUS_BYTE_DATA: + status = i2c_smbus_read_byte_data(client, offset); + if (status >= 0) { + buf[0] = status; + status = count; + } + break; + default: + status = i2c_transfer(client->adapter, msg, 2); + if (status == 2) + status = count; + } + dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n", + count, offset, status, jiffies); + + if (status == count) + return count; + + /* REVISIT: at HZ=100, this is sloooow */ + msleep(1); + } while (time_before(read_time, timeout)); + + return -ETIMEDOUT; +} + +static ssize_t porsche_sfp_read(struct porsche_sfp_data *data, + char *buf, loff_t off, size_t count) +{ + ssize_t retval = 0; + + if (unlikely(!count)) + return count; + + /* + * Read data from chip, protecting against concurrent updates + * from this host, but not from other I2C masters. 
+ */ + mutex_lock(&data->lock); + + while (count) { + ssize_t status; + + status = porsche_sfp_eeprom_read(data, buf, off, count); + if (status <= 0) { + if (retval == 0) + retval = status; + break; + } + buf += status; + off += status; + count -= status; + retval += status; + } + + mutex_unlock(&data->lock); + + return retval; +} + +static ssize_t +porsche_sfp_bin_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + int i; + u8 cpldData = 0; + struct porsche_sfp_data *data; + + /*SFP 1-12*/ + for(i=0; iattr.name, SFP_CPLD_GROUPB_MAPPING[i])) + { + pegatron_porsche_cpld_write(CPLDB_ADDRESS, SFP_1_12_SCL_BASE, i+1); + goto check_done; + } + } + /*SFP 13-36*/ + for(i=0; iattr.name, SFP_CPLD_GROUPA_MAPPING[i])) + { + pegatron_porsche_cpld_write(CPLDA_ADDRESS, SFP_13_36_SCL_BASE, i+1); + goto check_done; + } + } + + /*SFP 37-54*/ + for(i=0; iattr.name, SFP_CPLD_GROUPC_MAPPING[i])) + { + /* Enable QSFP i2c function */ + if(i >= 12) + { + cpldData = 0xff; + cpldData = pegatron_porsche_cpld_read(CPLDC_ADDRESS, QSFP_I2C_ENABLE_BASE); + CLEAR_BIT(cpldData, i-12); + pegatron_porsche_cpld_write(CPLDC_ADDRESS, QSFP_I2C_ENABLE_BASE, cpldData); + } + pegatron_porsche_cpld_write(CPLDC_ADDRESS, SFP_37_54_SCL_BASE, i+1); + goto check_done; + } + } + +check_done: + data = dev_get_drvdata(container_of(kobj, struct device, kobj)); + + return porsche_sfp_read(data, buf, off, count); +} + +#define SFP_EEPROM_ATTR(_num) \ + static struct bin_attribute sfp##_num##_eeprom_attr = { \ + .attr = { \ + .name = __stringify(sfp##_num##_eeprom), \ + .mode = S_IRUGO\ + }, \ + .size = SFP_EEPROM_SIZE, \ + .read = porsche_sfp_bin_read, \ + } + +SFP_EEPROM_ATTR(1);SFP_EEPROM_ATTR(2);SFP_EEPROM_ATTR(3);SFP_EEPROM_ATTR(4);SFP_EEPROM_ATTR(5);SFP_EEPROM_ATTR(6);SFP_EEPROM_ATTR(7);SFP_EEPROM_ATTR(8);SFP_EEPROM_ATTR(9); 
+SFP_EEPROM_ATTR(10);SFP_EEPROM_ATTR(11);SFP_EEPROM_ATTR(12);SFP_EEPROM_ATTR(13);SFP_EEPROM_ATTR(14);SFP_EEPROM_ATTR(15);SFP_EEPROM_ATTR(16);SFP_EEPROM_ATTR(17);SFP_EEPROM_ATTR(18); +SFP_EEPROM_ATTR(19);SFP_EEPROM_ATTR(20);SFP_EEPROM_ATTR(21);SFP_EEPROM_ATTR(22);SFP_EEPROM_ATTR(23);SFP_EEPROM_ATTR(24);SFP_EEPROM_ATTR(25);SFP_EEPROM_ATTR(26);SFP_EEPROM_ATTR(27); +SFP_EEPROM_ATTR(28);SFP_EEPROM_ATTR(29);SFP_EEPROM_ATTR(30);SFP_EEPROM_ATTR(31);SFP_EEPROM_ATTR(32);SFP_EEPROM_ATTR(33);SFP_EEPROM_ATTR(34);SFP_EEPROM_ATTR(35);SFP_EEPROM_ATTR(36); +SFP_EEPROM_ATTR(37);SFP_EEPROM_ATTR(38);SFP_EEPROM_ATTR(39);SFP_EEPROM_ATTR(40);SFP_EEPROM_ATTR(41);SFP_EEPROM_ATTR(42);SFP_EEPROM_ATTR(43);SFP_EEPROM_ATTR(44);SFP_EEPROM_ATTR(45); +SFP_EEPROM_ATTR(46);SFP_EEPROM_ATTR(47);SFP_EEPROM_ATTR(48);SFP_EEPROM_ATTR(49);SFP_EEPROM_ATTR(50);SFP_EEPROM_ATTR(51);SFP_EEPROM_ATTR(52);SFP_EEPROM_ATTR(53);SFP_EEPROM_ATTR(54); + +static struct bin_attribute *porsche_cpldA_sfp_epprom_attributes[] = { + &sfp13_eeprom_attr, &sfp14_eeprom_attr, &sfp15_eeprom_attr, &sfp16_eeprom_attr, &sfp17_eeprom_attr, &sfp18_eeprom_attr, &sfp19_eeprom_attr, &sfp20_eeprom_attr, + &sfp21_eeprom_attr, &sfp22_eeprom_attr, &sfp23_eeprom_attr, &sfp24_eeprom_attr, &sfp25_eeprom_attr, &sfp26_eeprom_attr, &sfp27_eeprom_attr, &sfp28_eeprom_attr, + &sfp29_eeprom_attr, &sfp30_eeprom_attr, &sfp31_eeprom_attr, &sfp32_eeprom_attr, &sfp33_eeprom_attr, &sfp34_eeprom_attr, &sfp35_eeprom_attr, &sfp36_eeprom_attr, + NULL +}; + +static struct bin_attribute *porsche_cpldB_sfp_epprom_attributes[] = { + &sfp1_eeprom_attr, &sfp2_eeprom_attr, &sfp3_eeprom_attr, &sfp4_eeprom_attr, &sfp5_eeprom_attr, &sfp6_eeprom_attr, &sfp7_eeprom_attr, &sfp8_eeprom_attr, + &sfp9_eeprom_attr, &sfp10_eeprom_attr, &sfp11_eeprom_attr, &sfp12_eeprom_attr, + NULL +}; + +static struct bin_attribute *porsche_cpldC_sfp_epprom_attributes[] = { + &sfp37_eeprom_attr, &sfp38_eeprom_attr, &sfp39_eeprom_attr, &sfp40_eeprom_attr, &sfp41_eeprom_attr, &sfp42_eeprom_attr, 
&sfp43_eeprom_attr, &sfp44_eeprom_attr, + &sfp45_eeprom_attr, &sfp46_eeprom_attr, &sfp47_eeprom_attr, &sfp48_eeprom_attr, &sfp49_eeprom_attr, &sfp50_eeprom_attr, &sfp51_eeprom_attr, &sfp52_eeprom_attr, + &sfp53_eeprom_attr, &sfp54_eeprom_attr, + NULL +}; + +static const struct attribute_group porsche_sfpA_group = { .bin_attrs = porsche_cpldA_sfp_epprom_attributes}; +static const struct attribute_group porsche_sfpB_group = { .bin_attrs = porsche_cpldB_sfp_epprom_attributes}; +static const struct attribute_group porsche_sfpC_group = { .bin_attrs = porsche_cpldC_sfp_epprom_attributes}; + +static int porsche_sfp_device_probe(struct i2c_client *client, const struct i2c_device_id *dev_id) +{ + int use_smbus = SFP_EEPROM_BUS_TYPE; + struct porsche_sfp_data *data; + int err, i; + unsigned num_addresses; + kernel_ulong_t magic; + + data = kzalloc(sizeof(struct porsche_sfp_data) , GFP_KERNEL); + if (!data) + return -ENOMEM; + + mutex_init(&data->lock); + data->use_smbus = use_smbus; + /* + * Export the EEPROM bytes through sysfs, since that's convenient. 
+ * By default, only root should see the data (maybe passwords etc) + */ + + data->client = client; + data->driver_data = dev_id->driver_data; + + sysfs_bin_attr_init(&data->bin); + + switch(dev_id->driver_data) + { + case cpld_group_a: + err = sysfs_create_group(&client->dev.kobj, &porsche_sfpA_group); + if (err) + goto err_clients; + break; + case cpld_group_b: + err = sysfs_create_group(&client->dev.kobj, &porsche_sfpB_group); + if (err) + goto err_clients; + break; + case cpld_group_c: + err = sysfs_create_group(&client->dev.kobj, &porsche_sfpC_group); + if (err) + goto err_clients; + break; + default: + printk(KERN_ALERT "i2c_check_CPLD failed\n"); + err = -EIO; + break; + } + + i2c_set_clientdata(client, data); + + return 0; + +err_clients: + kfree(data); + return err; +} + +static int porsche_sfp_device_remove(struct i2c_client *client) +{ + struct porsche_sfp_data *data; + int i; + + data = i2c_get_clientdata(client); + + switch(data->driver_data) + { + case cpld_group_a: + sysfs_remove_group(&client->dev.kobj, &porsche_sfpA_group); + break; + case cpld_group_b: + sysfs_remove_group(&client->dev.kobj, &porsche_sfpB_group); + break; + case cpld_group_c: + sysfs_remove_group(&client->dev.kobj, &porsche_sfpC_group); + break; + default: + dev_dbg(&client->dev, "i2c_remove_CPLD failed (0x%x)\n", client->addr); + break; + } + + + return 0; +} + +static const struct i2c_device_id porsche_sfp_id[] = { + { "porsche_sfpA", cpld_group_a }, + { "porsche_sfpB", cpld_group_b }, + { "porsche_sfpC", cpld_group_c }, + {} +}; +MODULE_DEVICE_TABLE(i2c, porsche_sfp_id); + +static struct i2c_driver porsche_sfp_driver = { + .driver = { + .name = "pegatron_porsche_sfp", + }, + .probe = porsche_sfp_device_probe, + .remove = porsche_sfp_device_remove, + .id_table = porsche_sfp_id, + .address_list = normal_i2c, +}; + +static int __init porsche_sfp_init(void) +{ + int i; + + /*SFP 1-12*/ + for(i=0; i"); +MODULE_DESCRIPTION("porsche_cpld_mux driver"); +MODULE_LICENSE("GPL"); + 
+module_init(porsche_sfp_init); +module_exit(porsche_sfp_exit); + diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/scripts/sensors b/platform/nephos/sonic-platform-modules-pegatron/porsche/scripts/sensors new file mode 100755 index 000000000000..7f9426a0c5ec --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/scripts/sensors @@ -0,0 +1,7 @@ +#!/bin/bash +docker exec -i pmon sensors "$@" + +#To probe sensors not part of lm-sensors +if [ -r /usr/local/bin/porsche_sensors.py ]; then + python /usr/local/bin/porsche_sensors.py get_sensors +fi diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/service/porsche-platform-init.service b/platform/nephos/sonic-platform-modules-pegatron/porsche/service/porsche-platform-init.service new file mode 100644 index 000000000000..8e6f4344715f --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/service/porsche-platform-init.service @@ -0,0 +1,13 @@ +[Unit] +Description=Pegastron porsche Platform initialization service +After=local-fs.target +DefaultDependencies=no + +[Service] +Type=oneshot +ExecStart=/usr/local/bin/pegatron_porsche_util.py install +ExecStop=/usr/local/bin/pegatron_porsche_util.py uninstall +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/pegatron_porsche_util.py b/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/pegatron_porsche_util.py new file mode 100755 index 000000000000..16662081d0cb --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/pegatron_porsche_util.py @@ -0,0 +1,209 @@ +#!/usr/bin/env python +# +# Copyright (C) 2018 Pegatron, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import sys, getopt +import logging +import os +import commands +import threading + +DEBUG = False + +SFP_MAX_NUM = 48 +CPLDA_SFP_NUM = 24 +CPLDB_SFP_NUM = 12 +CPLDC_SFP_NUM = 18 + +kernel_module = ['i2c_dev', 'i2c-mux-pca954x force_deselect_on_exit=1', 'at24', 'pegatron_porsche_cpld', 'pegatron_hwmon_mcu', 'pegatron_porsche_sfp'] +moduleID = ['pca9544', 'pca9544', '24c02', 'porsche_hwmon_mcu', 'porsche_cpld', 'porsche_cpld', 'porsche_cpld', 'porsche_sfpA', 'porsche_sfpB', 'porsche_sfpC'] +i2c_check_node = ['i2c-0', 'i2c-1'] +device_address = ['0x72', '0x73', '0x54', '0x70', '0x74', '0x75', '0x76', '0x50', '0x50', '0x50'] +device_node= ['i2c-2', 'i2c-6', 'i2c-4', 'i2c-5', 'i2c-6', 'i2c-7', 'i2c-8', 'i2c-6', 'i2c-7', 'i2c-8'] + +i2c_prefix = '/sys/bus/i2c/devices/' +cpld_bus = ['6-0074', '7-0075', '8-0076'] +led_nodes = ['sys_led', 'pwr_led', 'loc_led', 'fan_led', "cpld_allled_ctrl", "serial_led_enable"] + +def dbg_print(string): + if DEBUG == True: + print string + return + +def do_cmd(cmd, show): + logging.info('Run :' + cmd) + status, output = commands.getstatusoutput(cmd) + dbg_print(cmd + "with result:" + str(status)) + dbg_print("output:" + output) + if status: + logging.info('Failed :' + cmd) + if show: + print('Failed :' + cmd) + return status, output + +def check_device_position(num): + for i in range(0, len(i2c_check_node)): + status, output = do_cmd("echo " + moduleID[num] + " " + device_address[num] + " > " + i2c_prefix + i2c_check_node[i] + "/new_device", 0) + status, output = do_cmd("ls " + i2c_prefix + device_node[num], 0) + device_node[num] = i2c_check_node[i] + + if status: + status, output = 
do_cmd("echo " + device_address[num] + " > " + i2c_prefix + i2c_check_node[i] + "/delete_device", 0) + else: + return + + return + +def install_device(): + for i in range(0, len(moduleID)): + if moduleID[i] == "pca9544": + check_device_position(i) + else: + status, output = do_cmd("echo " + moduleID[i] + " " + device_address[i] + " > " + i2c_prefix + device_node[i] + "/new_device", 1) + + return + +def check_driver(): + for i in range(0, len(kernel_module)): + status, output = do_cmd("lsmod | grep " + kernel_module[i], 0) + if status: + status, output = do_cmd("modprobe " + kernel_module[i], 1) + + return + +def do_install(): + status, output = do_cmd("depmod -a", 1) + + check_driver() + install_device() + + return + +def do_uninstall(): + for i in range(0, len(kernel_module)): + status, output = do_cmd("modprobe -r " + kernel_module[i], 1) + + for i in range(0, len(moduleID)): + status, output = do_cmd("echo " + device_address[i] + " > " + i2c_prefix + i2c_check_node[i] + "/delete_device", 0) + + return + +led_command = {'sys_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'pwr_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'loc_led': {'on':'0', 'off':'1', 'blink':'2'}, + 'fan_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'cpld_allled_ctrl': {'off':'0', 'mix':'1', 'amber':'2', 'normal':'3'}, + 'serial_led_enable': {'disable':'0', 'enable':'1'}} + +def set_led(args): + """ + Usage: %(scriptName)s set led object command + + object: + sys_led : set SYS led [command: off|green|amber|blink_green|blink_amber] + pwr_led : set PWR led [command: off|green|amber|blink_green|blink_amber] + loc_led : set LOCATOR led [command: off|on|blink] + fan_led : set FAN led [command: off|green|amber|blink_green|blink_amber] + """ + if args[0] not in led_command: + print set_led.__doc__ + sys.exit(0) + + for i in range(0,len(led_nodes)): + if args[0] == led_nodes[i]: + node = 
i2c_prefix + cpld_bus[1] + '/'+ led_nodes[i] + + command = led_command[args[0]] + data = command[args[1]] + + status, output = do_cmd("echo "+ str(data) + " > "+ node, 1) + + return + +def set_device(args): + """ + Usage: %(scriptName)s command object + + command: + led : set status led sys_led|pwr_led|loc_led|mst_led|fan_led|digit_led + """ + + if args[0] == 'led': + set_led(args[1:]) + return + else: + print set_device.__doc__ + + return + +device_init = {'led': [['led', 'sys_led', 'green'], ['led', 'pwr_led', 'green'], ['led', 'fan_led', 'green'], ['led', 'cpld_allled_ctrl', 'normal'], ['led', 'serial_led_enable', 'enable']]} + +def pega_init(): + #set led + for i in range(0,len(device_init['led'])): + set_device(device_init['led'][i]) + + #set tx_disable + for x in range(0, SFP_MAX_NUM-1): + if x < CPLDB_SFP_NUM: + bus = cpld_bus[1] + elif x < CPLDB_SFP_NUM + CPLDA_SFP_NUM: + bus = cpld_bus[0] + else: + bus = cpld_bus[2] + + nodes = i2c_prefix + bus + '/sfp' + str(x+1) + '_tx_disable' + dbg_print("SFP_TX_DISABLE NODES: " + nodes) + status, output = do_cmd("echo 0 > "+ nodes, 1) + + return + +def main(): + """ + Usage: %(scriptName)s command object + + command: + install : install drivers and generate related sysfs nodes + clean : uninstall drivers and remove related sysfs nodes + set : change board setting [led] + debug : debug info [on/off] + """ + + if len(sys.argv)<2: + print main.__doc__ + + for arg in sys.argv[1:]: + if arg == 'install': + do_install() + pega_init() + elif arg == 'uninstall': + do_uninstall() + elif arg == 'set': + if len(sys.argv[2:])<1: + print main.__doc__ + else: + set_device(sys.argv[2:]) + return + elif arg == 'debug': + if sys.argv[2] == 'on': + DEBUG = True + else: + DEBUG = False + else: + print main.__doc__ + +if __name__ == "__main__": + main() diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/porsche_sensors.py b/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/porsche_sensors.py new file 
mode 100755 index 000000000000..40e23ef01b7e --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/porsche_sensors.py @@ -0,0 +1,141 @@ +#!/usr/bin/python + +import os +import sys +import logging + +FAN_NUM = 5 +sensors_path = '/sys/bus/i2c/devices/5-0070/' +sensors_nodes = {'fan_rpm': ['_inner_rpm', '_outer_rpm'], + 'fan_vol': ['ADC8_vol', 'ADC7_vol','ADC6_vol', 'ADC5_vol','ADC4_vol', 'ADC3_vol'], + 'temp':['lm75_49_temp', 'lm75_48_temp', 'SA56004_local_temp','SA56004_remote_temp']} +sensors_type = {'fan_rpm': ['Inner RPM', 'Outer RPM'], + 'fan_vol': ['P0.2', 'P0.6','P0.1', 'P1.5','P0.7', 'P1.6'], + 'temp':['lm75_49_temp', 'lm75_48_temp', 'SA56004_local_temp','SA56004_remote_temp']} + +# Get sysfs attribute +def get_attr_value(attr_path): + retval = 'ERR' + if (not os.path.isfile(attr_path)): + return retval + + try: + with open(attr_path, 'r') as fd: + retval = fd.read() + except Exception as error: + logging.error("Unable to open ", attr_path, " file !") + + retval = retval.rstrip('\r\n') + fd.close() + return retval + +def get_fan_status(number): + attr_value = get_attr_value(sensors_path + "fan" + str(number+1) + "_present") + if (attr_value != 'ERR'): + attr_value = int(attr_value, 16) + + if(attr_value == 0): + string = "Connect" + else: + string = "Disconnect" + return string + +def get_fan_alert(number): + attr_value = get_attr_value(sensors_path + "fan" + str(number+1) + "_status_alert") + if (attr_value != 'ERR'): + attr_value = int(attr_value, 16) + + if(attr_value == 0): + string = "Normal" + else: + string = "Abnormal" + return string + +def get_fan_inner_rpm(number): + return get_attr_value(sensors_path + "fan" + str(number+1) + "_inner_rpm") + +def get_fan_outer_rpm(number): + return get_attr_value(sensors_path + "fan" + str(number+1) + "_outer_rpm") + +def get_fan(): + for i in range(0,FAN_NUM): + print " " + #status + string = get_fan_status(i) + print "FAN " + str(i+1) + ":" + ' ' + string + if string=='Disconnect': + 
continue + + #alert + string = get_fan_alert(i) + print " Status:"+ ' ' + string + + #inner rpm + string = get_fan_inner_rpm(i) + print " Inner RPM:"+ string.rjust(10) + ' RPM' + + #outer rpm + string = get_fan_outer_rpm(i) + print " Outer RPM:"+ string.rjust(10) + ' RPM' + + return + +def get_hwmon(): + print " " + string = get_attr_value(sensors_path + "lm75_48_temp") + print "Sensor A: " + string + " C" + + string = get_attr_value(sensors_path + "lm75_49_temp") + print "Sensor B: " + string + " C" + + return + +def get_voltage(): + print " " + nodes = sensors_nodes['fan_vol'] + types = sensors_type['fan_vol'] + for i in range(0,len(nodes)): + string = get_attr_value(sensors_path + nodes[i]) + print types[i] + ': ' + string + " V" + + return + +def init_fan(): + return + +def main(): + """ + Usage: %(scriptName)s command object + + command: + install : install drivers and generate related sysfs nodes + clean : uninstall drivers and remove related sysfs nodes + show : show all systen status + set : change board setting with fan|led|sfp + """ + + if len(sys.argv)<2: + print main.__doc__ + + for arg in sys.argv[1:]: + if arg == 'fan_init': + init_fan() + elif arg == 'get_sensors': + ver = get_attr_value(sensors_path + "fb_hw_version") + print 'HW Version: ' + ver + ver = get_attr_value(sensors_path + "fb_fw_version") + print 'SW Version: ' + ver + get_fan() + get_hwmon() + get_voltage() + elif arg == 'fan_set': + if len(sys.argv[1:])<1: + print main.__doc__ + else: + set_fan(sys.argv[1:]) + return + else: + print main.__doc__ + +if __name__ == "__main__": + main() From 82640449d469d62d0a94c83843751f07fc61202e Mon Sep 17 00:00:00 2001 From: PeterLin Date: Wed, 7 Nov 2018 09:22:55 +0800 Subject: [PATCH 02/20] Fix indentation issue --- .../porsche/port_config.ini | 110 +++++++++--------- 1 file changed, 55 insertions(+), 55 deletions(-) diff --git a/device/pegatron/x86_64-pegatron_porsche-r0/porsche/port_config.ini 
b/device/pegatron/x86_64-pegatron_porsche-r0/porsche/port_config.ini index cc4cf6d44388..15fc60375941 100755 --- a/device/pegatron/x86_64-pegatron_porsche-r0/porsche/port_config.ini +++ b/device/pegatron/x86_64-pegatron_porsche-r0/porsche/port_config.ini @@ -1,55 +1,55 @@ -# name lanes alias index speed -Ethernet0 8 Ethernet1/1 0 10000 -Ethernet1 9 Ethernet2/1 1 10000 -Ethernet2 10 Ethernet3/1 2 10000 -Ethernet3 11 Ethernet4/1 3 10000 -Ethernet4 12 Ethernet5/1 4 10000 -Ethernet5 13 Ethernet6/1 5 10000 -Ethernet6 14 Ethernet7/1 6 10000 -Ethernet7 15 Ethernet8/1 7 10000 -Ethernet8 16 Ethernet9/1 8 10000 -Ethernet9 17 Ethernet10/1 9 10000 -Ethernet10 18 Ethernet11/1 10 10000 -Ethernet11 19 Ethernet12/1 11 10000 -Ethernet12 20 Ethernet13/1 12 10000 -Ethernet13 21 Ethernet14/1 13 10000 -Ethernet14 22 Ethernet15/1 14 10000 -Ethernet15 23 Ethernet16/1 15 10000 -Ethernet16 32 Ethernet17/1 16 10000 -Ethernet17 33 Ethernet18/1 17 10000 -Ethernet18 34 Ethernet19/1 18 10000 -Ethernet19 35 Ethernet20/1 19 10000 -Ethernet20 40 Ethernet21/1 20 10000 -Ethernet21 41 Ethernet22/1 21 10000 -Ethernet22 42 Ethernet23/1 22 10000 -Ethernet23 43 Ethernet24/1 23 10000 -Ethernet24 48 Ethernet25/1 24 10000 -Ethernet25 49 Ethernet26/1 25 10000 -Ethernet26 50 Ethernet27/1 26 10000 -Ethernet27 51 Ethernet28/1 27 10000 -Ethernet28 56 Ethernet29/1 28 10000 -Ethernet29 57 Ethernet30/1 29 10000 -Ethernet30 58 Ethernet31/1 30 10000 -Ethernet31 59 Ethernet32/1 31 10000 -Ethernet32 64 Ethernet33/1 32 10000 -Ethernet33 65 Ethernet34/1 33 10000 -Ethernet34 66 Ethernet35/1 34 10000 -Ethernet35 67 Ethernet36/1 35 10000 -Ethernet36 68 Ethernet37/1 36 10000 -Ethernet37 69 Ethernet38/1 37 10000 -Ethernet38 70 Ethernet39/1 38 10000 -Ethernet39 71 Ethernet40/1 39 10000 -Ethernet40 72 Ethernet41/1 40 10000 -Ethernet41 73 Ethernet42/1 41 10000 -Ethernet42 74 Ethernet43/1 42 10000 -Ethernet43 75 Ethernet44/1 43 10000 -Ethernet44 76 Ethernet45/1 44 10000 -Ethernet45 77 Ethernet46/1 45 10000 -Ethernet46 78 
Ethernet47/1 46 10000 -Ethernet47 79 Ethernet48/1 47 10000 -Ethernet48 80,81,82,83 Ethernet49/1 48 100000 -Ethernet49 84,85,86,87 Ethernet50/1 49 100000 -Ethernet50 104,105,106,107 Ethernet51/1 50 100000 -Ethernet51 108,109,110,111 Ethernet52/1 51 100000 -Ethernet52 112,113,114,115 Ethernet53/1 52 100000 -Ethernet53 116,117,118,119 Ethernet54/1 53 100000 +#name lanes alias index speed +Ethernet0 8 Ethernet1/1 0 10000 +Ethernet1 9 Ethernet2/1 1 10000 +Ethernet2 10 Ethernet3/1 2 10000 +Ethernet3 11 Ethernet4/1 3 10000 +Ethernet4 12 Ethernet5/1 4 10000 +Ethernet5 13 Ethernet6/1 5 10000 +Ethernet6 14 Ethernet7/1 6 10000 +Ethernet7 15 Ethernet8/1 7 10000 +Ethernet8 16 Ethernet9/1 8 10000 +Ethernet9 17 Ethernet10/1 9 10000 +Ethernet10 18 Ethernet11/1 10 10000 +Ethernet11 19 Ethernet12/1 11 10000 +Ethernet12 20 Ethernet13/1 12 10000 +Ethernet13 21 Ethernet14/1 13 10000 +Ethernet14 22 Ethernet15/1 14 10000 +Ethernet15 23 Ethernet16/1 15 10000 +Ethernet16 32 Ethernet17/1 16 10000 +Ethernet17 33 Ethernet18/1 17 10000 +Ethernet18 34 Ethernet19/1 18 10000 +Ethernet19 35 Ethernet20/1 19 10000 +Ethernet20 40 Ethernet21/1 20 10000 +Ethernet21 41 Ethernet22/1 21 10000 +Ethernet22 42 Ethernet23/1 22 10000 +Ethernet23 43 Ethernet24/1 23 10000 +Ethernet24 48 Ethernet25/1 24 10000 +Ethernet25 49 Ethernet26/1 25 10000 +Ethernet26 50 Ethernet27/1 26 10000 +Ethernet27 51 Ethernet28/1 27 10000 +Ethernet28 56 Ethernet29/1 28 10000 +Ethernet29 57 Ethernet30/1 29 10000 +Ethernet30 58 Ethernet31/1 30 10000 +Ethernet31 59 Ethernet32/1 31 10000 +Ethernet32 64 Ethernet33/1 32 10000 +Ethernet33 65 Ethernet34/1 33 10000 +Ethernet34 66 Ethernet35/1 34 10000 +Ethernet35 67 Ethernet36/1 35 10000 +Ethernet36 68 Ethernet37/1 36 10000 +Ethernet37 69 Ethernet38/1 37 10000 +Ethernet38 70 Ethernet39/1 38 10000 +Ethernet39 71 Ethernet40/1 39 10000 +Ethernet40 72 Ethernet41/1 40 10000 +Ethernet41 73 Ethernet42/1 41 10000 +Ethernet42 74 Ethernet43/1 42 10000 +Ethernet43 75 Ethernet44/1 43 10000 +Ethernet44 76 
Ethernet45/1 44 10000 +Ethernet45 77 Ethernet46/1 45 10000 +Ethernet46 78 Ethernet47/1 46 10000 +Ethernet47 79 Ethernet48/1 47 10000 +Ethernet48 80,81,82,83 Ethernet49/1 48 100000 +Ethernet49 84,85,86,87 Ethernet50/1 49 100000 +Ethernet50 104,105,106,107 Ethernet51/1 50 100000 +Ethernet51 108,109,110,111 Ethernet52/1 51 100000 +Ethernet52 112,113,114,115 Ethernet53/1 52 100000 +Ethernet53 116,117,118,119 Ethernet54/1 53 100000 From 01eb31519ba532ef8bf6dd1aa6942553c8841cdc Mon Sep 17 00:00:00 2001 From: PeterLin Date: Wed, 7 Nov 2018 09:31:13 +0800 Subject: [PATCH 03/20] Fix indentation issue --- platform/nephos/one-image.mk | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/platform/nephos/one-image.mk b/platform/nephos/one-image.mk index c29dac5a9a18..bd9e2e851889 100644 --- a/platform/nephos/one-image.mk +++ b/platform/nephos/one-image.mk @@ -5,8 +5,8 @@ $(SONIC_ONE_IMAGE)_MACHINE = nephos $(SONIC_ONE_IMAGE)_IMAGE_TYPE = onie $(SONIC_ONE_IMAGE)_INSTALLS += $(NEPHOS_NPS_KERNEL) $(SONIC_ONE_IMAGE)_LAZY_INSTALLS += $(INGRASYS_S9130_32X_PLATFORM_MODULE) \ - $(INGRASYS_S9230_64X_PLATFORM_MODULE) \ - $(ACCTON_AS7116_54X_PLATFORM_MODULE) \ - $(PEGATRON_PORSCHE_PLATFORM_MODULE) + $(INGRASYS_S9230_64X_PLATFORM_MODULE) \ + $(ACCTON_AS7116_54X_PLATFORM_MODULE) \ + $(PEGATRON_PORSCHE_PLATFORM_MODULE) $(SONIC_ONE_IMAGE)_DOCKERS += $(SONIC_INSTALL_DOCKER_IMAGES) SONIC_INSTALLERS += $(SONIC_ONE_IMAGE) From a97bcb11fce46d961f88862787a7156a79bca29e Mon Sep 17 00:00:00 2001 From: PeterLin Date: Tue, 13 Nov 2018 17:00:15 +0800 Subject: [PATCH 04/20] remove minigraph --- .../x86_64-pegatron_porsche-r0/minigraph.xml | 1074 ----------------- 1 file changed, 1074 deletions(-) delete mode 100755 device/pegatron/x86_64-pegatron_porsche-r0/minigraph.xml diff --git a/device/pegatron/x86_64-pegatron_porsche-r0/minigraph.xml b/device/pegatron/x86_64-pegatron_porsche-r0/minigraph.xml deleted file mode 100755 index 2a4b6414d05a..000000000000 --- 
a/device/pegatron/x86_64-pegatron_porsche-r0/minigraph.xml +++ /dev/null @@ -1,1074 +0,0 @@ - - - - - - ARISTA01T0 - 10.0.0.33 - switch1 - 10.0.0.32 - 1 - 180 - 60 - - - switch1 - 10.0.0.0 - ARISTA01T2 - 10.0.0.1 - 1 - 180 - 60 - - - ARISTA02T0 - 10.0.0.35 - switch1 - 10.0.0.34 - 1 - 180 - 60 - - - switch1 - 10.0.0.2 - ARISTA02T2 - 10.0.0.3 - 1 - 180 - 60 - - - ARISTA03T0 - 10.0.0.37 - switch1 - 10.0.0.36 - 1 - 180 - 60 - - - switch1 - 10.0.0.4 - ARISTA03T2 - 10.0.0.5 - 1 - 180 - 60 - - - ARISTA04T0 - 10.0.0.39 - switch1 - 10.0.0.38 - 1 - 180 - 60 - - - switch1 - 10.0.0.6 - ARISTA04T2 - 10.0.0.7 - 1 - 180 - 60 - - - ARISTA05T0 - 10.0.0.41 - switch1 - 10.0.0.40 - 1 - 180 - 60 - - - switch1 - 10.0.0.8 - ARISTA05T2 - 10.0.0.9 - 1 - 180 - 60 - - - ARISTA06T0 - 10.0.0.43 - switch1 - 10.0.0.42 - 1 - 180 - 60 - - - switch1 - 10.0.0.10 - ARISTA06T2 - 10.0.0.11 - 1 - 180 - 60 - - - ARISTA07T0 - 10.0.0.45 - switch1 - 10.0.0.44 - 1 - 180 - 60 - - - switch1 - 10.0.0.12 - ARISTA07T2 - 10.0.0.13 - 1 - 180 - 60 - - - ARISTA08T0 - 10.0.0.47 - switch1 - 10.0.0.46 - 1 - 180 - 60 - - - switch1 - 10.0.0.14 - ARISTA08T2 - 10.0.0.15 - 1 - 180 - 60 - - - ARISTA09T0 - 10.0.0.49 - switch1 - 10.0.0.48 - 1 - 180 - 60 - - - switch1 - 10.0.0.16 - ARISTA09T2 - 10.0.0.17 - 1 - 180 - 60 - - - ARISTA10T0 - 10.0.0.51 - switch1 - 10.0.0.50 - 1 - 180 - 60 - - - switch1 - 10.0.0.18 - ARISTA10T2 - 10.0.0.19 - 1 - 180 - 60 - - - ARISTA11T0 - 10.0.0.53 - switch1 - 10.0.0.52 - 1 - 180 - 60 - - - switch1 - 10.0.0.20 - ARISTA11T2 - 10.0.0.21 - 1 - 180 - 60 - - - ARISTA12T0 - 10.0.0.55 - switch1 - 10.0.0.54 - 1 - 180 - 60 - - - switch1 - 10.0.0.22 - ARISTA12T2 - 10.0.0.23 - 1 - 180 - 60 - - - ARISTA13T0 - 10.0.0.57 - switch1 - 10.0.0.56 - 1 - 180 - 60 - - - switch1 - 10.0.0.24 - ARISTA13T2 - 10.0.0.25 - 1 - 180 - 60 - - - ARISTA14T0 - 10.0.0.59 - switch1 - 10.0.0.58 - 1 - 180 - 60 - - - switch1 - 10.0.0.26 - ARISTA14T2 - 10.0.0.27 - 1 - 180 - 60 - - - ARISTA15T0 - 10.0.0.61 - switch1 - 10.0.0.60 - 1 - 180 - 
60 - - - switch1 - 10.0.0.28 - ARISTA15T2 - 10.0.0.29 - 1 - 180 - 60 - - - ARISTA16T0 - 10.0.0.63 - switch1 - 10.0.0.62 - 1 - 180 - 60 - - - switch1 - 10.0.0.30 - ARISTA16T2 - 10.0.0.31 - 1 - 180 - 60 - - - - - 65100 - switch - - -
10.0.0.33
- - -
- -
10.0.0.1
- - -
- -
10.0.0.35
- - -
- -
10.0.0.3
- - -
- -
10.0.0.37
- - -
- -
10.0.0.5
- - -
- -
10.0.0.39
- - -
- -
10.0.0.7
- - -
- -
10.0.0.41
- - -
- -
10.0.0.9
- - -
- -
10.0.0.43
- - -
- -
10.0.0.11
- - -
- -
10.0.0.45
- - -
- -
10.0.0.13
- - -
- -
10.0.0.47
- - -
- -
10.0.0.15
- - -
- -
10.0.0.49
- - -
- -
10.0.0.17
- - -
- -
10.0.0.51
- - -
- -
10.0.0.19
- - -
- -
10.0.0.53
- - -
- -
10.0.0.21
- - -
- -
10.0.0.55
- - -
- -
10.0.0.23
- - -
- -
10.0.0.57
- - -
- -
10.0.0.25
- - -
- -
10.0.0.59
- - -
- -
10.0.0.27
- - -
- -
10.0.0.61
- - -
- -
10.0.0.29
- - -
- -
10.0.0.63
- - -
- -
10.0.0.31
- - -
-
- -
- - 64001 - ARISTA01T0 - - - - 65200 - ARISTA01T2 - - - - 64002 - ARISTA02T0 - - - - 65200 - ARISTA02T2 - - - - 64003 - ARISTA03T0 - - - - 65200 - ARISTA03T2 - - - - 64004 - ARISTA04T0 - - - - 65200 - ARISTA04T2 - - - - 64005 - ARISTA05T0 - - - - 65200 - ARISTA05T2 - - - - 64006 - ARISTA06T0 - - - - 65200 - ARISTA06T2 - - - - 64007 - ARISTA07T0 - - - - 65200 - ARISTA07T2 - - - - 64008 - ARISTA08T0 - - - - 65200 - ARISTA08T2 - - - - 64009 - ARISTA09T0 - - - - 65200 - ARISTA09T2 - - - - 64010 - ARISTA10T0 - - - - 65200 - ARISTA10T2 - - - - 64011 - ARISTA11T0 - - - - 65200 - ARISTA11T2 - - - - 64012 - ARISTA12T0 - - - - 65200 - ARISTA12T2 - - - - 64013 - ARISTA13T0 - - - - 65200 - ARISTA13T2 - - - - 64014 - ARISTA14T0 - - - - 65200 - ARISTA14T2 - - - - 64015 - ARISTA15T0 - - - - 65200 - ARISTA15T2 - - - - 64016 - ARISTA16T0 - - - - 65200 - ARISTA16T2 - - -
-
- - - - - - HostIP - Loopback0 - - 10.1.0.32/32 - - 10.1.0.32/32 - - - - - - - - switch - - - - - - Ethernet0 - 10.0.0.0/31 - - - - Ethernet4 - 10.0.0.2/31 - - - - Ethernet8 - 10.0.0.4/31 - - - - Ethernet12 - 10.0.0.6/31 - - - - Ethernet16 - 10.0.0.8/31 - - - - Ethernet20 - 10.0.0.10/31 - - - - Ethernet24 - 10.0.0.12/31 - - - - Ethernet28 - 10.0.0.14/31 - - - - Ethernet32 - 10.0.0.16/31 - - - - Ethernet36 - 10.0.0.18/31 - - - - Ethernet40 - 10.0.0.20/31 - - - - Ethernet44 - 10.0.0.22/31 - - - - Ethernet48 - 10.0.0.24/31 - - - - Ethernet52 - 10.0.0.26/31 - - - - Ethernet56 - 10.0.0.28/31 - - - - Ethernet60 - 10.0.0.30/31 - - - - Ethernet64 - 10.0.0.32/31 - - - - Ethernet68 - 10.0.0.34/31 - - - - Ethernet72 - 10.0.0.36/31 - - - - Ethernet76 - 10.0.0.38/31 - - - - Ethernet80 - 10.0.0.40/31 - - - - Ethernet84 - 10.0.0.42/31 - - - - Ethernet88 - 10.0.0.44/31 - - - - Ethernet92 - 10.0.0.46/31 - - - - Ethernet96 - 10.0.0.48/31 - - - - Ethernet100 - 10.0.0.50/31 - - - - Ethernet104 - 10.0.0.52/31 - - - - Ethernet108 - 10.0.0.54/31 - - - - Ethernet112 - 10.0.0.56/31 - - - - Ethernet116 - 10.0.0.58/31 - - - - Ethernet120 - 10.0.0.60/31 - - - - Ethernet124 - 10.0.0.62/31 - - - - - - - - - - - - DeviceInterfaceLink - switch1 - Ethernet0 - ARISTA01T2 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet4 - ARISTA02T2 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet8 - ARISTA03T2 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet12 - ARISTA04T2 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet16 - ARISTA05T2 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet20 - ARISTA06T2 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet24 - ARISTA07T2 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet28 - ARISTA08T2 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet32 - ARISTA09T2 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet36 - ARISTA10T2 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet40 - ARISTA11T2 - 
Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet44 - ARISTA12T2 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet48 - ARISTA13T2 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet52 - ARISTA14T2 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet56 - ARISTA15T2 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet60 - ARISTA16T2 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet64 - ARISTA01T0 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet68 - ARISTA02T0 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet72 - ARISTA03T0 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet76 - ARISTA04T0 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet80 - ARISTA05T0 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet84 - ARISTA06T0 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet88 - ARISTA07T0 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet92 - ARISTA08T0 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet96 - ARISTA09T0 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet100 - ARISTA10T0 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet104 - ARISTA11T0 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet108 - ARISTA12T0 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet112 - ARISTA13T0 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet116 - ARISTA14T0 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet120 - ARISTA15T0 - Ethernet1 - - - DeviceInterfaceLink - switch1 - Ethernet124 - ARISTA16T0 - Ethernet1 - - - - - switch - porsche - - - - - - - switch1 - - - DhcpResources - - - - - NtpResources - - 0.debian.pool.ntp.org;1.debian.pool.ntp.org;2.debian.pool.ntp.org;3.debian.pool.ntp.org - - - SyslogResources - - - - - - - - - switch - porsche -
From f3b4eaa2b4b6e67c010e1fc79e9f592da1447511 Mon Sep 17 00:00:00 2001 From: PeterLin Date: Thu, 15 Nov 2018 15:30:49 +0800 Subject: [PATCH 05/20] Upgrade kernel to 3.16.57-2 --- platform/nephos/sonic-platform-modules-pegatron/debian/control | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/platform/nephos/sonic-platform-modules-pegatron/debian/control b/platform/nephos/sonic-platform-modules-pegatron/debian/control index 18e74be1455d..aa87c251f9c1 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/debian/control +++ b/platform/nephos/sonic-platform-modules-pegatron/debian/control @@ -7,6 +7,6 @@ Standards-Version: 3.9.3 Package: sonic-platform-pegatron-porsche Architecture: amd64 -Depends: linux-image-3.16.0-5-amd64 +Depends: linux-image-3.16.0-6-amd64 Description: kernel modules for platform devices such as fan, led, sfp From dba7940b19ba49e5c80f74774bf3e20f4a6fad05 Mon Sep 17 00:00:00 2001 From: PeterLin Date: Thu, 15 Nov 2018 15:35:54 +0800 Subject: [PATCH 06/20] Upgrade Kernel to 4.9.11 --- platform/nephos/sonic-platform-modules-pegatron/debian/control | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/platform/nephos/sonic-platform-modules-pegatron/debian/control b/platform/nephos/sonic-platform-modules-pegatron/debian/control index aa87c251f9c1..2c6d630da51e 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/debian/control +++ b/platform/nephos/sonic-platform-modules-pegatron/debian/control @@ -7,6 +7,6 @@ Standards-Version: 3.9.3 Package: sonic-platform-pegatron-porsche Architecture: amd64 -Depends: linux-image-3.16.0-6-amd64 +Depends: linux-image-4.9.0-7-amd64 Description: kernel modules for platform devices such as fan, led, sfp From c08f283c5cd30b249cc7a740792fc30a14f19bae Mon Sep 17 00:00:00 2001 From: PeterLin Date: Wed, 30 Jan 2019 11:20:18 +0800 Subject: [PATCH 07/20] 1. fix build error and change kernel to 4.9.08 2. modify port speed setting to 25G 3. 
modify pegatron uninstall --- .../plugins/sfputil.py | 15 ++- .../porsche/port_config.ini | 96 +++++++++--------- .../porsche/tau-porsche.dsh | 98 +++++++++---------- platform/nephos/one-image.mk | 3 +- platform/nephos/platform-modules-pegatron.mk | 2 +- platform/nephos/rules.mk | 1 + .../debian/control | 2 +- .../porsche/modules/pegatron_porsche_sfp.c | 6 +- .../porsche/utils/pegatron_porsche_util.py | 24 ++++- 9 files changed, 138 insertions(+), 109 deletions(-) diff --git a/device/pegatron/x86_64-pegatron_porsche-r0/plugins/sfputil.py b/device/pegatron/x86_64-pegatron_porsche-r0/plugins/sfputil.py index 28909f00110c..9238d0f06fde 100755 --- a/device/pegatron/x86_64-pegatron_porsche-r0/plugins/sfputil.py +++ b/device/pegatron/x86_64-pegatron_porsche-r0/plugins/sfputil.py @@ -187,9 +187,10 @@ def read_porttab_mappings(self, porttabfile): fp_port_index = portname.split("Ethernet").pop() fp_port_index = int(fp_port_index.split("s").pop(0))/4 - - if ((len(self.sfp_ports) > 0) and (fp_port_index not in self.sfp_ports)): - continue + + #Peter remove - 2018.04.13, this will cause can't show qsfp module when sfp_pot was set + #if ((len(self.sfp_ports) > 0) and (fp_port_index not in self.sfp_ports)): + #continue if first == 1: # Initialize last_[physical|logical]_port @@ -233,6 +234,14 @@ def read_porttab_mappings(self, porttabfile): print "logical to physical: " + self.logical_to_physical print "physical to logical: " + self.physical_to_logical """ + + def get_transceiver_change_event(self): + """ + TODO: This function need to be implemented + when decide to support monitoring SFP(Xcvrd) + on this platform. 
+ """ + raise NotImplementedError diff --git a/device/pegatron/x86_64-pegatron_porsche-r0/porsche/port_config.ini b/device/pegatron/x86_64-pegatron_porsche-r0/porsche/port_config.ini index 15fc60375941..405e44cd9e0c 100755 --- a/device/pegatron/x86_64-pegatron_porsche-r0/porsche/port_config.ini +++ b/device/pegatron/x86_64-pegatron_porsche-r0/porsche/port_config.ini @@ -1,52 +1,52 @@ #name lanes alias index speed -Ethernet0 8 Ethernet1/1 0 10000 -Ethernet1 9 Ethernet2/1 1 10000 -Ethernet2 10 Ethernet3/1 2 10000 -Ethernet3 11 Ethernet4/1 3 10000 -Ethernet4 12 Ethernet5/1 4 10000 -Ethernet5 13 Ethernet6/1 5 10000 -Ethernet6 14 Ethernet7/1 6 10000 -Ethernet7 15 Ethernet8/1 7 10000 -Ethernet8 16 Ethernet9/1 8 10000 -Ethernet9 17 Ethernet10/1 9 10000 -Ethernet10 18 Ethernet11/1 10 10000 -Ethernet11 19 Ethernet12/1 11 10000 -Ethernet12 20 Ethernet13/1 12 10000 -Ethernet13 21 Ethernet14/1 13 10000 -Ethernet14 22 Ethernet15/1 14 10000 -Ethernet15 23 Ethernet16/1 15 10000 -Ethernet16 32 Ethernet17/1 16 10000 -Ethernet17 33 Ethernet18/1 17 10000 -Ethernet18 34 Ethernet19/1 18 10000 -Ethernet19 35 Ethernet20/1 19 10000 -Ethernet20 40 Ethernet21/1 20 10000 -Ethernet21 41 Ethernet22/1 21 10000 -Ethernet22 42 Ethernet23/1 22 10000 -Ethernet23 43 Ethernet24/1 23 10000 -Ethernet24 48 Ethernet25/1 24 10000 -Ethernet25 49 Ethernet26/1 25 10000 -Ethernet26 50 Ethernet27/1 26 10000 -Ethernet27 51 Ethernet28/1 27 10000 -Ethernet28 56 Ethernet29/1 28 10000 -Ethernet29 57 Ethernet30/1 29 10000 -Ethernet30 58 Ethernet31/1 30 10000 -Ethernet31 59 Ethernet32/1 31 10000 -Ethernet32 64 Ethernet33/1 32 10000 -Ethernet33 65 Ethernet34/1 33 10000 -Ethernet34 66 Ethernet35/1 34 10000 -Ethernet35 67 Ethernet36/1 35 10000 -Ethernet36 68 Ethernet37/1 36 10000 -Ethernet37 69 Ethernet38/1 37 10000 -Ethernet38 70 Ethernet39/1 38 10000 -Ethernet39 71 Ethernet40/1 39 10000 -Ethernet40 72 Ethernet41/1 40 10000 -Ethernet41 73 Ethernet42/1 41 10000 -Ethernet42 74 Ethernet43/1 42 10000 -Ethernet43 75 
Ethernet44/1 43 10000 -Ethernet44 76 Ethernet45/1 44 10000 -Ethernet45 77 Ethernet46/1 45 10000 -Ethernet46 78 Ethernet47/1 46 10000 -Ethernet47 79 Ethernet48/1 47 10000 +Ethernet0 8 Ethernet1/1 0 25000 +Ethernet1 9 Ethernet2/1 1 25000 +Ethernet2 10 Ethernet3/1 2 25000 +Ethernet3 11 Ethernet4/1 3 25000 +Ethernet4 12 Ethernet5/1 4 25000 +Ethernet5 13 Ethernet6/1 5 25000 +Ethernet6 14 Ethernet7/1 6 25000 +Ethernet7 15 Ethernet8/1 7 25000 +Ethernet8 16 Ethernet9/1 8 25000 +Ethernet9 17 Ethernet10/1 9 25000 +Ethernet10 18 Ethernet11/1 10 25000 +Ethernet11 19 Ethernet12/1 11 25000 +Ethernet12 20 Ethernet13/1 12 25000 +Ethernet13 21 Ethernet14/1 13 25000 +Ethernet14 22 Ethernet15/1 14 25000 +Ethernet15 23 Ethernet16/1 15 25000 +Ethernet16 32 Ethernet17/1 16 25000 +Ethernet17 33 Ethernet18/1 17 25000 +Ethernet18 34 Ethernet19/1 18 25000 +Ethernet19 35 Ethernet20/1 19 25000 +Ethernet20 40 Ethernet21/1 20 25000 +Ethernet21 41 Ethernet22/1 21 25000 +Ethernet22 42 Ethernet23/1 22 25000 +Ethernet23 43 Ethernet24/1 23 25000 +Ethernet24 48 Ethernet25/1 24 25000 +Ethernet25 49 Ethernet26/1 25 25000 +Ethernet26 50 Ethernet27/1 26 25000 +Ethernet27 51 Ethernet28/1 27 25000 +Ethernet28 56 Ethernet29/1 28 25000 +Ethernet29 57 Ethernet30/1 29 25000 +Ethernet30 58 Ethernet31/1 30 25000 +Ethernet31 59 Ethernet32/1 31 25000 +Ethernet32 64 Ethernet33/1 32 25000 +Ethernet33 65 Ethernet34/1 33 25000 +Ethernet34 66 Ethernet35/1 34 25000 +Ethernet35 67 Ethernet36/1 35 25000 +Ethernet36 68 Ethernet37/1 36 25000 +Ethernet37 69 Ethernet38/1 37 25000 +Ethernet38 70 Ethernet39/1 38 25000 +Ethernet39 71 Ethernet40/1 39 25000 +Ethernet40 72 Ethernet41/1 40 25000 +Ethernet41 73 Ethernet42/1 41 25000 +Ethernet42 74 Ethernet43/1 42 25000 +Ethernet43 75 Ethernet44/1 43 25000 +Ethernet44 76 Ethernet45/1 44 25000 +Ethernet45 77 Ethernet46/1 45 25000 +Ethernet46 78 Ethernet47/1 46 25000 +Ethernet47 79 Ethernet48/1 47 25000 Ethernet48 80,81,82,83 Ethernet49/1 48 100000 Ethernet49 84,85,86,87 Ethernet50/1 49 
100000 Ethernet50 104,105,106,107 Ethernet51/1 50 100000 diff --git a/device/pegatron/x86_64-pegatron_porsche-r0/porsche/tau-porsche.dsh b/device/pegatron/x86_64-pegatron_porsche-r0/porsche/tau-porsche.dsh index b370fe83b837..5e6d4cd5a6dd 100755 --- a/device/pegatron/x86_64-pegatron_porsche-r0/porsche/tau-porsche.dsh +++ b/device/pegatron/x86_64-pegatron_porsche-r0/porsche/tau-porsche.dsh @@ -1,52 +1,52 @@ init start stage unit=0 low-level -init set port-map unit=0 port=0 eth-macro=2 lane=0 max-speed=10g active=true -init set port-map unit=0 port=1 eth-macro=2 lane=1 max-speed=10g active=true -init set port-map unit=0 port=2 eth-macro=2 lane=2 max-speed=10g active=true -init set port-map unit=0 port=3 eth-macro=2 lane=3 max-speed=10g active=true -init set port-map unit=0 port=4 eth-macro=3 lane=0 max-speed=10g active=true -init set port-map unit=0 port=5 eth-macro=3 lane=1 max-speed=10g active=true -init set port-map unit=0 port=6 eth-macro=3 lane=2 max-speed=10g active=true -init set port-map unit=0 port=7 eth-macro=3 lane=3 max-speed=10g active=true -init set port-map unit=0 port=8 eth-macro=4 lane=0 max-speed=10g active=true -init set port-map unit=0 port=9 eth-macro=4 lane=1 max-speed=10g active=true -init set port-map unit=0 port=10 eth-macro=4 lane=2 max-speed=10g active=true -init set port-map unit=0 port=11 eth-macro=4 lane=3 max-speed=10g active=true -init set port-map unit=0 port=12 eth-macro=5 lane=0 max-speed=10g active=true -init set port-map unit=0 port=13 eth-macro=5 lane=1 max-speed=10g active=true -init set port-map unit=0 port=14 eth-macro=5 lane=2 max-speed=10g active=true -init set port-map unit=0 port=15 eth-macro=5 lane=3 max-speed=10g active=true -init set port-map unit=0 port=16 eth-macro=8 lane=0 max-speed=10g active=true -init set port-map unit=0 port=17 eth-macro=8 lane=1 max-speed=10g active=true -init set port-map unit=0 port=18 eth-macro=8 lane=2 max-speed=10g active=true -init set port-map unit=0 port=19 eth-macro=8 lane=3 
max-speed=10g active=true -init set port-map unit=0 port=20 eth-macro=10 lane=0 max-speed=10g active=true -init set port-map unit=0 port=21 eth-macro=10 lane=1 max-speed=10g active=true -init set port-map unit=0 port=22 eth-macro=10 lane=2 max-speed=10g active=true -init set port-map unit=0 port=23 eth-macro=10 lane=3 max-speed=10g active=true -init set port-map unit=0 port=24 eth-macro=12 lane=0 max-speed=10g active=true -init set port-map unit=0 port=25 eth-macro=12 lane=1 max-speed=10g active=true -init set port-map unit=0 port=26 eth-macro=12 lane=2 max-speed=10g active=true -init set port-map unit=0 port=27 eth-macro=12 lane=3 max-speed=10g active=true -init set port-map unit=0 port=28 eth-macro=14 lane=0 max-speed=10g active=true -init set port-map unit=0 port=29 eth-macro=14 lane=1 max-speed=10g active=true -init set port-map unit=0 port=30 eth-macro=14 lane=2 max-speed=10g active=true -init set port-map unit=0 port=31 eth-macro=14 lane=3 max-speed=10g active=true -init set port-map unit=0 port=32 eth-macro=16 lane=0 max-speed=10g active=true -init set port-map unit=0 port=33 eth-macro=16 lane=1 max-speed=10g active=true -init set port-map unit=0 port=34 eth-macro=16 lane=2 max-speed=10g active=true -init set port-map unit=0 port=35 eth-macro=16 lane=3 max-speed=10g active=true -init set port-map unit=0 port=36 eth-macro=17 lane=0 max-speed=10g active=true -init set port-map unit=0 port=37 eth-macro=17 lane=1 max-speed=10g active=true -init set port-map unit=0 port=38 eth-macro=17 lane=2 max-speed=10g active=true -init set port-map unit=0 port=39 eth-macro=17 lane=3 max-speed=10g active=true -init set port-map unit=0 port=40 eth-macro=18 lane=0 max-speed=10g active=true -init set port-map unit=0 port=41 eth-macro=18 lane=1 max-speed=10g active=true -init set port-map unit=0 port=42 eth-macro=18 lane=2 max-speed=10g active=true -init set port-map unit=0 port=43 eth-macro=18 lane=3 max-speed=10g active=true -init set port-map unit=0 port=44 eth-macro=19 lane=0 
max-speed=10g active=true -init set port-map unit=0 port=45 eth-macro=19 lane=1 max-speed=10g active=true -init set port-map unit=0 port=46 eth-macro=19 lane=2 max-speed=10g active=true -init set port-map unit=0 port=47 eth-macro=19 lane=3 max-speed=10g active=true +init set port-map unit=0 port=0 eth-macro=2 lane=0 max-speed=25g active=true +init set port-map unit=0 port=1 eth-macro=2 lane=1 max-speed=25g active=true +init set port-map unit=0 port=2 eth-macro=2 lane=2 max-speed=25g active=true +init set port-map unit=0 port=3 eth-macro=2 lane=3 max-speed=25g active=true +init set port-map unit=0 port=4 eth-macro=3 lane=0 max-speed=25g active=true +init set port-map unit=0 port=5 eth-macro=3 lane=1 max-speed=25g active=true +init set port-map unit=0 port=6 eth-macro=3 lane=2 max-speed=25g active=true +init set port-map unit=0 port=7 eth-macro=3 lane=3 max-speed=25g active=true +init set port-map unit=0 port=8 eth-macro=4 lane=0 max-speed=25g active=true +init set port-map unit=0 port=9 eth-macro=4 lane=1 max-speed=25g active=true +init set port-map unit=0 port=10 eth-macro=4 lane=2 max-speed=25g active=true +init set port-map unit=0 port=11 eth-macro=4 lane=3 max-speed=25g active=true +init set port-map unit=0 port=12 eth-macro=5 lane=0 max-speed=25g active=true +init set port-map unit=0 port=13 eth-macro=5 lane=1 max-speed=25g active=true +init set port-map unit=0 port=14 eth-macro=5 lane=2 max-speed=25g active=true +init set port-map unit=0 port=15 eth-macro=5 lane=3 max-speed=25g active=true +init set port-map unit=0 port=16 eth-macro=8 lane=0 max-speed=25g active=true +init set port-map unit=0 port=17 eth-macro=8 lane=1 max-speed=25g active=true +init set port-map unit=0 port=18 eth-macro=8 lane=2 max-speed=25g active=true +init set port-map unit=0 port=19 eth-macro=8 lane=3 max-speed=25g active=true +init set port-map unit=0 port=20 eth-macro=10 lane=0 max-speed=25g active=true +init set port-map unit=0 port=21 eth-macro=10 lane=1 max-speed=25g active=true 
+init set port-map unit=0 port=22 eth-macro=10 lane=2 max-speed=25g active=true +init set port-map unit=0 port=23 eth-macro=10 lane=3 max-speed=25g active=true +init set port-map unit=0 port=24 eth-macro=12 lane=0 max-speed=25g active=true +init set port-map unit=0 port=25 eth-macro=12 lane=1 max-speed=25g active=true +init set port-map unit=0 port=26 eth-macro=12 lane=2 max-speed=25g active=true +init set port-map unit=0 port=27 eth-macro=12 lane=3 max-speed=25g active=true +init set port-map unit=0 port=28 eth-macro=14 lane=0 max-speed=25g active=true +init set port-map unit=0 port=29 eth-macro=14 lane=1 max-speed=25g active=true +init set port-map unit=0 port=30 eth-macro=14 lane=2 max-speed=25g active=true +init set port-map unit=0 port=31 eth-macro=14 lane=3 max-speed=25g active=true +init set port-map unit=0 port=32 eth-macro=16 lane=0 max-speed=25g active=true +init set port-map unit=0 port=33 eth-macro=16 lane=1 max-speed=25g active=true +init set port-map unit=0 port=34 eth-macro=16 lane=2 max-speed=25g active=true +init set port-map unit=0 port=35 eth-macro=16 lane=3 max-speed=25g active=true +init set port-map unit=0 port=36 eth-macro=17 lane=0 max-speed=25g active=true +init set port-map unit=0 port=37 eth-macro=17 lane=1 max-speed=25g active=true +init set port-map unit=0 port=38 eth-macro=17 lane=2 max-speed=25g active=true +init set port-map unit=0 port=39 eth-macro=17 lane=3 max-speed=25g active=true +init set port-map unit=0 port=40 eth-macro=18 lane=0 max-speed=25g active=true +init set port-map unit=0 port=41 eth-macro=18 lane=1 max-speed=25g active=true +init set port-map unit=0 port=42 eth-macro=18 lane=2 max-speed=25g active=true +init set port-map unit=0 port=43 eth-macro=18 lane=3 max-speed=25g active=true +init set port-map unit=0 port=44 eth-macro=19 lane=0 max-speed=25g active=true +init set port-map unit=0 port=45 eth-macro=19 lane=1 max-speed=25g active=true +init set port-map unit=0 port=46 eth-macro=19 lane=2 max-speed=25g active=true 
+init set port-map unit=0 port=47 eth-macro=19 lane=3 max-speed=25g active=true init set port-map unit=0 port=48 eth-macro=20 lane=0 max-speed=100g active=true init set port-map unit=0 port=49 eth-macro=21 lane=0 max-speed=100g active=true init set port-map unit=0 port=50 eth-macro=26 lane=0 max-speed=100g active=true @@ -488,7 +488,7 @@ phy set pre-emphasis unit=0 portlist=53 lane-cnt=4 property=c2 data=0x02.02.02.0 phy set pre-emphasis unit=0 portlist=53 lane-cnt=4 property=cn1 data=0x00.00.00.00 phy set pre-emphasis unit=0 portlist=53 lane-cnt=4 property=c0 data=0x1A.1A.1A.1A phy set pre-emphasis unit=0 portlist=53 lane-cnt=4 property=c1 data=0x07.07.07.07 -port set property unit=0 portlist=0-47 speed=10g +port set property unit=0 portlist=0-47 speed=25g port set property unit=0 portlist=0-47 medium-type=sr port set property unit=0 portlist=48-53 speed=100g port set property unit=0 portlist=48-53 medium-type=sr4 diff --git a/platform/nephos/one-image.mk b/platform/nephos/one-image.mk index ace042d5ce4f..bd9e2e851889 100644 --- a/platform/nephos/one-image.mk +++ b/platform/nephos/one-image.mk @@ -6,6 +6,7 @@ $(SONIC_ONE_IMAGE)_IMAGE_TYPE = onie $(SONIC_ONE_IMAGE)_INSTALLS += $(NEPHOS_NPS_KERNEL) $(SONIC_ONE_IMAGE)_LAZY_INSTALLS += $(INGRASYS_S9130_32X_PLATFORM_MODULE) \ $(INGRASYS_S9230_64X_PLATFORM_MODULE) \ - $(ACCTON_AS7116_54X_PLATFORM_MODULE) + $(ACCTON_AS7116_54X_PLATFORM_MODULE) \ + $(PEGATRON_PORSCHE_PLATFORM_MODULE) $(SONIC_ONE_IMAGE)_DOCKERS += $(SONIC_INSTALL_DOCKER_IMAGES) SONIC_INSTALLERS += $(SONIC_ONE_IMAGE) diff --git a/platform/nephos/platform-modules-pegatron.mk b/platform/nephos/platform-modules-pegatron.mk index 9a411763cec2..150cd3ac719f 100755 --- a/platform/nephos/platform-modules-pegatron.mk +++ b/platform/nephos/platform-modules-pegatron.mk @@ -10,4 +10,4 @@ $(PEGATRON_PORSCHE_PLATFORM_MODULE)_DEPENDS += $(LINUX_HEADERS) $(LINUX_HEADERS_ $(PEGATRON_PORSCHE_PLATFORM_MODULE)_PLATFORM = x86_64-pegatron_porsche-r0 SONIC_DPKG_DEBS += 
$(PEGATRON_PORSCHE_PLATFORM_MODULE) -$(eval $(call add_extra_package,$(PEGATRON_PORSCHE_PLATFORM_MODULE))) +SONIC_STRETCH_DEBS += $(PEGATRON_PORSCHE_PLATFORM_MODULE) diff --git a/platform/nephos/rules.mk b/platform/nephos/rules.mk index bf77ad0e6edf..57068ab8b54a 100644 --- a/platform/nephos/rules.mk +++ b/platform/nephos/rules.mk @@ -1,5 +1,6 @@ include $(PLATFORM_PATH)/sdk.mk include $(PLATFORM_PATH)/sai.mk +include $(PLATFORM_PATH)/platform-modules-pegatron.mk include $(PLATFORM_PATH)/platform-modules-ingrasys.mk include $(PLATFORM_PATH)/platform-modules-accton.mk include $(PLATFORM_PATH)/docker-orchagent-nephos.mk diff --git a/platform/nephos/sonic-platform-modules-pegatron/debian/control b/platform/nephos/sonic-platform-modules-pegatron/debian/control index 2c6d630da51e..d9d29a1624cd 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/debian/control +++ b/platform/nephos/sonic-platform-modules-pegatron/debian/control @@ -7,6 +7,6 @@ Standards-Version: 3.9.3 Package: sonic-platform-pegatron-porsche Architecture: amd64 -Depends: linux-image-4.9.0-7-amd64 +Depends: linux-image-4.9.0-8-amd64 Description: kernel modules for platform devices such as fan, led, sfp diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_sfp.c b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_sfp.c index 5d5d64b15e1a..9e4b8dbb975d 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_sfp.c +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_sfp.c @@ -62,9 +62,9 @@ enum cpld_croups { cpld_group_a, cpld_group_b, cpld_group_c}; static const unsigned short normal_i2c[] = { SFP_EEPROM_A0_ADDR, SFP_EEPROM_A2_ADDR, I2C_CLIENT_END }; -static char *SFP_CPLD_GROUPA_MAPPING[CPLDA_SFP_NUM][16]={0}; -static char *SFP_CPLD_GROUPB_MAPPING[CPLDB_SFP_NUM][16]={0}; -static char *SFP_CPLD_GROUPC_MAPPING[CPLDC_SFP_NUM][16]={0}; +static char 
SFP_CPLD_GROUPA_MAPPING[CPLDA_SFP_NUM][16]={0}; +static char SFP_CPLD_GROUPB_MAPPING[CPLDB_SFP_NUM][16]={0}; +static char SFP_CPLD_GROUPC_MAPPING[CPLDC_SFP_NUM][16]={0}; /* * This parameter is to help this driver avoid blocking other drivers out diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/pegatron_porsche_util.py b/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/pegatron_porsche_util.py index 16662081d0cb..d590dbbcbe32 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/pegatron_porsche_util.py +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/pegatron_porsche_util.py @@ -31,6 +31,7 @@ kernel_module = ['i2c_dev', 'i2c-mux-pca954x force_deselect_on_exit=1', 'at24', 'pegatron_porsche_cpld', 'pegatron_hwmon_mcu', 'pegatron_porsche_sfp'] moduleID = ['pca9544', 'pca9544', '24c02', 'porsche_hwmon_mcu', 'porsche_cpld', 'porsche_cpld', 'porsche_cpld', 'porsche_sfpA', 'porsche_sfpB', 'porsche_sfpC'] i2c_check_node = ['i2c-0', 'i2c-1'] +uninstall_check_node = ['-0072', '-0073'] device_address = ['0x72', '0x73', '0x54', '0x70', '0x74', '0x75', '0x76', '0x50', '0x50', '0x50'] device_node= ['i2c-2', 'i2c-6', 'i2c-4', 'i2c-5', 'i2c-6', 'i2c-7', 'i2c-8', 'i2c-6', 'i2c-7', 'i2c-8'] @@ -54,6 +55,16 @@ def do_cmd(cmd, show): print('Failed :' + cmd) return status, output +def install_driver(): + status, output = do_cmd("depmod -a", 1) + + for i in range(0, len(kernel_module)): + status, output = do_cmd("modprobe " + kernel_module[i], 1) + if status: + return status + + return + def check_device_position(num): for i in range(0, len(i2c_check_node)): status, output = do_cmd("echo " + moduleID[num] + " " + device_address[num] + " > " + i2c_prefix + i2c_check_node[i] + "/new_device", 0) @@ -94,10 +105,17 @@ def do_install(): def do_uninstall(): for i in range(0, len(kernel_module)): - status, output = do_cmd("modprobe -r " + kernel_module[i], 1) + status, output = do_cmd("modprobe -rq " + 
kernel_module[i], 0) for i in range(0, len(moduleID)): - status, output = do_cmd("echo " + device_address[i] + " > " + i2c_prefix + i2c_check_node[i] + "/delete_device", 0) + if moduleID[i] == "pca9544": + for node in range(0, len(i2c_check_node)): + status, output = do_cmd("ls " + i2c_prefix + str(node) + uninstall_check_node[i], 0) + if not status: + status, output = do_cmd("echo " + device_address[i] + " > " + i2c_prefix + i2c_check_node[node] + "/delete_device", 0) + + else: + status, output = do_cmd("echo " + device_address[i] + " > " + i2c_prefix + device_node[i] + "/delete_device", 0) return @@ -177,7 +195,7 @@ def main(): command: install : install drivers and generate related sysfs nodes - clean : uninstall drivers and remove related sysfs nodes + uninstall : uninstall drivers and remove related sysfs nodes set : change board setting [led] debug : debug info [on/off] """ From d557710444d3d9acfec0adfc9419d6a1c705c726 Mon Sep 17 00:00:00 2001 From: PeterLin Date: Mon, 1 Apr 2019 10:18:22 +0800 Subject: [PATCH 08/20] 1. add platfporm fn-6254-dn-5 2. 
fix porsch fiber link issus --- .../x86_64-pegatron_porsche-r0/plugins/sfputil.py | 2 +- platform/nephos/one-image.mk | 3 ++- platform/nephos/platform-modules-pegatron.mk | 11 ++++++++++- .../common/modules/pegatron_hwmon_mcu.c | 9 +-------- .../sonic-platform-modules-pegatron/debian/control | 4 ++++ .../sonic-platform-modules-pegatron/debian/rules | 2 +- .../porsche/modules/pegatron_porsche_cpld.c | 5 +++-- 7 files changed, 22 insertions(+), 14 deletions(-) diff --git a/device/pegatron/x86_64-pegatron_porsche-r0/plugins/sfputil.py b/device/pegatron/x86_64-pegatron_porsche-r0/plugins/sfputil.py index 9238d0f06fde..bfb6b590e43b 100755 --- a/device/pegatron/x86_64-pegatron_porsche-r0/plugins/sfputil.py +++ b/device/pegatron/x86_64-pegatron_porsche-r0/plugins/sfputil.py @@ -187,7 +187,7 @@ def read_porttab_mappings(self, porttabfile): fp_port_index = portname.split("Ethernet").pop() fp_port_index = int(fp_port_index.split("s").pop(0))/4 - + #Peter remove - 2018.04.13, this will cause can't show qsfp module when sfp_pot was set #if ((len(self.sfp_ports) > 0) and (fp_port_index not in self.sfp_ports)): #continue diff --git a/platform/nephos/one-image.mk b/platform/nephos/one-image.mk index bd9e2e851889..ad45faaa65b6 100644 --- a/platform/nephos/one-image.mk +++ b/platform/nephos/one-image.mk @@ -7,6 +7,7 @@ $(SONIC_ONE_IMAGE)_INSTALLS += $(NEPHOS_NPS_KERNEL) $(SONIC_ONE_IMAGE)_LAZY_INSTALLS += $(INGRASYS_S9130_32X_PLATFORM_MODULE) \ $(INGRASYS_S9230_64X_PLATFORM_MODULE) \ $(ACCTON_AS7116_54X_PLATFORM_MODULE) \ - $(PEGATRON_PORSCHE_PLATFORM_MODULE) + $(PEGATRON_PORSCHE_PLATFORM_MODULE) \ + $(PEGATRON_FN_6254_DN_F_PLATFORM_MODULE) $(SONIC_ONE_IMAGE)_DOCKERS += $(SONIC_INSTALL_DOCKER_IMAGES) SONIC_INSTALLERS += $(SONIC_ONE_IMAGE) diff --git a/platform/nephos/platform-modules-pegatron.mk b/platform/nephos/platform-modules-pegatron.mk index 150cd3ac719f..ac5bd16cf9c7 100755 --- a/platform/nephos/platform-modules-pegatron.mk +++ b/platform/nephos/platform-modules-pegatron.mk 
@@ -1,13 +1,22 @@ # Pegatron Platform modules PEGATRON_PORSCHE_PLATFORM_MODULE_VERSION = 0.1 +PEGATRON_FN_6254_DN_F_PLATFORM_MODULE_VERSION = 0.1 export PEGATRON_PORSCHE_PLATFORM_MODULE_VERSION +export PEGATRON_FN_6254_DN_F_PLATFORM_MODULE_VERSION PEGATRON_PORSCHE_PLATFORM_MODULE = sonic-platform-pegatron-porsche_$(PEGATRON_PORSCHE_PLATFORM_MODULE_VERSION)_amd64.deb $(PEGATRON_PORSCHE_PLATFORM_MODULE)_SRC_PATH = $(PLATFORM_PATH)/sonic-platform-modules-pegatron $(PEGATRON_PORSCHE_PLATFORM_MODULE)_DEPENDS += $(LINUX_HEADERS) $(LINUX_HEADERS_COMMON) $(PEGATRON_PORSCHE_PLATFORM_MODULE)_PLATFORM = x86_64-pegatron_porsche-r0 SONIC_DPKG_DEBS += $(PEGATRON_PORSCHE_PLATFORM_MODULE) - SONIC_STRETCH_DEBS += $(PEGATRON_PORSCHE_PLATFORM_MODULE) + +PEGATRON_FN_6254_DN_F_PLATFORM_MODULE = sonic-platform-pegatron-fn-6254-dn-f_$(PEGATRON_FN_6254_DN_F_PLATFORM_MODULE_VERSION)_amd64.deb +$(PEGATRON_FN_6254_DN_F_PLATFORM_MODULE)_SRC_PATH = $(PLATFORM_PATH)/sonic-platform-modules-pegatron +$(PEGATRON_FN_6254_DN_F_PLATFORM_MODULE)_DEPENDS += $(LINUX_HEADERS) $(LINUX_HEADERS_COMMON) +$(PEGATRON_FN_6254_DN_F_PLATFORM_MODULE)_PLATFORM = x86_64-pegatron_fn_6254_dn_f-r0 +SONIC_DPKG_DEBS += $(PEGATRON_FN_6254_DN_F_PLATFORM_MODULE) +SONIC_STRETCH_DEBS += $(PEGATRON_FN_6254_DN_F_PLATFORM_MODULE) + diff --git a/platform/nephos/sonic-platform-modules-pegatron/common/modules/pegatron_hwmon_mcu.c b/platform/nephos/sonic-platform-modules-pegatron/common/modules/pegatron_hwmon_mcu.c index 76cbd8844708..01b64bce9019 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/common/modules/pegatron_hwmon_mcu.c +++ b/platform/nephos/sonic-platform-modules-pegatron/common/modules/pegatron_hwmon_mcu.c @@ -55,13 +55,6 @@ #define SET_BIT(data, bit) data |= (1 << bit) #define CLEAR_BIT(data, bit) data &= ~(1 << bit) -enum chips -{ - mercedes3 = 0, - cadillac, - porsche, -}; - enum fan_alert { FAN_OUTER_RPM_OVER_ALERT_BIT = 0, @@ -1339,7 +1332,7 @@ static int pega_hwmon_mcu_remove(struct i2c_client *client) 
} static const struct i2c_device_id pega_hwmon_mcu_id[] = { - { "porsche_hwmon_mcu", porsche }, + { "pega_hwmon_mcu", 0 }, {} }; MODULE_DEVICE_TABLE(i2c, pega_hwmon_mcu_id); diff --git a/platform/nephos/sonic-platform-modules-pegatron/debian/control b/platform/nephos/sonic-platform-modules-pegatron/debian/control index d9d29a1624cd..c24275f6b94e 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/debian/control +++ b/platform/nephos/sonic-platform-modules-pegatron/debian/control @@ -10,3 +10,7 @@ Architecture: amd64 Depends: linux-image-4.9.0-8-amd64 Description: kernel modules for platform devices such as fan, led, sfp +Package: sonic-platform-pegatron-fn-6254-dn-f +Architecture: amd64 +Depends: linux-image-4.9.0-8-amd64 +Description: kernel modules for platform devices such as fan, led, sfp diff --git a/platform/nephos/sonic-platform-modules-pegatron/debian/rules b/platform/nephos/sonic-platform-modules-pegatron/debian/rules index 472ec939a47c..9fbb702c47a2 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/debian/rules +++ b/platform/nephos/sonic-platform-modules-pegatron/debian/rules @@ -19,7 +19,7 @@ PACKAGE_PRE_NAME := sonic-platform-pegatron KVERSION ?= $(shell uname -r) KERNEL_SRC := /lib/modules/$(KVERSION) MOD_SRC_DIR:= $(shell pwd) -MODULE_DIRS:= porsche +MODULE_DIRS:= porsche fn-6254-dn-f MODULE_DIR := modules UTILS_DIR := utils SERVICE_DIR := service diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_cpld.c b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_cpld.c index 154a68dcb836..2bccd04ada49 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_cpld.c +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_cpld.c @@ -30,7 +30,6 @@ #include #undef pegatron_porsche_DEBUG -/*#define pegatron_porsche_DEBUG*/ #ifdef pegatron_porsche_DEBUG #define DBG(x) x #else @@ -567,7 +566,9 @@ static 
ssize_t set_qsfp_reset(struct device *dev, struct device_attribute *da, data = pegatron_porsche_cpld_read(client->addr, reg); DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); - data = (val & 0x3) << ((attr->index % QSFP_FIRST_PORT % 4)*2); + CLEAR_BIT(data, (attr->index % 4)*2); + CLEAR_BIT(data, (attr->index % 4)*2+1); + data |= (val & 0x3) << ((attr->index % QSFP_FIRST_PORT % 4)*2); pegatron_porsche_cpld_write(client->addr, reg, data); From 9f80cded793a170d176f72dfe41499e4900c2dbb Mon Sep 17 00:00:00 2001 From: PeterLin Date: Tue, 2 Apr 2019 14:12:04 +0800 Subject: [PATCH 09/20] add missing folder --- .../default_sku | 1 + .../fn-6254-dn-f/port_config.ini | 55 + .../fn-6254-dn-f/sai.profile | 2 + .../fn-6254-dn-f/tau-fn-6254-dn-f.dsh | 633 +++ .../installer.conf | 3 + .../plugins/eeprom.py | 21 + .../plugins/psuutil.py | 92 + .../plugins/sfputil.py | 247 + .../tau-fn-6254-dn-f.cfg | 23 + .../0001-update-Intel-ixgbe-x550-driver.patch | 4648 +++++++++++++++++ .../fn-6254-dn-f/modules/Makefile | 1 + .../modules/pegatron_fn_6254_dn_f_cpld.c | 1133 ++++ .../modules/pegatron_fn_6254_dn_f_sfp.c | 431 ++ .../fn-6254-dn-f/modules/pegatron_hwmon_mcu.c | 1 + .../fn-6254-dn-f/scripts/sensors | 7 + .../fn_6254_dn_f-platform-init.service | 13 + .../utils/fn_6254_dn_f_sensors.py | 141 + .../utils/pegatron_fn_6254_dn_f_util.py | 233 + 18 files changed, 7685 insertions(+) create mode 100644 device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/default_sku create mode 100755 device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/fn-6254-dn-f/port_config.ini create mode 100755 device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/fn-6254-dn-f/sai.profile create mode 100755 device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/fn-6254-dn-f/tau-fn-6254-dn-f.dsh create mode 100755 device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/installer.conf create mode 100755 device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/plugins/eeprom.py create mode 100755 
device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/plugins/psuutil.py create mode 100755 device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/plugins/sfputil.py create mode 100755 device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/tau-fn-6254-dn-f.cfg create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/kernel_patch/0001-update-Intel-ixgbe-x550-driver.patch create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/Makefile create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_cpld.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_sfp.c create mode 120000 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_hwmon_mcu.c create mode 100755 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/scripts/sensors create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-init.service create mode 100755 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/fn_6254_dn_f_sensors.py create mode 100755 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_util.py diff --git a/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/default_sku b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/default_sku new file mode 100644 index 000000000000..bc4d84f9c0e2 --- /dev/null +++ b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/default_sku @@ -0,0 +1 @@ +fn-6254-dn-f t1 diff --git a/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/fn-6254-dn-f/port_config.ini b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/fn-6254-dn-f/port_config.ini new file mode 100755 index 000000000000..405e44cd9e0c --- /dev/null +++ b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/fn-6254-dn-f/port_config.ini @@ -0,0 +1,55 @@ +#name lanes alias index speed +Ethernet0 8 Ethernet1/1 0 25000 +Ethernet1 9 Ethernet2/1 1 25000 
+Ethernet2 10 Ethernet3/1 2 25000 +Ethernet3 11 Ethernet4/1 3 25000 +Ethernet4 12 Ethernet5/1 4 25000 +Ethernet5 13 Ethernet6/1 5 25000 +Ethernet6 14 Ethernet7/1 6 25000 +Ethernet7 15 Ethernet8/1 7 25000 +Ethernet8 16 Ethernet9/1 8 25000 +Ethernet9 17 Ethernet10/1 9 25000 +Ethernet10 18 Ethernet11/1 10 25000 +Ethernet11 19 Ethernet12/1 11 25000 +Ethernet12 20 Ethernet13/1 12 25000 +Ethernet13 21 Ethernet14/1 13 25000 +Ethernet14 22 Ethernet15/1 14 25000 +Ethernet15 23 Ethernet16/1 15 25000 +Ethernet16 32 Ethernet17/1 16 25000 +Ethernet17 33 Ethernet18/1 17 25000 +Ethernet18 34 Ethernet19/1 18 25000 +Ethernet19 35 Ethernet20/1 19 25000 +Ethernet20 40 Ethernet21/1 20 25000 +Ethernet21 41 Ethernet22/1 21 25000 +Ethernet22 42 Ethernet23/1 22 25000 +Ethernet23 43 Ethernet24/1 23 25000 +Ethernet24 48 Ethernet25/1 24 25000 +Ethernet25 49 Ethernet26/1 25 25000 +Ethernet26 50 Ethernet27/1 26 25000 +Ethernet27 51 Ethernet28/1 27 25000 +Ethernet28 56 Ethernet29/1 28 25000 +Ethernet29 57 Ethernet30/1 29 25000 +Ethernet30 58 Ethernet31/1 30 25000 +Ethernet31 59 Ethernet32/1 31 25000 +Ethernet32 64 Ethernet33/1 32 25000 +Ethernet33 65 Ethernet34/1 33 25000 +Ethernet34 66 Ethernet35/1 34 25000 +Ethernet35 67 Ethernet36/1 35 25000 +Ethernet36 68 Ethernet37/1 36 25000 +Ethernet37 69 Ethernet38/1 37 25000 +Ethernet38 70 Ethernet39/1 38 25000 +Ethernet39 71 Ethernet40/1 39 25000 +Ethernet40 72 Ethernet41/1 40 25000 +Ethernet41 73 Ethernet42/1 41 25000 +Ethernet42 74 Ethernet43/1 42 25000 +Ethernet43 75 Ethernet44/1 43 25000 +Ethernet44 76 Ethernet45/1 44 25000 +Ethernet45 77 Ethernet46/1 45 25000 +Ethernet46 78 Ethernet47/1 46 25000 +Ethernet47 79 Ethernet48/1 47 25000 +Ethernet48 80,81,82,83 Ethernet49/1 48 100000 +Ethernet49 84,85,86,87 Ethernet50/1 49 100000 +Ethernet50 104,105,106,107 Ethernet51/1 50 100000 +Ethernet51 108,109,110,111 Ethernet52/1 51 100000 +Ethernet52 112,113,114,115 Ethernet53/1 52 100000 +Ethernet53 116,117,118,119 Ethernet54/1 53 100000 diff --git 
a/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/fn-6254-dn-f/sai.profile b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/fn-6254-dn-f/sai.profile new file mode 100755 index 000000000000..a7ca856a324a --- /dev/null +++ b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/fn-6254-dn-f/sai.profile @@ -0,0 +1,2 @@ +SAI_INIT_CONFIG_FILE=/usr/share/sonic/platform/tau-fn-6254-dn-f.cfg +SAI_DSH_CONFIG_FILE=/usr/share/sonic/hwsku/tau-fn-6254-dn-f.dsh diff --git a/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/fn-6254-dn-f/tau-fn-6254-dn-f.dsh b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/fn-6254-dn-f/tau-fn-6254-dn-f.dsh new file mode 100755 index 000000000000..efe3bbf04e88 --- /dev/null +++ b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/fn-6254-dn-f/tau-fn-6254-dn-f.dsh @@ -0,0 +1,633 @@ +init start stage low-level + +init set port-map port=0 eth-macro=2 lane=0 max-speed=25g active=true +init set port-map port=1 eth-macro=2 lane=1 max-speed=25g active=true +init set port-map port=2 eth-macro=2 lane=2 max-speed=25g active=true +init set port-map port=3 eth-macro=2 lane=3 max-speed=25g active=true +init set port-map port=4 eth-macro=3 lane=0 max-speed=25g active=true +init set port-map port=5 eth-macro=3 lane=1 max-speed=25g active=true +init set port-map port=6 eth-macro=3 lane=2 max-speed=25g active=true +init set port-map port=7 eth-macro=3 lane=3 max-speed=25g active=true +init set port-map port=8 eth-macro=4 lane=0 max-speed=25g active=true +init set port-map port=9 eth-macro=4 lane=1 max-speed=25g active=true +init set port-map port=10 eth-macro=4 lane=2 max-speed=25g active=true +init set port-map port=11 eth-macro=4 lane=3 max-speed=25g active=true +init set port-map port=12 eth-macro=5 lane=0 max-speed=25g active=true +init set port-map port=13 eth-macro=5 lane=1 max-speed=25g active=true +init set port-map port=14 eth-macro=5 lane=2 max-speed=25g active=true +init set port-map port=15 eth-macro=5 lane=3 max-speed=25g active=true +init set port-map port=16 
eth-macro=8 lane=0 max-speed=25g active=true +init set port-map port=17 eth-macro=8 lane=1 max-speed=25g active=true +init set port-map port=18 eth-macro=8 lane=2 max-speed=25g active=true +init set port-map port=19 eth-macro=8 lane=3 max-speed=25g active=true +init set port-map port=20 eth-macro=10 lane=0 max-speed=25g active=true +init set port-map port=21 eth-macro=10 lane=1 max-speed=25g active=true +init set port-map port=22 eth-macro=10 lane=2 max-speed=25g active=true +init set port-map port=23 eth-macro=10 lane=3 max-speed=25g active=true +init set port-map port=24 eth-macro=12 lane=0 max-speed=25g active=true +init set port-map port=25 eth-macro=12 lane=1 max-speed=25g active=true +init set port-map port=26 eth-macro=12 lane=2 max-speed=25g active=true +init set port-map port=27 eth-macro=12 lane=3 max-speed=25g active=true +init set port-map port=28 eth-macro=14 lane=0 max-speed=25g active=true +init set port-map port=29 eth-macro=14 lane=1 max-speed=25g active=true +init set port-map port=30 eth-macro=14 lane=2 max-speed=25g active=true +init set port-map port=31 eth-macro=14 lane=3 max-speed=25g active=true +init set port-map port=32 eth-macro=16 lane=0 max-speed=25g active=true +init set port-map port=33 eth-macro=16 lane=1 max-speed=25g active=true +init set port-map port=34 eth-macro=16 lane=2 max-speed=25g active=true +init set port-map port=35 eth-macro=16 lane=3 max-speed=25g active=true +init set port-map port=36 eth-macro=17 lane=0 max-speed=25g active=true +init set port-map port=37 eth-macro=17 lane=1 max-speed=25g active=true +init set port-map port=38 eth-macro=17 lane=2 max-speed=25g active=true +init set port-map port=39 eth-macro=17 lane=3 max-speed=25g active=true +init set port-map port=40 eth-macro=18 lane=0 max-speed=25g active=true +init set port-map port=41 eth-macro=18 lane=1 max-speed=25g active=true +init set port-map port=42 eth-macro=18 lane=2 max-speed=25g active=true +init set port-map port=43 eth-macro=18 lane=3 
max-speed=25g active=true +init set port-map port=44 eth-macro=19 lane=0 max-speed=25g active=true +init set port-map port=45 eth-macro=19 lane=1 max-speed=25g active=true +init set port-map port=46 eth-macro=19 lane=2 max-speed=25g active=true +init set port-map port=47 eth-macro=19 lane=3 max-speed=25g active=true +init set port-map port=48 eth-macro=20 lane=0 max-speed=100g active=true +init set port-map port=49 eth-macro=21 lane=0 max-speed=100g active=true +init set port-map port=50 eth-macro=26 lane=0 max-speed=100g active=true +init set port-map port=51 eth-macro=27 lane=0 max-speed=100g active=true +init set port-map port=52 eth-macro=28 lane=0 max-speed=100g active=true +init set port-map port=53 eth-macro=29 lane=0 max-speed=100g active=true +init set port-map port=129 eth-macro=0 lane=0 max-speed=10g active=true guarantee=true cpi=true +init set port-map port=130 eth-macro=0 lane=1 max-speed=10g active=true guarantee=true cpi=true init-done=true + +init start stage task-rsrc +init start stage module +init start stage task +phy set lane-swap portlist=0 lane-cnt=1 property=tx data=0x00 +phy set lane-swap portlist=1 lane-cnt=1 property=tx data=0x01 +phy set lane-swap portlist=2 lane-cnt=1 property=tx data=0x02 +phy set lane-swap portlist=3 lane-cnt=1 property=tx data=0x03 + +phy set lane-swap portlist=4 lane-cnt=1 property=tx data=0x02 +phy set lane-swap portlist=5 lane-cnt=1 property=tx data=0x03 +phy set lane-swap portlist=6 lane-cnt=1 property=tx data=0x00 +phy set lane-swap portlist=7 lane-cnt=1 property=tx data=0x01 + +phy set lane-swap portlist=8 lane-cnt=1 property=tx data=0x00 +phy set lane-swap portlist=9 lane-cnt=1 property=tx data=0x01 +phy set lane-swap portlist=10 lane-cnt=1 property=tx data=0x02 +phy set lane-swap portlist=11 lane-cnt=1 property=tx data=0x03 + +phy set lane-swap portlist=12 lane-cnt=1 property=tx data=0x00 +phy set lane-swap portlist=13 lane-cnt=1 property=tx data=0x03 +phy set lane-swap portlist=14 lane-cnt=1 property=tx 
data=0x02 +phy set lane-swap portlist=15 lane-cnt=1 property=tx data=0x01 + +phy set lane-swap portlist=16 lane-cnt=1 property=tx data=0x03 +phy set lane-swap portlist=17 lane-cnt=1 property=tx data=0x02 +phy set lane-swap portlist=18 lane-cnt=1 property=tx data=0x01 +phy set lane-swap portlist=19 lane-cnt=1 property=tx data=0x00 + +phy set lane-swap portlist=20 lane-cnt=1 property=tx data=0x02 +phy set lane-swap portlist=21 lane-cnt=1 property=tx data=0x03 +phy set lane-swap portlist=22 lane-cnt=1 property=tx data=0x00 +phy set lane-swap portlist=23 lane-cnt=1 property=tx data=0x01 + +phy set lane-swap portlist=24 lane-cnt=1 property=tx data=0x02 +phy set lane-swap portlist=25 lane-cnt=1 property=tx data=0x03 +phy set lane-swap portlist=26 lane-cnt=1 property=tx data=0x00 +phy set lane-swap portlist=27 lane-cnt=1 property=tx data=0x01 + +phy set lane-swap portlist=28 lane-cnt=1 property=tx data=0x02 +phy set lane-swap portlist=29 lane-cnt=1 property=tx data=0x03 +phy set lane-swap portlist=30 lane-cnt=1 property=tx data=0x00 +phy set lane-swap portlist=31 lane-cnt=1 property=tx data=0x01 + +phy set lane-swap portlist=32 lane-cnt=1 property=tx data=0x00 +phy set lane-swap portlist=33 lane-cnt=1 property=tx data=0x01 +phy set lane-swap portlist=34 lane-cnt=1 property=tx data=0x02 +phy set lane-swap portlist=35 lane-cnt=1 property=tx data=0x03 + +phy set lane-swap portlist=36 lane-cnt=1 property=tx data=0x00 +phy set lane-swap portlist=37 lane-cnt=1 property=tx data=0x01 +phy set lane-swap portlist=38 lane-cnt=1 property=tx data=0x02 +phy set lane-swap portlist=39 lane-cnt=1 property=tx data=0x03 + +phy set lane-swap portlist=40 lane-cnt=1 property=tx data=0x00 +phy set lane-swap portlist=41 lane-cnt=1 property=tx data=0x01 +phy set lane-swap portlist=42 lane-cnt=1 property=tx data=0x02 +phy set lane-swap portlist=43 lane-cnt=1 property=tx data=0x03 + +phy set lane-swap portlist=44 lane-cnt=1 property=tx data=0x00 +phy set lane-swap portlist=45 lane-cnt=1 property=tx 
data=0x01 +phy set lane-swap portlist=46 lane-cnt=1 property=tx data=0x02 +phy set lane-swap portlist=47 lane-cnt=1 property=tx data=0x03 + +phy set lane-swap portlist=48 lane-cnt=4 property=tx data=0x03.02.01.00 +phy set lane-swap portlist=49 lane-cnt=4 property=tx data=0x01.02.03.00 +phy set lane-swap portlist=50 lane-cnt=4 property=tx data=0x01.02.03.00 +phy set lane-swap portlist=51 lane-cnt=4 property=tx data=0x03.02.01.00 +phy set lane-swap portlist=52 lane-cnt=4 property=tx data=0x03.02.01.00 +phy set lane-swap portlist=53 lane-cnt=4 property=tx data=0x01.02.03.00 + +phy set lane-swap portlist=129 lane-cnt=1 property=tx data=0x00 +phy set lane-swap portlist=130 lane-cnt=1 property=tx data=0x01 + +phy set lane-swap portlist=0 lane-cnt=1 property=rx data=0x00 +phy set lane-swap portlist=1 lane-cnt=1 property=rx data=0x01 +phy set lane-swap portlist=2 lane-cnt=1 property=rx data=0x02 +phy set lane-swap portlist=3 lane-cnt=1 property=rx data=0x03 + +phy set lane-swap portlist=4 lane-cnt=1 property=rx data=0x02 +phy set lane-swap portlist=5 lane-cnt=1 property=rx data=0x03 +phy set lane-swap portlist=6 lane-cnt=1 property=rx data=0x00 +phy set lane-swap portlist=7 lane-cnt=1 property=rx data=0x01 + +phy set lane-swap portlist=8 lane-cnt=1 property=rx data=0x00 +phy set lane-swap portlist=9 lane-cnt=1 property=rx data=0x01 +phy set lane-swap portlist=10 lane-cnt=1 property=rx data=0x02 +phy set lane-swap portlist=11 lane-cnt=1 property=rx data=0x03 + +phy set lane-swap portlist=12 lane-cnt=1 property=rx data=0x03 +phy set lane-swap portlist=13 lane-cnt=1 property=rx data=0x02 +phy set lane-swap portlist=14 lane-cnt=1 property=rx data=0x01 +phy set lane-swap portlist=15 lane-cnt=1 property=rx data=0x00 + +phy set lane-swap portlist=16 lane-cnt=1 property=rx data=0x00 +phy set lane-swap portlist=17 lane-cnt=1 property=rx data=0x03 +phy set lane-swap portlist=18 lane-cnt=1 property=rx data=0x02 +phy set lane-swap portlist=19 lane-cnt=1 property=rx data=0x01 + +phy 
set lane-swap portlist=20 lane-cnt=1 property=rx data=0x00 +phy set lane-swap portlist=21 lane-cnt=1 property=rx data=0x03 +phy set lane-swap portlist=22 lane-cnt=1 property=rx data=0x02 +phy set lane-swap portlist=23 lane-cnt=1 property=rx data=0x01 + +phy set lane-swap portlist=24 lane-cnt=1 property=rx data=0x00 +phy set lane-swap portlist=25 lane-cnt=1 property=rx data=0x03 +phy set lane-swap portlist=26 lane-cnt=1 property=rx data=0x02 +phy set lane-swap portlist=27 lane-cnt=1 property=rx data=0x01 + +phy set lane-swap portlist=28 lane-cnt=1 property=rx data=0x00 +phy set lane-swap portlist=29 lane-cnt=1 property=rx data=0x03 +phy set lane-swap portlist=30 lane-cnt=1 property=rx data=0x02 +phy set lane-swap portlist=31 lane-cnt=1 property=rx data=0x01 + +phy set lane-swap portlist=32 lane-cnt=1 property=rx data=0x02 +phy set lane-swap portlist=33 lane-cnt=1 property=rx data=0x01 +phy set lane-swap portlist=34 lane-cnt=1 property=rx data=0x00 +phy set lane-swap portlist=35 lane-cnt=1 property=rx data=0x03 + +phy set lane-swap portlist=36 lane-cnt=1 property=rx data=0x02 +phy set lane-swap portlist=37 lane-cnt=1 property=rx data=0x01 +phy set lane-swap portlist=38 lane-cnt=1 property=rx data=0x00 +phy set lane-swap portlist=39 lane-cnt=1 property=rx data=0x03 + +phy set lane-swap portlist=40 lane-cnt=1 property=rx data=0x02 +phy set lane-swap portlist=41 lane-cnt=1 property=rx data=0x01 +phy set lane-swap portlist=42 lane-cnt=1 property=rx data=0x00 +phy set lane-swap portlist=43 lane-cnt=1 property=rx data=0x03 + +phy set lane-swap portlist=44 lane-cnt=1 property=rx data=0x02 +phy set lane-swap portlist=45 lane-cnt=1 property=rx data=0x01 +phy set lane-swap portlist=46 lane-cnt=1 property=rx data=0x00 +phy set lane-swap portlist=47 lane-cnt=1 property=rx data=0x03 + +phy set lane-swap portlist=48 lane-cnt=4 property=rx data=0x03.00.01.02 +phy set lane-swap portlist=49 lane-cnt=4 property=rx data=0x03.00.01.02 +phy set lane-swap portlist=50 lane-cnt=4 
property=rx data=0x03.01.02.00 +phy set lane-swap portlist=51 lane-cnt=4 property=rx data=0x03.02.01.00 +phy set lane-swap portlist=52 lane-cnt=4 property=rx data=0x03.02.01.00 +phy set lane-swap portlist=53 lane-cnt=4 property=rx data=0x00.01.02.03 + +phy set lane-swap portlist=129 lane-cnt=1 property=rx data=0x00 +phy set lane-swap portlist=130 lane-cnt=1 property=rx data=0x01 + +phy set polarity-rev portlist=0 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev portlist=1 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev portlist=2 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev portlist=3 lane-cnt=1 property=tx data=0x01 + +phy set polarity-rev portlist=4 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=5 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=6 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=7 lane-cnt=1 property=tx data=0x00 + +phy set polarity-rev portlist=8 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=9 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev portlist=10 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=11 lane-cnt=1 property=tx data=0x01 + +phy set polarity-rev portlist=12 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=13 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=14 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=15 lane-cnt=1 property=tx data=0x00 + +phy set polarity-rev portlist=16 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=17 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=18 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev portlist=19 lane-cnt=1 property=tx data=0x01 + +phy set polarity-rev portlist=20 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=21 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=22 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev portlist=23 lane-cnt=1 property=tx data=0x00 + +phy 
set polarity-rev portlist=24 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=25 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=26 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev portlist=27 lane-cnt=1 property=tx data=0x00 + +phy set polarity-rev portlist=28 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=29 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=30 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev portlist=31 lane-cnt=1 property=tx data=0x00 + +phy set polarity-rev portlist=32 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=33 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev portlist=34 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev portlist=35 lane-cnt=1 property=tx data=0x01 + +phy set polarity-rev portlist=36 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=37 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev portlist=38 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev portlist=39 lane-cnt=1 property=tx data=0x01 + +phy set polarity-rev portlist=40 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=41 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev portlist=42 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev portlist=43 lane-cnt=1 property=tx data=0x01 + +phy set polarity-rev portlist=44 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=45 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev portlist=46 lane-cnt=1 property=tx data=0x01 +phy set polarity-rev portlist=47 lane-cnt=1 property=tx data=0x01 + +phy set polarity-rev portlist=48 lane-cnt=4 property=tx data=0x00.01.00.00 +phy set polarity-rev portlist=49 lane-cnt=4 property=tx data=0x00.00.01.00 +phy set polarity-rev portlist=50 lane-cnt=4 property=tx data=0x01.00.01.01 +phy set polarity-rev portlist=51 lane-cnt=4 property=tx data=0x01.01.01.01 +phy set polarity-rev portlist=52 lane-cnt=4 property=tx data=0x01.00.00.00 +phy 
set polarity-rev portlist=53 lane-cnt=4 property=tx data=0x00.00.01.00 + +phy set polarity-rev portlist=129 lane-cnt=1 property=tx data=0x00 +phy set polarity-rev portlist=130 lane-cnt=1 property=tx data=0x00 + +phy set polarity-rev portlist=0 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev portlist=1 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=2 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev portlist=3 lane-cnt=1 property=rx data=0x01 + +phy set polarity-rev portlist=4 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev portlist=5 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=6 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev portlist=7 lane-cnt=1 property=rx data=0x01 + +phy set polarity-rev portlist=8 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=9 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=10 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=11 lane-cnt=1 property=rx data=0x01 + +phy set polarity-rev portlist=12 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=13 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=14 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=15 lane-cnt=1 property=rx data=0x01 + +phy set polarity-rev portlist=16 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev portlist=17 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=18 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=19 lane-cnt=1 property=rx data=0x01 + +phy set polarity-rev portlist=20 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev portlist=21 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=22 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=23 lane-cnt=1 property=rx data=0x01 + +phy set polarity-rev portlist=24 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev portlist=25 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=26 lane-cnt=1 
property=rx data=0x01 +phy set polarity-rev portlist=27 lane-cnt=1 property=rx data=0x01 + +phy set polarity-rev portlist=28 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev portlist=29 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=30 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=31 lane-cnt=1 property=rx data=0x01 + +phy set polarity-rev portlist=32 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev portlist=33 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev portlist=34 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=35 lane-cnt=1 property=rx data=0x00 + +phy set polarity-rev portlist=36 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev portlist=37 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev portlist=38 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=39 lane-cnt=1 property=rx data=0x00 + +phy set polarity-rev portlist=40 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev portlist=41 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev portlist=42 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=43 lane-cnt=1 property=rx data=0x00 + +phy set polarity-rev portlist=44 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev portlist=45 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev portlist=46 lane-cnt=1 property=rx data=0x01 +phy set polarity-rev portlist=47 lane-cnt=1 property=rx data=0x00 + +phy set polarity-rev portlist=48 lane-cnt=4 property=rx data=0x00.01.00.00 +phy set polarity-rev portlist=49 lane-cnt=4 property=rx data=0x00.00.01.00 +phy set polarity-rev portlist=50 lane-cnt=4 property=rx data=0x00.00.01.01 +phy set polarity-rev portlist=51 lane-cnt=4 property=rx data=0x00.01.00.01 +phy set polarity-rev portlist=52 lane-cnt=4 property=rx data=0x00.01.00.01 +phy set polarity-rev portlist=53 lane-cnt=4 property=rx data=0x01.01.01.01 + +phy set polarity-rev portlist=129 lane-cnt=1 property=rx data=0x00 +phy set polarity-rev 
portlist=130 lane-cnt=1 property=rx data=0x00 + + +phy set pre-emphasis portlist=0 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=0 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=0 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=0 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=1 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=1 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=1 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=1 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=2 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=2 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=2 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=2 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=3 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=3 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=3 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=3 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=4 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=4 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=4 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=4 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=5 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=5 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=5 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=5 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=6 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=6 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=6 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=6 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=7 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=7 
lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=7 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=7 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=8 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=8 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=8 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=8 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=9 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=9 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=9 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=9 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=10 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=10 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=10 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=10 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=11 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=11 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=11 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=11 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=12 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=12 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=12 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=12 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=13 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=13 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=13 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=13 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=14 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=14 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=14 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=14 
lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=15 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=15 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=15 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=15 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=16 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=16 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=16 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=16 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=17 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=17 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=17 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=17 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=18 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=18 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=18 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=18 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=19 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=19 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=19 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=19 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=20 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=20 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=20 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=20 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=21 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=21 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=21 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=21 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=22 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis 
portlist=22 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=22 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=22 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=23 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=23 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=23 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=23 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=24 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=24 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=24 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=24 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=25 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=25 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=25 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=25 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=26 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=26 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=26 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=26 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=27 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=27 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=27 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=27 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=28 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=28 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=28 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=28 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=29 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=29 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=29 lane-cnt=1 property=cn1 data=0x0 +phy set 
pre-emphasis portlist=29 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=30 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=30 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=30 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=30 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=31 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=31 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=31 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=31 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=32 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=32 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=32 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=32 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=33 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=33 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=33 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=33 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=34 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=34 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=34 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=34 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=35 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=35 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=35 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=35 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=36 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=36 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=36 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=36 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=37 lane-cnt=1 property=c0 
data=0x1d +phy set pre-emphasis portlist=37 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=37 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=37 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=38 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=38 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=38 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=38 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=39 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=39 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=39 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=39 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=40 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=40 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=40 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=40 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=41 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=41 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=41 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=41 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=42 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=42 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=42 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=42 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=43 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=43 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=43 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=43 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=44 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=44 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=44 lane-cnt=1 
property=cn1 data=0x0 +phy set pre-emphasis portlist=44 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=45 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=45 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=45 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=45 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=46 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=46 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=46 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=46 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=47 lane-cnt=1 property=c0 data=0x1d +phy set pre-emphasis portlist=47 lane-cnt=1 property=c1 data=0x7 +phy set pre-emphasis portlist=47 lane-cnt=1 property=cn1 data=0x0 +phy set pre-emphasis portlist=47 lane-cnt=1 property=c2 data=0x0 + +phy set pre-emphasis portlist=48 lane-cnt=4 property=c0 data=0x1d.1d.1d.1d +phy set pre-emphasis portlist=48 lane-cnt=4 property=c1 data=0x7.7.7.7 +phy set pre-emphasis portlist=48 lane-cnt=4 property=cn1 data=0x0.0.0.0 +phy set pre-emphasis portlist=48 lane-cnt=4 property=c2 data=0x0.0.0.0 + +phy set pre-emphasis portlist=49 lane-cnt=4 property=c0 data=0x1b.1d.1b.1d +phy set pre-emphasis portlist=49 lane-cnt=4 property=c1 data=0x9.7.9.7 +phy set pre-emphasis portlist=49 lane-cnt=4 property=cn1 data=0x0.0.0.0 +phy set pre-emphasis portlist=49 lane-cnt=4 property=c2 data=0x0.0.0.0 + +phy set pre-emphasis portlist=50 lane-cnt=4 property=c0 data=0x1d.1d.1d.1d +phy set pre-emphasis portlist=50 lane-cnt=4 property=c1 data=0x7.7.7.7 +phy set pre-emphasis portlist=50 lane-cnt=4 property=cn1 data=0x0.0.0.0 +phy set pre-emphasis portlist=50 lane-cnt=4 property=c2 data=0x0.0.0.0 + +phy set pre-emphasis portlist=51 lane-cnt=4 property=c0 data=0x1d.1d.1d.1d +phy set pre-emphasis portlist=51 lane-cnt=4 property=c1 data=0x7.7.7.7 +phy set pre-emphasis portlist=51 lane-cnt=4 property=cn1 data=0x0.0.0.0 
+phy set pre-emphasis portlist=51 lane-cnt=4 property=c2 data=0x0.0.0.0 + +phy set pre-emphasis portlist=52 lane-cnt=4 property=c0 data=0x1d.1d.1d.1d +phy set pre-emphasis portlist=52 lane-cnt=4 property=c1 data=0x7.7.7.7 +phy set pre-emphasis portlist=52 lane-cnt=4 property=cn1 data=0x0.0.0.0 +phy set pre-emphasis portlist=52 lane-cnt=4 property=c2 data=0x0.0.0.0 + +phy set pre-emphasis portlist=53 lane-cnt=4 property=c0 data=0x1d.1d.1d.1d +phy set pre-emphasis portlist=53 lane-cnt=4 property=c1 data=0x7.7.7.7 +phy set pre-emphasis portlist=53 lane-cnt=4 property=cn1 data=0x0.0.0.0 +phy set pre-emphasis portlist=53 lane-cnt=4 property=c2 data=0x0.0.0.0 + +phy set pre-emphasis portlist=129 lane-cnt=1 property=c2 data=0x01 +phy set pre-emphasis portlist=129 lane-cnt=1 property=cn1 data=0x01 +phy set pre-emphasis portlist=129 lane-cnt=1 property=c1 data=0x03 +phy set pre-emphasis portlist=129 lane-cnt=1 property=c0 data=0x02 + +phy set pre-emphasis portlist=130 lane-cnt=1 property=c2 data=0x01 +phy set pre-emphasis portlist=130 lane-cnt=1 property=cn1 data=0x01 +phy set pre-emphasis portlist=130 lane-cnt=1 property=c1 data=0x03 +phy set pre-emphasis portlist=130 lane-cnt=1 property=c0 data=0x02 + +port set property portlist=0-47 speed=25g +port set property portlist=48-52,53 speed=100g +port set property portlist=129-130 speed=10g +port set property portlist=0-47 medium-type=sr +port set property portlist=48-52,53 medium-type=sr4 +port set property portlist=129-130 medium-type=kr +port set property portlist=0-53 fec=disable +port set adver portlist=129-130 speed-10g-kr +port set property portlist=129-130 an=enable +port set property portlist=0-53,129-130 admin=enable diff --git a/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/installer.conf b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/installer.conf new file mode 100755 index 000000000000..925a32fc0c3a --- /dev/null +++ b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/installer.conf @@ -0,0 +1,3 @@ 
+CONSOLE_PORT=0x3f8 +CONSOLE_DEV=0 +CONSOLE_SPEED=115200 diff --git a/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/plugins/eeprom.py b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/plugins/eeprom.py new file mode 100755 index 000000000000..6964c6bade4f --- /dev/null +++ b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/plugins/eeprom.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python + +try: + import exceptions + import binascii + import time + import optparse + import warnings + import os + import sys + from sonic_eeprom import eeprom_base + from sonic_eeprom import eeprom_tlvinfo + import subprocess +except ImportError, e: + raise ImportError (str(e) + "- required module not found") + +class board(eeprom_tlvinfo.TlvInfoDecoder): + _TLV_INFO_MAX_LEN = 256 + def __init__(self, name, path, cpld_root, ro): + self.eeprom_path = "/sys/bus/i2c/devices/4-0054/eeprom" + super(board, self).__init__(self.eeprom_path, 0, '', True) diff --git a/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/plugins/psuutil.py b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/plugins/psuutil.py new file mode 100755 index 000000000000..a23a7b7fe73e --- /dev/null +++ b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/plugins/psuutil.py @@ -0,0 +1,92 @@ +# +# psuutil.py +# Platform-specific PSU status interface for SONiC +# + + +import os.path + +try: + from sonic_psu.psu_base import PsuBase +except ImportError as e: + raise ImportError(str(e) + "- required module not found") + + +class PsuUtil(PsuBase): + """Platform-specific PSUutil class""" + + SYSFS_PSU_DIR = "/sys/bus/i2c/devices/7-0075" + + def __init__(self): + PsuBase.__init__(self) + + + # Get sysfs attribute + def get_attr_value(self, attr_path): + + retval = 'ERR' + if (not os.path.isfile(attr_path)): + return retval + + try: + with open(attr_path, 'r') as fd: + retval = fd.read() + except Exception as error: + logging.error("Unable to open ", attr_path, " file !") + + retval = retval.rstrip('\r\n') + + fd.close() + return retval + + def 
get_num_psus(self): + """ + Retrieves the number of PSUs available on the device + :return: An integer, the number of PSUs available on the device + """ + MAX_PSUS = 2 + return MAX_PSUS + + def get_psu_status(self, index): + """ + Retrieves the oprational status of power supply unit (PSU) defined + by index + :param index: An integer, index of the PSU of which to query status + :return: Boolean, True if PSU is operating properly, False if PSU is\ + faulty + """ + status = 0 + attr_file = 'psu_'+str(index)+'_status' + attr_path = self.SYSFS_PSU_DIR +'/' + attr_file + + attr_value = self.get_attr_value(attr_path) + + if (attr_value != 'ERR'): + attr_value = int(attr_value, 16) + # Check for PSU status + if (attr_value == 1): + status = 1 + + return status + + def get_psu_presence(self, index): + """ + Retrieves the presence status of power supply unit (PSU) defined + by index + :param index: An integer, index of the PSU of which to query status + :return: Boolean, True if PSU is plugged, False if not + """ + status = 0 + attr_file = 'psu_'+str(index)+'_present' + attr_path = self.SYSFS_PSU_DIR +'/' + attr_file + + attr_value = self.get_attr_value(attr_path) + + if (attr_value != 'ERR'): + attr_value = int(attr_value, 16) + # Check for PSU presence + if (attr_value == 0): + status = 1 + + return status + diff --git a/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/plugins/sfputil.py b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/plugins/sfputil.py new file mode 100755 index 000000000000..9238d0f06fde --- /dev/null +++ b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/plugins/sfputil.py @@ -0,0 +1,247 @@ +#!/usr/bin/env python + +try: + import os + import re + import time + from sonic_sfp.sfputilbase import SfpUtilBase +except ImportError, e: + raise ImportError (str(e) + "- required module not found") + + +class SfpUtil(SfpUtilBase): + """Platform specific sfputil class""" + + port_start = 0 + port_end = 53 + ports_in_block = 54 + cplda_sfp_num = 24 + cpldb_sfp_num 
= 12 + cpldc_sfp_num = 18 + + port_to_eeprom_mapping = {} + port_to_i2c_mapping = {} + sfp_ports = range(0, ports_in_block) + qsfp_ports = range(ports_in_block - 6, ports_in_block) + + + def __init__(self): + for x in range(self.port_start, self.port_end + 1): + if x < self.cpldb_sfp_num: + self.port_to_i2c_mapping.update({x:7}) + elif x < self.cplda_sfp_num + self.cpldb_sfp_num: + self.port_to_i2c_mapping.update({x:6}) + else: + self.port_to_i2c_mapping.update({x:8}) + + for x in range(self.port_start, self.port_end+1): + eeprom_path = '/sys/bus/i2c/devices/{0}-0050/sfp'+str(x+1)+'_eeprom' + port_eeprom_path = eeprom_path.format(self.port_to_i2c_mapping[x]) + self.port_to_eeprom_mapping[x] = port_eeprom_path + SfpUtilBase.__init__(self) + + + def get_presence(self, port_num): + if port_num < self.port_start or port_num > self.port_end: + return False + + if port_num < self.cpldb_sfp_num: + presence_path = '/sys/bus/i2c/devices/7-0075/sfp'+str(port_num+1)+'_present' + elif port_num < self.cpldb_sfp_num + self.cplda_sfp_num: + presence_path = '/sys/bus/i2c/devices/6-0074/sfp'+str(port_num+1)+'_present' + else: + presence_path = '/sys/bus/i2c/devices/8-0076/sfp'+str(port_num+1)+'_present' + + try: + file = open(presence_path) + except IOError as e: + print "Error: unable to open file: %s" % str(e) + return False + + value = int(file.readline().rstrip()) + + file.close() + if value == 0: + return True + + return False + + def get_low_power_mode(self, port_num): + if port_num not in self.qsfp_ports: + return False + + lowpower_path = '/sys/bus/i2c/devices/8-0076/sfp'+str(port_num+1)+'_lowpower' + + try: + file = open(lowpower_path) + except IOError as e: + print "Error: unable to open file: %s" % str(e) + return False + + value = int(file.readline().rstrip()) + + file.close() + if value == 1: + return True + + return False + + def set_low_power_mode(self, port_num, lpmode): + if port_num not in self.qsfp_ports: + return False + + lowpower_path = 
'/sys/bus/i2c/devices/8-0076/sfp'+str(port_num+1)+'_lowpower' + + # LPMode is active high; set or clear the bit accordingly + if lpmode is True: + value = 1 + else: + value = 0 + + try: + file = open(lowpower_path, "r+") + except IOError as e: + print "Error: unable to open file: %s" % str(e) + return False + + file.seek(0) + file.write(str(value)) + file.close() + + return True + + def reset(self, port_num): + if port_num not in self.qsfp_ports: + return False + reset_path = '/sys/bus/i2c/devices/8-0076/sfp'+str(port_num+1)+'_reset' + + try: + file = open(reset_path, "r+") + except IOError as e: + print "Error: unable to open file: %s" % str(e) + return False + + file.seek(0) + file.write(str(2)) + file.close() + + # Sleep 1 second to allow it to settle + time.sleep(1) + + try: + file = open(reset_path, "r+") + except IOError as e: + print "Error: unable to open file: %s" % str(e) + return False + + file.seek(0) + file.write(str(1)) + file.close() + + return True + + def read_porttab_mappings(self, porttabfile): + logical = [] + logical_to_bcm = {} + logical_to_physical = {} + physical_to_logical = {} + last_fp_port_index = 0 + last_portname = "" + first = 1 + port_pos_in_file = 0 + parse_fmt_port_config_ini = False + + try: + f = open(porttabfile) + except: + raise + + parse_fmt_port_config_ini = (os.path.basename(porttabfile) == "port_config.ini") + + # Read the porttab file and generate dicts + # with mapping for future reference. 
+ # XXX: move the porttab + # parsing stuff to a separate module, or reuse + # if something already exists + for line in f: + line.strip() + if re.search("^#", line) is not None: + continue + + # Parsing logic for 'port_config.ini' file + if (parse_fmt_port_config_ini): + # bcm_port is not explicitly listed in port_config.ini format + # Currently we assume ports are listed in numerical order according to bcm_port + # so we use the port's position in the file (zero-based) as bcm_port + portname = line.split()[0] + + bcm_port = str(port_pos_in_file) + + if len(line.split()) >= 4: + fp_port_index = int(line.split()[3]) + else: + fp_port_index = portname.split("Ethernet").pop() + fp_port_index = int(fp_port_index.split("s").pop(0))/4 + else: # Parsing logic for older 'portmap.ini' file + (portname, bcm_port) = line.split("=")[1].split(",")[:2] + + fp_port_index = portname.split("Ethernet").pop() + fp_port_index = int(fp_port_index.split("s").pop(0))/4 + + #Peter remove - 2018.04.13, this will cause can't show qsfp module when sfp_pot was set + #if ((len(self.sfp_ports) > 0) and (fp_port_index not in self.sfp_ports)): + #continue + + if first == 1: + # Initialize last_[physical|logical]_port + # to the first valid port + last_fp_port_index = fp_port_index + last_portname = portname + first = 0 + + logical.append(portname) + + logical_to_bcm[portname] = "xe" + bcm_port + logical_to_physical[portname] = [fp_port_index] + if physical_to_logical.get(fp_port_index) is None: + physical_to_logical[fp_port_index] = [portname] + else: + physical_to_logical[fp_port_index].append( + portname) + + if (fp_port_index - last_fp_port_index) > 1: + # last port was a gang port + for p in range(last_fp_port_index+1, fp_port_index): + logical_to_physical[last_portname].append(p) + if physical_to_logical.get(p) is None: + physical_to_logical[p] = [last_portname] + else: + physical_to_logical[p].append(last_portname) + + last_fp_port_index = fp_port_index + last_portname = portname + + 
port_pos_in_file += 1 + + self.logical = logical + self.logical_to_bcm = logical_to_bcm + self.logical_to_physical = logical_to_physical + self.physical_to_logical = physical_to_logical + + """ + print "logical: " + self.logical + print "logical to bcm: " + self.logical_to_bcm + print "logical to physical: " + self.logical_to_physical + print "physical to logical: " + self.physical_to_logical + """ + + def get_transceiver_change_event(self): + """ + TODO: This function need to be implemented + when decide to support monitoring SFP(Xcvrd) + on this platform. + """ + raise NotImplementedError + + + diff --git a/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/tau-fn-6254-dn-f.cfg b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/tau-fn-6254-dn-f.cfg new file mode 100755 index 000000000000..bbd7c8f80ff5 --- /dev/null +++ b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/tau-fn-6254-dn-f.cfg @@ -0,0 +1,23 @@ +#This configuration file is for customer init value feature. Please refer to mtk_cfg.h/mtk_cfg.c for detail. +#1. The lines beginning with # are comment lines. The lines beginning with number are the setting lines. +#2. There are five parameters which can be set. +# 1) the first is unit. +# 2) the second is NPS_CFG_TYPE_XXX. Refer to NPS_CFG_TYPE_T. +# 3) the 3-5 are {param0, param1, value} pairs. Refer to NPS_CFG_VALUE_T. Support HEX format. +# 4) the (unit, NPS_CFG_TYPE_XXX, param0, param1) group is the key to get the correspingding value. +# There should be no same (unit, NPS_CFG_TYPE_XXX, param0, param1) group. +#3. User must follow correct format to apply the setting. Please refer to below commentted example(#0 NPS_CFG_TYPE_L2_ADDR_MODE 0 0 1); +#4. Usage under the linux shell: +# 1) ./image-path/image-name -c cfg-path/NPS_Ari_EVB_24.cfg : mamually specify directory path if they are not in current work dirctory. +# 2) ./image-name -c NPS_Ari_EVB_24.cfg : the image and the NPS_Ari_EVB_24.cfg are in the current work directory. 
+ +#unit NPS_CFG_TYPE_XXX param0 param1 value +#---- ---------------- ------ ------ ----- +0 NPS_CFG_TYPE_USE_UNIT_PORT 0 0 1 +0 NPS_CFG_TYPE_LED_CFG 0 0 3 +0 NPS_CFG_TYPE_CPI_PORT_MODE 129 0 1 +0 NPS_CFG_TYPE_CPI_PORT_MODE 130 0 1 +0 NPS_CFG_TYPE_USER_BUF_CTRL 0 0 1 +0 NPS_CFG_TYPE_HASH_L2_FDB_REGION_ENTRY_NUM 0 0 49152 +0 NPS_CFG_TYPE_HASH_L3_WITH_IPV6_PREFIX_64_REGION_ENTRY_NUM 0 0 32768 + diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/kernel_patch/0001-update-Intel-ixgbe-x550-driver.patch b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/kernel_patch/0001-update-Intel-ixgbe-x550-driver.patch new file mode 100644 index 000000000000..cc9417d0d21d --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/kernel_patch/0001-update-Intel-ixgbe-x550-driver.patch @@ -0,0 +1,4648 @@ +From 80be203669d5cb1c5755e6195ab3d319547b4f55 Mon Sep 17 00:00:00 2001 +From: PeterLin +Date: Fri, 29 Mar 2019 09:22:35 +0800 +Subject: [PATCH] update Intel ixgbe x550 driver + +--- + drivers/net/ethernet/intel/ixgbe/ixgbe.h | 10 + + drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 28 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 15 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 439 ++++-- + drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 7 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 103 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 75 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 407 +++--- + drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 27 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 153 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 20 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 1668 +++++++++++++++++----- + 12 files changed, 2272 insertions(+), 680 deletions(-) + +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h +index b06e32d..255ec3b 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h ++++ 
b/drivers/net/ethernet/intel/ixgbe/ixgbe.h +@@ -89,6 +89,7 @@ + + /* Supported Rx Buffer Sizes */ + #define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ ++#define IXGBE_RXBUFFER_1536 1536 + #define IXGBE_RXBUFFER_2K 2048 + #define IXGBE_RXBUFFER_3K 3072 + #define IXGBE_RXBUFFER_4K 4096 +@@ -661,6 +662,9 @@ struct ixgbe_adapter { + #define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) + #define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12) + #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) ++#define IXGBE_FLAG2_EEE_CAPABLE BIT(14) ++#define IXGBE_FLAG2_EEE_ENABLED BIT(15) ++#define IXGBE_FLAG2_RX_LEGACY BIT(16) + + /* Tx fast path data */ + int num_tx_queues; +@@ -861,7 +865,9 @@ enum ixgbe_boards { + board_X540, + board_X550, + board_X550EM_x, ++ board_x550em_x_fw, + board_x550em_a, ++ board_x550em_a_fw, + }; + + extern const struct ixgbe_info ixgbe_82598_info; +@@ -869,7 +875,9 @@ extern const struct ixgbe_info ixgbe_82599_info; + extern const struct ixgbe_info ixgbe_X540_info; + extern const struct ixgbe_info ixgbe_X550_info; + extern const struct ixgbe_info ixgbe_X550EM_x_info; ++extern const struct ixgbe_info ixgbe_x550em_x_fw_info; + extern const struct ixgbe_info ixgbe_x550em_a_info; ++extern const struct ixgbe_info ixgbe_x550em_a_fw_info; + #ifdef CONFIG_IXGBE_DCB + extern const struct dcbnl_rtnl_ops dcbnl_ops; + #endif +@@ -1027,4 +1035,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, + struct ixgbe_ring *tx_ring); + u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); + void ixgbe_store_reta(struct ixgbe_adapter *adapter); ++s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, ++ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); + #endif /* _IXGBE_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +index fb51be7..8a32eb7 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +@@ -139,8 +139,6 @@ static s32 
ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) + case ixgbe_phy_tn: + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; + phy->ops.check_link = &ixgbe_check_phy_link_tnx; +- phy->ops.get_firmware_version = +- &ixgbe_get_phy_firmware_version_tnx; + break; + case ixgbe_phy_nl: + phy->ops.reset = &ixgbe_reset_phy_nl; +@@ -177,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) + **/ + static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) + { +-#ifndef CONFIG_SPARC +- u32 regval; +- u32 i; +-#endif + s32 ret_val; + + ret_val = ixgbe_start_hw_generic(hw); +- +-#ifndef CONFIG_SPARC +- /* Disable relaxed ordering */ +- for (i = 0; ((i < hw->mac.max_tx_queues) && +- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { +- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); +- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; +- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); +- } +- +- for (i = 0; ((i < hw->mac.max_rx_queues) && +- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { +- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); +- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | +- IXGBE_DCA_RXCTRL_HEAD_WRO_EN); +- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); +- } +-#endif + if (ret_val) + return ret_val; + +@@ -367,7 +343,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) + } + + /* Negotiate the fc mode to use */ +- ixgbe_fc_autoneg(hw); ++ hw->mac.ops.fc_autoneg(hw); + + /* Disable any previous flow control settings */ + fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); +@@ -1179,6 +1155,7 @@ static const struct ixgbe_mac_operations mac_ops_82598 = { + .get_link_capabilities = &ixgbe_get_link_capabilities_82598, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, ++ .init_led_link_act = ixgbe_init_led_link_act_generic, + .blink_led_start = &ixgbe_blink_led_start_generic, + .blink_led_stop = &ixgbe_blink_led_stop_generic, + .set_rar = &ixgbe_set_rar_generic, +@@ -1193,6 +1170,7 @@ static const struct ixgbe_mac_operations mac_ops_82598 = { + .set_vfta = 
&ixgbe_set_vfta_82598, + .fc_enable = &ixgbe_fc_enable_82598, + .setup_fc = ixgbe_setup_fc_generic, ++ .fc_autoneg = ixgbe_fc_autoneg, + .set_fw_drv_ver = NULL, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, + .release_swfw_sync = &ixgbe_release_swfw_sync, +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +index 63b2500..d602637 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +@@ -331,8 +331,6 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) + case ixgbe_phy_tn: + phy->ops.check_link = &ixgbe_check_phy_link_tnx; + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; +- phy->ops.get_firmware_version = +- &ixgbe_get_phy_firmware_version_tnx; + break; + default: + break; +@@ -1451,7 +1449,7 @@ do { \ + * @atr_input: input bitstream to compute the hash on + * @input_mask: mask for the input bitstream + * +- * This function serves two main purposes. First it applys the input_mask ++ * This function serves two main purposes. First it applies the input_mask + * to the atr_input resulting in a cleaned up atr_input data stream. + * Secondly it computes the hash and stores it in the bkt_hash field at + * the end of the input byte stream. 
This way it will be available for +@@ -1591,15 +1589,17 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + + switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) { + case 0x0000: +- /* mask VLAN ID, fall through to mask VLAN priority */ ++ /* mask VLAN ID */ + fdirm |= IXGBE_FDIRM_VLANID; ++ /* fall through */ + case 0x0FFF: + /* mask VLAN priority */ + fdirm |= IXGBE_FDIRM_VLANP; + break; + case 0xE000: +- /* mask VLAN ID only, fall through */ ++ /* mask VLAN ID only */ + fdirm |= IXGBE_FDIRM_VLANID; ++ /* fall through */ + case 0xEFFF: + /* no VLAN fields masked */ + break; +@@ -1610,8 +1610,9 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + + switch (input_mask->formatted.flex_bytes & 0xFFFF) { + case 0x0000: +- /* Mask Flex Bytes, fall through */ ++ /* Mask Flex Bytes */ + fdirm |= IXGBE_FDIRM_FLEX; ++ /* fall through */ + case 0xFFFF: + break; + default: +@@ -2204,6 +2205,7 @@ static const struct ixgbe_mac_operations mac_ops_82599 = { + .get_link_capabilities = &ixgbe_get_link_capabilities_82599, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, ++ .init_led_link_act = ixgbe_init_led_link_act_generic, + .blink_led_start = &ixgbe_blink_led_start_generic, + .blink_led_stop = &ixgbe_blink_led_stop_generic, + .set_rar = &ixgbe_set_rar_generic, +@@ -2219,6 +2221,7 @@ static const struct ixgbe_mac_operations mac_ops_82599 = { + .set_vfta = &ixgbe_set_vfta_generic, + .fc_enable = &ixgbe_fc_enable_generic, + .setup_fc = ixgbe_setup_fc_generic, ++ .fc_autoneg = ixgbe_fc_autoneg, + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, + .init_uta_tables = &ixgbe_init_uta_tables_generic, + .setup_sfp = &ixgbe_setup_sfp_modules_82599, +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +index ad33622..fd055cc 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +@@ -79,16 +79,28 @@ bool 
ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) + + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: +- hw->mac.ops.check_link(hw, &speed, &link_up, false); +- /* if link is down, assume supported */ +- if (link_up) +- supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? ++ /* flow control autoneg black list */ ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_A_SFP: ++ case IXGBE_DEV_ID_X550EM_A_SFP_N: ++ supported = false; ++ break; ++ default: ++ hw->mac.ops.check_link(hw, &speed, &link_up, false); ++ /* if link is down, assume supported */ ++ if (link_up) ++ supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? + true : false; +- else +- supported = true; ++ else ++ supported = true; ++ } ++ + break; + case ixgbe_media_type_backplane: +- supported = true; ++ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) ++ supported = false; ++ else ++ supported = true; + break; + case ixgbe_media_type_copper: + /* only some copper devices support flow control autoneg */ +@@ -100,6 +112,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) + case IXGBE_DEV_ID_X550T1: + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: + supported = true; + break; + default: +@@ -109,6 +123,10 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) + break; + } + ++ if (!supported) ++ hw_dbg(hw, "Device %x does not support flow control autoneg\n", ++ hw->device_id); ++ + return supported; + } + +@@ -153,7 +171,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) + if (ret_val) + return ret_val; + +- /* only backplane uses autoc so fall though */ ++ /* fall through - only backplane uses autoc */ + case ixgbe_media_type_fiber: + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + +@@ -279,6 +297,10 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) + s32 ret_val; + u32 ctrl_ext; + u16 device_caps; ++#if 1 //by hilbert ++ s32 rc; ++ u16 regVal=0; ++#endif + + /* Set the media type 
*/ + hw->phy.media_type = hw->mac.ops.get_media_type(hw); +@@ -298,10 +320,12 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + IXGBE_WRITE_FLUSH(hw); + +- /* Setup flow control */ +- ret_val = hw->mac.ops.setup_fc(hw); +- if (ret_val) +- return ret_val; ++ /* Setup flow control if method for doing so */ ++ if (hw->mac.ops.setup_fc) { ++ ret_val = hw->mac.ops.setup_fc(hw); ++ if (ret_val) ++ return ret_val; ++ } + + /* Cashe bit indicating need for crosstalk fix */ + switch (hw->mac.type) { +@@ -322,6 +346,67 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + ++#if 1 /* To modify speed LED polarity and configure led on only for speed 1G in M88E1512 ++ * for Porsche2 platform. By hilbert ++ * From 88E1512 datasheet: ++ * Page register: 0x16 ++ * LED functon control register: 0x10 in page 3 ++ * LED polarity control register: 0x11 in page 3 ++ */ ++ ++ if (hw->mac.type == ixgbe_mac_x550em_a && ++ (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { ++ /* For M88E1512, to select page 3 in register 0x16 */ ++ regVal = 0x03; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++#if 0 //for debug ++ /* For M88E1512, read from register 0x16 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x16, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "phy register read failed, rc:%x\n", rc); ++ } ++ hw_err(hw, "####read phy register 0x16 again, value:%x\n", regVal); ++#endif ++ /* For M88E1512, read from page 3, register 0x11 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x11, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "led polarity register read failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write to page 3 register 0x11 with polarity bit set */ ++ regVal |= 0x01; ++ rc = hw->phy.ops.write_reg(hw, 0x11, MDIO_MMD_PMAPMD, regVal); ++ if 
(rc) { ++ hw_err(hw, "led polarity register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, read from page 3, register 16 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "led function control register read failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write to page 3 register 16 with only 1000M led on */ ++ regVal = (regVal & 0xFFF0) | 0x0007; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write page 22 back to default 0 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ } ++#endif + return 0; + } + +@@ -346,25 +431,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) + } + IXGBE_WRITE_FLUSH(hw); + +-#ifndef CONFIG_SPARC +- /* Disable relaxed ordering */ +- for (i = 0; i < hw->mac.max_tx_queues; i++) { +- u32 regval; +- +- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); +- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; +- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); +- } +- +- for (i = 0; i < hw->mac.max_rx_queues; i++) { +- u32 regval; +- +- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); +- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | +- IXGBE_DCA_RXCTRL_HEAD_WRO_EN); +- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); +- } +-#endif + return 0; + } + +@@ -390,6 +456,10 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) + status = hw->mac.ops.start_hw(hw); + } + ++ /* Initialize the LED link active for LED blink support */ ++ if (hw->mac.ops.init_led_link_act) ++ hw->mac.ops.init_led_link_act(hw); ++ + return status; + } + +@@ -773,22 +843,100 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) + } + + /** ++ * ixgbe_init_led_link_act_generic - Store the LED index link/activity. 
++ * @hw: pointer to hardware structure ++ * ++ * Store the index for the link active LED. This will be used to support ++ * blinking the LED. ++ **/ ++s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ u32 led_reg, led_mode; ++ u16 i; ++ ++ led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ++ ++ /* Get LED link active from the LEDCTL register */ ++ for (i = 0; i < 4; i++) { ++ led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i); ++ ++ if ((led_mode & IXGBE_LED_MODE_MASK_BASE) == ++ IXGBE_LED_LINK_ACTIVE) { ++ mac->led_link_act = i; ++ return 0; ++ } ++ } ++ ++ /* If LEDCTL register does not have the LED link active set, then use ++ * known MAC defaults. ++ */ ++ switch (hw->mac.type) { ++ case ixgbe_mac_x550em_a: ++ mac->led_link_act = 0; ++ break; ++ case ixgbe_mac_X550EM_x: ++ mac->led_link_act = 1; ++ break; ++ default: ++ mac->led_link_act = 2; ++ } ++ ++ return 0; ++} ++ ++/** + * ixgbe_led_on_generic - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn on + **/ + s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) + { +- u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); +- +- if (index > 3) +- return IXGBE_ERR_PARAM; +- +- /* To turn on the LED, set mode to ON. */ +- led_reg &= ~IXGBE_LED_MODE_MASK(index); +- led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); +- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); +- IXGBE_WRITE_FLUSH(hw); ++ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ++ s32 rc; ++ u16 regVal; ++ ++ /* following led behavior was modified by hilbert, ++ * to force led on through C22 MDI command. 
++ */ ++ if (hw->mac.type == ixgbe_mac_x550em_a) { ++ /* For M88E1512, to select page 3 in register 22 */ ++ regVal = 0x03; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, read from page 3, register 16 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "led function control register read failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write to page 3 register 16 with force led on */ ++ regVal = (regVal & 0xFF00) | 0x0099; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write page 22 back to default 0 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ } else { ++ if (index > 3) ++ return IXGBE_ERR_PARAM; ++ ++ /* To turn on the LED, set mode to ON. */ ++ led_reg &= ~IXGBE_LED_MODE_MASK(index); ++ led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); ++ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); ++ IXGBE_WRITE_FLUSH(hw); ++ } + + return 0; + } +@@ -801,15 +949,50 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) + s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) + { + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); +- +- if (index > 3) +- return IXGBE_ERR_PARAM; +- +- /* To turn off the LED, set mode to OFF. */ +- led_reg &= ~IXGBE_LED_MODE_MASK(index); +- led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); +- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); +- IXGBE_WRITE_FLUSH(hw); ++ s32 rc; ++ u16 regVal; ++ ++ /* following led behavior was modified by hilbert, ++ * to force led on through C22 MDI command. 
++ */ ++ if (hw->mac.type == ixgbe_mac_x550em_a) { ++ /* For M88E1512, to select page 3 in register 22 */ ++ regVal = 0x03; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, read from page 3, register 16 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "led function control register read failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write to page 3 register 16 with force led on */ ++ regVal = (regVal & 0xFF00) | 0x0088; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write page 22 back to default 0 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ } else { ++ if (index > 3) ++ return IXGBE_ERR_PARAM; ++ ++ /* To turn off the LED, set mode to OFF. 
*/ ++ led_reg &= ~IXGBE_LED_MODE_MASK(index); ++ led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); ++ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); ++ IXGBE_WRITE_FLUSH(hw); ++ } + + return 0; + } +@@ -2127,7 +2310,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) + } + + /* Negotiate the fc mode to use */ +- ixgbe_fc_autoneg(hw); ++ hw->mac.ops.fc_autoneg(hw); + + /* Disable any previous flow control settings */ + mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); +@@ -2231,8 +2414,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) + * Find the intersection between advertised settings and link partner's + * advertised settings + **/ +-static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, +- u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) ++s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, ++ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) + { + if ((!(adv_reg)) || (!(lp_reg))) + return IXGBE_ERR_FC_NOT_NEGOTIATED; +@@ -3334,6 +3517,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + else + *speed = IXGBE_LINK_SPEED_100_FULL; + break; ++ case IXGBE_LINKS_SPEED_10_X550EM_A: ++ *speed = IXGBE_LINK_SPEED_UNKNOWN; ++ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || ++ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { ++ *speed = IXGBE_LINK_SPEED_10_FULL; ++ } ++ break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } +@@ -3491,7 +3681,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, + rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; + for (; i < (num_pb / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); +- /* Fall through to configure remaining packet buffers */ ++ /* fall through - configure remaining packet buffers */ + case (PBA_STRATEGY_EQUAL): + /* Divide the remaining Rx packet buffer evenly among the TCs */ + rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; +@@ -3530,7 +3720,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, + * Calculates the checksum 
for some buffer on a specified length. The + * checksum calculated is returned. + **/ +-static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) ++u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) + { + u32 i; + u8 sum = 0; +@@ -3545,43 +3735,29 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) + } + + /** +- * ixgbe_host_interface_command - Issue command to manageability block ++ * ixgbe_hic_unlocked - Issue command to manageability block unlocked + * @hw: pointer to the HW structure +- * @buffer: contains the command to write and where the return status will +- * be placed ++ * @buffer: command to write and where the return status will be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion +- * @return_data: read and return data from the buffer (true) or not (false) +- * Needed because FW structures are big endian and decoding of +- * these fields can be 8 bit or 16 bit based on command. Decoding +- * is not easily understood without making a table of commands. +- * So we will leave this up to the caller to read back the data +- * in these cases. + * +- * Communicates with the manageability block. On success return 0 +- * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. ++ * Communicates with the manageability block. On success return 0 ++ * else returns semaphore error when encountering an error acquiring ++ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. ++ * ++ * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held ++ * by the caller. 
+ **/ +-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, +- u32 length, u32 timeout, +- bool return_data) ++s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, ++ u32 timeout) + { +- u32 hdr_size = sizeof(struct ixgbe_hic_hdr); +- u32 hicr, i, bi, fwsts; +- u16 buf_len, dword_len; +- union { +- struct ixgbe_hic_hdr hdr; +- u32 u32arr[1]; +- } *bp = buffer; +- s32 status; ++ u32 hicr, i, fwsts; ++ u16 dword_len; + + if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } +- /* Take management host interface semaphore */ +- status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); +- if (status) +- return status; + + /* Set bit 9 of FWSTS clearing FW reset indication */ + fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); +@@ -3591,15 +3767,13 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, + hicr = IXGBE_READ_REG(hw, IXGBE_HICR); + if (!(hicr & IXGBE_HICR_EN)) { + hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); +- status = IXGBE_ERR_HOST_INTERFACE_COMMAND; +- goto rel_out; ++ return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if (length % sizeof(u32)) { + hw_dbg(hw, "Buffer length failure, not aligned to dword"); +- status = IXGBE_ERR_INVALID_ARGUMENT; +- goto rel_out; ++ return IXGBE_ERR_INVALID_ARGUMENT; + } + + dword_len = length >> 2; +@@ -3609,7 +3783,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, + */ + for (i = 0; i < dword_len; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, +- i, cpu_to_le32(bp->u32arr[i])); ++ i, cpu_to_le32(buffer[i])); + + /* Setting this bit tells the ARC that a new command is pending. */ + IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); +@@ -3623,11 +3797,54 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, + + /* Check command successful completion. 
*/ + if ((timeout && i == timeout) || +- !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { +- hw_dbg(hw, "Command has failed with no status valid.\n"); +- status = IXGBE_ERR_HOST_INTERFACE_COMMAND; +- goto rel_out; ++ !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) ++ return IXGBE_ERR_HOST_INTERFACE_COMMAND; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_host_interface_command - Issue command to manageability block ++ * @hw: pointer to the HW structure ++ * @buffer: contains the command to write and where the return status will ++ * be placed ++ * @length: length of buffer, must be multiple of 4 bytes ++ * @timeout: time in ms to wait for command completion ++ * @return_data: read and return data from the buffer (true) or not (false) ++ * Needed because FW structures are big endian and decoding of ++ * these fields can be 8 bit or 16 bit based on command. Decoding ++ * is not easily understood without making a table of commands. ++ * So we will leave this up to the caller to read back the data ++ * in these cases. ++ * ++ * Communicates with the manageability block. On success return 0 ++ * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. 
++ **/ ++s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, ++ u32 length, u32 timeout, ++ bool return_data) ++{ ++ u32 hdr_size = sizeof(struct ixgbe_hic_hdr); ++ union { ++ struct ixgbe_hic_hdr hdr; ++ u32 u32arr[1]; ++ } *bp = buffer; ++ u16 buf_len, dword_len; ++ s32 status; ++ u32 bi; ++ ++ if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { ++ hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); ++ return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } ++ /* Take management host interface semaphore */ ++ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); ++ if (status) ++ return status; ++ ++ status = ixgbe_hic_unlocked(hw, buffer, length, timeout); ++ if (status) ++ goto rel_out; + + if (!return_data) + goto rel_out; +@@ -3674,6 +3891,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number ++ * @len: length of driver_ver string ++ * @driver_ver: driver string + * + * Sends driver version number to firmware through the manageability + * block. On success return 0 +@@ -3681,7 +3900,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
+ **/ + s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, +- u8 build, u8 sub) ++ u8 build, u8 sub, __always_unused u16 len, ++ __always_unused const char *driver_ver) + { + struct ixgbe_hic_drv_info fw_cmd; + int i; +@@ -4033,15 +4253,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + speedcnt++; + highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; + +- /* If we already have link at this speed, just jump out */ +- status = hw->mac.ops.check_link(hw, &link_speed, &link_up, +- false); +- if (status) +- return status; +- +- if (link_speed == IXGBE_LINK_SPEED_10GB_FULL && link_up) +- goto out; +- + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: +@@ -4093,15 +4304,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; + +- /* If we already have link at this speed, just jump out */ +- status = hw->mac.ops.check_link(hw, &link_speed, &link_up, +- false); +- if (status) +- return status; +- +- if (link_speed == IXGBE_LINK_SPEED_1GB_FULL && link_up) +- goto out; +- + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: +@@ -4208,4 +4410,23 @@ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, + hw_dbg(hw, "Failed to write Rx Rate Select RS0\n"); + return; + } ++ ++ /* Set RS1 */ ++ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, ++ IXGBE_I2C_EEPROM_DEV_ADDR2, ++ &eeprom_data); ++ if (status) { ++ hw_dbg(hw, "Failed to read Rx Rate Select RS1\n"); ++ return; ++ } ++ ++ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; ++ ++ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, ++ IXGBE_I2C_EEPROM_DEV_ADDR2, ++ eeprom_data); ++ if (status) { ++ hw_dbg(hw, "Failed to write Rx Rate Select RS1\n"); ++ return; ++ } + } +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +index 6d4c260..e083732 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +@@ -49,6 +49,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); + + s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); + s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); ++s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw); + + s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); + s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +@@ -110,9 +111,13 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); + void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); + s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); + s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, +- u8 build, u8 ver); ++ u8 build, u8 ver, u16 len, const char *str); ++u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); + s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, + u32 timeout, bool return_data); ++s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout); ++s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, ++ u32 (*data)[FW_PHY_ACT_DATA_COUNT]); + void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); + bool ixgbe_mng_present(struct ixgbe_hw *hw); + bool ixgbe_mng_enabled(struct ixgbe_hw *hw); +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +index a137e06..6b23b74 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +@@ -172,6 +172,7 @@ static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw) + case IXGBE_DEV_ID_82598_BX: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_X550EM_X_KR: ++ case IXGBE_DEV_ID_X550EM_X_XFI: + return SUPPORTED_10000baseKR_Full; + default: + 
return SUPPORTED_10000baseKX4_Full | +@@ -237,6 +238,7 @@ static int ixgbe_get_settings(struct net_device *netdev, + case ixgbe_phy_tn: + case ixgbe_phy_aq: + case ixgbe_phy_x550em_ext_t: ++ case ixgbe_phy_fw: + case ixgbe_phy_cu_unknown: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; +@@ -394,6 +396,9 @@ static int ixgbe_set_settings(struct net_device *netdev, + if (ecmd->advertising & ADVERTISED_100baseT_Full) + advertised |= IXGBE_LINK_SPEED_100_FULL; + ++ if (ecmd->advertising & ADVERTISED_10baseT_Full) ++ advertised |= IXGBE_LINK_SPEED_10_FULL; ++ + if (old == advertised) + return err; + /* this sets the link speed and restarts auto-neg */ +@@ -491,6 +496,59 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; ++ ++ /* 2018/11/14 pega-julia modified start */ ++ /* Purpose : Add for light OOB LED static. */ ++ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u16 regVal; ++ s32 rc; ++ ++ /* For M88E1512, write 3 in (page 0,register 22)[Page Address Register] to goto page 3 */ ++ regVal = 0x03; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ ++ /* For M88E1512, read from (page 3, register 16)[LED Function Control Register] */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); ++ /*hw_err(hw, "[Pega Debug] : current register value = 0x%x\n", regVal);*/ ++ if (rc) ++ hw_err(hw, "led function control register read failed, rc:%x\n", rc); ++ ++ if (data == 0) /* Turn off OOB LED. */ ++ { ++ /* For M88E1512, write to (page 3, register 16) with force led off */ ++ regVal = (regVal & 0xFF00) | 0x0088; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ else if (data == 1) /* Turn on OOB LED. 
*/ ++ { ++ /* For M88E1512, write to (page 3, register 16) with force led on */ ++ regVal = (regVal & 0xFF00) | 0x0099; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ else /* Switch OOB LED back to normal. */ ++ { ++ /* For M88E1512, set led back to normal in (page 3, register 16). */ ++ regVal = (regVal & 0xFF00) | 0x0017; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write 0 in (page 0, register 22) to back to page 0 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ ++ /* 2018/11/14 pega-julia modified end */ + } + + static int ixgbe_get_regs_len(struct net_device *netdev) +@@ -2219,22 +2277,61 @@ static int ixgbe_set_phys_id(struct net_device *netdev, + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + ++ /* Modified by hilbert for C22 MDI directly access */ ++ s32 rc; ++ u16 regVal; ++ /* Modified by hilbert done */ ++ ++ if (!hw->mac.ops.led_on || !hw->mac.ops.led_off) ++ return -EOPNOTSUPP; ++ + switch (state) { + case ETHTOOL_ID_ACTIVE: + adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + return 2; + + case ETHTOOL_ID_ON: +- hw->mac.ops.led_on(hw, hw->bus.func); ++ hw->mac.ops.led_on(hw, hw->mac.led_link_act); + break; + + case ETHTOOL_ID_OFF: +- hw->mac.ops.led_off(hw, hw->bus.func); ++ hw->mac.ops.led_off(hw, hw->mac.led_link_act); + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ +- IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); ++ /* Modified by hilbert for C22 MDI directly access */ ++ if (hw->mac.type == ixgbe_mac_x550em_a) { ++ /* For M88E1512, to select page 3 in register 22 */ ++ regVal = 0x03; ++ rc = 
hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, read from page 3, register 16 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "led function control register read failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write to page 3 register 16 with force led on */ ++ regVal = (regVal & 0xFF00) | 0x0017; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write page 22 back to default 0 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ } else { ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); ++ } + break; + } + +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +index a5428b6..d6d3a78 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +@@ -84,7 +84,9 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { + [board_X540] = &ixgbe_X540_info, + [board_X550] = &ixgbe_X550_info, + [board_X550EM_x] = &ixgbe_X550EM_x_info, ++ [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info, + [board_x550em_a] = &ixgbe_x550em_a_info, ++ [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info, + }; + + /* ixgbe_pci_tbl - PCI Device ID Table +@@ -129,9 +131,11 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, + {PCI_VDEVICE(INTEL, 
IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, +@@ -139,6 +143,8 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw }, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw }, + /* required last entry */ + {0, } + }; +@@ -179,6 +185,7 @@ MODULE_VERSION(DRV_VERSION); + static struct workqueue_struct *ixgbe_wq; + + static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); ++static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); + + static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, + u32 reg, u16 *value) +@@ -374,7 +381,7 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) + if (ixgbe_removed(reg_addr)) + return IXGBE_FAILED_READ_REG; + if (unlikely(hw->phy.nw_mng_if_sel & +- IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) { ++ IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) { + struct ixgbe_adapter *adapter; + int i; + +@@ -2446,6 +2453,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr = adapter->interrupt_event; ++ s32 rc; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; +@@ -2484,6 +2492,12 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) + return; + + break; ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ rc = hw->phy.ops.check_overtemp(hw); ++ if (rc != 
IXGBE_ERR_OVERTEMP) ++ return; ++ break; + default: + if (adapter->hw.mac.type >= ixgbe_mac_X540) + return; +@@ -2530,6 +2544,18 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) + return; + } + return; ++ case ixgbe_mac_x550em_a: ++ if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) { ++ adapter->interrupt_event = eicr; ++ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; ++ ixgbe_service_event_schedule(adapter); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ++ IXGBE_EICR_GPI_SDP0_X550EM_a); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR, ++ IXGBE_EICR_GPI_SDP0_X550EM_a); ++ } ++ return; ++ case ixgbe_mac_X550: + case ixgbe_mac_X540: + if (!(eicr & IXGBE_EICR_TS)) + return; +@@ -5035,7 +5061,7 @@ static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) + static void ixgbe_configure(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; +- ++ + ixgbe_configure_pb(adapter); + #ifdef CONFIG_IXGBE_DCB + ixgbe_configure_dcb(adapter); +@@ -5045,10 +5071,9 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) + * the VLVF registers will not be populated + */ + ixgbe_configure_virtualization(adapter); +- + ixgbe_set_rx_mode(adapter->netdev); + ixgbe_restore_vlan(adapter); +- ++ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: +@@ -5075,7 +5100,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) + default: + break; + } +- + #ifdef CONFIG_IXGBE_DCA + /* configure DCA */ + if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) +@@ -5291,6 +5315,8 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) + + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); ++ if (adapter->hw.phy.type == ixgbe_phy_fw) ++ ixgbe_watchdog_link_is_down(adapter); + ixgbe_down(adapter); + /* + * If SR-IOV enabled then wait a bit before bringing the adapter +@@ -5706,6 +5732,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) + break; + case ixgbe_mac_x550em_a: + 
adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE; ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; ++ break; ++ default: ++ break; ++ } + /* fall through */ + case ixgbe_mac_X550EM_x: + #ifdef CONFIG_IXGBE_DCB +@@ -5719,6 +5753,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) + #endif /* IXGBE_FCOE */ + /* Fall Through */ + case ixgbe_mac_X550: ++ if (hw->mac.type == ixgbe_mac_X550) ++ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + #ifdef CONFIG_IXGBE_DCA + adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; + #endif +@@ -6093,29 +6129,28 @@ int ixgbe_open(struct net_device *netdev) + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int err, queues; +- ++ + /* disallow open during test */ + if (test_bit(__IXGBE_TESTING, &adapter->state)) + return -EBUSY; +- ++ + netif_carrier_off(netdev); +- ++ + /* allocate transmit descriptors */ + err = ixgbe_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; +- ++ + /* allocate receive descriptors */ + err = ixgbe_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; +- ++ + ixgbe_configure(adapter); +- +- err = ixgbe_request_irq(adapter); ++ err = ixgbe_request_irq(adapter); + if (err) + goto err_req_irq; +- ++ + /* Notify the stack of the actual queue counts. 
*/ + if (adapter->num_rx_pools > 1) + queues = adapter->num_rx_queues_per_pool; +@@ -6791,6 +6826,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) + case IXGBE_LINK_SPEED_100_FULL: + speed_str = "100 Mbps"; + break; ++ case IXGBE_LINK_SPEED_10_FULL: ++ speed_str = "10 Mbps"; ++ break; + default: + speed_str = "unknown speed"; + break; +@@ -8013,6 +8051,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) + return ixgbe_ptp_set_ts_config(adapter, req); + case SIOCGHWTSTAMP: + return ixgbe_ptp_get_ts_config(adapter, req); ++ case SIOCGMIIPHY: ++ if (!adapter->hw.phy.ops.read_reg) ++ return -EOPNOTSUPP; ++ /* fall through */ + default: + return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); + } +@@ -9480,6 +9522,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + hw->mac.ops = *ii->mac_ops; + hw->mac.type = ii->mac; + hw->mvals = ii->mvals; ++ if (ii->link_ops) ++ hw->link.ops = *ii->link_ops; + + /* EEPROM */ + hw->eeprom.ops = *ii->eeprom_ops; +@@ -9777,8 +9821,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + * since os does not support feature + */ + if (hw->mac.ops.set_fw_drv_ver) +- hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, +- 0xFF); ++ hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, ++ sizeof(ixgbe_driver_version) - 1, ++ ixgbe_driver_version); + + /* add san mac addr to netdev */ + ixgbe_add_sanmac_netdev(netdev); +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +index b17464e..d914b40 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +@@ -109,8 +109,8 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) + * + * Returns an error code on error. 
+ */ +-static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, +- u16 reg, u16 *val, bool lock) ++s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, ++ u16 reg, u16 *val, bool lock) + { + u32 swfw_mask = hw->phy.phy_semaphore_mask; + int max_retry = 3; +@@ -178,36 +178,6 @@ static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, + } + + /** +- * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation +- * @hw: pointer to the hardware structure +- * @addr: I2C bus address to read from +- * @reg: I2C device register to read from +- * @val: pointer to location to receive read value +- * +- * Returns an error code on error. +- */ +-s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, +- u16 reg, u16 *val) +-{ +- return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); +-} +- +-/** +- * ixgbe_read_i2c_combined_generic_unlocked - Unlocked I2C read combined +- * @hw: pointer to the hardware structure +- * @addr: I2C bus address to read from +- * @reg: I2C device register to read from +- * @val: pointer to location to receive read value +- * +- * Returns an error code on error. +- */ +-s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, +- u16 reg, u16 *val) +-{ +- return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); +-} +- +-/** + * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to +@@ -217,8 +187,8 @@ s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, + * + * Returns an error code on error. 
+ */ +-static s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, +- u16 reg, u16 val, bool lock) ++s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, ++ u16 reg, u16 val, bool lock) + { + u32 swfw_mask = hw->phy.phy_semaphore_mask; + int max_retry = 1; +@@ -273,33 +243,41 @@ static s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, + } + + /** +- * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation +- * @hw: pointer to the hardware structure +- * @addr: I2C bus address to write to +- * @reg: I2C device register to write to +- * @val: value to write ++ * ixgbe_probe_phy - Probe a single address for a PHY ++ * @hw: pointer to hardware structure ++ * @phy_addr: PHY address to probe + * +- * Returns an error code on error. +- */ +-s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, +- u8 addr, u16 reg, u16 val) ++ * Returns true if PHY found ++ **/ ++static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr) + { +- return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); +-} ++ u16 ext_ability = 0; + +-/** +- * ixgbe_write_i2c_combined_generic_unlocked - Unlocked I2C write combined +- * @hw: pointer to the hardware structure +- * @addr: I2C bus address to write to +- * @reg: I2C device register to write to +- * @val: value to write +- * +- * Returns an error code on error. 
+- */ +-s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, +- u8 addr, u16 reg, u16 val) +-{ +- return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); ++ hw->phy.mdio.prtad = phy_addr; ++ if (mdio45_probe(&hw->phy.mdio, phy_addr) != 0) { ++ return false; ++ } ++ ++ if (ixgbe_get_phy_id(hw)) { ++ return false; ++ } ++ ++ hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); ++ ++ if (hw->phy.type == ixgbe_phy_unknown) { ++ hw->phy.ops.read_reg(hw, ++ MDIO_PMA_EXTABLE, ++ MDIO_MMD_PMAPMD, ++ &ext_ability); ++ if (ext_ability & ++ (MDIO_PMA_EXTABLE_10GBT | ++ MDIO_PMA_EXTABLE_1000BT)) ++ hw->phy.type = ixgbe_phy_cu_unknown; ++ else ++ hw->phy.type = ixgbe_phy_generic; ++ } ++ ++ return true; + } + + /** +@@ -311,7 +289,7 @@ s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, + s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) + { + u32 phy_addr; +- u16 ext_ability = 0; ++ u32 status = IXGBE_ERR_PHY_ADDR_INVALID; + + if (!hw->phy.phy_semaphore_mask) { + if (hw->bus.lan_id) +@@ -320,37 +298,34 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + } + +- if (hw->phy.type == ixgbe_phy_unknown) { +- for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { +- hw->phy.mdio.prtad = phy_addr; +- if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) { +- ixgbe_get_phy_id(hw); +- hw->phy.type = +- ixgbe_get_phy_type_from_id(hw->phy.id); +- +- if (hw->phy.type == ixgbe_phy_unknown) { +- hw->phy.ops.read_reg(hw, +- MDIO_PMA_EXTABLE, +- MDIO_MMD_PMAPMD, +- &ext_ability); +- if (ext_ability & +- (MDIO_PMA_EXTABLE_10GBT | +- MDIO_PMA_EXTABLE_1000BT)) +- hw->phy.type = +- ixgbe_phy_cu_unknown; +- else +- hw->phy.type = +- ixgbe_phy_generic; +- } ++ if (hw->phy.type != ixgbe_phy_unknown) ++ return 0; + +- return 0; +- } ++ if (hw->phy.nw_mng_if_sel) { ++ phy_addr = (hw->phy.nw_mng_if_sel & ++ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> ++ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; ++ if 
(ixgbe_probe_phy(hw, phy_addr)) ++ return 0; ++ else ++ return IXGBE_ERR_PHY_ADDR_INVALID; ++ } ++ ++ for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { ++ if (ixgbe_probe_phy(hw, phy_addr)) { ++ status = 0; ++ break; + } +- /* indicate no PHY found */ +- hw->phy.mdio.prtad = MDIO_PRTAD_NONE; +- return IXGBE_ERR_PHY_ADDR_INVALID; + } +- return 0; ++ ++ /* Certain media types do not have a phy so an address will not ++ * be found and the code will take this path. Caller has to ++ * decide if it is an error or not. ++ */ ++ if (status) ++ hw->phy.mdio.prtad = MDIO_PRTAD_NONE; ++ ++ return status; + } + + /** +@@ -416,7 +391,8 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) + case TN1010_PHY_ID: + phy_type = ixgbe_phy_tn; + break; +- case X550_PHY_ID: ++ case X550_PHY_ID2: ++ case X550_PHY_ID3: + case X540_PHY_ID: + phy_type = ixgbe_phy_aq; + break; +@@ -427,6 +403,7 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) + phy_type = ixgbe_phy_nl; + break; + case X557_PHY_ID: ++ case X557_PHY_ID2: + phy_type = ixgbe_phy_x550em_ext_t; + break; + default: +@@ -477,11 +454,27 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) + */ + for (i = 0; i < 30; i++) { + msleep(100); +- hw->phy.ops.read_reg(hw, MDIO_CTRL1, +- MDIO_MMD_PHYXS, &ctrl); +- if (!(ctrl & MDIO_CTRL1_RESET)) { +- udelay(2); +- break; ++ if (hw->phy.type == ixgbe_phy_x550em_ext_t) { ++ status = hw->phy.ops.read_reg(hw, ++ IXGBE_MDIO_TX_VENDOR_ALARMS_3, ++ MDIO_MMD_PMAPMD, &ctrl); ++ if (status) ++ return status; ++ ++ if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { ++ udelay(2); ++ break; ++ } ++ } else { ++ status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, ++ MDIO_MMD_PHYXS, &ctrl); ++ if (status) ++ return status; ++ ++ if (!(ctrl & MDIO_CTRL1_RESET)) { ++ udelay(2); ++ break; ++ } + } + } + +@@ -494,6 +487,98 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) + } + + /** ++ * ixgbe_read_phy_mdio - Reads a value from a specified PHY register without ++ * 
the SWFW lock. This Clause 22 API is patched by Hilbert ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit address of PHY register to read ++ * @phy_data: Pointer to read data from PHY register ++ **/ ++s32 ixgbe_read_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, ++ u16 *phy_data) ++{ ++ u32 i, data, command; ++ ++ /* Setup and write the read command */ ++ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | ++ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | ++ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC | ++ IXGBE_MSCA_MDI_COMMAND; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); ++ ++ /* Check every 10 usec to see if the address cycle completed. ++ * The MDI Command bit will clear when the operation is ++ * complete ++ */ ++ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { ++ udelay(10); ++ ++ command = IXGBE_READ_REG(hw, IXGBE_MSCA); ++ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) ++ break; ++ } ++ ++ ++ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { ++ hw_dbg(hw, "PHY address command did not complete.\n"); ++ return IXGBE_ERR_PHY; ++ } ++ ++ /* Read operation is complete. Get the data ++ * from MSRWD ++ */ ++ data = IXGBE_READ_REG(hw, IXGBE_MSRWD); ++ data >>= IXGBE_MSRWD_READ_DATA_SHIFT; ++ *phy_data = (u16)(data); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_write_phy_reg_mdio - Writes a value to specified PHY register ++ * without SWFW lock.
This Clause 22 API is patched by Hilbert ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit PHY register to write ++ * @device_type: 5 bit device type ++ * @phy_data: Data to write to the PHY register ++ **/ ++s32 ixgbe_write_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 phy_data) ++{ ++ u32 i, command; ++ ++ /* Put the data in the MDI single read and write data register*/ ++ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); ++ ++ /* Setup and write the write command */ ++ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | ++ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | ++ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | ++ IXGBE_MSCA_MDI_COMMAND; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); ++ ++ /* ++ * Check every 10 usec to see if the address cycle completed. ++ * The MDI Command bit will clear when the operation is ++ * complete ++ */ ++ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { ++ udelay(10); ++ ++ command = IXGBE_READ_REG(hw, IXGBE_MSCA); ++ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) ++ break; ++ } ++ ++ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { ++ hw_dbg(hw, "PHY write cmd didn't complete\n"); ++ return IXGBE_ERR_PHY; ++ } ++ ++ return 0; ++} ++/** + * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without + * the SWFW lock + * @hw: pointer to hardware structure +@@ -705,53 +790,52 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) + + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + +- if (speed & IXGBE_LINK_SPEED_10GB_FULL) { +- /* Set or unset auto-negotiation 10G advertisement */ +- hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, +- MDIO_MMD_AN, +- &autoneg_reg); ++ /* Set or unset auto-negotiation 10G advertisement */ ++ hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg); + +- autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; +- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) +- autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; ++ 
autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; ++ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) && ++ (speed & IXGBE_LINK_SPEED_10GB_FULL)) ++ autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + +- hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, +- MDIO_MMD_AN, +- autoneg_reg); +- } +- +- if (speed & IXGBE_LINK_SPEED_1GB_FULL) { +- /* Set or unset auto-negotiation 1G advertisement */ +- hw->phy.ops.read_reg(hw, +- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, +- MDIO_MMD_AN, +- &autoneg_reg); ++ hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg); + +- autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; +- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) +- autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; ++ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, ++ MDIO_MMD_AN, &autoneg_reg); + +- hw->phy.ops.write_reg(hw, +- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, +- MDIO_MMD_AN, +- autoneg_reg); ++ if (hw->mac.type == ixgbe_mac_X550) { ++ /* Set or unset auto-negotiation 5G advertisement */ ++ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE; ++ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) && ++ (speed & IXGBE_LINK_SPEED_5GB_FULL)) ++ autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE; ++ ++ /* Set or unset auto-negotiation 2.5G advertisement */ ++ autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE; ++ if ((hw->phy.autoneg_advertised & ++ IXGBE_LINK_SPEED_2_5GB_FULL) && ++ (speed & IXGBE_LINK_SPEED_2_5GB_FULL)) ++ autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE; + } + +- if (speed & IXGBE_LINK_SPEED_100_FULL) { +- /* Set or unset auto-negotiation 100M advertisement */ +- hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, +- MDIO_MMD_AN, +- &autoneg_reg); ++ /* Set or unset auto-negotiation 1G advertisement */ ++ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; ++ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) && ++ (speed & IXGBE_LINK_SPEED_1GB_FULL)) ++ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; + +- autoneg_reg &= 
~(ADVERTISE_100FULL | +- ADVERTISE_100HALF); +- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) +- autoneg_reg |= ADVERTISE_100FULL; ++ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, ++ MDIO_MMD_AN, autoneg_reg); + +- hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, +- MDIO_MMD_AN, +- autoneg_reg); +- } ++ /* Set or unset auto-negotiation 100M advertisement */ ++ hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg); ++ ++ autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF); ++ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) && ++ (speed & IXGBE_LINK_SPEED_100_FULL)) ++ autoneg_reg |= ADVERTISE_100FULL; ++ ++ hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg); + + /* Blocked by MNG FW so don't reset PHY */ + if (ixgbe_check_reset_blocked(hw)) +@@ -778,9 +862,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) + { +- +- /* +- * Clear autoneg_advertised and set new values based on input link ++ /* Clear autoneg_advertised and set new values based on input link + * speed. 
+ */ + hw->phy.autoneg_advertised = 0; +@@ -788,14 +870,24 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + ++ if (speed & IXGBE_LINK_SPEED_5GB_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL; ++ ++ if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; ++ + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (speed & IXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + ++ if (speed & IXGBE_LINK_SPEED_10_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL; ++ + /* Setup link based on the new speed settings */ +- hw->phy.ops.setup_link(hw); ++ if (hw->phy.ops.setup_link) ++ hw->phy.ops.setup_link(hw); + + return 0; + } +@@ -830,6 +922,7 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; + break; + case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_x550em_a: + hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; + break; + default: +@@ -986,40 +1079,6 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) + } + + /** +- * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version +- * @hw: pointer to hardware structure +- * @firmware_version: pointer to the PHY Firmware Version +- **/ +-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, +- u16 *firmware_version) +-{ +- s32 status; +- +- status = hw->phy.ops.read_reg(hw, TNX_FW_REV, +- MDIO_MMD_VEND1, +- firmware_version); +- +- return status; +-} +- +-/** +- * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version +- * @hw: pointer to hardware structure +- * @firmware_version: pointer to the PHY Firmware Version +- **/ +-s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, +- u16 *firmware_version) +-{ +- s32 status; +- +- status 
= hw->phy.ops.read_reg(hw, AQ_FW_REV, +- MDIO_MMD_VEND1, +- firmware_version); +- +- return status; +-} +- +-/** + * ixgbe_reset_phy_nl - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +@@ -2398,9 +2457,7 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) + if (!on && ixgbe_mng_present(hw)) + return 0; + +- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, +- ®); ++ status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, ®); + if (status) + return status; + +@@ -2412,8 +2469,6 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) + reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; + } + +- status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, +- reg); ++ status = hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, reg); + return status; + } +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +index cc735ec..e9f94ee 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +@@ -84,8 +84,9 @@ + #define IXGBE_CS4227_GLOBAL_ID_LSB 0 + #define IXGBE_CS4227_GLOBAL_ID_MSB 1 + #define IXGBE_CS4227_SCRATCH 2 +-#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */ +-#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */ ++#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F ++#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */ ++#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */ + #define IXGBE_CS4227_RESET_PENDING 0x1357 + #define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 + #define IXGBE_CS4227_RETRIES 15 +@@ -154,6 +155,12 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); + s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); ++#if 1 //by hilbert ++s32 ixgbe_read_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 
*phy_data); ++s32 ixgbe_write_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 phy_data); ++#endif + s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); + s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, +@@ -168,10 +175,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up); + s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); +-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, +- u16 *firmware_version); +-s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, +- u16 *firmware_version); + + s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); + s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); +@@ -195,12 +198,8 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); + s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 eeprom_data); +-s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, +- u16 reg, u16 *val); +-s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, +- u16 reg, u16 *val); +-s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, +- u16 reg, u16 val); +-s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, +- u16 reg, u16 val); ++s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, ++ u16 *val, bool lock); ++s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, ++ u16 val, bool lock); + #endif /* _IXGBE_PHY_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +index 31d82e3..531990b 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +@@ -85,6 +85,7 @@ + #define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC + #define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD + #define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE ++#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0 + 
#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 + #define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 + #define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 +@@ -92,6 +93,8 @@ + #define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 + #define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 + #define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE ++#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 ++#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 + + /* VF Device IDs */ + #define IXGBE_DEV_ID_82599_VF 0x10ED +@@ -1393,8 +1396,10 @@ struct ixgbe_thermal_sensor_data { + #define TN1010_PHY_ID 0x00A19410 + #define TNX_FW_REV 0xB + #define X540_PHY_ID 0x01540200 +-#define X550_PHY_ID 0x01540220 ++#define X550_PHY_ID2 0x01540223 ++#define X550_PHY_ID3 0x01540221 + #define X557_PHY_ID 0x01540240 ++#define X557_PHY_ID2 0x01540250 + #define QT2022_PHY_ID 0x0043A400 + #define ATH_PHY_ID 0x03429050 + #define AQ_FW_REV 0x20 +@@ -1513,6 +1518,8 @@ enum { + #define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) + + /* VMOLR bitmasks */ ++#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */ ++#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */ + #define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ + #define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ + #define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ +@@ -1928,6 +1935,7 @@ enum { + #define IXGBE_LINKS_SPEED_10G_82599 0x30000000 + #define IXGBE_LINKS_SPEED_1G_82599 0x20000000 + #define IXGBE_LINKS_SPEED_100_82599 0x10000000 ++#define IXGBE_LINKS_SPEED_10_X550EM_A 0 + #define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ + #define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ + +@@ -2633,6 +2641,7 @@ enum ixgbe_fdir_pballoc_type { + #define FW_CEM_UNUSED_VER 0x0 + #define FW_CEM_MAX_RETRIES 3 + #define FW_CEM_RESP_STATUS_SUCCESS 0x1 ++#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */ + #define FW_READ_SHADOW_RAM_CMD 0x31 + #define FW_READ_SHADOW_RAM_LEN 0x6 + #define FW_WRITE_SHADOW_RAM_CMD 0x33 +@@ -2658,6 +2667,59 @@ enum 
ixgbe_fdir_pballoc_type { + #define FW_INT_PHY_REQ_LEN 10 + #define FW_INT_PHY_REQ_READ 0 + #define FW_INT_PHY_REQ_WRITE 1 ++#define FW_PHY_ACT_REQ_CMD 5 ++#define FW_PHY_ACT_DATA_COUNT 4 ++#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT) ++#define FW_PHY_ACT_INIT_PHY 1 ++#define FW_PHY_ACT_SETUP_LINK 2 ++#define FW_PHY_ACT_LINK_SPEED_10 BIT(0) ++#define FW_PHY_ACT_LINK_SPEED_100 BIT(1) ++#define FW_PHY_ACT_LINK_SPEED_1G BIT(2) ++#define FW_PHY_ACT_LINK_SPEED_2_5G BIT(3) ++#define FW_PHY_ACT_LINK_SPEED_5G BIT(4) ++#define FW_PHY_ACT_LINK_SPEED_10G BIT(5) ++#define FW_PHY_ACT_LINK_SPEED_20G BIT(6) ++#define FW_PHY_ACT_LINK_SPEED_25G BIT(7) ++#define FW_PHY_ACT_LINK_SPEED_40G BIT(8) ++#define FW_PHY_ACT_LINK_SPEED_50G BIT(9) ++#define FW_PHY_ACT_LINK_SPEED_100G BIT(10) ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16 ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3 << \ ++ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u ++#define FW_PHY_ACT_SETUP_LINK_LP BIT(18) ++#define FW_PHY_ACT_SETUP_LINK_HP BIT(19) ++#define FW_PHY_ACT_SETUP_LINK_EEE BIT(20) ++#define FW_PHY_ACT_SETUP_LINK_AN BIT(22) ++#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN BIT(0) ++#define FW_PHY_ACT_GET_LINK_INFO 3 ++#define FW_PHY_ACT_GET_LINK_INFO_EEE BIT(19) ++#define FW_PHY_ACT_GET_LINK_INFO_FC_TX BIT(20) ++#define FW_PHY_ACT_GET_LINK_INFO_FC_RX BIT(21) ++#define FW_PHY_ACT_GET_LINK_INFO_POWER BIT(22) ++#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE BIT(24) ++#define FW_PHY_ACT_GET_LINK_INFO_TEMP BIT(25) ++#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX BIT(28) ++#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX BIT(29) ++#define FW_PHY_ACT_FORCE_LINK_DOWN 4 ++#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF BIT(0) ++#define FW_PHY_ACT_PHY_SW_RESET 5 ++#define FW_PHY_ACT_PHY_HW_RESET 6 ++#define FW_PHY_ACT_GET_PHY_INFO 7 ++#define FW_PHY_ACT_UD_2 0x1002
++#define FW_PHY_ACT_UD_2_10G_KR_EEE BIT(6) ++#define FW_PHY_ACT_UD_2_10G_KX4_EEE BIT(5) ++#define FW_PHY_ACT_UD_2_1G_KX_EEE BIT(4) ++#define FW_PHY_ACT_UD_2_10G_T_EEE BIT(3) ++#define FW_PHY_ACT_UD_2_1G_T_EEE BIT(2) ++#define FW_PHY_ACT_UD_2_100M_TX_EEE BIT(1) ++#define FW_PHY_ACT_RETRIES 50 ++#define FW_PHY_INFO_SPEED_MASK 0xFFFu ++#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u ++#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu + + /* Host Interface Command Structures */ + struct ixgbe_hic_hdr { +@@ -2700,6 +2762,16 @@ struct ixgbe_hic_drv_info { + u16 pad2; /* end spacing to ensure length is mult. of dword2 */ + }; + ++struct ixgbe_hic_drv_info2 { ++ struct ixgbe_hic_hdr hdr; ++ u8 port_num; ++ u8 ver_sub; ++ u8 ver_build; ++ u8 ver_min; ++ u8 ver_maj; ++ char driver_string[FW_CEM_DRIVER_VERSION_SIZE]; ++}; ++ + /* These need to be dword aligned */ + struct ixgbe_hic_read_shadow_ram { + union ixgbe_hic_hdr2 hdr; +@@ -2748,6 +2820,19 @@ struct ixgbe_hic_internal_phy_resp { + __be32 read_data; + }; + ++struct ixgbe_hic_phy_activity_req { ++ struct ixgbe_hic_hdr hdr; ++ u8 port_number; ++ u8 pad; ++ __le16 activity_id; ++ __be32 data[FW_PHY_ACT_DATA_COUNT]; ++}; ++ ++struct ixgbe_hic_phy_activity_resp { ++ struct ixgbe_hic_hdr hdr; ++ __be32 data[FW_PHY_ACT_DATA_COUNT]; ++}; ++ + /* Transmit Descriptor - Advanced */ + union ixgbe_adv_tx_desc { + struct { +@@ -2863,6 +2948,7 @@ typedef u32 ixgbe_autoneg_advertised; + /* Link speed */ + typedef u32 ixgbe_link_speed; + #define IXGBE_LINK_SPEED_UNKNOWN 0 ++#define IXGBE_LINK_SPEED_10_FULL 0x0002 + #define IXGBE_LINK_SPEED_100_FULL 0x0008 + #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 + #define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 +@@ -3059,7 +3145,9 @@ enum ixgbe_phy_type { + ixgbe_phy_aq, + ixgbe_phy_x550em_kr, + ixgbe_phy_x550em_kx4, ++ ixgbe_phy_x550em_xfi, + ixgbe_phy_x550em_ext_t, ++ ixgbe_phy_ext_1g_t, + ixgbe_phy_cu_unknown, + ixgbe_phy_qt, + ixgbe_phy_xaui, +@@ -3078,6 +3166,7 @@ enum ixgbe_phy_type { + ixgbe_phy_qsfp_unknown, 
+ ixgbe_phy_sfp_unsupported, + ixgbe_phy_sgmii, ++ ixgbe_phy_fw, + ixgbe_phy_generic + }; + +@@ -3352,6 +3441,7 @@ struct ixgbe_mac_operations { + s32 (*led_off)(struct ixgbe_hw *, u32); + s32 (*blink_led_start)(struct ixgbe_hw *, u32); + s32 (*blink_led_stop)(struct ixgbe_hw *, u32); ++ s32 (*init_led_link_act)(struct ixgbe_hw *); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); +@@ -3372,9 +3462,11 @@ struct ixgbe_mac_operations { + /* Flow Control */ + s32 (*fc_enable)(struct ixgbe_hw *); + s32 (*setup_fc)(struct ixgbe_hw *); ++ void (*fc_autoneg)(struct ixgbe_hw *); + + /* Manageability interface */ +- s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); ++ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16, ++ const char *); + s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); + s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + void (*disable_rx)(struct ixgbe_hw *hw); +@@ -3416,10 +3508,24 @@ struct ixgbe_phy_operations { + s32 (*set_phy_power)(struct ixgbe_hw *, bool on); + s32 (*enter_lplu)(struct ixgbe_hw *); + s32 (*handle_lasi)(struct ixgbe_hw *hw); +- s32 (*read_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, +- u16 *value); +- s32 (*write_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, +- u16 value); ++ s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, ++ u8 *value); ++ s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, ++ u8 value); ++}; ++ ++struct ixgbe_link_operations { ++ s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); ++ s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, ++ u16 *val); ++ s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); ++ s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, ++ u16 val); ++}; ++ ++struct ixgbe_link_info { ++ struct ixgbe_link_operations ops; ++ u8 addr; + }; + + struct ixgbe_eeprom_info { +@@ -3462,6 +3568,7 @@ struct ixgbe_mac_info { + 
u8 san_mac_rar_index; + struct ixgbe_thermal_sensor_data thermal_sensor_data; + bool set_lben; ++ u8 led_link_act; + }; + + struct ixgbe_phy_info { +@@ -3477,6 +3584,8 @@ struct ixgbe_phy_info { + bool reset_disable; + ixgbe_autoneg_advertised autoneg_advertised; + ixgbe_link_speed speeds_supported; ++ ixgbe_link_speed eee_speeds_supported; ++ ixgbe_link_speed eee_speeds_advertised; + enum ixgbe_smart_speed smart_speed; + bool smart_speed_active; + bool multispeed_fiber; +@@ -3523,6 +3632,7 @@ struct ixgbe_hw { + struct ixgbe_addr_filter_info addr_ctrl; + struct ixgbe_fc_info fc; + struct ixgbe_phy_info phy; ++ struct ixgbe_link_info link; + struct ixgbe_eeprom_info eeprom; + struct ixgbe_bus_info bus; + struct ixgbe_mbx_info mbx; +@@ -3546,6 +3656,7 @@ struct ixgbe_info { + const struct ixgbe_eeprom_operations *eeprom_ops; + const struct ixgbe_phy_operations *phy_ops; + const struct ixgbe_mbx_operations *mbx_ops; ++ const struct ixgbe_link_operations *link_ops; + const u32 *mvals; + }; + +@@ -3593,17 +3704,35 @@ struct ixgbe_info { + #define IXGBE_FUSES0_REV_MASK (3u << 6) + + #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) ++#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200) + #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) + #define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) + #define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248) + #define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0) ++#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C) + #define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) + #define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) + #define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00) + #define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054) + #define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) + #define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 
0x9A00 : 0x5A00) + ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR (0x2 << 20) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN BIT(25) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN BIT(26) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN BIT(27) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10M ~(0x7 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M BIT(28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G (0x2 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G (0x3 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN (0x4 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G (0x7 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK (0x7 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART BIT(31) ++ + #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9) + #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11) + +@@ -3618,6 +3747,7 @@ struct ixgbe_info { + #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18) + #define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24) + #define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26) ++#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE BIT(28) + #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29) + #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART BIT(31) + +@@ -3627,6 +3757,8 @@ struct ixgbe_info { + #define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0) + #define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1) + ++#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE BIT(10) ++#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE BIT(11) + #define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12) + #define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19) + +@@ -3675,8 +3807,13 @@ struct ixgbe_info { + + #define IXGBE_NW_MNG_IF_SEL 0x00011178 + #define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1) +-#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23) +-#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) ++#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M BIT(17) ++#define 
IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M BIT(18) ++#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G BIT(19) ++#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G BIT(20) ++#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G BIT(21) ++#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE BIT(25) ++#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) /* X552 only */ + #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 + #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ + (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +index f2b1d48..6ea0d6a 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +@@ -95,6 +95,7 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) + { + s32 status; + u32 ctrl, i; ++ u32 swfw_mask = hw->phy.phy_semaphore_mask; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); +@@ -105,10 +106,17 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) + ixgbe_clear_tx_pending(hw); + + mac_reset_top: ++ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); ++ if (status) { ++ hw_dbg(hw, "semaphore failed with %d", status); ++ return IXGBE_ERR_SWFW_SYNC; ++ } ++ + ctrl = IXGBE_CTRL_RST; + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); + usleep_range(1000, 1200); + + /* Poll for reset bit to self-clear indicating reset is complete */ +@@ -780,8 +788,10 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) + ixgbe_link_speed speed; + bool link_up; + +- /* +- * Link should be up in order for the blink bit in the LED control ++ if (index > 3) ++ return IXGBE_ERR_PARAM; ++ ++ /* Link should be up in order for the blink bit in the LED control + * register to work. Force link and speed in the MAC if link is down. + * This will be reversed when we stop the blinking. 
+ */ +@@ -814,6 +824,9 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) + u32 macc_reg; + u32 ledctl_reg; + ++ if (index > 3) ++ return IXGBE_ERR_PARAM; ++ + /* Restore the LED to its default value. */ + ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); +@@ -851,6 +864,7 @@ static const struct ixgbe_mac_operations mac_ops_X540 = { + .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, ++ .init_led_link_act = ixgbe_init_led_link_act_generic, + .blink_led_start = &ixgbe_blink_led_start_X540, + .blink_led_stop = &ixgbe_blink_led_stop_X540, + .set_rar = &ixgbe_set_rar_generic, +@@ -866,6 +880,7 @@ static const struct ixgbe_mac_operations mac_ops_X540 = { + .set_vfta = &ixgbe_set_vfta_generic, + .fc_enable = &ixgbe_fc_enable_generic, + .setup_fc = ixgbe_setup_fc_generic, ++ .fc_autoneg = ixgbe_fc_autoneg, + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, + .init_uta_tables = &ixgbe_init_uta_tables_generic, + .setup_sfp = NULL, +@@ -911,7 +926,6 @@ static const struct ixgbe_phy_operations phy_ops_X540 = { + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, + .check_overtemp = &ixgbe_tn_check_overtemp, + .set_phy_power = &ixgbe_set_copper_phy_power, +- .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, + }; + + static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +index 77a60aa..3236248 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +@@ -28,11 +28,15 @@ + + static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed); + static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *); ++static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *); ++static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *); ++static 
s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *); + + static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) + { + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; ++ struct ixgbe_link_info *link = &hw->link; + + /* Start with X540 invariants, since so simular */ + ixgbe_get_invariants_X540(hw); +@@ -40,6 +44,46 @@ static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) + if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) + phy->ops.set_phy_power = NULL; + ++ link->addr = IXGBE_CS4227; ++ ++ return 0; ++} ++ ++static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_phy_info *phy = &hw->phy; ++ ++ /* Start with X540 invariants, since so similar */ ++ ixgbe_get_invariants_X540(hw); ++ ++ phy->ops.set_phy_power = NULL; ++ ++ return 0; ++} ++ ++static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ struct ixgbe_phy_info *phy = &hw->phy; ++ ++ /* Start with X540 invariants, since so similar */ ++ ixgbe_get_invariants_X540(hw); ++ ++ if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) ++ phy->ops.set_phy_power = NULL; ++ ++ return 0; ++} ++ ++static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_phy_info *phy = &hw->phy; ++ ++ /* Start with X540 invariants, since so similar */ ++ ixgbe_get_invariants_X540(hw); ++ ++ phy->ops.set_phy_power = NULL; ++ ++ return 0; ++} ++ +@@ -69,8 +113,7 @@ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) + */ + static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) + { +- return hw->phy.ops.read_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, +- value); ++ return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value); + } + + /** +@@ -83,8 +126,7 @@ static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) + */ + static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) + { +- return
hw->phy.ops.write_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, +- value); ++ return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value); + } + + /** +@@ -290,6 +332,9 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) + case IXGBE_DEV_ID_X550EM_X_KX4: + hw->phy.type = ixgbe_phy_x550em_kx4; + break; ++ case IXGBE_DEV_ID_X550EM_X_XFI: ++ hw->phy.type = ixgbe_phy_x550em_xfi; ++ break; + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: +@@ -301,9 +346,21 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + /* Fallthrough */ +- case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + return ixgbe_identify_phy_generic(hw); ++ case IXGBE_DEV_ID_X550EM_X_1G_T: ++ hw->phy.type = ixgbe_phy_ext_1g_t; ++ break; ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ hw->phy.type = ixgbe_phy_fw; ++ hw->phy.ops.read_reg = NULL; ++ hw->phy.ops.write_reg = NULL; ++ if (hw->bus.lan_id) ++ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; ++ else ++ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; ++ break; + default: + break; + } +@@ -322,6 +379,280 @@ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + return IXGBE_NOT_IMPLEMENTED; + } + ++/** ++ * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation ++ * @hw: pointer to the hardware structure ++ * @addr: I2C bus address to read from ++ * @reg: I2C device register to read from ++ * @val: pointer to location to receive read value ++ * ++ * Returns an error code on error. 
++ **/ ++static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, ++ u16 reg, u16 *val) ++{ ++ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); ++} ++ ++/** ++ * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation ++ * @hw: pointer to the hardware structure ++ * @addr: I2C bus address to read from ++ * @reg: I2C device register to read from ++ * @val: pointer to location to receive read value ++ * ++ * Returns an error code on error. ++ **/ ++static s32 ++ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, ++ u16 reg, u16 *val) ++{ ++ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); ++} ++ ++/** ++ * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation ++ * @hw: pointer to the hardware structure ++ * @addr: I2C bus address to write to ++ * @reg: I2C device register to write to ++ * @val: value to write ++ * ++ * Returns an error code on error. ++ **/ ++static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, ++ u8 addr, u16 reg, u16 val) ++{ ++ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); ++} ++ ++/** ++ * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation ++ * @hw: pointer to the hardware structure ++ * @addr: I2C bus address to write to ++ * @reg: I2C device register to write to ++ * @val: value to write ++ * ++ * Returns an error code on error. 
++ **/ ++static s32 ++ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, ++ u8 addr, u16 reg, u16 val) ++{ ++ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); ++} ++ ++/** ++ * ixgbe_fw_phy_activity - Perform an activity on a PHY ++ * @hw: pointer to hardware structure ++ * @activity: activity to perform ++ * @data: Pointer to 4 32-bit words of data ++ */ ++s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, ++ u32 (*data)[FW_PHY_ACT_DATA_COUNT]) ++{ ++ union { ++ struct ixgbe_hic_phy_activity_req cmd; ++ struct ixgbe_hic_phy_activity_resp rsp; ++ } hic; ++ u16 retries = FW_PHY_ACT_RETRIES; ++ s32 rc; ++ u32 i; ++ ++ do { ++ memset(&hic, 0, sizeof(hic)); ++ hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD; ++ hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN; ++ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; ++ hic.cmd.port_number = hw->bus.lan_id; ++ hic.cmd.activity_id = cpu_to_le16(activity); ++ for (i = 0; i < ARRAY_SIZE(hic.cmd.data); ++i) ++ hic.cmd.data[i] = cpu_to_be32((*data)[i]); ++ ++ rc = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), ++ IXGBE_HI_COMMAND_TIMEOUT, ++ true); ++ if (rc) ++ return rc; ++ if (hic.rsp.hdr.cmd_or_resp.ret_status == ++ FW_CEM_RESP_STATUS_SUCCESS) { ++ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) ++ (*data)[i] = be32_to_cpu(hic.rsp.data[i]); ++ return 0; ++ } ++ usleep_range(20, 30); ++ --retries; ++ } while (retries > 0); ++ ++ return IXGBE_ERR_HOST_INTERFACE_COMMAND; ++} ++ ++static const struct { ++ u16 fw_speed; ++ ixgbe_link_speed phy_speed; ++} ixgbe_fw_map[] = { ++ { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL }, ++ { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL }, ++ { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL }, ++ { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL }, ++ { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL }, ++ { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL }, ++}; ++ ++/** ++ * ixgbe_get_phy_id_fw - Get the phy ID via firmware 
command ++ * @hw: pointer to hardware structure ++ * ++ * Returns error code ++ */ ++static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) ++{ ++ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ u16 phy_speeds; ++ u16 phy_id_lo; ++ s32 rc; ++ u16 i; ++ ++ if (hw->phy.id) ++ return 0; ++ ++ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info); ++ if (rc) ++ return rc; ++ ++ hw->phy.speeds_supported = 0; ++ phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK; ++ for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { ++ if (phy_speeds & ixgbe_fw_map[i].fw_speed) ++ hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed; ++ } ++ ++ hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK; ++ phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK; ++ hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK; ++ hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; ++ if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) ++ return IXGBE_ERR_PHY_ADDR_INVALID; ++ ++ hw->phy.autoneg_advertised = hw->phy.speeds_supported; ++ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | ++ IXGBE_LINK_SPEED_1GB_FULL; ++ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; ++ return 0; ++} ++ ++static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 *phy_data); ++/** ++ * ixgbe_identify_phy_fw - Get PHY type based on firmware command ++ * @hw: pointer to hardware structure ++ * ++ * Returns error code ++ */ ++static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) ++{ ++ s32 rc; ++ u16 value=0; ++ ++ if (hw->bus.lan_id) ++ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; ++ else ++ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; ++ ++#if 0 /* Try also to get PHY ID through MDIO by using C22 in read_reg op. ++ * By hilbert ++ */ ++ rc = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, &value); ++ hw_err(hw, "####rc:%x, PHY ID-1:%x\n", rc, value); ++#endif ++ ++ hw->phy.type = ixgbe_phy_fw; ++#if 0 /* We still need read/write ops later, don't NULL it. 
By hilbert */ ++ hw->phy.ops.read_reg = NULL; ++ hw->phy.ops.write_reg = NULL; ++#endif ++ return ixgbe_get_phy_id_fw(hw); ++} ++ ++/** ++ * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY ++ * @hw: pointer to hardware structure ++ * ++ * Returns error code ++ */ ++static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) ++{ ++ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ ++ setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF; ++ return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup); ++} ++ ++/** ++ * ixgbe_setup_fw_link - Setup firmware-controlled PHYs ++ * @hw: pointer to hardware structure ++ */ ++static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) ++{ ++ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ s32 rc; ++ u16 i; ++ ++ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) ++ return 0; ++ ++ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { ++ hw_err(hw, "rx_pause not valid in strict IEEE mode\n"); ++ return IXGBE_ERR_INVALID_LINK_SETTINGS; ++ } ++ ++ switch (hw->fc.requested_mode) { ++ case ixgbe_fc_full: ++ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX << ++ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; ++ break; ++ case ixgbe_fc_rx_pause: ++ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX << ++ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; ++ break; ++ case ixgbe_fc_tx_pause: ++ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX << ++ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; ++ break; ++ default: ++ break; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { ++ if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed) ++ setup[0] |= ixgbe_fw_map[i].fw_speed; ++ } ++ setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN; ++ ++ if (hw->phy.eee_speeds_advertised) ++ setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE; ++ ++ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup); ++ if (rc) ++ return rc; ++ if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN) ++ return IXGBE_ERR_OVERTEMP; ++ return 0; ++} ++ ++/** ++ * ixgbe_fc_autoneg_fw - Set up flow 
control for FW-controlled PHYs ++ * @hw: pointer to hardware structure ++ * ++ * Called at init time to set up flow control. ++ */ ++static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) ++{ ++ if (hw->fc.requested_mode == ixgbe_fc_default) ++ hw->fc.requested_mode = ixgbe_fc_full; ++ ++ return ixgbe_setup_fw_link(hw); ++} ++ + /** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params + * @hw: pointer to hardware structure + * +@@ -544,41 +875,6 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + return status; + } + +-/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface +- * command assuming that the semaphore is already obtained. +- * @hw: pointer to hardware structure +- * @offset: offset of word in the EEPROM to read +- * @data: word read from the EEPROM +- * +- * Reads a 16 bit word from the EEPROM using the hostif. +- **/ +-static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, +- u16 *data) +-{ +- s32 status; +- struct ixgbe_hic_read_shadow_ram buffer; +- +- buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; +- buffer.hdr.req.buf_lenh = 0; +- buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; +- buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; +- +- /* convert offset from words to bytes */ +- buffer.address = cpu_to_be32(offset * 2); +- /* one word */ +- buffer.length = cpu_to_be16(sizeof(u16)); +- +- status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), +- IXGBE_HI_COMMAND_TIMEOUT, false); +- if (status) +- return status; +- +- *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, +- FW_NVM_DATA_OFFSET); +- +- return 0; +-} +- + /** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read +@@ -590,6 +886,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) 
+ { ++ const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; + struct ixgbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; +@@ -597,7 +894,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u32 i; + + /* Take semaphore for the entire operation. */ +- status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); ++ status = hw->mac.ops.acquire_swfw_sync(hw, mask); + if (status) { + hw_dbg(hw, "EEPROM read buffer - semaphore failed\n"); + return status; +@@ -620,10 +917,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + buffer.pad2 = 0; + buffer.pad3 = 0; + +- status = ixgbe_host_interface_command(hw, &buffer, +- sizeof(buffer), +- IXGBE_HI_COMMAND_TIMEOUT, +- false); ++ status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), ++ IXGBE_HI_COMMAND_TIMEOUT); + if (status) { + hw_dbg(hw, "Host interface command failed\n"); + goto out; +@@ -647,7 +942,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + } + + out: +- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); ++ hw->mac.ops.release_swfw_sync(hw, mask); + return status; + } + +@@ -818,15 +1113,32 @@ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) + **/ + static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) + { +- s32 status = 0; ++ const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; ++ struct ixgbe_hic_read_shadow_ram buffer; ++ s32 status; + +- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { +- status = ixgbe_read_ee_hostif_data_X550(hw, offset, data); +- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); +- } else { +- status = IXGBE_ERR_SWFW_SYNC; ++ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; ++ buffer.hdr.req.buf_lenh = 0; ++ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; ++ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; ++ ++ /* convert offset from words to bytes */ ++ buffer.address = cpu_to_be32(offset * 2); ++ /* one word */ ++ 
buffer.length = cpu_to_be16(sizeof(u16)); ++ ++ status = hw->mac.ops.acquire_swfw_sync(hw, mask); ++ if (status) ++ return status; ++ ++ status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), ++ IXGBE_HI_COMMAND_TIMEOUT); ++ if (!status) { ++ *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, ++ FW_NVM_DATA_OFFSET); + } + ++ hw->mac.ops.release_swfw_sync(hw, mask); + return status; + } + +@@ -1130,47 +1442,17 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + return ret; + } + +-/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. ++/** ++ * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration + * @hw: pointer to hardware structure +- * @speed: the link speed to force + * +- * Configures the integrated KR PHY to use iXFI mode. Used to connect an +- * internal and external PHY at a specific speed, without autonegotiation. ++ * iXfI configuration needed for ixgbe_mac_X550EM_x devices. + **/ +-static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) ++static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) + { + s32 status; + u32 reg_val; + +- /* Disable AN and force speed to 10G Serial. */ +- status = ixgbe_read_iosf_sb_reg_x550(hw, +- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), +- IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); +- if (status) +- return status; +- +- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; +- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; +- +- /* Select forced link speed for internal PHY. */ +- switch (*speed) { +- case IXGBE_LINK_SPEED_10GB_FULL: +- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; +- break; +- case IXGBE_LINK_SPEED_1GB_FULL: +- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; +- break; +- default: +- /* Other link speeds are not supported by internal KR PHY. 
*/ +- return IXGBE_ERR_LINK_SETUP; +- } +- +- status = ixgbe_write_iosf_sb_reg_x550(hw, +- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), +- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); +- if (status) +- return status; +- + /* Disable training protocol FSM. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), +@@ -1230,20 +1512,111 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); +- if (status) +- return status; ++ return status; ++} + +- /* Toggle port SW reset by AN reset. */ +- status = ixgbe_read_iosf_sb_reg_x550(hw, ++/** ++ * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the ++ * internal PHY ++ * @hw: pointer to hardware structure ++ **/ ++static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) ++{ ++ s32 status; ++ u32 link_ctrl; ++ ++ /* Restart auto-negotiation. 
*/ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), +- IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl); ++ ++ if (status) { ++ hw_dbg(hw, "Auto-negotiation did not complete\n"); ++ return status; ++ } ++ ++ link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl); ++ ++ if (hw->mac.type == ixgbe_mac_x550em_a) { ++ u32 flx_mask_st20; ++ ++ /* Indicate to FW that AN restart has been asserted */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20); ++ ++ if (status) { ++ hw_dbg(hw, "Auto-negotiation did not complete\n"); ++ return status; ++ } ++ ++ flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART; ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20); ++ } ++ ++ return status; ++} ++ ++/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. ++ * @hw: pointer to hardware structure ++ * @speed: the link speed to force ++ * ++ * Configures the integrated KR PHY to use iXFI mode. Used to connect an ++ * internal and external PHY at a specific speed, without autonegotiation. ++ **/ ++static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ s32 status; ++ u32 reg_val; ++ ++ /* iXFI is only supported with X552 */ ++ if (mac->type != ixgbe_mac_X550EM_x) ++ return IXGBE_ERR_LINK_SETUP; ++ ++ /* Disable AN and force speed to 10G Serial. 
*/ ++ status = ixgbe_read_iosf_sb_reg_x550(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + +- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; ++ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; ++ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; ++ ++ /* Select forced link speed for internal PHY. */ ++ switch (*speed) { ++ case IXGBE_LINK_SPEED_10GB_FULL: ++ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; ++ break; ++ case IXGBE_LINK_SPEED_1GB_FULL: ++ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; ++ break; ++ default: ++ /* Other link speeds are not supported by internal KR PHY. */ ++ return IXGBE_ERR_LINK_SETUP; ++ } ++ + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ if (status) ++ return status; ++ ++ /* Additional configuration needed for x550em_x */ ++ if (hw->mac.type == ixgbe_mac_X550EM_x) { ++ status = ixgbe_setup_ixfi_x550em_x(hw); ++ if (status) ++ return status; ++ } ++ ++ /* Toggle port SW reset by AN reset. */ ++ status = ixgbe_restart_an_internal_phy_x550em(hw); + + return status; + } +@@ -1294,7 +1667,7 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + __always_unused bool autoneg_wait_to_complete) + { + s32 status; +- u16 slice, value; ++ u16 reg_slice, reg_val; + bool setup_linear = false; + + /* Check if SFP module is supported and linear */ +@@ -1310,71 +1683,68 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + if (status) + return status; + +- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { +- /* Configure CS4227 LINE side to 10G SR. */ +- slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12); +- value = IXGBE_CS4227_SPEED_10G; +- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, +- slice, value); +- if (status) +- goto i2c_err; ++ /* Configure internal PHY for KR/KX. 
*/ ++ ixgbe_setup_kr_speed_x550em(hw, speed); + +- slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); +- value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; +- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, +- slice, value); +- if (status) +- goto i2c_err; +- +- /* Configure CS4227 for HOST connection rate then type. */ +- slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12); +- value = speed & IXGBE_LINK_SPEED_10GB_FULL ? +- IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G; +- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, +- slice, value); +- if (status) +- goto i2c_err; ++ /* Configure CS4227 LINE side to proper mode. */ ++ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); ++ if (setup_linear) ++ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; ++ else ++ reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + +- slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12); +- if (setup_linear) +- value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; +- else +- value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; +- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, +- slice, value); +- if (status) +- goto i2c_err; ++ status = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, ++ reg_val); + +- /* Setup XFI internal link. */ +- status = ixgbe_setup_ixfi_x550em(hw, &speed); +- if (status) { +- hw_dbg(hw, "setup_ixfi failed with %d\n", status); +- return status; +- } +- } else { +- /* Configure internal PHY for KR/KX. */ +- status = ixgbe_setup_kr_speed_x550em(hw, speed); +- if (status) { +- hw_dbg(hw, "setup_kr_speed failed with %d\n", status); +- return status; +- } ++ return status; ++} + +- /* Configure CS4227 LINE side to proper mode. 
*/ +- slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); +- if (setup_linear) +- value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; +- else +- value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; +- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, +- slice, value); +- if (status) +- goto i2c_err; ++/** ++ * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode ++ * @hw: pointer to hardware structure ++ * @speed: the link speed to force ++ * ++ * Configures the integrated PHY for native SFI mode. Used to connect the ++ * internal PHY directly to an SFP cage, without autonegotiation. ++ **/ ++static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ s32 status; ++ u32 reg_val; ++ ++ /* Disable all AN and force speed to 10G Serial. */ ++ status = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ if (status) ++ return status; ++ ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; ++ ++ /* Select forced link speed for internal PHY. */ ++ switch (*speed) { ++ case IXGBE_LINK_SPEED_10GB_FULL: ++ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G; ++ break; ++ case IXGBE_LINK_SPEED_1GB_FULL: ++ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; ++ break; ++ default: ++ /* Other link speeds are not supported by internal PHY. */ ++ return IXGBE_ERR_LINK_SETUP; + } + +- return 0; ++ status = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ ++ /* Toggle port SW reset by AN reset. 
*/ ++ status = ixgbe_restart_an_internal_phy_x550em(hw); + +-i2c_err: +- hw_dbg(hw, "combined i2c access failed with %d\n", status); + return status; + } + +@@ -1390,45 +1760,39 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed, + { + bool setup_linear = false; + u32 reg_phy_int; +- s32 rc; ++ s32 ret_val; + + /* Check if SFP module is supported and linear */ +- rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); ++ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * SFP not present error is not excepted in the setup MAC link flow. + */ +- if (rc == IXGBE_ERR_SFP_NOT_PRESENT) ++ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) + return 0; + +- if (!rc) +- return rc; ++ if (ret_val) ++ return ret_val; + +- /* Configure internal PHY for native SFI */ +- rc = hw->mac.ops.read_iosf_sb_reg(hw, +- IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id), +- IXGBE_SB_IOSF_TARGET_KR_PHY, +- ®_phy_int); +- if (rc) +- return rc; ++ /* Configure internal PHY for native SFI based on module type */ ++ ret_val = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_phy_int); ++ if (ret_val) ++ return ret_val; + +- if (setup_linear) { +- reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING; +- reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR; +- } else { +- reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING; +- reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR; +- } ++ reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA; ++ if (!setup_linear) ++ reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR; + +- rc = hw->mac.ops.write_iosf_sb_reg(hw, +- IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id), +- IXGBE_SB_IOSF_TARGET_KR_PHY, +- reg_phy_int); +- if (rc) +- return rc; ++ ret_val = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int); ++ if (ret_val) ++ return ret_val; + +- /* Setup XFI/SFI 
internal link */ +- return ixgbe_setup_ixfi_x550em(hw, &speed); ++ /* Setup SFI internal link. */ ++ return ixgbe_setup_sfi_x550a(hw, &speed); + } + + /** +@@ -1444,19 +1808,19 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, + u32 reg_slice, slice_offset; + bool setup_linear = false; + u16 reg_phy_ext; +- s32 rc; ++ s32 ret_val; + + /* Check if SFP module is supported and linear */ +- rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); ++ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * SFP not present error is not excepted in the setup MAC link flow. + */ +- if (rc == IXGBE_ERR_SFP_NOT_PRESENT) ++ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) + return 0; + +- if (!rc) +- return rc; ++ if (ret_val) ++ return ret_val; + + /* Configure internal PHY for KR/KX. */ + ixgbe_setup_kr_speed_x550em(hw, speed); +@@ -1464,16 +1828,16 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, + if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE) + return IXGBE_ERR_PHY_ADDR_INVALID; + +- /* Get external PHY device id */ +- rc = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB, +- IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); +- if (rc) +- return rc; ++ /* Get external PHY SKU id */ ++ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU, ++ IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); ++ if (ret_val) ++ return ret_val; + + /* When configuring quad port CS4223, the MAC instance is part + * of the slice offset. + */ +- if (reg_phy_ext == IXGBE_CS4223_PHY_ID) ++ if (reg_phy_ext == IXGBE_CS4223_SKU_ID) + slice_offset = (hw->bus.lan_id + + (hw->bus.instance_id << 1)) << 12; + else +@@ -1481,12 +1845,28 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, + + /* Configure CS4227/CS4223 LINE side to proper mode. 
*/ + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; ++ ++ ret_val = hw->phy.ops.read_reg(hw, reg_slice, ++ IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); ++ if (ret_val) ++ return ret_val; ++ ++ reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) | ++ (IXGBE_CS4227_EDC_MODE_SR << 1)); ++ + if (setup_linear) + reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; + else + reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; +- return hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE, +- reg_phy_ext); ++ ++ ret_val = hw->phy.ops.write_reg(hw, reg_slice, ++ IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext); ++ if (ret_val) ++ return ret_val; ++ ++ /* Flush previous write with a read */ ++ return hw->phy.ops.read_reg(hw, reg_slice, ++ IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); + } + + /** +@@ -1515,8 +1895,10 @@ static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, + else + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + +- /* If internal link mode is XFI, then setup XFI internal link. */ +- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { ++ /* If X552 and internal link mode is XFI, then setup XFI internal link. ++ */ ++ if (hw->mac.type == ixgbe_mac_X550EM_x && ++ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + status = ixgbe_setup_ixfi_x550em(hw, &force_speed); + + if (status) +@@ -1540,7 +1922,7 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, + bool link_up_wait_to_complete) + { + u32 status; +- u16 autoneg_status; ++ u16 i, autoneg_status; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; +@@ -1552,14 +1934,18 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, + if (status || !(*link_up)) + return status; + +- /* MAC link is up, so check external PHY link. +- * Read this twice back to back to indicate current status. 
+- */ +- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, +- &autoneg_status); +- if (status) +- return status; ++ /* MAC link is up, so check external PHY link. ++ * Link status is latching low, and can only be used to detect link ++ * drop, and not the current status of the link without performing ++ * back-to-back reads. ++ */ ++ for (i = 0; i < 2; i++) { ++ status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, ++ &autoneg_status); ++ ++ if (status) ++ return status; ++ } + + /* If external PHY link is not up, then indicate link not up */ + if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) +@@ -1577,7 +1963,7 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) + { + struct ixgbe_mac_info *mac = &hw->mac; +- u32 lval, sval; ++ u32 lval, sval, flx_val; + s32 rc; + + rc = mac->ops.read_iosf_sb_reg(hw, +@@ -1611,12 +1997,183 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, + if (rc) + return rc; + +- lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; ++ rc = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); ++ if (rc) ++ return rc; ++ ++ rc = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); ++ if (rc) ++ return rc; ++ ++ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; ++ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; ++ ++ rc = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); ++ if (rc) ++ return rc; ++ ++ rc = ixgbe_restart_an_internal_phy_x550em(hw); ++ return rc; ++} ++ ++/** ++ * ixgbe_setup_sgmii_fw - Set up link for sgmii with 
firmware-controlled PHYs ++ * @hw: pointer to hardware structure ++ */ ++static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, ++ bool autoneg_wait) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ u32 lval, sval, flx_val; ++ s32 rc; ++ ++ rc = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); ++ if (rc) ++ return rc; ++ ++ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; ++ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; ++ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; ++ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; ++ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; ++ rc = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, lval); ++ if (rc) ++ return rc; ++ ++ rc = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); ++ if (rc) ++ return rc; ++ ++ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; ++ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; ++ rc = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, sval); ++ if (rc) ++ return rc; ++ + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); ++ if (rc) ++ return rc; + +- return rc; ++ rc = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); ++ if (rc) ++ return rc; ++ ++ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; ++ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; ++ ++ rc = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); ++ if (rc) ++ return rc; ++ ++ 
ixgbe_restart_an_internal_phy_x550em(hw); ++ ++ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); ++} ++ ++/** ++ * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 ++ * @hw: pointer to hardware structure ++ * ++ * Enable flow control according to IEEE clause 37. ++ */ ++static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) ++{ ++ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; ++ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ ixgbe_link_speed speed; ++ bool link_up; ++ ++ /* AN should have completed when the cable was plugged in. ++ * Look for reasons to bail out. Bail out if: ++ * - FC autoneg is disabled, or if ++ * - link is not up. ++ */ ++ if (hw->fc.disable_fc_autoneg) ++ goto out; ++ ++ hw->mac.ops.check_link(hw, &speed, &link_up, false); ++ if (!link_up) ++ goto out; ++ ++ /* Check if auto-negotiation has completed */ ++ status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); ++ if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { ++ status = IXGBE_ERR_FC_NOT_NEGOTIATED; ++ goto out; ++ } ++ ++ /* Negotiate the flow control */ ++ status = ixgbe_negotiate_fc(hw, info[0], info[0], ++ FW_PHY_ACT_GET_LINK_INFO_FC_RX, ++ FW_PHY_ACT_GET_LINK_INFO_FC_TX, ++ FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, ++ FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); ++ ++out: ++ if (!status) { ++ hw->fc.fc_was_autonegged = true; ++ } else { ++ hw->fc.fc_was_autonegged = false; ++ hw->fc.current_mode = hw->fc.requested_mode; ++ } ++} ++ ++/** ixgbe_init_mac_link_ops_X550em_a - Init mac link function pointers ++ * @hw: pointer to hardware structure ++ **/ ++static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ ++ switch (mac->ops.get_media_type(hw)) { ++ case ixgbe_media_type_fiber: ++ mac->ops.setup_fc = NULL; ++ mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; ++ break; ++ case ixgbe_media_type_copper: ++ if (hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T && ++ hw->device_id 
!= IXGBE_DEV_ID_X550EM_A_1G_T_L) { ++ mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; ++ break; ++ } ++ mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; ++ mac->ops.setup_fc = ixgbe_fc_autoneg_fw; ++ mac->ops.setup_link = ixgbe_setup_sgmii_fw; ++ mac->ops.check_link = ixgbe_check_mac_link_generic; ++ break; ++ case ixgbe_media_type_backplane: ++ mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; ++ mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; ++ break; ++ default: ++ break; ++ } + } + + /** ixgbe_init_mac_link_ops_X550em - init mac link function pointers +@@ -1654,10 +2211,12 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) + ixgbe_set_soft_rate_select_speed; + break; + case ixgbe_media_type_copper: ++ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) ++ break; + mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + mac->ops.setup_fc = ixgbe_setup_fc_generic; + mac->ops.check_link = ixgbe_check_link_t_X550em; +- return; ++ break; + case ixgbe_media_type_backplane: + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || + hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) +@@ -1666,6 +2225,10 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) + default: + break; + } ++ ++ /* Additional modification for X550em_a devices */ ++ if (hw->mac.type == ixgbe_mac_x550em_a) ++ ixgbe_init_mac_link_ops_X550em_a(hw); + } + + /** ixgbe_setup_sfp_modules_X550em - Setup SFP module +@@ -1696,6 +2259,12 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) + { ++ if (hw->phy.type == ixgbe_phy_fw) { ++ *autoneg = true; ++ *speed = hw->phy.speeds_supported; ++ return 0; ++ } ++ + /* SFP */ + if (hw->phy.media_type == ixgbe_media_type_fiber) { + /* CS4227 SFP must not enable auto-negotiation */ +@@ -1714,8 +2283,39 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + } else { +- *speed = 
IXGBE_LINK_SPEED_10GB_FULL | +- IXGBE_LINK_SPEED_1GB_FULL; ++ switch (hw->phy.type) { ++ case ixgbe_phy_x550em_kx4: ++ *speed = IXGBE_LINK_SPEED_1GB_FULL | ++ IXGBE_LINK_SPEED_2_5GB_FULL | ++ IXGBE_LINK_SPEED_10GB_FULL; ++ break; ++ case ixgbe_phy_x550em_xfi: ++ *speed = IXGBE_LINK_SPEED_1GB_FULL | ++ IXGBE_LINK_SPEED_10GB_FULL; ++ break; ++ case ixgbe_phy_ext_1g_t: ++ case ixgbe_phy_sgmii: ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ break; ++ case ixgbe_phy_x550em_kr: ++ if (hw->mac.type == ixgbe_mac_x550em_a) { ++ /* check different backplane modes */ ++ if (hw->phy.nw_mng_if_sel & ++ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { ++ *speed = IXGBE_LINK_SPEED_2_5GB_FULL; ++ break; ++ } else if (hw->device_id == ++ IXGBE_DEV_ID_X550EM_A_KR_L) { ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ break; ++ } ++ } ++ /* fall through */ ++ default: ++ *speed = IXGBE_LINK_SPEED_10GB_FULL | ++ IXGBE_LINK_SPEED_1GB_FULL; ++ break; ++ } + *autoneg = true; + } + return 0; +@@ -1742,7 +2342,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) + + /* Vendor alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + ®); + + if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN)) +@@ -1750,7 +2350,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) + + /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + ®); + + if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | +@@ -1759,7 +2359,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) + + /* Global alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + ®); + + if (status) +@@ -1774,7 +2374,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct 
ixgbe_hw *hw, bool *lsc) + if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { + /* device fault alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + ®); + if (status) + return status; +@@ -1789,14 +2389,14 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) + + /* Vendor alarm 2 triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); ++ MDIO_MMD_AN, ®); + + if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT)) + return status; + + /* link connect/disconnect event occurred */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); ++ MDIO_MMD_AN, ®); + + if (status) + return status; +@@ -1827,21 +2427,34 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) + status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); + + /* Enable link status change alarm */ +- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); +- if (status) +- return status; + +- reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; ++ /* Enable the LASI interrupts on X552 devices to receive notifications ++ * of the link configurations of the external PHY and correspondingly ++ * support the configuration of the internal iXFI link, since iXFI does ++ * not support auto-negotiation. This is not required for X553 devices ++ * having KR support, which performs auto-negotiations and which is used ++ * as the internal link to the external PHY. Hence adding a check here ++ * to avoid enabling LASI interrupts for X553 devices. 
++ */ ++ if (hw->mac.type != ixgbe_mac_x550em_a) { ++ status = hw->phy.ops.read_reg(hw, ++ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, ++ MDIO_MMD_AN, ®); ++ if (status) ++ return status; + +- status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg); +- if (status) +- return status; ++ reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; ++ ++ status = hw->phy.ops.write_reg(hw, ++ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, ++ MDIO_MMD_AN, reg); ++ if (status) ++ return status; ++ } + + /* Enable high temperature failure and global fault alarms */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + ®); + if (status) + return status; +@@ -1850,14 +2463,14 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) + IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN); + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + reg); + if (status) + return status; + + /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + ®); + if (status) + return status; +@@ -1866,14 +2479,14 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) + IXGBE_MDIO_GLOBAL_ALARM_1_INT); + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + reg); + if (status) + return status; + + /* Enable chip-wide vendor alarm */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + ®); + if (status) + return status; +@@ -1881,7 +2494,7 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) + reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN; + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, +- 
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + reg); + + return status; +@@ -1945,51 +2558,31 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; + +- /* Restart auto-negotiation. */ +- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + +- return status; +-} +- +-/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY. +- * @hw: pointer to hardware structure +- * +- * Configures the integrated KX4 PHY. +- **/ +-static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) +-{ +- s32 status; +- u32 reg_val; +- +- status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1, +- IXGBE_SB_IOSF_TARGET_KX4_PCS0 + +- hw->bus.lan_id, ®_val); +- if (status) +- return status; +- +- reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 | +- IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX); +- +- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE; ++ if (hw->mac.type == ixgbe_mac_x550em_a) { ++ /* Set lane mode to KR auto negotiation */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + +- /* Advertise 10G support. */ +- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) +- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4; ++ if (status) ++ return status; + +- /* Advertise 1G support. */ +- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) +- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; ++ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; ++ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + +- /* Restart auto-negotiation. 
*/ +- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART; +- status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1, +- IXGBE_SB_IOSF_TARGET_KX4_PCS0 + +- hw->bus.lan_id, reg_val); ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ } + +- return status; ++ return ixgbe_restart_an_internal_phy_x550em(hw); + } + + /** +@@ -2002,6 +2595,9 @@ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) + return 0; + ++ if (ixgbe_check_reset_blocked(hw)) ++ return 0; ++ + return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); + } + +@@ -2019,14 +2615,12 @@ static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) + *link_up = false; + + /* read this twice back to back to indicate current status */ +- ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, + &autoneg_status); + if (ret) + return ret; + +- ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, + &autoneg_status); + if (ret) + return ret; +@@ -2057,7 +2651,8 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; + +- if (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) { ++ if (!(hw->mac.type == ixgbe_mac_X550EM_x && ++ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) { + speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + return ixgbe_setup_kr_speed_x550em(hw, speed); +@@ -2072,7 +2667,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) + return 0; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ MDIO_MMD_AN, + 
&speed); + if (status) + return status; +@@ -2133,10 +2728,10 @@ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx) + + /* To turn on the LED, set mode to ON. */ + hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); ++ MDIO_MMD_VEND1, &phy_data); + phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK; + hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); ++ MDIO_MMD_VEND1, phy_data); + + return 0; + } +@@ -2155,14 +2750,70 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) + + /* To turn on the LED, set mode to ON. */ + hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); ++ MDIO_MMD_VEND1, &phy_data); + phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK; + hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); ++ MDIO_MMD_VEND1, phy_data); + + return 0; + } + ++/** ++ * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware ++ * @hw: pointer to the HW structure ++ * @maj: driver version major number ++ * @min: driver version minor number ++ * @build: driver version build number ++ * @sub: driver version sub build number ++ * @len: length of driver_ver string ++ * @driver_ver: driver string ++ * ++ * Sends driver version number to firmware through the manageability ++ * block. On success return 0 ++ * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring ++ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
++ **/ ++static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, ++ u8 build, u8 sub, u16 len, ++ const char *driver_ver) ++{ ++ struct ixgbe_hic_drv_info2 fw_cmd; ++ s32 ret_val; ++ int i; ++ ++ if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string))) ++ return IXGBE_ERR_INVALID_ARGUMENT; ++ ++ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; ++ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; ++ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; ++ fw_cmd.port_num = (u8)hw->bus.func; ++ fw_cmd.ver_maj = maj; ++ fw_cmd.ver_min = min; ++ fw_cmd.ver_build = build; ++ fw_cmd.ver_sub = sub; ++ fw_cmd.hdr.checksum = 0; ++ memcpy(fw_cmd.driver_string, driver_ver, len); ++ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, ++ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); ++ ++ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { ++ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, ++ sizeof(fw_cmd), ++ IXGBE_HI_COMMAND_TIMEOUT, ++ true); ++ if (ret_val) ++ continue; ++ ++ if (fw_cmd.hdr.cmd_or_resp.ret_status != ++ FW_CEM_RESP_STATUS_SUCCESS) ++ return IXGBE_ERR_HOST_INTERFACE_COMMAND; ++ return 0; ++ } ++ ++ return ret_val; ++} ++ + /** ixgbe_get_lcd_x550em - Determine lowest common denominator + * @hw: pointer to hardware structure + * @lcd_speed: pointer to lowest common link speed +@@ -2179,7 +2830,7 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, + *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; + + status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ MDIO_MMD_AN, + &an_lp_status); + if (status) + return status; +@@ -2208,7 +2859,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) + { + bool pause, asm_dir; + u32 reg_val; +- s32 rc; ++ s32 rc = 0; + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { +@@ -2251,33 +2902,122 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) + return IXGBE_ERR_CONFIG; + } + +- if 
(hw->device_id != IXGBE_DEV_ID_X550EM_X_KR && +- hw->device_id != IXGBE_DEV_ID_X550EM_A_KR && +- hw->device_id != IXGBE_DEV_ID_X550EM_A_KR_L) +- return 0; ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_X_KR: ++ case IXGBE_DEV_ID_X550EM_A_KR: ++ case IXGBE_DEV_ID_X550EM_A_KR_L: ++ rc = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ++ ®_val); ++ if (rc) ++ return rc; ++ ++ reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | ++ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); ++ if (pause) ++ reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; ++ if (asm_dir) ++ reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; ++ rc = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ++ reg_val); ++ ++ /* This device does not fully support AN. */ ++ hw->fc.disable_fc_autoneg = true; ++ break; ++ case IXGBE_DEV_ID_X550EM_X_XFI: ++ hw->fc.disable_fc_autoneg = true; ++ break; ++ default: ++ break; ++ } ++ return rc; ++} + +- rc = hw->mac.ops.read_iosf_sb_reg(hw, +- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), +- IXGBE_SB_IOSF_TARGET_KR_PHY, +- ®_val); +- if (rc) +- return rc; ++/** ++ * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37 ++ * @hw: pointer to hardware structure ++ **/ ++static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) ++{ ++ u32 link_s1, lp_an_page_low, an_cntl_1; ++ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; ++ ixgbe_link_speed speed; ++ bool link_up; + +- reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | +- IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); +- if (pause) +- reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; +- if (asm_dir) +- reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; +- rc = hw->mac.ops.write_iosf_sb_reg(hw, +- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), +- IXGBE_SB_IOSF_TARGET_KR_PHY, +- reg_val); ++ /* AN should have completed when the cable was plugged in. ++ * Look for reasons to bail out. Bail out if: ++ * - FC autoneg is disabled, or if ++ * - link is not up. 
++ */ ++ if (hw->fc.disable_fc_autoneg) { ++ hw_err(hw, "Flow control autoneg is disabled"); ++ goto out; ++ } + +- /* This device does not fully support AN. */ +- hw->fc.disable_fc_autoneg = true; ++ hw->mac.ops.check_link(hw, &speed, &link_up, false); ++ if (!link_up) { ++ hw_err(hw, "The link is down"); ++ goto out; ++ } + +- return rc; ++ /* Check at auto-negotiation has completed */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_S1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1); ++ ++ if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) { ++ hw_dbg(hw, "Auto-Negotiation did not complete\n"); ++ status = IXGBE_ERR_FC_NOT_NEGOTIATED; ++ goto out; ++ } ++ ++ /* Read the 10g AN autoc and LP ability registers and resolve ++ * local flow control settings accordingly ++ */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1); ++ ++ if (status) { ++ hw_dbg(hw, "Auto-Negotiation did not complete\n"); ++ goto out; ++ } ++ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low); ++ ++ if (status) { ++ hw_dbg(hw, "Auto-Negotiation did not complete\n"); ++ goto out; ++ } ++ ++ status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low, ++ IXGBE_KRM_AN_CNTL_1_SYM_PAUSE, ++ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE, ++ IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE, ++ IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE); ++ ++out: ++ if (!status) { ++ hw->fc.fc_was_autonegged = true; ++ } else { ++ hw->fc.fc_was_autonegged = false; ++ hw->fc.current_mode = hw->fc.requested_mode; ++ } ++} ++ ++/** ++ * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings ++ * @hw: pointer to hardware structure ++ **/ ++static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw) ++{ ++ hw->fc.fc_was_autonegged = false; ++ hw->fc.current_mode = hw->fc.requested_mode; + } + + /** ixgbe_enter_lplu_x550em - Transition to 
low power states +@@ -2326,7 +3066,7 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) + return ixgbe_set_copper_phy_power(hw, false); + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ MDIO_MMD_AN, + &speed); + if (status) + return status; +@@ -2348,20 +3088,20 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) + + /* Clear AN completed indication */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ MDIO_MMD_AN, + &autoneg_reg); + if (status) + return status; + +- status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ status = hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, ++ MDIO_MMD_AN, + &an_10g_cntl_reg); + if (status) + return status; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ MDIO_MMD_AN, + &autoneg_reg); + if (status) + return status; +@@ -2378,6 +3118,50 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) + } + + /** ++ * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs ++ * @hw: pointer to hardware structure ++ */ ++static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) ++{ ++ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ s32 rc; ++ ++ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) ++ return 0; ++ ++ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store); ++ if (rc) ++ return rc; ++ memset(store, 0, sizeof(store)); ++ ++ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store); ++ if (rc) ++ return rc; ++ ++ return ixgbe_setup_fw_link(hw); ++} ++ ++/** ++ * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp ++ * @hw: pointer to hardware structure ++ */ ++static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) ++{ ++ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ s32 rc; ++ ++ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store); ++ if 
(rc) ++ return rc; ++ ++ if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) { ++ ixgbe_shutdown_fw_phy(hw); ++ return IXGBE_ERR_OVERTEMP; ++ } ++ return 0; ++} ++ ++/** + * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register + * @hw: pointer to hardware structure + * +@@ -2398,6 +3182,18 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) + hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; ++#if 1 /* Since by Intel FW(LEK8),LAN controller 1 default set port 0 use phy address 0 ++ * and port 1 use phy address 1, we swap it for Porsche2 platform. ++ * By hilbert. ++ */ ++ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { ++ /*hw_err(hw, "####swap phy address used for different lan id in LAN conroller-1\n");*/ ++ hw->phy.mdio.prtad = (hw->bus.lan_id == 0) ? (1) : (0); ++ /*hw_err(hw, "####lan id: %d, phy address:%d\n", ++ hw->bus.lan_id, ++ hw->phy.mdio.prtad);*/ ++ } ++#endif + } + } + +@@ -2433,7 +3229,7 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) + /* Set functions pointers based on phy type */ + switch (hw->phy.type) { + case ixgbe_phy_x550em_kx4: +- phy->ops.setup_link = ixgbe_setup_kx4_x550em; ++ phy->ops.setup_link = NULL; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; +@@ -2442,6 +3238,12 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; ++ case ixgbe_phy_x550em_xfi: ++ /* link is managed by HW */ ++ phy->ops.setup_link = NULL; ++ phy->ops.read_reg = ixgbe_read_phy_reg_x550em; ++ phy->ops.write_reg = ixgbe_write_phy_reg_x550em; ++ break; + case ixgbe_phy_x550em_ext_t: + /* Save NW management interface connected on board. 
This is used + * to determine internal PHY mode +@@ -2463,6 +3265,19 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) + phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; + phy->ops.reset = ixgbe_reset_phy_t_X550em; + break; ++ case ixgbe_phy_sgmii: ++ phy->ops.setup_link = NULL; ++ break; ++ case ixgbe_phy_fw: ++ phy->ops.setup_link = ixgbe_setup_fw_link; ++ phy->ops.reset = ixgbe_reset_phy_fw; ++ break; ++ case ixgbe_phy_ext_1g_t: ++ phy->ops.setup_link = NULL; ++ phy->ops.read_reg = NULL; ++ phy->ops.write_reg = NULL; ++ phy->ops.reset = NULL; ++ break; + default: + break; + } +@@ -2488,6 +3303,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) + /* Fallthrough */ + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_KX4: ++ case IXGBE_DEV_ID_X550EM_X_XFI: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + media_type = ixgbe_media_type_backplane; +@@ -2500,6 +3316,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: + media_type = ixgbe_media_type_copper; + break; + default: +@@ -2519,7 +3337,7 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) + + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, +- IXGBE_MDIO_PMA_PMD_DEV_TYPE, ++ MDIO_MMD_PMAPMD, + ®); + if (status) + return status; +@@ -2530,7 +3348,7 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) + if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + ®); + if (status) + return status; +@@ -2539,7 +3357,7 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) + + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, +- 
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + reg); + if (status) + return status; +@@ -2567,6 +3385,13 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) + hlreg0 &= ~IXGBE_HLREG0_MDCSPD; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + break; ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ /* Select fast MDIO clock speed for these devices */ ++ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); ++ hlreg0 |= IXGBE_HLREG0_MDCSPD; ++ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); ++ break; + default: + break; + } +@@ -2586,6 +3411,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) + u32 ctrl = 0; + u32 i; + bool link_up = false; ++ u32 swfw_mask = hw->phy.phy_semaphore_mask; + + /* Call adapter stop to disable Tx/Rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); +@@ -2613,6 +3439,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) + hw->phy.sfp_setup_needed = false; + } + ++ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) ++ return status; ++ + /* Reset PHY */ + if (!hw->phy.reset_disable && hw->phy.ops.reset) + hw->phy.ops.reset(hw); +@@ -2631,9 +3460,16 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) + ctrl = IXGBE_CTRL_RST; + } + ++ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); ++ if (status) { ++ hw_dbg(hw, "semaphore failed with %d", status); ++ return IXGBE_ERR_SWFW_SYNC; ++ } ++ + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); + usleep_range(1000, 1200); + + /* Poll for reset bit to self-clear meaning reset is complete */ +@@ -2728,6 +3564,90 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, + } + + /** ++ * ixgbe_setup_fc_backplane_x550em_a - Set up flow control ++ * @hw: pointer to hardware structure ++ * ++ * Called at init time to set up flow control. 
++ **/ ++static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) ++{ ++ s32 status = 0; ++ u32 an_cntl = 0; ++ ++ /* Validate the requested mode */ ++ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { ++ hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); ++ return IXGBE_ERR_INVALID_LINK_SETTINGS; ++ } ++ ++ if (hw->fc.requested_mode == ixgbe_fc_default) ++ hw->fc.requested_mode = ixgbe_fc_full; ++ ++ /* Set up the 1G and 10G flow control advertisement registers so the ++ * HW will be able to do FC autoneg once the cable is plugged in. If ++ * we link at 10G, the 1G advertisement is harmless and vice versa. ++ */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl); ++ ++ if (status) { ++ hw_dbg(hw, "Auto-Negotiation did not complete\n"); ++ return status; ++ } ++ ++ /* The possible values of fc.requested_mode are: ++ * 0: Flow control is completely disabled ++ * 1: Rx flow control is enabled (we can receive pause frames, ++ * but not send pause frames). ++ * 2: Tx flow control is enabled (we can send pause frames but ++ * we do not support receiving pause frames). ++ * 3: Both Rx and Tx flow control (symmetric) are enabled. ++ * other: Invalid. ++ */ ++ switch (hw->fc.requested_mode) { ++ case ixgbe_fc_none: ++ /* Flow control completely disabled by software override. */ ++ an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | ++ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); ++ break; ++ case ixgbe_fc_tx_pause: ++ /* Tx Flow control is enabled, and Rx Flow control is ++ * disabled by software override. ++ */ ++ an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; ++ an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; ++ break; ++ case ixgbe_fc_rx_pause: ++ /* Rx Flow control is enabled and Tx Flow control is ++ * disabled by software override. 
Since there really ++ * isn't a way to advertise that we are capable of RX ++ * Pause ONLY, we will advertise that we support both ++ * symmetric and asymmetric Rx PAUSE, as such we fall ++ * through to the fc_full statement. Later, we will ++ * disable the adapter's ability to send PAUSE frames. ++ */ ++ case ixgbe_fc_full: ++ /* Flow control (both Rx and Tx) is enabled by SW override. */ ++ an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | ++ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; ++ break; ++ default: ++ hw_err(hw, "Flow control param set incorrectly\n"); ++ return IXGBE_ERR_CONFIG; ++ } ++ ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl); ++ ++ /* Restart auto-negotiation. */ ++ status = ixgbe_restart_an_internal_phy_x550em(hw); ++ ++ return status; ++} ++ ++/** + * ixgbe_set_mux - Set mux for port 1 access with CS4227 + * @hw: pointer to hardware structure + * @state: set mux if 1, clear if 0 +@@ -2881,7 +3801,13 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + if (hw->mac.ops.acquire_swfw_sync(hw, mask)) + return IXGBE_ERR_SWFW_SYNC; + ++#if 0 /* To use C22 MDI access function created by our own. 
++ * By hilbert ++ */ + status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data); ++#else ++ status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, phy_data); ++#endif + hw->mac.ops.release_swfw_sync(hw, mask); + + return status; +@@ -2914,7 +3840,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + .clear_vfta = &ixgbe_clear_vfta_generic, \ + .set_vfta = &ixgbe_set_vfta_generic, \ + .fc_enable = &ixgbe_fc_enable_generic, \ +- .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, \ ++ .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_x550, \ + .init_uta_tables = &ixgbe_init_uta_tables_generic, \ + .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ + .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ +@@ -2933,6 +3859,7 @@ static const struct ixgbe_mac_operations mac_ops_X550 = { + X550_COMMON_MAC + .led_on = ixgbe_led_on_generic, + .led_off = ixgbe_led_off_generic, ++ .init_led_link_act = ixgbe_init_led_link_act_generic, + .reset_hw = &ixgbe_reset_hw_X540, + .get_media_type = &ixgbe_get_media_type_X540, + .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, +@@ -2947,12 +3874,14 @@ static const struct ixgbe_mac_operations mac_ops_X550 = { + .prot_autoc_read = prot_autoc_read_generic, + .prot_autoc_write = prot_autoc_write_generic, + .setup_fc = ixgbe_setup_fc_generic, ++ .fc_autoneg = ixgbe_fc_autoneg, + }; + + static const struct ixgbe_mac_operations mac_ops_X550EM_x = { + X550_COMMON_MAC + .led_on = ixgbe_led_on_t_x550em, + .led_off = ixgbe_led_off_t_x550em, ++ .init_led_link_act = ixgbe_init_led_link_act_generic, + .reset_hw = &ixgbe_reset_hw_X550em, + .get_media_type = &ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, +@@ -2965,6 +3894,29 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = { + .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, + .init_swfw_sync = &ixgbe_init_swfw_sync_X540, + .setup_fc = NULL, /* defined later */ ++ .fc_autoneg = ixgbe_fc_autoneg, ++ .read_iosf_sb_reg = 
ixgbe_read_iosf_sb_reg_x550, ++ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, ++}; ++ ++static const struct ixgbe_mac_operations mac_ops_X550EM_x_fw = { ++ X550_COMMON_MAC ++ .led_on = NULL, ++ .led_off = NULL, ++ .init_led_link_act = NULL, ++ .reset_hw = &ixgbe_reset_hw_X550em, ++ .get_media_type = &ixgbe_get_media_type_X550em, ++ .get_san_mac_addr = NULL, ++ .get_wwn_prefix = NULL, ++ .setup_link = &ixgbe_setup_mac_link_X540, ++ .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, ++ .get_bus_info = &ixgbe_get_bus_info_X550em, ++ .setup_sfp = ixgbe_setup_sfp_modules_X550em, ++ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, ++ .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, ++ .init_swfw_sync = &ixgbe_init_swfw_sync_X540, ++ .setup_fc = NULL, ++ .fc_autoneg = ixgbe_fc_autoneg, + .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550, + .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, + }; +@@ -2973,6 +3925,28 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = { + X550_COMMON_MAC + .led_on = ixgbe_led_on_t_x550em, + .led_off = ixgbe_led_off_t_x550em, ++ .init_led_link_act = ixgbe_init_led_link_act_generic, ++ .reset_hw = ixgbe_reset_hw_X550em, ++ .get_media_type = ixgbe_get_media_type_X550em, ++ .get_san_mac_addr = NULL, ++ .get_wwn_prefix = NULL, ++ .setup_link = &ixgbe_setup_mac_link_X540, ++ .get_link_capabilities = ixgbe_get_link_capabilities_X550em, ++ .get_bus_info = ixgbe_get_bus_info_X550em, ++ .setup_sfp = ixgbe_setup_sfp_modules_X550em, ++ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, ++ .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, ++ .setup_fc = ixgbe_setup_fc_x550em, ++ .fc_autoneg = ixgbe_fc_autoneg, ++ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, ++ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, ++}; ++ ++static struct ixgbe_mac_operations mac_ops_x550em_a_fw = { ++ X550_COMMON_MAC ++ .led_on = ixgbe_led_on_generic, ++ .led_off = ixgbe_led_off_generic, ++ .init_led_link_act = 
ixgbe_init_led_link_act_generic, + .reset_hw = ixgbe_reset_hw_X550em, + .get_media_type = ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, +@@ -2984,6 +3958,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = { + .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, + .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, + .setup_fc = ixgbe_setup_fc_x550em, ++ .fc_autoneg = ixgbe_fc_autoneg, + .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, + .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, + }; +@@ -3017,12 +3992,11 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ + .setup_link = &ixgbe_setup_phy_link_generic, \ +- .set_phy_power = NULL, \ +- .check_overtemp = &ixgbe_tn_check_overtemp, \ +- .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, ++ .set_phy_power = NULL, + + static const struct ixgbe_phy_operations phy_ops_X550 = { + X550_COMMON_PHY ++ .check_overtemp = &ixgbe_tn_check_overtemp, + .init = NULL, + .identify = &ixgbe_identify_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, +@@ -3031,19 +4005,27 @@ static const struct ixgbe_phy_operations phy_ops_X550 = { + + static const struct ixgbe_phy_operations phy_ops_X550EM_x = { + X550_COMMON_PHY ++ .check_overtemp = &ixgbe_tn_check_overtemp, + .init = &ixgbe_init_phy_ops_X550em, + .identify = &ixgbe_identify_phy_x550em, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, +- .read_i2c_combined = &ixgbe_read_i2c_combined_generic, +- .write_i2c_combined = &ixgbe_write_i2c_combined_generic, +- .read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, +- .write_i2c_combined_unlocked = +- &ixgbe_write_i2c_combined_generic_unlocked, ++}; ++ ++static const struct ixgbe_phy_operations phy_ops_x550em_x_fw = { ++ X550_COMMON_PHY ++ .check_overtemp = NULL, ++ .init = ixgbe_init_phy_ops_X550em, ++ 
.identify = ixgbe_identify_phy_x550em, ++ .read_reg = NULL, ++ .write_reg = NULL, ++ .read_reg_mdi = NULL, ++ .write_reg_mdi = NULL, + }; + + static const struct ixgbe_phy_operations phy_ops_x550em_a = { + X550_COMMON_PHY ++ .check_overtemp = &ixgbe_tn_check_overtemp, + .init = &ixgbe_init_phy_ops_X550em, + .identify = &ixgbe_identify_phy_x550em, + .read_reg = &ixgbe_read_phy_reg_x550a, +@@ -3052,6 +4034,31 @@ static const struct ixgbe_phy_operations phy_ops_x550em_a = { + .write_reg_mdi = &ixgbe_write_phy_reg_mdi, + }; + ++static const struct ixgbe_phy_operations phy_ops_x550em_a_fw = { ++ X550_COMMON_PHY ++ .check_overtemp = ixgbe_check_overtemp_fw, ++ .init = ixgbe_init_phy_ops_X550em, ++ .identify = ixgbe_identify_phy_fw, ++#if 0 /* Declare C22 MDI directly access functions. By hilbert */ ++ .read_reg = NULL, ++ .write_reg = NULL, ++ .read_reg_mdi = NULL, ++ .write_reg_mdi = NULL, ++#else ++ .read_reg = &ixgbe_read_phy_reg_x550a, ++ .write_reg = &ixgbe_write_phy_reg_x550a, ++ .read_reg_mdi = &ixgbe_read_phy_reg_mdio, ++ .write_reg_mdi = &ixgbe_write_phy_reg_mdio, ++#endif ++}; ++ ++static const struct ixgbe_link_operations link_ops_x550em_x = { ++ .read_link = &ixgbe_read_i2c_combined_generic, ++ .read_link_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, ++ .write_link = &ixgbe_write_i2c_combined_generic, ++ .write_link_unlocked = &ixgbe_write_i2c_combined_generic_unlocked, ++}; ++ + static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(X550) + }; +@@ -3082,14 +4089,35 @@ const struct ixgbe_info ixgbe_X550EM_x_info = { + .phy_ops = &phy_ops_X550EM_x, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_X550EM_x, ++ .link_ops = &link_ops_x550em_x, ++}; ++ ++const struct ixgbe_info ixgbe_x550em_x_fw_info = { ++ .mac = ixgbe_mac_X550EM_x, ++ .get_invariants = ixgbe_get_invariants_X550_x_fw, ++ .mac_ops = &mac_ops_X550EM_x_fw, ++ .eeprom_ops = &eeprom_ops_X550EM_x, ++ .phy_ops = &phy_ops_x550em_x_fw, ++ .mbx_ops = &mbx_ops_generic, 
++ .mvals = ixgbe_mvals_X550EM_x, + }; + + const struct ixgbe_info ixgbe_x550em_a_info = { + .mac = ixgbe_mac_x550em_a, +- .get_invariants = &ixgbe_get_invariants_X550_x, ++ .get_invariants = &ixgbe_get_invariants_X550_a, + .mac_ops = &mac_ops_x550em_a, + .eeprom_ops = &eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_x550em_a, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_x550em_a, + }; ++ ++const struct ixgbe_info ixgbe_x550em_a_fw_info = { ++ .mac = ixgbe_mac_x550em_a, ++ .get_invariants = ixgbe_get_invariants_X550_a_fw, ++ .mac_ops = &mac_ops_x550em_a_fw, ++ .eeprom_ops = &eeprom_ops_X550EM_x, ++ .phy_ops = &phy_ops_x550em_a_fw, ++ .mbx_ops = &mbx_ops_generic, ++ .mvals = ixgbe_mvals_x550em_a, ++}; +-- +2.7.4 + diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/Makefile b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/Makefile new file mode 100644 index 000000000000..9a4cb381bda8 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/Makefile @@ -0,0 +1 @@ +obj-m:=pegatron_fn_6254_dn_f_cpld.o pegatron_hwmon_mcu.o pegatron_fn_6254_dn_f_sfp.o diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_cpld.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_cpld.c new file mode 100644 index 000000000000..c8095b21151d --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_cpld.c @@ -0,0 +1,1133 @@ +/* + * A CPLD driver for the fn_6254_dn_f + * + * Copyright (C) 2018 Pegatron Corporation. + * Peter5_Lin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef pegatron_fn_6254_dn_f_DEBUG +#ifdef pegatron_fn_6254_dn_f_DEBUG +#define DBG(x) x +#else +#define DBG(x) +#endif /* DEBUG */ + +#define CPLD_SFP_MAX_GROUP 3 +#define SFP_PORT_MAX_NUM 54 +#define SFP_EEPROM_SIZE 256 +#define QSFP_FIRST_PORT 48 +#define CPLDA_SFP_NUM 24 +#define CPLDB_SFP_NUM 12 +#define CPLDC_SFP_NUM 18 +#define CPLDA_ADDRESS 0x74 +#define CPLDB_ADDRESS 0x75 +#define CPLDC_ADDRESS 0x76 +#define CPLD_VERSION_REG 0x0 +#define SYNC_CONTROL_REG 0x1 +#define CPLD_SYS_PWR_LED_REG 0xD +#define CPLD_LOC_FAN_LED_REG 0xE +#define CPLD_EEPROM_WRITE_REG 0x12 +#define CPLD_PSU_REG 0x15 +#define SFP_13_36_SCL_BASE 0x4 +#define SFP_1_12_SCL_BASE 0x2 +#define SFP_37_54_SCL_BASE 0x5 +#define SFP_13_36_STATUS_BASE 0x8 +#define SFP_1_12_STATUS_BASE 0x5 +#define SFP_37_54_STATUS_BASE 0x9 +#define QSFP_PRESENT_ADDRESS 0xF +#define QSFP_RESET_ADDRESS_BASE 0x10 +#define QSFP_MODSELN_ADDRESS 0x17 +#define QSFP_LOW_POWER_ADDRESS 0x18 +#define CPLD_SERIAL_LED_BIT 2 +#define CPLD_EEPROM_WRITE_BIT 2 +#define SFP_PRESENT_BASE 0 +#define SFP_RXLOSS_BASE 1 +#define SFP_TXFAULT_BASE 2 +#define SFP_TXDISABLE_BASE 3 +#define CPLD_PSU_PWOK_BASE 0 +#define CPLD_PSU_PRESENT_BASE 2 +#define GET_BIT(data, bit, value) value = (data >> bit) & 0x1 +#define SET_BIT(data, bit) data |= (1 << bit) +#define CLEAR_BIT(data, bit) data &= ~(1 << bit) + +static LIST_HEAD(cpld_client_list); +static struct mutex list_lock; +/* Addresses scanned for 
pegatron_fn_6254_dn_f_cpld + */ +static const unsigned short normal_i2c[] = { CPLDA_ADDRESS, CPLDB_ADDRESS, CPLDC_ADDRESS, I2C_CLIENT_END }; + +struct cpld_client_node { + struct i2c_client *client; + struct list_head list; +}; + +int pegatron_fn_6254_dn_f_cpld_read(unsigned short addr, u8 reg) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int data = -EPERM; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client->addr == addr) { + data = i2c_smbus_read_byte_data(cpld_node->client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, addr, reg, data)); + break; + } + } + + mutex_unlock(&list_lock); + + return data; +} +EXPORT_SYMBOL(pegatron_fn_6254_dn_f_cpld_read); + +int pegatron_fn_6254_dn_f_cpld_write(unsigned short addr, u8 reg, u8 val) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int ret = -EIO; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client->addr == addr) { + ret = i2c_smbus_write_byte_data(cpld_node->client, reg, val); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, addr, reg, val)); + break; + } + } + + mutex_unlock(&list_lock); + + return ret; +} +EXPORT_SYMBOL(pegatron_fn_6254_dn_f_cpld_write); + +static ssize_t read_cpld_HWversion(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_VERSION_REG; + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + return sprintf(buf, "%02x\n", (data >> 5) & 0x7); +} + +static ssize_t read_cpld_SWversion(struct device *dev, struct 
device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_VERSION_REG; + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + + return sprintf(buf, "%02x\n", (data & 0x1f)); +} + +static ssize_t show_allled_ctrl(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = SYNC_CONTROL_REG; + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + data &= 0x3; + + return sprintf(buf, "%02x\n", data); +} + +static ssize_t set_allled_ctrl(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = SYNC_CONTROL_REG; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + data = val | (data & 0xfc); + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + pegatron_fn_6254_dn_f_cpld_write(client->addr, reg, data); + + return count; +} + +static ssize_t show_serial_led(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0, reg = SYNC_CONTROL_REG; + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, CPLD_SERIAL_LED_BIT, val); + + return sprintf(buf, "%02x\n", val); +} + +static ssize_t set_serial_led(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = SYNC_CONTROL_REG; + long val = 0; + + 
if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + if(val) + SET_BIT(data, CPLD_SERIAL_LED_BIT); + else + CLEAR_BIT(data, CPLD_SERIAL_LED_BIT); + + pegatron_fn_6254_dn_f_cpld_write(client->addr, reg, data); + + return count; +} + +static ssize_t show_sys_led(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_SYS_PWR_LED_REG; + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + data = (data >> 5) & 0x7; + + return sprintf(buf, "%02x\n", data); +} + +static ssize_t set_sys_led(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_SYS_PWR_LED_REG; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + data = (val << 5) | (data & 0x1f); + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + pegatron_fn_6254_dn_f_cpld_write(client->addr, reg, data); + + return count; +} +static ssize_t show_pwr_led(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_SYS_PWR_LED_REG; + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + data = (data >> 2) & 0x7; + + return sprintf(buf, "%02x\n", data); +} + +static ssize_t set_pwr_led(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, 
reg = CPLD_SYS_PWR_LED_REG; + long val = 0; + + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + data = (val << 2) | (data & 0xe3); + + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + pegatron_fn_6254_dn_f_cpld_write(client->addr, reg, data); + + return count; +} +static ssize_t show_loc_led(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_LOC_FAN_LED_REG; + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + data = (data>>4) & 0x3; + + return sprintf(buf, "%02x\n", data); +} + +static ssize_t set_loc_led(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_LOC_FAN_LED_REG; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + data = (val << 4) | (data & 0xf); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + pegatron_fn_6254_dn_f_cpld_write(client->addr, reg, data); + + return count; +} + +static ssize_t show_fan_led(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_LOC_FAN_LED_REG; + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + data &= 0x7; + + return sprintf(buf, "%02x\n", data); +} + +static ssize_t set_fan_led(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = 
CPLD_LOC_FAN_LED_REG; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + data = val | (data & 0xf8); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + pegatron_fn_6254_dn_f_cpld_write(client->addr, reg, data); + + return count; +} + +static ssize_t show_eeprom_write_enable(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0, reg = CPLD_EEPROM_WRITE_REG; + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, reg, val); + + return sprintf(buf, "%02x\n", val); +} + +static ssize_t set_eeprom_write_enable(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = CPLD_EEPROM_WRITE_REG; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + if(val) + SET_BIT(data, CPLD_EEPROM_WRITE_BIT); + else + CLEAR_BIT(data, CPLD_EEPROM_WRITE_BIT); + + pegatron_fn_6254_dn_f_cpld_write(client->addr, reg, data); + + return count; +} + +static ssize_t read_psu_present(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0, reg = CPLD_PSU_REG; + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, (CPLD_PSU_PRESENT_BASE + attr->index), val); + + return sprintf(buf, "%02x\n", val); +} + 
+static ssize_t read_psu_status(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val=0, reg = CPLD_PSU_REG; + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, (CPLD_PSU_PWOK_BASE + attr->index), val); + + return sprintf(buf, "%02x\n", val); +} + +#define GET_SFP_STATUS_ADDRESS(idx, reg) \ + if(idx < CPLDB_SFP_NUM) \ + reg = SFP_1_12_STATUS_BASE + (idx / 2); \ + else if(idx < CPLDA_SFP_NUM + CPLDB_SFP_NUM) \ + reg = SFP_13_36_STATUS_BASE + ((idx-CPLDB_SFP_NUM) / 2); \ + else \ + reg = SFP_37_54_STATUS_BASE + ((idx-CPLDB_SFP_NUM-CPLDA_SFP_NUM) / 2) + +static ssize_t get_sfp_present(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 reg = 0, data = 0, val = 0; + + GET_SFP_STATUS_ADDRESS(attr->index, reg); + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, SFP_PRESENT_BASE + 4*(attr->index % 2), val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_sfp_tx_disable(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 reg = 0, data = 0, val = 0; + + GET_SFP_STATUS_ADDRESS(attr->index, reg); + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, SFP_TXDISABLE_BASE + 4*(attr->index % 2), val); + + return sprintf(buf, "%d\n", val); +} +static ssize_t set_sfp_tx_disable(struct 
device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 reg = 0, data = 0; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + GET_SFP_STATUS_ADDRESS(attr->index, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + + if(val) + SET_BIT(data, SFP_TXDISABLE_BASE + 4*(attr->index % 2)); + else + CLEAR_BIT(data, SFP_TXDISABLE_BASE + 4*(attr->index % 2)); + + pegatron_fn_6254_dn_f_cpld_write(client->addr, reg, data); + + return count; +} +static ssize_t get_sfp_rx_loss(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 reg = 0, data = 0, val = 0; + + GET_SFP_STATUS_ADDRESS(attr->index, reg); + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, SFP_RXLOSS_BASE + 4*(attr->index % 2), val); + + return sprintf(buf, "%d\n", val); +} +static ssize_t get_sfp_tx_fault(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 reg = 0, data = 0, val = 0; + + GET_SFP_STATUS_ADDRESS(attr->index, reg); + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, SFP_TXFAULT_BASE + 4*(attr->index % 2), val); + + return sprintf(buf, "%d\n",val); +} + +static ssize_t get_qsfp_present(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = 
to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0, reg = QSFP_PRESENT_ADDRESS; + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, (attr->index % QSFP_FIRST_PORT), val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_qsfp_reset(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 reg = (QSFP_RESET_ADDRESS_BASE + attr->index % QSFP_FIRST_PORT / 4), data =0; + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + data = (data >> ((attr->index % QSFP_FIRST_PORT % 4)*2)) & 0x3; + + return sprintf(buf, "%d\n", data); +} + +static ssize_t set_qsfp_reset(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 reg = (QSFP_RESET_ADDRESS_BASE + attr->index % QSFP_FIRST_PORT / 4), data = 0; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + CLEAR_BIT(data, (attr->index % 4)*2); + CLEAR_BIT(data, (attr->index % 4)*2+1); + data |= (val & 0x3) << ((attr->index % QSFP_FIRST_PORT % 4)*2); + + pegatron_fn_6254_dn_f_cpld_write(client->addr, reg, data); + + return count; +} + +static ssize_t get_qsfp_lowpower(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0, reg = 
QSFP_LOW_POWER_ADDRESS; + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, (attr->index % QSFP_FIRST_PORT), val); + return sprintf(buf, "%02x\n", val); +} + +static ssize_t set_qsfp_lowpower(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = QSFP_LOW_POWER_ADDRESS; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + if(val) + SET_BIT(data, (attr->index % QSFP_FIRST_PORT)); + else + CLEAR_BIT(data, (attr->index % QSFP_FIRST_PORT)); + + pegatron_fn_6254_dn_f_cpld_write(client->addr, reg, data); + + return count; +} + +static ssize_t get_qsfp_modeseln(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0, reg = QSFP_MODSELN_ADDRESS; + + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, (attr->index % QSFP_FIRST_PORT), val); + return sprintf(buf, "%02x\n", val); +} + +static ssize_t set_qsfp_modeseln(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = QSFP_MODSELN_ADDRESS; + long val = 0; + + if (kstrtol(buf, 16, &val)) + { + return -EINVAL; + } + data = pegatron_fn_6254_dn_f_cpld_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: 
%x, data: %x\r\n", __func__, client->addr, reg, data)); + if(val) + SET_BIT(data, (attr->index % QSFP_FIRST_PORT)); + else + CLEAR_BIT(data, (attr->index % QSFP_FIRST_PORT)); + + pegatron_fn_6254_dn_f_cpld_write(client->addr, reg, data); + + return count; +} + +static SENSOR_DEVICE_ATTR(cpld_hw_version, S_IRUGO, read_cpld_HWversion, NULL, 0); +static SENSOR_DEVICE_ATTR(cpld_sw_version, S_IRUGO, read_cpld_SWversion, NULL, 0); +static SENSOR_DEVICE_ATTR(cpld_allled_ctrl, S_IRUGO | S_IWUSR, show_allled_ctrl, set_allled_ctrl, 0); +static SENSOR_DEVICE_ATTR(serial_led_enable, S_IRUGO | S_IWUSR, show_serial_led, set_serial_led, 0); +static SENSOR_DEVICE_ATTR(sys_led, S_IRUGO | S_IWUSR, show_sys_led, set_sys_led, 0); +static SENSOR_DEVICE_ATTR(pwr_led, S_IRUGO | S_IWUSR, show_pwr_led, set_pwr_led, 0); +static SENSOR_DEVICE_ATTR(loc_led, S_IRUGO | S_IWUSR, show_loc_led, set_loc_led, 0); +static SENSOR_DEVICE_ATTR(fan_led, S_IRUGO | S_IWUSR, show_fan_led, set_fan_led, 0); +static SENSOR_DEVICE_ATTR(eeprom_write_enable, S_IRUGO | S_IWUSR, show_eeprom_write_enable, set_eeprom_write_enable, 0); +static SENSOR_DEVICE_ATTR(psu_1_present, S_IRUGO, read_psu_present, NULL, 1); +static SENSOR_DEVICE_ATTR(psu_2_present, S_IRUGO, read_psu_present, NULL, 0); +static SENSOR_DEVICE_ATTR(psu_1_status, S_IRUGO, read_psu_status, NULL, 1); +static SENSOR_DEVICE_ATTR(psu_2_status, S_IRUGO, read_psu_status, NULL, 0); + +#define SET_SFP_ATTR(_num) \ + static SENSOR_DEVICE_ATTR(sfp##_num##_present, S_IRUGO, get_sfp_present, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(sfp##_num##_tx_disable, S_IRUGO | S_IWUSR, get_sfp_tx_disable, set_sfp_tx_disable, _num-1); \ + static SENSOR_DEVICE_ATTR(sfp##_num##_rx_loss, S_IRUGO, get_sfp_rx_loss, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(sfp##_num##_tx_fault, S_IRUGO, get_sfp_tx_fault, NULL, _num-1) + +#define SET_QSFP_ATTR(_num) \ + static SENSOR_DEVICE_ATTR(sfp##_num##_present, S_IRUGO, get_qsfp_present, NULL, _num-1); \ + static 
SENSOR_DEVICE_ATTR(sfp##_num##_reset, S_IRUGO | S_IWUSR, get_qsfp_reset, set_qsfp_reset, _num-1); \ + static SENSOR_DEVICE_ATTR(sfp##_num##_lowpower, S_IRUGO | S_IWUSR, get_qsfp_lowpower, set_qsfp_lowpower, _num-1); \ + static SENSOR_DEVICE_ATTR(sfp##_num##_modeseln, S_IRUGO | S_IWUSR, get_qsfp_modeseln, set_qsfp_modeseln, _num-1) + +SET_SFP_ATTR(1);SET_SFP_ATTR(2);SET_SFP_ATTR(3);SET_SFP_ATTR(4);SET_SFP_ATTR(5);SET_SFP_ATTR(6);SET_SFP_ATTR(7);SET_SFP_ATTR(8);SET_SFP_ATTR(9); +SET_SFP_ATTR(10);SET_SFP_ATTR(11);SET_SFP_ATTR(12);SET_SFP_ATTR(13);SET_SFP_ATTR(14);SET_SFP_ATTR(15);SET_SFP_ATTR(16);SET_SFP_ATTR(17);SET_SFP_ATTR(18); +SET_SFP_ATTR(19);SET_SFP_ATTR(20);SET_SFP_ATTR(21);SET_SFP_ATTR(22);SET_SFP_ATTR(23);SET_SFP_ATTR(24);SET_SFP_ATTR(25);SET_SFP_ATTR(26);SET_SFP_ATTR(27); +SET_SFP_ATTR(28);SET_SFP_ATTR(29);SET_SFP_ATTR(30);SET_SFP_ATTR(31);SET_SFP_ATTR(32);SET_SFP_ATTR(33);SET_SFP_ATTR(34);SET_SFP_ATTR(35);SET_SFP_ATTR(36); +SET_SFP_ATTR(37);SET_SFP_ATTR(38);SET_SFP_ATTR(39);SET_SFP_ATTR(40);SET_SFP_ATTR(41);SET_SFP_ATTR(42);SET_SFP_ATTR(43);SET_SFP_ATTR(44);SET_SFP_ATTR(45); +SET_SFP_ATTR(46);SET_SFP_ATTR(47);SET_SFP_ATTR(48); +SET_QSFP_ATTR(49);SET_QSFP_ATTR(50);SET_QSFP_ATTR(51);SET_QSFP_ATTR(52);SET_QSFP_ATTR(53);SET_QSFP_ATTR(54); + +static struct attribute *pegatron_fn_6254_dn_f_cpldA_attributes[] = { + &sensor_dev_attr_cpld_hw_version.dev_attr.attr, + &sensor_dev_attr_cpld_sw_version.dev_attr.attr, + + &sensor_dev_attr_sfp13_present.dev_attr.attr, + &sensor_dev_attr_sfp13_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp13_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp13_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp14_present.dev_attr.attr, + &sensor_dev_attr_sfp14_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp14_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp14_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp15_present.dev_attr.attr, + &sensor_dev_attr_sfp15_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp15_rx_loss.dev_attr.attr, + 
&sensor_dev_attr_sfp15_tx_fault.dev_attr.attr, + + + &sensor_dev_attr_sfp16_present.dev_attr.attr, + &sensor_dev_attr_sfp16_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp16_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp16_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp17_present.dev_attr.attr, + &sensor_dev_attr_sfp17_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp17_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp17_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp18_present.dev_attr.attr, + &sensor_dev_attr_sfp18_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp18_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp18_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp19_present.dev_attr.attr, + &sensor_dev_attr_sfp19_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp19_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp19_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp20_present.dev_attr.attr, + &sensor_dev_attr_sfp20_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp20_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp20_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp21_present.dev_attr.attr, + &sensor_dev_attr_sfp21_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp21_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp21_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp22_present.dev_attr.attr, + &sensor_dev_attr_sfp22_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp22_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp22_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp23_present.dev_attr.attr, + &sensor_dev_attr_sfp23_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp23_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp23_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp24_present.dev_attr.attr, + &sensor_dev_attr_sfp24_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp24_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp24_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp25_present.dev_attr.attr, + &sensor_dev_attr_sfp25_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp25_rx_loss.dev_attr.attr, + 
&sensor_dev_attr_sfp25_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp26_present.dev_attr.attr, + &sensor_dev_attr_sfp26_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp26_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp26_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp27_present.dev_attr.attr, + &sensor_dev_attr_sfp27_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp27_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp27_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp28_present.dev_attr.attr, + &sensor_dev_attr_sfp28_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp28_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp28_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp29_present.dev_attr.attr, + &sensor_dev_attr_sfp29_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp29_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp29_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp30_present.dev_attr.attr, + &sensor_dev_attr_sfp30_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp30_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp30_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp31_present.dev_attr.attr, + &sensor_dev_attr_sfp31_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp31_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp31_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp32_present.dev_attr.attr, + &sensor_dev_attr_sfp32_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp32_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp32_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp33_present.dev_attr.attr, + &sensor_dev_attr_sfp33_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp33_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp33_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp34_present.dev_attr.attr, + &sensor_dev_attr_sfp34_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp34_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp34_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp35_present.dev_attr.attr, + &sensor_dev_attr_sfp35_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp35_rx_loss.dev_attr.attr, + 
&sensor_dev_attr_sfp35_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp36_present.dev_attr.attr, + &sensor_dev_attr_sfp36_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp36_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp36_tx_fault.dev_attr.attr, + + NULL +}; + +static struct attribute *pegatron_fn_6254_dn_f_cpldB_attributes[] = { + &sensor_dev_attr_cpld_hw_version.dev_attr.attr, + &sensor_dev_attr_cpld_sw_version.dev_attr.attr, + &sensor_dev_attr_cpld_allled_ctrl.dev_attr.attr, + &sensor_dev_attr_serial_led_enable.dev_attr.attr, + &sensor_dev_attr_sys_led.dev_attr.attr, + &sensor_dev_attr_pwr_led.dev_attr.attr, + &sensor_dev_attr_loc_led.dev_attr.attr, + &sensor_dev_attr_fan_led.dev_attr.attr, + &sensor_dev_attr_eeprom_write_enable.dev_attr.attr, + &sensor_dev_attr_psu_1_present.dev_attr.attr, + &sensor_dev_attr_psu_2_present.dev_attr.attr, + &sensor_dev_attr_psu_1_status.dev_attr.attr, + &sensor_dev_attr_psu_2_status.dev_attr.attr, + + &sensor_dev_attr_sfp1_present.dev_attr.attr, + &sensor_dev_attr_sfp1_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp1_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp1_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp2_present.dev_attr.attr, + &sensor_dev_attr_sfp2_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp2_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp2_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp3_present.dev_attr.attr, + &sensor_dev_attr_sfp3_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp3_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp3_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp4_present.dev_attr.attr, + &sensor_dev_attr_sfp4_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp4_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp4_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp5_present.dev_attr.attr, + &sensor_dev_attr_sfp5_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp5_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp5_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp6_present.dev_attr.attr, + 
&sensor_dev_attr_sfp6_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp6_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp6_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp7_present.dev_attr.attr, + &sensor_dev_attr_sfp7_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp7_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp7_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp8_present.dev_attr.attr, + &sensor_dev_attr_sfp8_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp8_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp8_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp9_present.dev_attr.attr, + &sensor_dev_attr_sfp9_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp9_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp9_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp10_present.dev_attr.attr, + &sensor_dev_attr_sfp10_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp10_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp10_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp11_present.dev_attr.attr, + &sensor_dev_attr_sfp11_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp11_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp11_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp12_present.dev_attr.attr, + &sensor_dev_attr_sfp12_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp12_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp12_tx_fault.dev_attr.attr, + NULL +}; + +static struct attribute *pegatron_fn_6254_dn_f_cpldC_attributes[] = { + &sensor_dev_attr_cpld_hw_version.dev_attr.attr, + &sensor_dev_attr_cpld_sw_version.dev_attr.attr, + + &sensor_dev_attr_sfp37_present.dev_attr.attr, + &sensor_dev_attr_sfp37_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp37_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp37_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp38_present.dev_attr.attr, + &sensor_dev_attr_sfp38_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp38_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp38_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp39_present.dev_attr.attr, + &sensor_dev_attr_sfp39_tx_disable.dev_attr.attr, 
+ &sensor_dev_attr_sfp39_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp39_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp40_present.dev_attr.attr, + &sensor_dev_attr_sfp40_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp40_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp40_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp41_present.dev_attr.attr, + &sensor_dev_attr_sfp41_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp41_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp41_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp42_present.dev_attr.attr, + &sensor_dev_attr_sfp42_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp42_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp42_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp43_present.dev_attr.attr, + &sensor_dev_attr_sfp43_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp43_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp43_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp44_present.dev_attr.attr, + &sensor_dev_attr_sfp44_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp44_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp44_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp45_present.dev_attr.attr, + &sensor_dev_attr_sfp45_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp45_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp45_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp46_present.dev_attr.attr, + &sensor_dev_attr_sfp46_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp46_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp46_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp47_present.dev_attr.attr, + &sensor_dev_attr_sfp47_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp47_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp47_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp48_present.dev_attr.attr, + &sensor_dev_attr_sfp48_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp48_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp48_tx_fault.dev_attr.attr, + + &sensor_dev_attr_sfp49_present.dev_attr.attr, + &sensor_dev_attr_sfp49_lowpower.dev_attr.attr, + 
&sensor_dev_attr_sfp49_modeseln.dev_attr.attr, + &sensor_dev_attr_sfp49_reset.dev_attr.attr, + + &sensor_dev_attr_sfp50_present.dev_attr.attr, + &sensor_dev_attr_sfp50_lowpower.dev_attr.attr, + &sensor_dev_attr_sfp50_modeseln.dev_attr.attr, + &sensor_dev_attr_sfp50_reset.dev_attr.attr, + + &sensor_dev_attr_sfp51_present.dev_attr.attr, + &sensor_dev_attr_sfp51_lowpower.dev_attr.attr, + &sensor_dev_attr_sfp51_modeseln.dev_attr.attr, + &sensor_dev_attr_sfp51_reset.dev_attr.attr, + + &sensor_dev_attr_sfp52_present.dev_attr.attr, + &sensor_dev_attr_sfp52_lowpower.dev_attr.attr, + &sensor_dev_attr_sfp52_modeseln.dev_attr.attr, + &sensor_dev_attr_sfp52_reset.dev_attr.attr, + + &sensor_dev_attr_sfp53_present.dev_attr.attr, + &sensor_dev_attr_sfp53_lowpower.dev_attr.attr, + &sensor_dev_attr_sfp53_modeseln.dev_attr.attr, + &sensor_dev_attr_sfp53_reset.dev_attr.attr, + + &sensor_dev_attr_sfp54_present.dev_attr.attr, + &sensor_dev_attr_sfp54_lowpower.dev_attr.attr, + &sensor_dev_attr_sfp54_modeseln.dev_attr.attr, + &sensor_dev_attr_sfp54_reset.dev_attr.attr, + NULL +}; + +static const struct attribute_group pegatron_fn_6254_dn_f_cpldA_group = { .attrs = pegatron_fn_6254_dn_f_cpldA_attributes}; +static const struct attribute_group pegatron_fn_6254_dn_f_cpldB_group = { .attrs = pegatron_fn_6254_dn_f_cpldB_attributes}; +static const struct attribute_group pegatron_fn_6254_dn_f_cpldC_group = { .attrs = pegatron_fn_6254_dn_f_cpldC_attributes}; + +static void pegatron_fn_6254_dn_f_cpld_add_client(struct i2c_client *client) +{ + struct cpld_client_node *node = kzalloc(sizeof(struct cpld_client_node), GFP_KERNEL); + + if (!node) { + dev_dbg(&client->dev, "Can't allocate cpld_client_node (0x%x)\n", client->addr); + return; + } + + node->client = client; + + mutex_lock(&list_lock); + list_add(&node->list, &cpld_client_list); + mutex_unlock(&list_lock); +} + +static void pegatron_fn_6254_dn_f_cpld_remove_client(struct i2c_client *client) +{ + struct list_head *list_node = NULL; + struct 
cpld_client_node *cpld_node = NULL; + int found = 0; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client == client) { + found = 1; + break; + } + } + + if (found) { + list_del(list_node); + kfree(cpld_node); + } + + mutex_unlock(&list_lock); +} + +static int pegatron_fn_6254_dn_f_cpld_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { + dev_dbg(&client->dev, "i2c_check_functionality failed (0x%x)\n", client->addr); + status = -EIO; + goto exit; + } + + /* Register sysfs hooks */ + switch(client->addr) + { + case CPLDA_ADDRESS: + status = sysfs_create_group(&client->dev.kobj, &pegatron_fn_6254_dn_f_cpldA_group); + break; + case CPLDB_ADDRESS: + status = sysfs_create_group(&client->dev.kobj, &pegatron_fn_6254_dn_f_cpldB_group); + break; + case CPLDC_ADDRESS: + status = sysfs_create_group(&client->dev.kobj, &pegatron_fn_6254_dn_f_cpldC_group); + break; + default: + dev_dbg(&client->dev, "i2c_check_CPLD failed (0x%x)\n", client->addr); + status = -EIO; + goto exit; + break; + } + + if (status) { + goto exit; + } + + dev_info(&client->dev, "chip found\n"); + pegatron_fn_6254_dn_f_cpld_add_client(client); + + return 0; + +exit: + return status; +} + +static int pegatron_fn_6254_dn_f_cpld_remove(struct i2c_client *client) +{ + switch(client->addr) + { + case CPLDA_ADDRESS: + sysfs_remove_group(&client->dev.kobj, &pegatron_fn_6254_dn_f_cpldA_group); + break; + case CPLDB_ADDRESS: + sysfs_remove_group(&client->dev.kobj, &pegatron_fn_6254_dn_f_cpldB_group); + break; + case CPLDC_ADDRESS: + sysfs_remove_group(&client->dev.kobj, &pegatron_fn_6254_dn_f_cpldC_group); + break; + default: + dev_dbg(&client->dev, "i2c_remove_CPLD failed (0x%x)\n", client->addr); + break; + } + + + pegatron_fn_6254_dn_f_cpld_remove_client(client); + return 0; +} + 
+static const struct i2c_device_id pegatron_fn_6254_dn_f_cpld_id[] = { + { "fn_6254_dn_f_cpld", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, pegatron_fn_6254_dn_f_cpld_id); + +static struct i2c_driver pegatron_fn_6254_dn_f_cpld_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "pegatron_fn_6254_dn_f_cpld", + }, + .probe = pegatron_fn_6254_dn_f_cpld_probe, + .remove = pegatron_fn_6254_dn_f_cpld_remove, + .id_table = pegatron_fn_6254_dn_f_cpld_id, + .address_list = normal_i2c, +}; + +static int __init pegatron_fn_6254_dn_f_cpld_init(void) +{ + mutex_init(&list_lock); + + return i2c_add_driver(&pegatron_fn_6254_dn_f_cpld_driver); +} + +static void __exit pegatron_fn_6254_dn_f_cpld_exit(void) +{ + i2c_del_driver(&pegatron_fn_6254_dn_f_cpld_driver); +} + +MODULE_AUTHOR("Peter5 Lin "); +MODULE_DESCRIPTION("pegatron_fn_6254_dn_f_cpld driver"); +MODULE_LICENSE("GPL"); + +module_init(pegatron_fn_6254_dn_f_cpld_init); +module_exit(pegatron_fn_6254_dn_f_cpld_exit); diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_sfp.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_sfp.c new file mode 100644 index 000000000000..c9a5d576ce71 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_sfp.c @@ -0,0 +1,431 @@ +/* + * A SFP driver for the fn_6254_dn_f platform + * + * Copyright (C) 2018 Pegatron Corporation. + * Peter5_Lin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef PEGA_DEBUG +/*#define PEGA_DEBUG*/ +#ifdef PEGA_DEBUG +#define DBG(x) x +#else +#define DBG(x) +#endif /* DEBUG */ + +#define SFP_EEPROM_SIZE 256 +#define SFP_EEPROM_A0_ADDR 0x50 +#define SFP_EEPROM_A2_ADDR 0x51 +#define SFP_EEPROM_BUS_TYPE I2C_SMBUS_I2C_BLOCK_DATA +#define CPLDA_SFP_NUM 24 +#define CPLDB_SFP_NUM 12 +#define CPLDC_SFP_NUM 18 +#define CPLDA_ADDRESS 0x74 +#define CPLDB_ADDRESS 0x75 +#define CPLDC_ADDRESS 0x76 +#define SFP_13_36_SCL_BASE 0x4 +#define SFP_1_12_SCL_BASE 0x2 +#define SFP_37_54_SCL_BASE 0x5 +#define QSFP_I2C_ENABLE_BASE 0x17 +#define GET_BIT(data, bit, value) value = (data >> bit) & 0x1 +#define SET_BIT(data, bit) data |= (1 << bit) +#define CLEAR_BIT(data, bit) data &= ~(1 << bit) + +enum cpld_croups { cpld_group_a, cpld_group_b, cpld_group_c}; + +static const unsigned short normal_i2c[] = { SFP_EEPROM_A0_ADDR, SFP_EEPROM_A2_ADDR, I2C_CLIENT_END }; +static char SFP_CPLD_GROUPA_MAPPING[CPLDA_SFP_NUM][16]={0}; +static char SFP_CPLD_GROUPB_MAPPING[CPLDB_SFP_NUM][16]={0}; +static char SFP_CPLD_GROUPC_MAPPING[CPLDC_SFP_NUM][16]={0}; + +/* + * This parameter is to help this driver avoid blocking other drivers out + * of I2C for potentially troublesome amounts of time. With a 100 kHz I2C + * clock, one 256 byte read takes about 1/43 second which is excessive; + * but the 1/170 second it takes at 400 kHz may be quite reasonable; and + * at 1 MHz (Fm+) a 1/430 second delay could easily be invisible. + * + * This value is forced to be a power of two so that writes align on pages. 
+ */ +static unsigned io_limit = 128; +module_param(io_limit, uint, 0); +MODULE_PARM_DESC(io_limit, "Maximum bytes per I/O (default 128)"); + +/* + * Specs often allow 5 msec for a page write, sometimes 20 msec; + * it's important to recover from write timeouts. + */ +static unsigned write_timeout = 25; +module_param(write_timeout, uint, 0); +MODULE_PARM_DESC(write_timeout, "Time (in ms) to try writes (default 25)"); + + +struct fn_6254_dn_f_sfp_data { + struct mutex lock; + struct bin_attribute bin; + int use_smbus; + kernel_ulong_t driver_data; + + struct i2c_client *client; +}; + +extern int pegatron_fn_6254_dn_f_cpld_read(unsigned short cpld_addr, u8 reg); +extern int pegatron_fn_6254_dn_f_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +static ssize_t fn_6254_dn_f_sfp_eeprom_read(struct fn_6254_dn_f_sfp_data *data, char *buf, + unsigned offset, size_t count) +{ + struct i2c_msg msg[2]; + u8 msgbuf[2]; + struct i2c_client *client = data->client; + unsigned long timeout, read_time; + int status; + + memset(msg, 0, sizeof(msg)); + + if (count > io_limit) + count = io_limit; + + /* Smaller eeproms can work given some SMBus extension calls */ + if (count > I2C_SMBUS_BLOCK_MAX) + count = I2C_SMBUS_BLOCK_MAX; + + /* + * Reads fail if the previous write didn't complete yet. We may + * loop a few times until this one succeeds, waiting at least + * long enough for one entire page write to work. 
+ */ + timeout = jiffies + msecs_to_jiffies(write_timeout); + do { + read_time = jiffies; + switch (data->use_smbus) { + case I2C_SMBUS_I2C_BLOCK_DATA: + status = i2c_smbus_read_i2c_block_data(client, offset, + count, buf); + break; + case I2C_SMBUS_WORD_DATA: + status = i2c_smbus_read_word_data(client, offset); + if (status >= 0) { + buf[0] = status & 0xff; + if (count == 2) + buf[1] = status >> 8; + status = count; + } + break; + case I2C_SMBUS_BYTE_DATA: + status = i2c_smbus_read_byte_data(client, offset); + if (status >= 0) { + buf[0] = status; + status = count; + } + break; + default: + status = i2c_transfer(client->adapter, msg, 2); + if (status == 2) + status = count; + } + dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n", + count, offset, status, jiffies); + + if (status == count) + return count; + + /* REVISIT: at HZ=100, this is sloooow */ + msleep(1); + } while (time_before(read_time, timeout)); + + return -ETIMEDOUT; +} + +static ssize_t fn_6254_dn_f_sfp_read(struct fn_6254_dn_f_sfp_data *data, + char *buf, loff_t off, size_t count) +{ + ssize_t retval = 0; + + if (unlikely(!count)) + return count; + + /* + * Read data from chip, protecting against concurrent updates + * from this host, but not from other I2C masters. 
+ */ + mutex_lock(&data->lock); + + while (count) { + ssize_t status; + + status = fn_6254_dn_f_sfp_eeprom_read(data, buf, off, count); + if (status <= 0) { + if (retval == 0) + retval = status; + break; + } + buf += status; + off += status; + count -= status; + retval += status; + } + + mutex_unlock(&data->lock); + + return retval; +} + +static ssize_t +fn_6254_dn_f_sfp_bin_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + int i; + u8 cpldData = 0; + struct fn_6254_dn_f_sfp_data *data; + + /*SFP 1-12*/ + for(i=0; iattr.name, SFP_CPLD_GROUPB_MAPPING[i])) + { + pegatron_fn_6254_dn_f_cpld_write(CPLDB_ADDRESS, SFP_1_12_SCL_BASE, i+1); + goto check_done; + } + } + /*SFP 13-36*/ + for(i=0; iattr.name, SFP_CPLD_GROUPA_MAPPING[i])) + { + pegatron_fn_6254_dn_f_cpld_write(CPLDA_ADDRESS, SFP_13_36_SCL_BASE, i+1); + goto check_done; + } + } + + /*SFP 37-54*/ + for(i=0; iattr.name, SFP_CPLD_GROUPC_MAPPING[i])) + { + /* Enable QSFP i2c function */ + if(i >= 12) + { + cpldData = 0xff; + cpldData = pegatron_fn_6254_dn_f_cpld_read(CPLDC_ADDRESS, QSFP_I2C_ENABLE_BASE); + CLEAR_BIT(cpldData, i-12); + pegatron_fn_6254_dn_f_cpld_write(CPLDC_ADDRESS, QSFP_I2C_ENABLE_BASE, cpldData); + } + pegatron_fn_6254_dn_f_cpld_write(CPLDC_ADDRESS, SFP_37_54_SCL_BASE, i+1); + goto check_done; + } + } + +check_done: + data = dev_get_drvdata(container_of(kobj, struct device, kobj)); + + return fn_6254_dn_f_sfp_read(data, buf, off, count); +} + +#define SFP_EEPROM_ATTR(_num) \ + static struct bin_attribute sfp##_num##_eeprom_attr = { \ + .attr = { \ + .name = __stringify(sfp##_num##_eeprom), \ + .mode = S_IRUGO\ + }, \ + .size = SFP_EEPROM_SIZE, \ + .read = fn_6254_dn_f_sfp_bin_read, \ + } + +SFP_EEPROM_ATTR(1);SFP_EEPROM_ATTR(2);SFP_EEPROM_ATTR(3);SFP_EEPROM_ATTR(4);SFP_EEPROM_ATTR(5);SFP_EEPROM_ATTR(6);SFP_EEPROM_ATTR(7);SFP_EEPROM_ATTR(8);SFP_EEPROM_ATTR(9); 
+SFP_EEPROM_ATTR(10);SFP_EEPROM_ATTR(11);SFP_EEPROM_ATTR(12);SFP_EEPROM_ATTR(13);SFP_EEPROM_ATTR(14);SFP_EEPROM_ATTR(15);SFP_EEPROM_ATTR(16);SFP_EEPROM_ATTR(17);SFP_EEPROM_ATTR(18); +SFP_EEPROM_ATTR(19);SFP_EEPROM_ATTR(20);SFP_EEPROM_ATTR(21);SFP_EEPROM_ATTR(22);SFP_EEPROM_ATTR(23);SFP_EEPROM_ATTR(24);SFP_EEPROM_ATTR(25);SFP_EEPROM_ATTR(26);SFP_EEPROM_ATTR(27); +SFP_EEPROM_ATTR(28);SFP_EEPROM_ATTR(29);SFP_EEPROM_ATTR(30);SFP_EEPROM_ATTR(31);SFP_EEPROM_ATTR(32);SFP_EEPROM_ATTR(33);SFP_EEPROM_ATTR(34);SFP_EEPROM_ATTR(35);SFP_EEPROM_ATTR(36); +SFP_EEPROM_ATTR(37);SFP_EEPROM_ATTR(38);SFP_EEPROM_ATTR(39);SFP_EEPROM_ATTR(40);SFP_EEPROM_ATTR(41);SFP_EEPROM_ATTR(42);SFP_EEPROM_ATTR(43);SFP_EEPROM_ATTR(44);SFP_EEPROM_ATTR(45); +SFP_EEPROM_ATTR(46);SFP_EEPROM_ATTR(47);SFP_EEPROM_ATTR(48);SFP_EEPROM_ATTR(49);SFP_EEPROM_ATTR(50);SFP_EEPROM_ATTR(51);SFP_EEPROM_ATTR(52);SFP_EEPROM_ATTR(53);SFP_EEPROM_ATTR(54); + +static struct bin_attribute *fn_6254_dn_f_cpldA_sfp_epprom_attributes[] = { + &sfp13_eeprom_attr, &sfp14_eeprom_attr, &sfp15_eeprom_attr, &sfp16_eeprom_attr, &sfp17_eeprom_attr, &sfp18_eeprom_attr, &sfp19_eeprom_attr, &sfp20_eeprom_attr, + &sfp21_eeprom_attr, &sfp22_eeprom_attr, &sfp23_eeprom_attr, &sfp24_eeprom_attr, &sfp25_eeprom_attr, &sfp26_eeprom_attr, &sfp27_eeprom_attr, &sfp28_eeprom_attr, + &sfp29_eeprom_attr, &sfp30_eeprom_attr, &sfp31_eeprom_attr, &sfp32_eeprom_attr, &sfp33_eeprom_attr, &sfp34_eeprom_attr, &sfp35_eeprom_attr, &sfp36_eeprom_attr, + NULL +}; + +static struct bin_attribute *fn_6254_dn_f_cpldB_sfp_epprom_attributes[] = { + &sfp1_eeprom_attr, &sfp2_eeprom_attr, &sfp3_eeprom_attr, &sfp4_eeprom_attr, &sfp5_eeprom_attr, &sfp6_eeprom_attr, &sfp7_eeprom_attr, &sfp8_eeprom_attr, + &sfp9_eeprom_attr, &sfp10_eeprom_attr, &sfp11_eeprom_attr, &sfp12_eeprom_attr, + NULL +}; + +static struct bin_attribute *fn_6254_dn_f_cpldC_sfp_epprom_attributes[] = { + &sfp37_eeprom_attr, &sfp38_eeprom_attr, &sfp39_eeprom_attr, &sfp40_eeprom_attr, &sfp41_eeprom_attr, 
&sfp42_eeprom_attr, &sfp43_eeprom_attr, &sfp44_eeprom_attr, + &sfp45_eeprom_attr, &sfp46_eeprom_attr, &sfp47_eeprom_attr, &sfp48_eeprom_attr, &sfp49_eeprom_attr, &sfp50_eeprom_attr, &sfp51_eeprom_attr, &sfp52_eeprom_attr, + &sfp53_eeprom_attr, &sfp54_eeprom_attr, + NULL +}; + +static const struct attribute_group fn_6254_dn_f_sfpA_group = { .bin_attrs = fn_6254_dn_f_cpldA_sfp_epprom_attributes}; +static const struct attribute_group fn_6254_dn_f_sfpB_group = { .bin_attrs = fn_6254_dn_f_cpldB_sfp_epprom_attributes}; +static const struct attribute_group fn_6254_dn_f_sfpC_group = { .bin_attrs = fn_6254_dn_f_cpldC_sfp_epprom_attributes}; + +static int fn_6254_dn_f_sfp_device_probe(struct i2c_client *client, const struct i2c_device_id *dev_id) +{ + int use_smbus = SFP_EEPROM_BUS_TYPE; + struct fn_6254_dn_f_sfp_data *data; + int err, i; + unsigned num_addresses; + kernel_ulong_t magic; + + data = kzalloc(sizeof(struct fn_6254_dn_f_sfp_data) , GFP_KERNEL); + if (!data) + return -ENOMEM; + + mutex_init(&data->lock); + data->use_smbus = use_smbus; + /* + * Export the EEPROM bytes through sysfs, since that's convenient. 
+ * By default, only root should see the data (maybe passwords etc) + */ + + data->client = client; + data->driver_data = dev_id->driver_data; + + sysfs_bin_attr_init(&data->bin); + + switch(dev_id->driver_data) + { + case cpld_group_a: + err = sysfs_create_group(&client->dev.kobj, &fn_6254_dn_f_sfpA_group); + if (err) + goto err_clients; + break; + case cpld_group_b: + err = sysfs_create_group(&client->dev.kobj, &fn_6254_dn_f_sfpB_group); + if (err) + goto err_clients; + break; + case cpld_group_c: + err = sysfs_create_group(&client->dev.kobj, &fn_6254_dn_f_sfpC_group); + if (err) + goto err_clients; + break; + default: + printk(KERN_ALERT "i2c_check_CPLD failed\n"); + err = -EIO; + break; + } + + i2c_set_clientdata(client, data); + + return 0; + +err_clients: + kfree(data); + return err; +} + +static int fn_6254_dn_f_sfp_device_remove(struct i2c_client *client) +{ + struct fn_6254_dn_f_sfp_data *data; + int i; + + data = i2c_get_clientdata(client); + + switch(data->driver_data) + { + case cpld_group_a: + sysfs_remove_group(&client->dev.kobj, &fn_6254_dn_f_sfpA_group); + break; + case cpld_group_b: + sysfs_remove_group(&client->dev.kobj, &fn_6254_dn_f_sfpB_group); + break; + case cpld_group_c: + sysfs_remove_group(&client->dev.kobj, &fn_6254_dn_f_sfpC_group); + break; + default: + dev_dbg(&client->dev, "i2c_remove_CPLD failed (0x%x)\n", client->addr); + break; + } + + + return 0; +} + +static const struct i2c_device_id fn_6254_dn_f_sfp_id[] = { + { "fn_6254_dn_f_sfpA", cpld_group_a }, + { "fn_6254_dn_f_sfpB", cpld_group_b }, + { "fn_6254_dn_f_sfpC", cpld_group_c }, + {} +}; +MODULE_DEVICE_TABLE(i2c, fn_6254_dn_f_sfp_id); + +static struct i2c_driver fn_6254_dn_f_sfp_driver = { + .driver = { + .name = "pegatron_fn_6254_dn_f_sfp", + }, + .probe = fn_6254_dn_f_sfp_device_probe, + .remove = fn_6254_dn_f_sfp_device_remove, + .id_table = fn_6254_dn_f_sfp_id, + .address_list = normal_i2c, +}; + +static int __init fn_6254_dn_f_sfp_init(void) +{ + int i; + + /*SFP 1-12*/ + 
for(i=0; i"); +MODULE_DESCRIPTION("fn_6254_dn_f_cpld_mux driver"); +MODULE_LICENSE("GPL"); + +module_init(fn_6254_dn_f_sfp_init); +module_exit(fn_6254_dn_f_sfp_exit); + diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_hwmon_mcu.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_hwmon_mcu.c new file mode 120000 index 000000000000..1357104478a3 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_hwmon_mcu.c @@ -0,0 +1 @@ +../../common/modules/pegatron_hwmon_mcu.c \ No newline at end of file diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/scripts/sensors b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/scripts/sensors new file mode 100755 index 000000000000..6bc6097bc17a --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/scripts/sensors @@ -0,0 +1,7 @@ +#!/bin/bash +docker exec -i pmon sensors "$@" + +#To probe sensors not part of lm-sensors +if [ -r /usr/local/bin/fn_6254_dn_f_sensors.py ]; then + python /usr/local/bin/fn_6254_dn_f_sensors.py get_sensors +fi diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-init.service b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-init.service new file mode 100644 index 000000000000..2cdd391d1556 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-init.service @@ -0,0 +1,13 @@ +[Unit] +Description=Pegastron fn-6254-dn-f Platform initialization service +After=local-fs.target +DefaultDependencies=no + +[Service] +Type=oneshot +ExecStart=/usr/local/bin/pegatron_fn_6254_dn_f_util.py install +ExecStop=/usr/local/bin/pegatron_fn_6254_dn_f_util.py uninstall +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target diff --git 
a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/fn_6254_dn_f_sensors.py b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/fn_6254_dn_f_sensors.py new file mode 100755 index 000000000000..40e23ef01b7e --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/fn_6254_dn_f_sensors.py @@ -0,0 +1,141 @@ +#!/usr/bin/python + +import os +import sys +import logging + +FAN_NUM = 5 +sensors_path = '/sys/bus/i2c/devices/5-0070/' +sensors_nodes = {'fan_rpm': ['_inner_rpm', '_outer_rpm'], + 'fan_vol': ['ADC8_vol', 'ADC7_vol','ADC6_vol', 'ADC5_vol','ADC4_vol', 'ADC3_vol'], + 'temp':['lm75_49_temp', 'lm75_48_temp', 'SA56004_local_temp','SA56004_remote_temp']} +sensors_type = {'fan_rpm': ['Inner RPM', 'Outer RPM'], + 'fan_vol': ['P0.2', 'P0.6','P0.1', 'P1.5','P0.7', 'P1.6'], + 'temp':['lm75_49_temp', 'lm75_48_temp', 'SA56004_local_temp','SA56004_remote_temp']} + +# Get sysfs attribute +def get_attr_value(attr_path): + retval = 'ERR' + if (not os.path.isfile(attr_path)): + return retval + + try: + with open(attr_path, 'r') as fd: + retval = fd.read() + except Exception as error: + logging.error("Unable to open ", attr_path, " file !") + + retval = retval.rstrip('\r\n') + fd.close() + return retval + +def get_fan_status(number): + attr_value = get_attr_value(sensors_path + "fan" + str(number+1) + "_present") + if (attr_value != 'ERR'): + attr_value = int(attr_value, 16) + + if(attr_value == 0): + string = "Connect" + else: + string = "Disconnect" + return string + +def get_fan_alert(number): + attr_value = get_attr_value(sensors_path + "fan" + str(number+1) + "_status_alert") + if (attr_value != 'ERR'): + attr_value = int(attr_value, 16) + + if(attr_value == 0): + string = "Normal" + else: + string = "Abnormal" + return string + +def get_fan_inner_rpm(number): + return get_attr_value(sensors_path + "fan" + str(number+1) + "_inner_rpm") + +def get_fan_outer_rpm(number): + return get_attr_value(sensors_path + "fan" + 
str(number+1) + "_outer_rpm") + +def get_fan(): + for i in range(0,FAN_NUM): + print " " + #status + string = get_fan_status(i) + print "FAN " + str(i+1) + ":" + ' ' + string + if string=='Disconnect': + continue + + #alert + string = get_fan_alert(i) + print " Status:"+ ' ' + string + + #inner rpm + string = get_fan_inner_rpm(i) + print " Inner RPM:"+ string.rjust(10) + ' RPM' + + #outer rpm + string = get_fan_outer_rpm(i) + print " Outer RPM:"+ string.rjust(10) + ' RPM' + + return + +def get_hwmon(): + print " " + string = get_attr_value(sensors_path + "lm75_48_temp") + print "Sensor A: " + string + " C" + + string = get_attr_value(sensors_path + "lm75_49_temp") + print "Sensor B: " + string + " C" + + return + +def get_voltage(): + print " " + nodes = sensors_nodes['fan_vol'] + types = sensors_type['fan_vol'] + for i in range(0,len(nodes)): + string = get_attr_value(sensors_path + nodes[i]) + print types[i] + ': ' + string + " V" + + return + +def init_fan(): + return + +def main(): + """ + Usage: %(scriptName)s command object + + command: + install : install drivers and generate related sysfs nodes + clean : uninstall drivers and remove related sysfs nodes + show : show all systen status + set : change board setting with fan|led|sfp + """ + + if len(sys.argv)<2: + print main.__doc__ + + for arg in sys.argv[1:]: + if arg == 'fan_init': + init_fan() + elif arg == 'get_sensors': + ver = get_attr_value(sensors_path + "fb_hw_version") + print 'HW Version: ' + ver + ver = get_attr_value(sensors_path + "fb_fw_version") + print 'SW Version: ' + ver + get_fan() + get_hwmon() + get_voltage() + elif arg == 'fan_set': + if len(sys.argv[1:])<1: + print main.__doc__ + else: + set_fan(sys.argv[1:]) + return + else: + print main.__doc__ + +if __name__ == "__main__": + main() diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_util.py 
b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_util.py new file mode 100755 index 000000000000..55e6114b11c8 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_util.py @@ -0,0 +1,233 @@ +#!/usr/bin/env python +# +# Copyright (C) 2018 Pegatron, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import sys, getopt +import logging +import os +import commands +import threading + +DEBUG = False + +SFP_MAX_NUM = 48 +TOTAL_PORT_NUM = 54 +CPLDA_SFP_NUM = 24 +CPLDB_SFP_NUM = 12 +CPLDC_SFP_NUM = 18 + +kernel_module = ['i2c_dev', 'i2c-mux-pca954x force_deselect_on_exit=1', 'at24', 'pegatron_fn_6254_dn_f_cpld', 'pegatron_hwmon_mcu', 'pegatron_fn_6254_dn_f_sfp'] +moduleID = ['pca9544', 'pca9544', '24c02', 'pega_hwmon_mcu', 'fn_6254_dn_f_cpld', 'fn_6254_dn_f_cpld', 'fn_6254_dn_f_cpld', 'fn_6254_dn_f_sfpA', 'fn_6254_dn_f_sfpB', 'fn_6254_dn_f_sfpC'] +i2c_check_node = ['i2c-0', 'i2c-1'] +uninstall_check_node = ['-0072', '-0073'] +device_address = ['0x72', '0x73', '0x54', '0x70', '0x74', '0x75', '0x76', '0x50', '0x50', '0x50'] +device_node= ['i2c-2', 'i2c-6', 'i2c-4', 'i2c-5', 'i2c-6', 'i2c-7', 'i2c-8', 'i2c-6', 'i2c-7', 'i2c-8'] + +i2c_prefix = '/sys/bus/i2c/devices/' +cpld_bus = ['6-0074', '7-0075', '8-0076'] +led_nodes = ['sys_led', 'pwr_led', 'loc_led', 'fan_led', "cpld_allled_ctrl", "serial_led_enable"] + +def 
dbg_print(string): + if DEBUG == True: + print string + return + +def do_cmd(cmd, show): + logging.info('Run :' + cmd) + status, output = commands.getstatusoutput(cmd) + dbg_print(cmd + "with result:" + str(status)) + dbg_print("output:" + output) + if status: + logging.info('Failed :' + cmd) + if show: + print('Failed :' + cmd) + return status, output + +def install_driver(): + status, output = do_cmd("depmod -a", 1) + + for i in range(0, len(kernel_module)): + status, output = do_cmd("modprobe " + kernel_module[i], 1) + if status: + return status + + return + +def check_device_position(num): + for i in range(0, len(i2c_check_node)): + status, output = do_cmd("echo " + moduleID[num] + " " + device_address[num] + " > " + i2c_prefix + i2c_check_node[i] + "/new_device", 0) + status, output = do_cmd("ls " + i2c_prefix + device_node[num], 0) + device_node[num] = i2c_check_node[i] + + if status: + status, output = do_cmd("echo " + device_address[num] + " > " + i2c_prefix + i2c_check_node[i] + "/delete_device", 0) + else: + return + + return + +def install_device(): + for i in range(0, len(moduleID)): + if moduleID[i] == "pca9544": + check_device_position(i) + else: + status, output = do_cmd("echo " + moduleID[i] + " " + device_address[i] + " > " + i2c_prefix + device_node[i] + "/new_device", 1) + + return + +def check_driver(): + for i in range(0, len(kernel_module)): + status, output = do_cmd("lsmod | grep " + kernel_module[i], 0) + if status: + status, output = do_cmd("modprobe " + kernel_module[i], 1) + + return + +def do_install(): + status, output = do_cmd("depmod -a", 1) + + check_driver() + install_device() + + return + +def do_uninstall(): + for i in range(0, len(kernel_module)): + status, output = do_cmd("modprobe -rq " + kernel_module[i], 0) + + for i in range(0, len(moduleID)): + if moduleID[i] == "pca9544": + for node in range(0, len(i2c_check_node)): + status, output = do_cmd("ls " + i2c_prefix + str(node) + uninstall_check_node[i], 0) + if not status: + 
status, output = do_cmd("echo " + device_address[i] + " > " + i2c_prefix + i2c_check_node[node] + "/delete_device", 0) + + else: + status, output = do_cmd("echo " + device_address[i] + " > " + i2c_prefix + device_node[i] + "/delete_device", 0) + + return + +led_command = {'sys_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'pwr_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'loc_led': {'on':'0', 'off':'1', 'blink':'2'}, + 'fan_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'cpld_allled_ctrl': {'off':'0', 'mix':'1', 'amber':'2', 'normal':'3'}, + 'serial_led_enable': {'disable':'0', 'enable':'1'}} + +def set_led(args): + """ + Usage: %(scriptName)s set led object command + + object: + sys_led : set SYS led [command: off|green|amber|blink_green|blink_amber] + pwr_led : set PWR led [command: off|green|amber|blink_green|blink_amber] + loc_led : set LOCATOR led [command: off|on|blink] + fan_led : set FAN led [command: off|green|amber|blink_green|blink_amber] + """ + if args[0] not in led_command: + print set_led.__doc__ + sys.exit(0) + + for i in range(0,len(led_nodes)): + if args[0] == led_nodes[i]: + node = i2c_prefix + cpld_bus[1] + '/'+ led_nodes[i] + + command = led_command[args[0]] + data = command[args[1]] + + status, output = do_cmd("echo "+ str(data) + " > "+ node, 1) + + return + +def set_device(args): + """ + Usage: %(scriptName)s command object + + command: + led : set status led sys_led|pwr_led|loc_led|mst_led|fan_led|digit_led + """ + + if args[0] == 'led': + set_led(args[1:]) + return + else: + print set_device.__doc__ + + return + +device_init = {'led': [['led', 'sys_led', 'green'], ['led', 'pwr_led', 'green'], ['led', 'fan_led', 'green'], ['led', 'cpld_allled_ctrl', 'normal'], ['led', 'serial_led_enable', 'enable']]} + +def pega_init(): + #set led + for i in range(0,len(device_init['led'])): + set_device(device_init['led'][i]) + + #set 
tx_disable + for x in range(0, SFP_MAX_NUM): + if x < CPLDB_SFP_NUM: + bus = cpld_bus[1] + elif x < CPLDB_SFP_NUM + CPLDA_SFP_NUM: + bus = cpld_bus[0] + else: + bus = cpld_bus[2] + + nodes = i2c_prefix + bus + '/sfp' + str(x+1) + '_tx_disable' + dbg_print("SFP_TX_DISABLE NODES: " + nodes) + status, output = do_cmd("echo 0 > "+ nodes, 1) + + for x in range(SFP_MAX_NUM, TOTAL_PORT_NUM): + nodes = i2c_prefix + cpld_bus[2] + '/sfp' + str(x+1) + '_reset' + dbg_print("SFP_RESET NODES: " + nodes) + status, output = do_cmd("echo 3 > "+ nodes, 1) + + return + +def main(): + """ + Usage: %(scriptName)s command object + + command: + install : install drivers and generate related sysfs nodes + uninstall : uninstall drivers and remove related sysfs nodes + set : change board setting [led] + debug : debug info [on/off] + """ + + if len(sys.argv)<2: + print main.__doc__ + + for arg in sys.argv[1:]: + if arg == 'install': + do_install() + pega_init() + elif arg == 'uninstall': + do_uninstall() + elif arg == 'set': + if len(sys.argv[2:])<1: + print main.__doc__ + else: + set_device(sys.argv[2:]) + return + elif arg == 'debug': + if sys.argv[2] == 'on': + DEBUG = True + else: + DEBUG = False + else: + print main.__doc__ + +if __name__ == "__main__": + main() From 43d10943cfc7b6fb8c8275ff5a34ae6181074509 Mon Sep 17 00:00:00 2001 From: PeterLin Date: Thu, 11 Apr 2019 16:45:45 +0800 Subject: [PATCH 10/20] remove ixgbe patch --- .../0001-update-Intel-ixgbe-x550-driver.patch | 4648 ----------------- 1 file changed, 4648 deletions(-) delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/kernel_patch/0001-update-Intel-ixgbe-x550-driver.patch diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/kernel_patch/0001-update-Intel-ixgbe-x550-driver.patch b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/kernel_patch/0001-update-Intel-ixgbe-x550-driver.patch deleted file mode 100644 index cc9417d0d21d..000000000000 --- 
a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/kernel_patch/0001-update-Intel-ixgbe-x550-driver.patch +++ /dev/null @@ -1,4648 +0,0 @@ -From 80be203669d5cb1c5755e6195ab3d319547b4f55 Mon Sep 17 00:00:00 2001 -From: PeterLin -Date: Fri, 29 Mar 2019 09:22:35 +0800 -Subject: [PATCH] update Intel ixgbe x550 driver - ---- - drivers/net/ethernet/intel/ixgbe/ixgbe.h | 10 + - drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 28 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 15 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 439 ++++-- - drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 7 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 103 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 75 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 407 +++--- - drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 27 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 153 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 20 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 1668 +++++++++++++++++----- - 12 files changed, 2272 insertions(+), 680 deletions(-) - -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h -index b06e32d..255ec3b 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h -@@ -89,6 +89,7 @@ - - /* Supported Rx Buffer Sizes */ - #define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ -+#define IXGBE_RXBUFFER_1536 1536 - #define IXGBE_RXBUFFER_2K 2048 - #define IXGBE_RXBUFFER_3K 3072 - #define IXGBE_RXBUFFER_4K 4096 -@@ -661,6 +662,9 @@ struct ixgbe_adapter { - #define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) - #define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12) - #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) -+#define IXGBE_FLAG2_EEE_CAPABLE BIT(14) -+#define IXGBE_FLAG2_EEE_ENABLED BIT(15) -+#define IXGBE_FLAG2_RX_LEGACY BIT(16) - - /* Tx fast path data */ - int num_tx_queues; -@@ -861,7 +865,9 @@ enum ixgbe_boards { - board_X540, - 
board_X550, - board_X550EM_x, -+ board_x550em_x_fw, - board_x550em_a, -+ board_x550em_a_fw, - }; - - extern const struct ixgbe_info ixgbe_82598_info; -@@ -869,7 +875,9 @@ extern const struct ixgbe_info ixgbe_82599_info; - extern const struct ixgbe_info ixgbe_X540_info; - extern const struct ixgbe_info ixgbe_X550_info; - extern const struct ixgbe_info ixgbe_X550EM_x_info; -+extern const struct ixgbe_info ixgbe_x550em_x_fw_info; - extern const struct ixgbe_info ixgbe_x550em_a_info; -+extern const struct ixgbe_info ixgbe_x550em_a_fw_info; - #ifdef CONFIG_IXGBE_DCB - extern const struct dcbnl_rtnl_ops dcbnl_ops; - #endif -@@ -1027,4 +1035,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, - struct ixgbe_ring *tx_ring); - u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); - void ixgbe_store_reta(struct ixgbe_adapter *adapter); -+s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, -+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); - #endif /* _IXGBE_H_ */ -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c -index fb51be7..8a32eb7 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c -@@ -139,8 +139,6 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) - case ixgbe_phy_tn: - phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; - phy->ops.check_link = &ixgbe_check_phy_link_tnx; -- phy->ops.get_firmware_version = -- &ixgbe_get_phy_firmware_version_tnx; - break; - case ixgbe_phy_nl: - phy->ops.reset = &ixgbe_reset_phy_nl; -@@ -177,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) - **/ - static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) - { --#ifndef CONFIG_SPARC -- u32 regval; -- u32 i; --#endif - s32 ret_val; - - ret_val = ixgbe_start_hw_generic(hw); -- --#ifndef CONFIG_SPARC -- /* Disable relaxed ordering */ -- for (i = 0; ((i < hw->mac.max_tx_queues) && -- (i < 
IXGBE_DCA_MAX_QUEUES_82598)); i++) { -- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); -- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; -- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); -- } -- -- for (i = 0; ((i < hw->mac.max_rx_queues) && -- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { -- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); -- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | -- IXGBE_DCA_RXCTRL_HEAD_WRO_EN); -- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); -- } --#endif - if (ret_val) - return ret_val; - -@@ -367,7 +343,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) - } - - /* Negotiate the fc mode to use */ -- ixgbe_fc_autoneg(hw); -+ hw->mac.ops.fc_autoneg(hw); - - /* Disable any previous flow control settings */ - fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); -@@ -1179,6 +1155,7 @@ static const struct ixgbe_mac_operations mac_ops_82598 = { - .get_link_capabilities = &ixgbe_get_link_capabilities_82598, - .led_on = &ixgbe_led_on_generic, - .led_off = &ixgbe_led_off_generic, -+ .init_led_link_act = ixgbe_init_led_link_act_generic, - .blink_led_start = &ixgbe_blink_led_start_generic, - .blink_led_stop = &ixgbe_blink_led_stop_generic, - .set_rar = &ixgbe_set_rar_generic, -@@ -1193,6 +1170,7 @@ static const struct ixgbe_mac_operations mac_ops_82598 = { - .set_vfta = &ixgbe_set_vfta_82598, - .fc_enable = &ixgbe_fc_enable_82598, - .setup_fc = ixgbe_setup_fc_generic, -+ .fc_autoneg = ixgbe_fc_autoneg, - .set_fw_drv_ver = NULL, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, - .release_swfw_sync = &ixgbe_release_swfw_sync, -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c -index 63b2500..d602637 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c -@@ -331,8 +331,6 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) - case ixgbe_phy_tn: - phy->ops.check_link = &ixgbe_check_phy_link_tnx; - phy->ops.setup_link = 
&ixgbe_setup_phy_link_tnx; -- phy->ops.get_firmware_version = -- &ixgbe_get_phy_firmware_version_tnx; - break; - default: - break; -@@ -1451,7 +1449,7 @@ do { \ - * @atr_input: input bitstream to compute the hash on - * @input_mask: mask for the input bitstream - * -- * This function serves two main purposes. First it applys the input_mask -+ * This function serves two main purposes. First it applies the input_mask - * to the atr_input resulting in a cleaned up atr_input data stream. - * Secondly it computes the hash and stores it in the bkt_hash field at - * the end of the input byte stream. This way it will be available for -@@ -1591,15 +1589,17 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, - - switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) { - case 0x0000: -- /* mask VLAN ID, fall through to mask VLAN priority */ -+ /* mask VLAN ID */ - fdirm |= IXGBE_FDIRM_VLANID; -+ /* fall through */ - case 0x0FFF: - /* mask VLAN priority */ - fdirm |= IXGBE_FDIRM_VLANP; - break; - case 0xE000: -- /* mask VLAN ID only, fall through */ -+ /* mask VLAN ID only */ - fdirm |= IXGBE_FDIRM_VLANID; -+ /* fall through */ - case 0xEFFF: - /* no VLAN fields masked */ - break; -@@ -1610,8 +1610,9 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, - - switch (input_mask->formatted.flex_bytes & 0xFFFF) { - case 0x0000: -- /* Mask Flex Bytes, fall through */ -+ /* Mask Flex Bytes */ - fdirm |= IXGBE_FDIRM_FLEX; -+ /* fall through */ - case 0xFFFF: - break; - default: -@@ -2204,6 +2205,7 @@ static const struct ixgbe_mac_operations mac_ops_82599 = { - .get_link_capabilities = &ixgbe_get_link_capabilities_82599, - .led_on = &ixgbe_led_on_generic, - .led_off = &ixgbe_led_off_generic, -+ .init_led_link_act = ixgbe_init_led_link_act_generic, - .blink_led_start = &ixgbe_blink_led_start_generic, - .blink_led_stop = &ixgbe_blink_led_stop_generic, - .set_rar = &ixgbe_set_rar_generic, -@@ -2219,6 +2221,7 @@ static const struct ixgbe_mac_operations mac_ops_82599 = { - 
.set_vfta = &ixgbe_set_vfta_generic, - .fc_enable = &ixgbe_fc_enable_generic, - .setup_fc = ixgbe_setup_fc_generic, -+ .fc_autoneg = ixgbe_fc_autoneg, - .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, - .init_uta_tables = &ixgbe_init_uta_tables_generic, - .setup_sfp = &ixgbe_setup_sfp_modules_82599, -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c -index ad33622..fd055cc 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c -@@ -79,16 +79,28 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) - - switch (hw->phy.media_type) { - case ixgbe_media_type_fiber: -- hw->mac.ops.check_link(hw, &speed, &link_up, false); -- /* if link is down, assume supported */ -- if (link_up) -- supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? -+ /* flow control autoneg black list */ -+ switch (hw->device_id) { -+ case IXGBE_DEV_ID_X550EM_A_SFP: -+ case IXGBE_DEV_ID_X550EM_A_SFP_N: -+ supported = false; -+ break; -+ default: -+ hw->mac.ops.check_link(hw, &speed, &link_up, false); -+ /* if link is down, assume supported */ -+ if (link_up) -+ supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? 
- true : false; -- else -- supported = true; -+ else -+ supported = true; -+ } -+ - break; - case ixgbe_media_type_backplane: -- supported = true; -+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) -+ supported = false; -+ else -+ supported = true; - break; - case ixgbe_media_type_copper: - /* only some copper devices support flow control autoneg */ -@@ -100,6 +112,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) - case IXGBE_DEV_ID_X550T1: - case IXGBE_DEV_ID_X550EM_X_10G_T: - case IXGBE_DEV_ID_X550EM_A_10G_T: -+ case IXGBE_DEV_ID_X550EM_A_1G_T: -+ case IXGBE_DEV_ID_X550EM_A_1G_T_L: - supported = true; - break; - default: -@@ -109,6 +123,10 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) - break; - } - -+ if (!supported) -+ hw_dbg(hw, "Device %x does not support flow control autoneg\n", -+ hw->device_id); -+ - return supported; - } - -@@ -153,7 +171,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) - if (ret_val) - return ret_val; - -- /* only backplane uses autoc so fall though */ -+ /* fall through - only backplane uses autoc */ - case ixgbe_media_type_fiber: - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - -@@ -279,6 +297,10 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) - s32 ret_val; - u32 ctrl_ext; - u16 device_caps; -+#if 1 //by hilbert -+ s32 rc; -+ u16 regVal=0; -+#endif - - /* Set the media type */ - hw->phy.media_type = hw->mac.ops.get_media_type(hw); -@@ -298,10 +320,12 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) - IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); - IXGBE_WRITE_FLUSH(hw); - -- /* Setup flow control */ -- ret_val = hw->mac.ops.setup_fc(hw); -- if (ret_val) -- return ret_val; -+ /* Setup flow control if method for doing so */ -+ if (hw->mac.ops.setup_fc) { -+ ret_val = hw->mac.ops.setup_fc(hw); -+ if (ret_val) -+ return ret_val; -+ } - - /* Cashe bit indicating need for crosstalk fix */ - switch (hw->mac.type) { -@@ -322,6 +346,67 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) - /* Clear adapter 
stopped flag */ - hw->adapter_stopped = false; - -+#if 1 /* To modify speed LED polarity and configure led on only for speed 1G in M88E1512 -+ * for Porsche2 platform. By hilbert -+ * From 88E1512 datasheet: -+ * Page register: 0x16 -+ * LED functon control register: 0x10 in page 3 -+ * LED polarity control register: 0x11 in page 3 -+ */ -+ -+ if (hw->mac.type == ixgbe_mac_x550em_a && -+ (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { -+ /* For M88E1512, to select page 3 in register 0x16 */ -+ regVal = 0x03; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ } -+#if 0 //for debug -+ /* For M88E1512, read from register 0x16 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.read_reg(hw, 0x16, MDIO_MMD_PMAPMD, ®Val); -+ if (rc) { -+ hw_err(hw, "phy register read failed, rc:%x\n", rc); -+ } -+ hw_err(hw, "####read phy register 0x16 again, value:%x\n", regVal); -+#endif -+ /* For M88E1512, read from page 3, register 0x11 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.read_reg(hw, 0x11, MDIO_MMD_PMAPMD, ®Val); -+ if (rc) { -+ hw_err(hw, "led polarity register read failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write to page 3 register 0x11 with polarity bit set */ -+ regVal |= 0x01; -+ rc = hw->phy.ops.write_reg(hw, 0x11, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "led polarity register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, read from page 3, register 16 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); -+ if (rc) { -+ hw_err(hw, "led function control register read failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write to page 3 register 16 with only 1000M led on */ -+ regVal = (regVal & 0xFFF0) | 0x0007; -+ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "led function control register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write page 22 back to default 0 */ 
-+ regVal = 0x00; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ } -+ } -+#endif - return 0; - } - -@@ -346,25 +431,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) - } - IXGBE_WRITE_FLUSH(hw); - --#ifndef CONFIG_SPARC -- /* Disable relaxed ordering */ -- for (i = 0; i < hw->mac.max_tx_queues; i++) { -- u32 regval; -- -- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); -- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; -- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); -- } -- -- for (i = 0; i < hw->mac.max_rx_queues; i++) { -- u32 regval; -- -- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); -- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | -- IXGBE_DCA_RXCTRL_HEAD_WRO_EN); -- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); -- } --#endif - return 0; - } - -@@ -390,6 +456,10 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) - status = hw->mac.ops.start_hw(hw); - } - -+ /* Initialize the LED link active for LED blink support */ -+ if (hw->mac.ops.init_led_link_act) -+ hw->mac.ops.init_led_link_act(hw); -+ - return status; - } - -@@ -773,22 +843,100 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) - } - - /** -+ * ixgbe_init_led_link_act_generic - Store the LED index link/activity. -+ * @hw: pointer to hardware structure -+ * -+ * Store the index for the link active LED. This will be used to support -+ * blinking the LED. 
-+ **/ -+s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) -+{ -+ struct ixgbe_mac_info *mac = &hw->mac; -+ u32 led_reg, led_mode; -+ u16 i; -+ -+ led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); -+ -+ /* Get LED link active from the LEDCTL register */ -+ for (i = 0; i < 4; i++) { -+ led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i); -+ -+ if ((led_mode & IXGBE_LED_MODE_MASK_BASE) == -+ IXGBE_LED_LINK_ACTIVE) { -+ mac->led_link_act = i; -+ return 0; -+ } -+ } -+ -+ /* If LEDCTL register does not have the LED link active set, then use -+ * known MAC defaults. -+ */ -+ switch (hw->mac.type) { -+ case ixgbe_mac_x550em_a: -+ mac->led_link_act = 0; -+ break; -+ case ixgbe_mac_X550EM_x: -+ mac->led_link_act = 1; -+ break; -+ default: -+ mac->led_link_act = 2; -+ } -+ -+ return 0; -+} -+ -+/** - * ixgbe_led_on_generic - Turns on the software controllable LEDs. - * @hw: pointer to hardware structure - * @index: led number to turn on - **/ - s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) - { -- u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); -- -- if (index > 3) -- return IXGBE_ERR_PARAM; -- -- /* To turn on the LED, set mode to ON. */ -- led_reg &= ~IXGBE_LED_MODE_MASK(index); -- led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); -- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); -- IXGBE_WRITE_FLUSH(hw); -+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); -+ s32 rc; -+ u16 regVal; -+ -+ /* following led behavior was modified by hilbert, -+ * to force led on through C22 MDI command. 
-+ */ -+ if (hw->mac.type == ixgbe_mac_x550em_a) { -+ /* For M88E1512, to select page 3 in register 22 */ -+ regVal = 0x03; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, read from page 3, register 16 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); -+ if (rc) { -+ hw_err(hw, "led function control register read failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write to page 3 register 16 with force led on */ -+ regVal = (regVal & 0xFF00) | 0x0099; -+ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "led function control register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write page 22 back to default 0 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ } -+ } else { -+ if (index > 3) -+ return IXGBE_ERR_PARAM; -+ -+ /* To turn on the LED, set mode to ON. */ -+ led_reg &= ~IXGBE_LED_MODE_MASK(index); -+ led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); -+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); -+ IXGBE_WRITE_FLUSH(hw); -+ } - - return 0; - } -@@ -801,15 +949,50 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) - s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) - { - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); -- -- if (index > 3) -- return IXGBE_ERR_PARAM; -- -- /* To turn off the LED, set mode to OFF. */ -- led_reg &= ~IXGBE_LED_MODE_MASK(index); -- led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); -- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); -- IXGBE_WRITE_FLUSH(hw); -+ s32 rc; -+ u16 regVal; -+ -+ /* following led behavior was modified by hilbert, -+ * to force led on through C22 MDI command. 
-+ */ -+ if (hw->mac.type == ixgbe_mac_x550em_a) { -+ /* For M88E1512, to select page 3 in register 22 */ -+ regVal = 0x03; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, read from page 3, register 16 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); -+ if (rc) { -+ hw_err(hw, "led function control register read failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write to page 3 register 16 with force led on */ -+ regVal = (regVal & 0xFF00) | 0x0088; -+ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "led function control register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write page 22 back to default 0 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ } -+ } else { -+ if (index > 3) -+ return IXGBE_ERR_PARAM; -+ -+ /* To turn off the LED, set mode to OFF. 
*/ -+ led_reg &= ~IXGBE_LED_MODE_MASK(index); -+ led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); -+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); -+ IXGBE_WRITE_FLUSH(hw); -+ } - - return 0; - } -@@ -2127,7 +2310,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) - } - - /* Negotiate the fc mode to use */ -- ixgbe_fc_autoneg(hw); -+ hw->mac.ops.fc_autoneg(hw); - - /* Disable any previous flow control settings */ - mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); -@@ -2231,8 +2414,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) - * Find the intersection between advertised settings and link partner's - * advertised settings - **/ --static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, -- u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) -+s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, -+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) - { - if ((!(adv_reg)) || (!(lp_reg))) - return IXGBE_ERR_FC_NOT_NEGOTIATED; -@@ -3334,6 +3517,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - else - *speed = IXGBE_LINK_SPEED_100_FULL; - break; -+ case IXGBE_LINKS_SPEED_10_X550EM_A: -+ *speed = IXGBE_LINK_SPEED_UNKNOWN; -+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || -+ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { -+ *speed = IXGBE_LINK_SPEED_10_FULL; -+ } -+ break; - default: - *speed = IXGBE_LINK_SPEED_UNKNOWN; - } -@@ -3491,7 +3681,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, - rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; - for (; i < (num_pb / 2); i++) - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); -- /* Fall through to configure remaining packet buffers */ -+ /* fall through - configure remaining packet buffers */ - case (PBA_STRATEGY_EQUAL): - /* Divide the remaining Rx packet buffer evenly among the TCs */ - rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; -@@ -3530,7 +3720,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, - * Calculates the checksum 
for some buffer on a specified length. The - * checksum calculated is returned. - **/ --static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) -+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) - { - u32 i; - u8 sum = 0; -@@ -3545,43 +3735,29 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) - } - - /** -- * ixgbe_host_interface_command - Issue command to manageability block -+ * ixgbe_hic_unlocked - Issue command to manageability block unlocked - * @hw: pointer to the HW structure -- * @buffer: contains the command to write and where the return status will -- * be placed -+ * @buffer: command to write and where the return status will be placed - * @length: length of buffer, must be multiple of 4 bytes - * @timeout: time in ms to wait for command completion -- * @return_data: read and return data from the buffer (true) or not (false) -- * Needed because FW structures are big endian and decoding of -- * these fields can be 8 bit or 16 bit based on command. Decoding -- * is not easily understood without making a table of commands. -- * So we will leave this up to the caller to read back the data -- * in these cases. - * -- * Communicates with the manageability block. On success return 0 -- * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. -+ * Communicates with the manageability block. On success return 0 -+ * else returns semaphore error when encountering an error acquiring -+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. -+ * -+ * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held -+ * by the caller. 
- **/ --s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, -- u32 length, u32 timeout, -- bool return_data) -+s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, -+ u32 timeout) - { -- u32 hdr_size = sizeof(struct ixgbe_hic_hdr); -- u32 hicr, i, bi, fwsts; -- u16 buf_len, dword_len; -- union { -- struct ixgbe_hic_hdr hdr; -- u32 u32arr[1]; -- } *bp = buffer; -- s32 status; -+ u32 hicr, i, fwsts; -+ u16 dword_len; - - if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { - hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; - } -- /* Take management host interface semaphore */ -- status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); -- if (status) -- return status; - - /* Set bit 9 of FWSTS clearing FW reset indication */ - fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); -@@ -3591,15 +3767,13 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, - hicr = IXGBE_READ_REG(hw, IXGBE_HICR); - if (!(hicr & IXGBE_HICR_EN)) { - hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); -- status = IXGBE_ERR_HOST_INTERFACE_COMMAND; -- goto rel_out; -+ return IXGBE_ERR_HOST_INTERFACE_COMMAND; - } - - /* Calculate length in DWORDs. We must be DWORD aligned */ - if (length % sizeof(u32)) { - hw_dbg(hw, "Buffer length failure, not aligned to dword"); -- status = IXGBE_ERR_INVALID_ARGUMENT; -- goto rel_out; -+ return IXGBE_ERR_INVALID_ARGUMENT; - } - - dword_len = length >> 2; -@@ -3609,7 +3783,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, - */ - for (i = 0; i < dword_len; i++) - IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, -- i, cpu_to_le32(bp->u32arr[i])); -+ i, cpu_to_le32(buffer[i])); - - /* Setting this bit tells the ARC that a new command is pending. */ - IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); -@@ -3623,11 +3797,54 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, - - /* Check command successful completion. 
*/ - if ((timeout && i == timeout) || -- !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { -- hw_dbg(hw, "Command has failed with no status valid.\n"); -- status = IXGBE_ERR_HOST_INTERFACE_COMMAND; -- goto rel_out; -+ !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) -+ return IXGBE_ERR_HOST_INTERFACE_COMMAND; -+ -+ return 0; -+} -+ -+/** -+ * ixgbe_host_interface_command - Issue command to manageability block -+ * @hw: pointer to the HW structure -+ * @buffer: contains the command to write and where the return status will -+ * be placed -+ * @length: length of buffer, must be multiple of 4 bytes -+ * @timeout: time in ms to wait for command completion -+ * @return_data: read and return data from the buffer (true) or not (false) -+ * Needed because FW structures are big endian and decoding of -+ * these fields can be 8 bit or 16 bit based on command. Decoding -+ * is not easily understood without making a table of commands. -+ * So we will leave this up to the caller to read back the data -+ * in these cases. -+ * -+ * Communicates with the manageability block. On success return 0 -+ * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. 
-+ **/ -+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, -+ u32 length, u32 timeout, -+ bool return_data) -+{ -+ u32 hdr_size = sizeof(struct ixgbe_hic_hdr); -+ union { -+ struct ixgbe_hic_hdr hdr; -+ u32 u32arr[1]; -+ } *bp = buffer; -+ u16 buf_len, dword_len; -+ s32 status; -+ u32 bi; -+ -+ if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { -+ hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); -+ return IXGBE_ERR_HOST_INTERFACE_COMMAND; - } -+ /* Take management host interface semaphore */ -+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); -+ if (status) -+ return status; -+ -+ status = ixgbe_hic_unlocked(hw, buffer, length, timeout); -+ if (status) -+ goto rel_out; - - if (!return_data) - goto rel_out; -@@ -3674,6 +3891,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, - * @min: driver version minor number - * @build: driver version build number - * @sub: driver version sub build number -+ * @len: length of driver_ver string -+ * @driver_ver: driver string - * - * Sends driver version number to firmware through the manageability - * block. On success return 0 -@@ -3681,7 +3900,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, - * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
- **/ - s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, -- u8 build, u8 sub) -+ u8 build, u8 sub, __always_unused u16 len, -+ __always_unused const char *driver_ver) - { - struct ixgbe_hic_drv_info fw_cmd; - int i; -@@ -4033,15 +4253,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, - speedcnt++; - highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; - -- /* If we already have link at this speed, just jump out */ -- status = hw->mac.ops.check_link(hw, &link_speed, &link_up, -- false); -- if (status) -- return status; -- -- if (link_speed == IXGBE_LINK_SPEED_10GB_FULL && link_up) -- goto out; -- - /* Set the module link speed */ - switch (hw->phy.media_type) { - case ixgbe_media_type_fiber: -@@ -4093,15 +4304,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, - if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) - highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; - -- /* If we already have link at this speed, just jump out */ -- status = hw->mac.ops.check_link(hw, &link_speed, &link_up, -- false); -- if (status) -- return status; -- -- if (link_speed == IXGBE_LINK_SPEED_1GB_FULL && link_up) -- goto out; -- - /* Set the module link speed */ - switch (hw->phy.media_type) { - case ixgbe_media_type_fiber: -@@ -4208,4 +4410,23 @@ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, - hw_dbg(hw, "Failed to write Rx Rate Select RS0\n"); - return; - } -+ -+ /* Set RS1 */ -+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, -+ IXGBE_I2C_EEPROM_DEV_ADDR2, -+ &eeprom_data); -+ if (status) { -+ hw_dbg(hw, "Failed to read Rx Rate Select RS1\n"); -+ return; -+ } -+ -+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; -+ -+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, -+ IXGBE_I2C_EEPROM_DEV_ADDR2, -+ eeprom_data); -+ if (status) { -+ hw_dbg(hw, "Failed to write Rx Rate Select RS1\n"); -+ return; -+ } - } -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h -index 6d4c260..e083732 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h -@@ -49,6 +49,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); - - s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); - s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); -+s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw); - - s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); - s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); -@@ -110,9 +111,13 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); - void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); - s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); - s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, -- u8 build, u8 ver); -+ u8 build, u8 ver, u16 len, const char *str); -+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); - s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, - u32 timeout, bool return_data); -+s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout); -+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, -+ u32 (*data)[FW_PHY_ACT_DATA_COUNT]); - void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); - bool ixgbe_mng_present(struct ixgbe_hw *hw); - bool ixgbe_mng_enabled(struct ixgbe_hw *hw); -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c -index a137e06..6b23b74 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c -@@ -172,6 +172,7 @@ static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw) - case IXGBE_DEV_ID_82598_BX: - case IXGBE_DEV_ID_82599_KR: - case IXGBE_DEV_ID_X550EM_X_KR: -+ case IXGBE_DEV_ID_X550EM_X_XFI: - return SUPPORTED_10000baseKR_Full; - default: - 
return SUPPORTED_10000baseKX4_Full | -@@ -237,6 +238,7 @@ static int ixgbe_get_settings(struct net_device *netdev, - case ixgbe_phy_tn: - case ixgbe_phy_aq: - case ixgbe_phy_x550em_ext_t: -+ case ixgbe_phy_fw: - case ixgbe_phy_cu_unknown: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; -@@ -394,6 +396,9 @@ static int ixgbe_set_settings(struct net_device *netdev, - if (ecmd->advertising & ADVERTISED_100baseT_Full) - advertised |= IXGBE_LINK_SPEED_100_FULL; - -+ if (ecmd->advertising & ADVERTISED_10baseT_Full) -+ advertised |= IXGBE_LINK_SPEED_10_FULL; -+ - if (old == advertised) - return err; - /* this sets the link speed and restarts auto-neg */ -@@ -491,6 +496,59 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) - { - struct ixgbe_adapter *adapter = netdev_priv(netdev); - adapter->msg_enable = data; -+ -+ /* 2018/11/14 pega-julia modified start */ -+ /* Purpose : Add for light OOB LED static. */ -+ -+ struct ixgbe_hw *hw = &adapter->hw; -+ u16 regVal; -+ s32 rc; -+ -+ /* For M88E1512, write 3 in (page 0,register 22)[Page Address Register] to goto page 3 */ -+ regVal = 0x03; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ -+ /* For M88E1512, read from (page 3, register 16)[LED Function Control Register] */ -+ regVal = 0x00; -+ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); -+ /*hw_err(hw, "[Pega Debug] : current register value = 0x%x\n", regVal);*/ -+ if (rc) -+ hw_err(hw, "led function control register read failed, rc:%x\n", rc); -+ -+ if (data == 0) /* Turn off OOB LED. */ -+ { -+ /* For M88E1512, write to (page 3, register 16) with force led off */ -+ regVal = (regVal & 0xFF00) | 0x0088; -+ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); -+ if (rc) -+ hw_err(hw, "led function control register write failed, rc:%x\n", rc); -+ } -+ else if (data == 1) /* Turn on OOB LED. 
*/ -+ { -+ /* For M88E1512, write to (page 3, register 16) with force led on */ -+ regVal = (regVal & 0xFF00) | 0x0099; -+ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); -+ if (rc) -+ hw_err(hw, "led function control register write failed, rc:%x\n", rc); -+ } -+ else /* Switch OOB LED back to normal. */ -+ { -+ /* For M88E1512, set led back to nornmal in (page 3, register 16). */ -+ regVal = (regVal & 0xFF00) | 0x0017; -+ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); -+ if (rc) -+ hw_err(hw, "led function control register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write 0 in (page 0, register 22) to back to page 0 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ -+ /* 2018/11/14 pega-julia modified end */ - } - - static int ixgbe_get_regs_len(struct net_device *netdev) -@@ -2219,22 +2277,61 @@ static int ixgbe_set_phys_id(struct net_device *netdev, - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - -+ /* Modified by hilbert for C22 MDI directly access */ -+ s32 rc; -+ u16 regVal; -+ /* Modified by hilbert done */ -+ -+ if (!hw->mac.ops.led_on || !hw->mac.ops.led_off) -+ return -EOPNOTSUPP; -+ - switch (state) { - case ETHTOOL_ID_ACTIVE: - adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - return 2; - - case ETHTOOL_ID_ON: -- hw->mac.ops.led_on(hw, hw->bus.func); -+ hw->mac.ops.led_on(hw, hw->mac.led_link_act); - break; - - case ETHTOOL_ID_OFF: -- hw->mac.ops.led_off(hw, hw->bus.func); -+ hw->mac.ops.led_off(hw, hw->mac.led_link_act); - break; - - case ETHTOOL_ID_INACTIVE: - /* Restore LED settings */ -- IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); -+ /* Modified by hilbert for C22 MDI directly access */ -+ if (hw->mac.type == ixgbe_mac_x550em_a) { -+ /* For M88E1512, to select page 3 in register 22 */ -+ regVal = 0x03; -+ rc = 
hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, read from page 3, register 16 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); -+ if (rc) { -+ hw_err(hw, "led function control register read failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write to page 3 register 16 with force led on */ -+ regVal = (regVal & 0xFF00) | 0x0017; -+ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "led function control register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write page 22 back to default 0 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ } -+ } else { -+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); -+ } - break; - } - -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c -index a5428b6..d6d3a78 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c -@@ -84,7 +84,9 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { - [board_X540] = &ixgbe_X540_info, - [board_X550] = &ixgbe_X550_info, - [board_X550EM_x] = &ixgbe_X550EM_x_info, -+ [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info, - [board_x550em_a] = &ixgbe_x550em_a_info, -+ [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info, - }; - - /* ixgbe_pci_tbl - PCI Device ID Table -@@ -129,9 +131,11 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, -+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, - {PCI_VDEVICE(INTEL, 
IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, -+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, -@@ -139,6 +143,8 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, -+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw }, -+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw }, - /* required last entry */ - {0, } - }; -@@ -179,6 +185,7 @@ MODULE_VERSION(DRV_VERSION); - static struct workqueue_struct *ixgbe_wq; - - static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); -+static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); - - static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, - u32 reg, u16 *value) -@@ -374,7 +381,7 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) - if (ixgbe_removed(reg_addr)) - return IXGBE_FAILED_READ_REG; - if (unlikely(hw->phy.nw_mng_if_sel & -- IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) { -+ IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) { - struct ixgbe_adapter *adapter; - int i; - -@@ -2446,6 +2453,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) - { - struct ixgbe_hw *hw = &adapter->hw; - u32 eicr = adapter->interrupt_event; -+ s32 rc; - - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return; -@@ -2484,6 +2492,12 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) - return; - - break; -+ case IXGBE_DEV_ID_X550EM_A_1G_T: -+ case IXGBE_DEV_ID_X550EM_A_1G_T_L: -+ rc = hw->phy.ops.check_overtemp(hw); -+ if (rc != 
IXGBE_ERR_OVERTEMP) -+ return; -+ break; - default: - if (adapter->hw.mac.type >= ixgbe_mac_X540) - return; -@@ -2530,6 +2544,18 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) - return; - } - return; -+ case ixgbe_mac_x550em_a: -+ if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) { -+ adapter->interrupt_event = eicr; -+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; -+ ixgbe_service_event_schedule(adapter); -+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, -+ IXGBE_EICR_GPI_SDP0_X550EM_a); -+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR, -+ IXGBE_EICR_GPI_SDP0_X550EM_a); -+ } -+ return; -+ case ixgbe_mac_X550: - case ixgbe_mac_X540: - if (!(eicr & IXGBE_EICR_TS)) - return; -@@ -5035,7 +5061,7 @@ static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) - static void ixgbe_configure(struct ixgbe_adapter *adapter) - { - struct ixgbe_hw *hw = &adapter->hw; -- -+ - ixgbe_configure_pb(adapter); - #ifdef CONFIG_IXGBE_DCB - ixgbe_configure_dcb(adapter); -@@ -5045,10 +5071,9 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) - * the VLVF registers will not be populated - */ - ixgbe_configure_virtualization(adapter); -- - ixgbe_set_rx_mode(adapter->netdev); - ixgbe_restore_vlan(adapter); -- -+ - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: -@@ -5075,7 +5100,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) - default: - break; - } -- - #ifdef CONFIG_IXGBE_DCA - /* configure DCA */ - if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) -@@ -5291,6 +5315,8 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) - - while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) - usleep_range(1000, 2000); -+ if (adapter->hw.phy.type == ixgbe_phy_fw) -+ ixgbe_watchdog_link_is_down(adapter); - ixgbe_down(adapter); - /* - * If SR-IOV enabled then wait a bit before bringing the adapter -@@ -5706,6 +5732,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) - break; - case ixgbe_mac_x550em_a: - 
adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE; -+ switch (hw->device_id) { -+ case IXGBE_DEV_ID_X550EM_A_1G_T: -+ case IXGBE_DEV_ID_X550EM_A_1G_T_L: -+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; -+ break; -+ default: -+ break; -+ } - /* fall through */ - case ixgbe_mac_X550EM_x: - #ifdef CONFIG_IXGBE_DCB -@@ -5719,6 +5753,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) - #endif /* IXGBE_FCOE */ - /* Fall Through */ - case ixgbe_mac_X550: -+ if (hw->mac.type == ixgbe_mac_X550) -+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; - #ifdef CONFIG_IXGBE_DCA - adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; - #endif -@@ -6093,29 +6129,28 @@ int ixgbe_open(struct net_device *netdev) - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int err, queues; -- -+ - /* disallow open during test */ - if (test_bit(__IXGBE_TESTING, &adapter->state)) - return -EBUSY; -- -+ - netif_carrier_off(netdev); -- -+ - /* allocate transmit descriptors */ - err = ixgbe_setup_all_tx_resources(adapter); - if (err) - goto err_setup_tx; -- -+ - /* allocate receive descriptors */ - err = ixgbe_setup_all_rx_resources(adapter); - if (err) - goto err_setup_rx; -- -+ - ixgbe_configure(adapter); -- -- err = ixgbe_request_irq(adapter); -+ err = ixgbe_request_irq(adapter); - if (err) - goto err_req_irq; -- -+ - /* Notify the stack of the actual queue counts. 
*/ - if (adapter->num_rx_pools > 1) - queues = adapter->num_rx_queues_per_pool; -@@ -6791,6 +6826,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) - case IXGBE_LINK_SPEED_100_FULL: - speed_str = "100 Mbps"; - break; -+ case IXGBE_LINK_SPEED_10_FULL: -+ speed_str = "10 Mbps"; -+ break; - default: - speed_str = "unknown speed"; - break; -@@ -8013,6 +8051,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) - return ixgbe_ptp_set_ts_config(adapter, req); - case SIOCGHWTSTAMP: - return ixgbe_ptp_get_ts_config(adapter, req); -+ case SIOCGMIIPHY: -+ if (!adapter->hw.phy.ops.read_reg) -+ return -EOPNOTSUPP; -+ /* fall through */ - default: - return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); - } -@@ -9480,6 +9522,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) - hw->mac.ops = *ii->mac_ops; - hw->mac.type = ii->mac; - hw->mvals = ii->mvals; -+ if (ii->link_ops) -+ hw->link.ops = *ii->link_ops; - - /* EEPROM */ - hw->eeprom.ops = *ii->eeprom_ops; -@@ -9777,8 +9821,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) - * since os does not support feature - */ - if (hw->mac.ops.set_fw_drv_ver) -- hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, -- 0xFF); -+ hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, -+ sizeof(ixgbe_driver_version) - 1, -+ ixgbe_driver_version); - - /* add san mac addr to netdev */ - ixgbe_add_sanmac_netdev(netdev); -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c -index b17464e..d914b40 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c -@@ -109,8 +109,8 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) - * - * Returns an error code on error. 
- */ --static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, -- u16 reg, u16 *val, bool lock) -+s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, -+ u16 reg, u16 *val, bool lock) - { - u32 swfw_mask = hw->phy.phy_semaphore_mask; - int max_retry = 3; -@@ -178,36 +178,6 @@ static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, - } - - /** -- * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation -- * @hw: pointer to the hardware structure -- * @addr: I2C bus address to read from -- * @reg: I2C device register to read from -- * @val: pointer to location to receive read value -- * -- * Returns an error code on error. -- */ --s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, -- u16 reg, u16 *val) --{ -- return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); --} -- --/** -- * ixgbe_read_i2c_combined_generic_unlocked - Unlocked I2C read combined -- * @hw: pointer to the hardware structure -- * @addr: I2C bus address to read from -- * @reg: I2C device register to read from -- * @val: pointer to location to receive read value -- * -- * Returns an error code on error. -- */ --s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, -- u16 reg, u16 *val) --{ -- return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); --} -- --/** - * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to write to -@@ -217,8 +187,8 @@ s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, - * - * Returns an error code on error. 
- */ --static s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, -- u16 reg, u16 val, bool lock) -+s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, -+ u16 reg, u16 val, bool lock) - { - u32 swfw_mask = hw->phy.phy_semaphore_mask; - int max_retry = 1; -@@ -273,33 +243,41 @@ static s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, - } - - /** -- * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation -- * @hw: pointer to the hardware structure -- * @addr: I2C bus address to write to -- * @reg: I2C device register to write to -- * @val: value to write -+ * ixgbe_probe_phy - Probe a single address for a PHY -+ * @hw: pointer to hardware structure -+ * @phy_addr: PHY address to probe - * -- * Returns an error code on error. -- */ --s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, -- u8 addr, u16 reg, u16 val) -+ * Returns true if PHY found -+ **/ -+static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr) - { -- return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); --} -+ u16 ext_ability = 0; - --/** -- * ixgbe_write_i2c_combined_generic_unlocked - Unlocked I2C write combined -- * @hw: pointer to the hardware structure -- * @addr: I2C bus address to write to -- * @reg: I2C device register to write to -- * @val: value to write -- * -- * Returns an error code on error. 
-- */ --s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, -- u8 addr, u16 reg, u16 val) --{ -- return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); -+ hw->phy.mdio.prtad = phy_addr; -+ if (mdio45_probe(&hw->phy.mdio, phy_addr) != 0) { -+ return false; -+ } -+ -+ if (ixgbe_get_phy_id(hw)) { -+ return false; -+ } -+ -+ hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); -+ -+ if (hw->phy.type == ixgbe_phy_unknown) { -+ hw->phy.ops.read_reg(hw, -+ MDIO_PMA_EXTABLE, -+ MDIO_MMD_PMAPMD, -+ &ext_ability); -+ if (ext_ability & -+ (MDIO_PMA_EXTABLE_10GBT | -+ MDIO_PMA_EXTABLE_1000BT)) -+ hw->phy.type = ixgbe_phy_cu_unknown; -+ else -+ hw->phy.type = ixgbe_phy_generic; -+ } -+ -+ return true; - } - - /** -@@ -311,7 +289,7 @@ s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, - s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) - { - u32 phy_addr; -- u16 ext_ability = 0; -+ u32 status = IXGBE_ERR_PHY_ADDR_INVALID; - - if (!hw->phy.phy_semaphore_mask) { - if (hw->bus.lan_id) -@@ -320,37 +298,34 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) - hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; - } - -- if (hw->phy.type == ixgbe_phy_unknown) { -- for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { -- hw->phy.mdio.prtad = phy_addr; -- if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) { -- ixgbe_get_phy_id(hw); -- hw->phy.type = -- ixgbe_get_phy_type_from_id(hw->phy.id); -- -- if (hw->phy.type == ixgbe_phy_unknown) { -- hw->phy.ops.read_reg(hw, -- MDIO_PMA_EXTABLE, -- MDIO_MMD_PMAPMD, -- &ext_ability); -- if (ext_ability & -- (MDIO_PMA_EXTABLE_10GBT | -- MDIO_PMA_EXTABLE_1000BT)) -- hw->phy.type = -- ixgbe_phy_cu_unknown; -- else -- hw->phy.type = -- ixgbe_phy_generic; -- } -+ if (hw->phy.type != ixgbe_phy_unknown) -+ return 0; - -- return 0; -- } -+ if (hw->phy.nw_mng_if_sel) { -+ phy_addr = (hw->phy.nw_mng_if_sel & -+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> -+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; -+ if 
(ixgbe_probe_phy(hw, phy_addr)) -+ return 0; -+ else -+ return IXGBE_ERR_PHY_ADDR_INVALID; -+ } -+ -+ for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { -+ if (ixgbe_probe_phy(hw, phy_addr)) { -+ status = 0; -+ break; - } -- /* indicate no PHY found */ -- hw->phy.mdio.prtad = MDIO_PRTAD_NONE; -- return IXGBE_ERR_PHY_ADDR_INVALID; - } -- return 0; -+ -+ /* Certain media types do not have a phy so an address will not -+ * be found and the code will take this path. Caller has to -+ * decide if it is an error or not. -+ */ -+ if (status) -+ hw->phy.mdio.prtad = MDIO_PRTAD_NONE; -+ -+ return status; - } - - /** -@@ -416,7 +391,8 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) - case TN1010_PHY_ID: - phy_type = ixgbe_phy_tn; - break; -- case X550_PHY_ID: -+ case X550_PHY_ID2: -+ case X550_PHY_ID3: - case X540_PHY_ID: - phy_type = ixgbe_phy_aq; - break; -@@ -427,6 +403,7 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) - phy_type = ixgbe_phy_nl; - break; - case X557_PHY_ID: -+ case X557_PHY_ID2: - phy_type = ixgbe_phy_x550em_ext_t; - break; - default: -@@ -477,11 +454,27 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) - */ - for (i = 0; i < 30; i++) { - msleep(100); -- hw->phy.ops.read_reg(hw, MDIO_CTRL1, -- MDIO_MMD_PHYXS, &ctrl); -- if (!(ctrl & MDIO_CTRL1_RESET)) { -- udelay(2); -- break; -+ if (hw->phy.type == ixgbe_phy_x550em_ext_t) { -+ status = hw->phy.ops.read_reg(hw, -+ IXGBE_MDIO_TX_VENDOR_ALARMS_3, -+ MDIO_MMD_PMAPMD, &ctrl); -+ if (status) -+ return status; -+ -+ if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { -+ udelay(2); -+ break; -+ } -+ } else { -+ status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, -+ MDIO_MMD_PHYXS, &ctrl); -+ if (status) -+ return status; -+ -+ if (!(ctrl & MDIO_CTRL1_RESET)) { -+ udelay(2); -+ break; -+ } - } - } - -@@ -494,6 +487,98 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) - } - - /** -+ * ixgbe_read_phy_mdio - Reads a value from a specified PHY register without -+ * 
the SWFW lock. This Clasue 22 API is patched by Hilbert -+ * @hw: pointer to hardware structure -+ * @reg_addr: 32 bit address of PHY register to read -+ * @phy_data: Pointer to read data from PHY register -+ **/ -+s32 ixgbe_read_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, -+ u16 *phy_data) -+{ -+ u32 i, data, command; -+ -+ /* Setup and write the read command */ -+ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | -+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | -+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC | -+ IXGBE_MSCA_MDI_COMMAND; -+ -+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); -+ -+ /* Check every 10 usec to see if the address cycle completed. -+ * The MDI Command bit will clear when the operation is -+ * complete -+ */ -+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { -+ udelay(10); -+ -+ command = IXGBE_READ_REG(hw, IXGBE_MSCA); -+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) -+ break; -+ } -+ -+ -+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { -+ hw_dbg(hw, "PHY address command did not complete.\n"); -+ return IXGBE_ERR_PHY; -+ } -+ -+ /* Read operation is complete. Get the data -+ * from MSRWD -+ */ -+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD); -+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT; -+ *phy_data = (u16)(data); -+ -+ return 0; -+} -+ -+/** -+ * ixgbe_write_phy_reg_mdio - Writes a value to specified PHY register -+ * without SWFW lock. 
This Clause 22 API is patched by Hilbert -+ * @hw: pointer to hardware structure -+ * @reg_addr: 32 bit PHY register to write -+ * @device_type: 5 bit device type -+ * @phy_data: Data to write to the PHY register -+ **/ -+s32 ixgbe_write_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, -+ u32 device_type, u16 phy_data) -+{ -+ u32 i, command; -+ -+ /* Put the data in the MDI single read and write data register*/ -+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); -+ -+ /* Setup and write the write command */ -+ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | -+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | -+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | -+ IXGBE_MSCA_MDI_COMMAND; -+ -+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); -+ -+ /* -+ * Check every 10 usec to see if the address cycle completed. -+ * The MDI Command bit will clear when the operation is -+ * complete -+ */ -+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { -+ udelay(10); -+ -+ command = IXGBE_READ_REG(hw, IXGBE_MSCA); -+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) -+ break; -+ } -+ -+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { -+ hw_dbg(hw, "PHY write cmd didn't complete\n"); -+ return IXGBE_ERR_PHY; -+ } -+ -+ return 0; -+} -+/** - * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without - * the SWFW lock - * @hw: pointer to hardware structure -@@ -705,53 +790,52 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) - - ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); - -- if (speed & IXGBE_LINK_SPEED_10GB_FULL) { -- /* Set or unset auto-negotiation 10G advertisement */ -- hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, -- MDIO_MMD_AN, -- &autoneg_reg); -+ /* Set or unset auto-negotiation 10G advertisement */ -+ hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg); - -- autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; -- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) -- autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; -+ 
autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; -+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) && -+ (speed & IXGBE_LINK_SPEED_10GB_FULL)) -+ autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; - -- hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, -- MDIO_MMD_AN, -- autoneg_reg); -- } -- -- if (speed & IXGBE_LINK_SPEED_1GB_FULL) { -- /* Set or unset auto-negotiation 1G advertisement */ -- hw->phy.ops.read_reg(hw, -- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, -- MDIO_MMD_AN, -- &autoneg_reg); -+ hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg); - -- autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; -- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) -- autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; -+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, -+ MDIO_MMD_AN, &autoneg_reg); - -- hw->phy.ops.write_reg(hw, -- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, -- MDIO_MMD_AN, -- autoneg_reg); -+ if (hw->mac.type == ixgbe_mac_X550) { -+ /* Set or unset auto-negotiation 5G advertisement */ -+ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE; -+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) && -+ (speed & IXGBE_LINK_SPEED_5GB_FULL)) -+ autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE; -+ -+ /* Set or unset auto-negotiation 2.5G advertisement */ -+ autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE; -+ if ((hw->phy.autoneg_advertised & -+ IXGBE_LINK_SPEED_2_5GB_FULL) && -+ (speed & IXGBE_LINK_SPEED_2_5GB_FULL)) -+ autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE; - } - -- if (speed & IXGBE_LINK_SPEED_100_FULL) { -- /* Set or unset auto-negotiation 100M advertisement */ -- hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, -- MDIO_MMD_AN, -- &autoneg_reg); -+ /* Set or unset auto-negotiation 1G advertisement */ -+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; -+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) && -+ (speed & IXGBE_LINK_SPEED_1GB_FULL)) -+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; - -- autoneg_reg &= 
~(ADVERTISE_100FULL | -- ADVERTISE_100HALF); -- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) -- autoneg_reg |= ADVERTISE_100FULL; -+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, -+ MDIO_MMD_AN, autoneg_reg); - -- hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, -- MDIO_MMD_AN, -- autoneg_reg); -- } -+ /* Set or unset auto-negotiation 100M advertisement */ -+ hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg); -+ -+ autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF); -+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) && -+ (speed & IXGBE_LINK_SPEED_100_FULL)) -+ autoneg_reg |= ADVERTISE_100FULL; -+ -+ hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg); - - /* Blocked by MNG FW so don't reset PHY */ - if (ixgbe_check_reset_blocked(hw)) -@@ -778,9 +862,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) - { -- -- /* -- * Clear autoneg_advertised and set new values based on input link -+ /* Clear autoneg_advertised and set new values based on input link - * speed. 
- */ - hw->phy.autoneg_advertised = 0; -@@ -788,14 +870,24 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; - -+ if (speed & IXGBE_LINK_SPEED_5GB_FULL) -+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL; -+ -+ if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) -+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; -+ - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; - - if (speed & IXGBE_LINK_SPEED_100_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; - -+ if (speed & IXGBE_LINK_SPEED_10_FULL) -+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL; -+ - /* Setup link based on the new speed settings */ -- hw->phy.ops.setup_link(hw); -+ if (hw->phy.ops.setup_link) -+ hw->phy.ops.setup_link(hw); - - return 0; - } -@@ -830,6 +922,7 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) - hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; - break; - case ixgbe_mac_X550EM_x: -+ case ixgbe_mac_x550em_a: - hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; - break; - default: -@@ -986,40 +1079,6 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) - } - - /** -- * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version -- * @hw: pointer to hardware structure -- * @firmware_version: pointer to the PHY Firmware Version -- **/ --s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, -- u16 *firmware_version) --{ -- s32 status; -- -- status = hw->phy.ops.read_reg(hw, TNX_FW_REV, -- MDIO_MMD_VEND1, -- firmware_version); -- -- return status; --} -- --/** -- * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version -- * @hw: pointer to hardware structure -- * @firmware_version: pointer to the PHY Firmware Version -- **/ --s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, -- u16 *firmware_version) --{ -- s32 status; -- -- status 
= hw->phy.ops.read_reg(hw, AQ_FW_REV, -- MDIO_MMD_VEND1, -- firmware_version); -- -- return status; --} -- --/** - * ixgbe_reset_phy_nl - Performs a PHY reset - * @hw: pointer to hardware structure - **/ -@@ -2398,9 +2457,7 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) - if (!on && ixgbe_mng_present(hw)) - return 0; - -- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -- ®); -+ status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, ®); - if (status) - return status; - -@@ -2412,8 +2469,6 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) - reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; - } - -- status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -- reg); -+ status = hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, reg); - return status; - } -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h -index cc735ec..e9f94ee 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h -@@ -84,8 +84,9 @@ - #define IXGBE_CS4227_GLOBAL_ID_LSB 0 - #define IXGBE_CS4227_GLOBAL_ID_MSB 1 - #define IXGBE_CS4227_SCRATCH 2 --#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */ --#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */ -+#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F -+#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */ -+#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */ - #define IXGBE_CS4227_RESET_PENDING 0x1357 - #define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 - #define IXGBE_CS4227_RETRIES 15 -@@ -154,6 +155,12 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data); - s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data); -+#if 1 //by hilbert -+s32 ixgbe_read_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, -+ u32 device_type, u16 
*phy_data); -+s32 ixgbe_write_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, -+ u32 device_type, u16 phy_data); -+#endif - s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); - s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, - ixgbe_link_speed speed, -@@ -168,10 +175,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up); - s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); --s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, -- u16 *firmware_version); --s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, -- u16 *firmware_version); - - s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); - s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); -@@ -195,12 +198,8 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 *sff8472_data); - s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 eeprom_data); --s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, -- u16 reg, u16 *val); --s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, -- u16 reg, u16 *val); --s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, -- u16 reg, u16 val); --s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, -- u16 reg, u16 val); -+s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, -+ u16 *val, bool lock); -+s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, -+ u16 val, bool lock); - #endif /* _IXGBE_PHY_H_ */ -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h -index 31d82e3..531990b 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h -@@ -85,6 +85,7 @@ - #define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC - #define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD - #define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE -+#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0 - 
#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 - #define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 - #define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 -@@ -92,6 +93,8 @@ - #define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 - #define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 - #define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE -+#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 -+#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 - - /* VF Device IDs */ - #define IXGBE_DEV_ID_82599_VF 0x10ED -@@ -1393,8 +1396,10 @@ struct ixgbe_thermal_sensor_data { - #define TN1010_PHY_ID 0x00A19410 - #define TNX_FW_REV 0xB - #define X540_PHY_ID 0x01540200 --#define X550_PHY_ID 0x01540220 -+#define X550_PHY_ID2 0x01540223 -+#define X550_PHY_ID3 0x01540221 - #define X557_PHY_ID 0x01540240 -+#define X557_PHY_ID2 0x01540250 - #define QT2022_PHY_ID 0x0043A400 - #define ATH_PHY_ID 0x03429050 - #define AQ_FW_REV 0x20 -@@ -1513,6 +1518,8 @@ enum { - #define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) - - /* VMOLR bitmasks */ -+#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */ -+#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */ - #define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ - #define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ - #define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ -@@ -1928,6 +1935,7 @@ enum { - #define IXGBE_LINKS_SPEED_10G_82599 0x30000000 - #define IXGBE_LINKS_SPEED_1G_82599 0x20000000 - #define IXGBE_LINKS_SPEED_100_82599 0x10000000 -+#define IXGBE_LINKS_SPEED_10_X550EM_A 0 - #define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ - #define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ - -@@ -2633,6 +2641,7 @@ enum ixgbe_fdir_pballoc_type { - #define FW_CEM_UNUSED_VER 0x0 - #define FW_CEM_MAX_RETRIES 3 - #define FW_CEM_RESP_STATUS_SUCCESS 0x1 -+#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */ - #define FW_READ_SHADOW_RAM_CMD 0x31 - #define FW_READ_SHADOW_RAM_LEN 0x6 - #define FW_WRITE_SHADOW_RAM_CMD 0x33 -@@ -2658,6 +2667,59 @@ enum 
ixgbe_fdir_pballoc_type { - #define FW_INT_PHY_REQ_LEN 10 - #define FW_INT_PHY_REQ_READ 0 - #define FW_INT_PHY_REQ_WRITE 1 -+#define FW_PHY_ACT_REQ_CMD 5 -+#define FW_PHY_ACT_DATA_COUNT 4 -+#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT) -+#define FW_PHY_ACT_INIT_PHY 1 -+#define FW_PHY_ACT_SETUP_LINK 2 -+#define FW_PHY_ACT_LINK_SPEED_10 BIT(0) -+#define FW_PHY_ACT_LINK_SPEED_100 BIT(1) -+#define FW_PHY_ACT_LINK_SPEED_1G BIT(2) -+#define FW_PHY_ACT_LINK_SPEED_2_5G BIT(3) -+#define FW_PHY_ACT_LINK_SPEED_5G BIT(4) -+#define FW_PHY_ACT_LINK_SPEED_10G BIT(5) -+#define FW_PHY_ACT_LINK_SPEED_20G BIT(6) -+#define FW_PHY_ACT_LINK_SPEED_25G BIT(7) -+#define FW_PHY_ACT_LINK_SPEED_40G BIT(8) -+#define FW_PHY_ACT_LINK_SPEED_50G BIT(9) -+#define FW_PHY_ACT_LINK_SPEED_100G BIT(10) -+#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16 -+#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3 << \ -+ HW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) -+#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u -+#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u -+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u -+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u -+#define FW_PHY_ACT_SETUP_LINK_LP BIT(18) -+#define FW_PHY_ACT_SETUP_LINK_HP BIT(19) -+#define FW_PHY_ACT_SETUP_LINK_EEE BIT(20) -+#define FW_PHY_ACT_SETUP_LINK_AN BIT(22) -+#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN BIT(0) -+#define FW_PHY_ACT_GET_LINK_INFO 3 -+#define FW_PHY_ACT_GET_LINK_INFO_EEE BIT(19) -+#define FW_PHY_ACT_GET_LINK_INFO_FC_TX BIT(20) -+#define FW_PHY_ACT_GET_LINK_INFO_FC_RX BIT(21) -+#define FW_PHY_ACT_GET_LINK_INFO_POWER BIT(22) -+#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE BIT(24) -+#define FW_PHY_ACT_GET_LINK_INFO_TEMP BIT(25) -+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX BIT(28) -+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX BIT(29) -+#define FW_PHY_ACT_FORCE_LINK_DOWN 4 -+#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF BIT(0) -+#define FW_PHY_ACT_PHY_SW_RESET 5 -+#define FW_PHY_ACT_PHY_HW_RESET 6 -+#define FW_PHY_ACT_GET_PHY_INFO 7 -+#define FW_PHY_ACT_UD_2 0x1002 
-+#define FW_PHY_ACT_UD_2_10G_KR_EEE BIT(6) -+#define FW_PHY_ACT_UD_2_10G_KX4_EEE BIT(5) -+#define FW_PHY_ACT_UD_2_1G_KX_EEE BIT(4) -+#define FW_PHY_ACT_UD_2_10G_T_EEE BIT(3) -+#define FW_PHY_ACT_UD_2_1G_T_EEE BIT(2) -+#define FW_PHY_ACT_UD_2_100M_TX_EEE BIT(1) -+#define FW_PHY_ACT_RETRIES 50 -+#define FW_PHY_INFO_SPEED_MASK 0xFFFu -+#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u -+#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu - - /* Host Interface Command Structures */ - struct ixgbe_hic_hdr { -@@ -2700,6 +2762,16 @@ struct ixgbe_hic_drv_info { - u16 pad2; /* end spacing to ensure length is mult. of dword2 */ - }; - -+struct ixgbe_hic_drv_info2 { -+ struct ixgbe_hic_hdr hdr; -+ u8 port_num; -+ u8 ver_sub; -+ u8 ver_build; -+ u8 ver_min; -+ u8 ver_maj; -+ char driver_string[FW_CEM_DRIVER_VERSION_SIZE]; -+}; -+ - /* These need to be dword aligned */ - struct ixgbe_hic_read_shadow_ram { - union ixgbe_hic_hdr2 hdr; -@@ -2748,6 +2820,19 @@ struct ixgbe_hic_internal_phy_resp { - __be32 read_data; - }; - -+struct ixgbe_hic_phy_activity_req { -+ struct ixgbe_hic_hdr hdr; -+ u8 port_number; -+ u8 pad; -+ __le16 activity_id; -+ __be32 data[FW_PHY_ACT_DATA_COUNT]; -+}; -+ -+struct ixgbe_hic_phy_activity_resp { -+ struct ixgbe_hic_hdr hdr; -+ __be32 data[FW_PHY_ACT_DATA_COUNT]; -+}; -+ - /* Transmit Descriptor - Advanced */ - union ixgbe_adv_tx_desc { - struct { -@@ -2863,6 +2948,7 @@ typedef u32 ixgbe_autoneg_advertised; - /* Link speed */ - typedef u32 ixgbe_link_speed; - #define IXGBE_LINK_SPEED_UNKNOWN 0 -+#define IXGBE_LINK_SPEED_10_FULL 0x0002 - #define IXGBE_LINK_SPEED_100_FULL 0x0008 - #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 - #define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 -@@ -3059,7 +3145,9 @@ enum ixgbe_phy_type { - ixgbe_phy_aq, - ixgbe_phy_x550em_kr, - ixgbe_phy_x550em_kx4, -+ ixgbe_phy_x550em_xfi, - ixgbe_phy_x550em_ext_t, -+ ixgbe_phy_ext_1g_t, - ixgbe_phy_cu_unknown, - ixgbe_phy_qt, - ixgbe_phy_xaui, -@@ -3078,6 +3166,7 @@ enum ixgbe_phy_type { - ixgbe_phy_qsfp_unknown, 
- ixgbe_phy_sfp_unsupported, - ixgbe_phy_sgmii, -+ ixgbe_phy_fw, - ixgbe_phy_generic - }; - -@@ -3352,6 +3441,7 @@ struct ixgbe_mac_operations { - s32 (*led_off)(struct ixgbe_hw *, u32); - s32 (*blink_led_start)(struct ixgbe_hw *, u32); - s32 (*blink_led_stop)(struct ixgbe_hw *, u32); -+ s32 (*init_led_link_act)(struct ixgbe_hw *); - - /* RAR, Multicast, VLAN */ - s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); -@@ -3372,9 +3462,11 @@ struct ixgbe_mac_operations { - /* Flow Control */ - s32 (*fc_enable)(struct ixgbe_hw *); - s32 (*setup_fc)(struct ixgbe_hw *); -+ void (*fc_autoneg)(struct ixgbe_hw *); - - /* Manageability interface */ -- s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); -+ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16, -+ const char *); - s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); - s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); - void (*disable_rx)(struct ixgbe_hw *hw); -@@ -3416,10 +3508,24 @@ struct ixgbe_phy_operations { - s32 (*set_phy_power)(struct ixgbe_hw *, bool on); - s32 (*enter_lplu)(struct ixgbe_hw *); - s32 (*handle_lasi)(struct ixgbe_hw *hw); -- s32 (*read_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, -- u16 *value); -- s32 (*write_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, -- u16 value); -+ s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, -+ u8 *value); -+ s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, -+ u8 value); -+}; -+ -+struct ixgbe_link_operations { -+ s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); -+ s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, -+ u16 *val); -+ s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); -+ s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, -+ u16 val); -+}; -+ -+struct ixgbe_link_info { -+ struct ixgbe_link_operations ops; -+ u8 addr; - }; - - struct ixgbe_eeprom_info { -@@ -3462,6 +3568,7 @@ struct ixgbe_mac_info { - 
u8 san_mac_rar_index; - struct ixgbe_thermal_sensor_data thermal_sensor_data; - bool set_lben; -+ u8 led_link_act; - }; - - struct ixgbe_phy_info { -@@ -3477,6 +3584,8 @@ struct ixgbe_phy_info { - bool reset_disable; - ixgbe_autoneg_advertised autoneg_advertised; - ixgbe_link_speed speeds_supported; -+ ixgbe_link_speed eee_speeds_supported; -+ ixgbe_link_speed eee_speeds_advertised; - enum ixgbe_smart_speed smart_speed; - bool smart_speed_active; - bool multispeed_fiber; -@@ -3523,6 +3632,7 @@ struct ixgbe_hw { - struct ixgbe_addr_filter_info addr_ctrl; - struct ixgbe_fc_info fc; - struct ixgbe_phy_info phy; -+ struct ixgbe_link_info link; - struct ixgbe_eeprom_info eeprom; - struct ixgbe_bus_info bus; - struct ixgbe_mbx_info mbx; -@@ -3546,6 +3656,7 @@ struct ixgbe_info { - const struct ixgbe_eeprom_operations *eeprom_ops; - const struct ixgbe_phy_operations *phy_ops; - const struct ixgbe_mbx_operations *mbx_ops; -+ const struct ixgbe_link_operations *link_ops; - const u32 *mvals; - }; - -@@ -3593,17 +3704,35 @@ struct ixgbe_info { - #define IXGBE_FUSES0_REV_MASK (3u << 6) - - #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) -+#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200) - #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) - #define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) - #define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248) - #define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0) -+#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C) - #define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) - #define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) - #define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00) - #define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054) - #define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) - #define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 
0x9A00 : 0x5A00) - -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR (0x2 << 20) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN BIT(25) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN BIT(26) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN BIT(27) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10M ~(0x7 << 28) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M BIT(28) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G (0x2 << 28) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G (0x3 << 28) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN (0x4 << 28) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G (0x7 << 28) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK (0x7 << 28) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART BIT(31) -+ - #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9) - #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11) - -@@ -3618,6 +3747,7 @@ struct ixgbe_info { - #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18) - #define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24) - #define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26) -+#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE BIT(28) - #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29) - #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART BIT(31) - -@@ -3627,6 +3757,8 @@ struct ixgbe_info { - #define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0) - #define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1) - -+#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE BIT(10) -+#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE BIT(11) - #define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12) - #define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19) - -@@ -3675,8 +3807,13 @@ struct ixgbe_info { - - #define IXGBE_NW_MNG_IF_SEL 0x00011178 - #define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1) --#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23) --#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) -+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M BIT(17) -+#define 
IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M BIT(18) -+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G BIT(19) -+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G BIT(20) -+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G BIT(21) -+#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE BIT(25) -+#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) /* X552 only */ - #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 - #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ - (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c -index f2b1d48..6ea0d6a 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c -@@ -95,6 +95,7 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) - { - s32 status; - u32 ctrl, i; -+ u32 swfw_mask = hw->phy.phy_semaphore_mask; - - /* Call adapter stop to disable tx/rx and clear interrupts */ - status = hw->mac.ops.stop_adapter(hw); -@@ -105,10 +106,17 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) - ixgbe_clear_tx_pending(hw); - - mac_reset_top: -+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); -+ if (status) { -+ hw_dbg(hw, "semaphore failed with %d", status); -+ return IXGBE_ERR_SWFW_SYNC; -+ } -+ - ctrl = IXGBE_CTRL_RST; - ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); - IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); - IXGBE_WRITE_FLUSH(hw); -+ hw->mac.ops.release_swfw_sync(hw, swfw_mask); - usleep_range(1000, 1200); - - /* Poll for reset bit to self-clear indicating reset is complete */ -@@ -780,8 +788,10 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) - ixgbe_link_speed speed; - bool link_up; - -- /* -- * Link should be up in order for the blink bit in the LED control -+ if (index > 3) -+ return IXGBE_ERR_PARAM; -+ -+ /* Link should be up in order for the blink bit in the LED control - * register to work. Force link and speed in the MAC if link is down. - * This will be reversed when we stop the blinking. 
- */ -@@ -814,6 +824,9 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) - u32 macc_reg; - u32 ledctl_reg; - -+ if (index > 3) -+ return IXGBE_ERR_PARAM; -+ - /* Restore the LED to its default value. */ - ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); -@@ -851,6 +864,7 @@ static const struct ixgbe_mac_operations mac_ops_X540 = { - .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, - .led_on = &ixgbe_led_on_generic, - .led_off = &ixgbe_led_off_generic, -+ .init_led_link_act = ixgbe_init_led_link_act_generic, - .blink_led_start = &ixgbe_blink_led_start_X540, - .blink_led_stop = &ixgbe_blink_led_stop_X540, - .set_rar = &ixgbe_set_rar_generic, -@@ -866,6 +880,7 @@ static const struct ixgbe_mac_operations mac_ops_X540 = { - .set_vfta = &ixgbe_set_vfta_generic, - .fc_enable = &ixgbe_fc_enable_generic, - .setup_fc = ixgbe_setup_fc_generic, -+ .fc_autoneg = ixgbe_fc_autoneg, - .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, - .init_uta_tables = &ixgbe_init_uta_tables_generic, - .setup_sfp = NULL, -@@ -911,7 +926,6 @@ static const struct ixgbe_phy_operations phy_ops_X540 = { - .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, - .check_overtemp = &ixgbe_tn_check_overtemp, - .set_phy_power = &ixgbe_set_copper_phy_power, -- .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, - }; - - static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c -index 77a60aa..3236248 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c -@@ -28,11 +28,15 @@ - - static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed); - static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *); -+static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *); -+static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *); -+static 
s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *); - - static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) - { - struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_phy_info *phy = &hw->phy; -+ struct ixgbe_link_info *link = &hw->link; - - /* Start with X540 invariants, since so simular */ - ixgbe_get_invariants_X540(hw); -@@ -40,6 +44,46 @@ static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) - if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) - phy->ops.set_phy_power = NULL; - -+ link->addr = IXGBE_CS4227; -+ -+ return 0; -+} -+ -+static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw) -+{ -+ struct ixgbe_phy_info *phy = &hw->phy; -+ -+ /* Start with X540 invariants, since so similar */ -+ ixgbe_get_invariants_X540(hw); -+ -+ phy->ops.set_phy_power = NULL; -+ -+ return 0; -+} -+ -+static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) -+{ -+ struct ixgbe_mac_info *mac = &hw->mac; -+ struct ixgbe_phy_info *phy = &hw->phy; -+ -+ /* Start with X540 invariants, since so simular */ -+ ixgbe_get_invariants_X540(hw); -+ -+ if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) -+ phy->ops.set_phy_power = NULL; -+ -+ return 0; -+} -+ -+static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw) -+{ -+ struct ixgbe_phy_info *phy = &hw->phy; -+ -+ /* Start with X540 invariants, since so similar */ -+ ixgbe_get_invariants_X540(hw); -+ -+ phy->ops.set_phy_power = NULL; -+ - return 0; - } - -@@ -69,8 +113,7 @@ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) - */ - static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) - { -- return hw->phy.ops.read_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, -- value); -+ return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value); - } - - /** -@@ -83,8 +126,7 @@ static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) - */ - static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) - { -- return 
hw->phy.ops.write_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, -- value); -+ return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value); - } - - /** -@@ -290,6 +332,9 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) - case IXGBE_DEV_ID_X550EM_X_KX4: - hw->phy.type = ixgbe_phy_x550em_kx4; - break; -+ case IXGBE_DEV_ID_X550EM_X_XFI: -+ hw->phy.type = ixgbe_phy_x550em_xfi; -+ break; - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_A_KR: - case IXGBE_DEV_ID_X550EM_A_KR_L: -@@ -301,9 +346,21 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) - else - hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; - /* Fallthrough */ -- case IXGBE_DEV_ID_X550EM_X_1G_T: - case IXGBE_DEV_ID_X550EM_X_10G_T: - return ixgbe_identify_phy_generic(hw); -+ case IXGBE_DEV_ID_X550EM_X_1G_T: -+ hw->phy.type = ixgbe_phy_ext_1g_t; -+ break; -+ case IXGBE_DEV_ID_X550EM_A_1G_T: -+ case IXGBE_DEV_ID_X550EM_A_1G_T_L: -+ hw->phy.type = ixgbe_phy_fw; -+ hw->phy.ops.read_reg = NULL; -+ hw->phy.ops.write_reg = NULL; -+ if (hw->bus.lan_id) -+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; -+ else -+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; -+ break; - default: - break; - } -@@ -322,6 +379,280 @@ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, - return IXGBE_NOT_IMPLEMENTED; - } - -+/** -+ * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation -+ * @hw: pointer to the hardware structure -+ * @addr: I2C bus address to read from -+ * @reg: I2C device register to read from -+ * @val: pointer to location to receive read value -+ * -+ * Returns an error code on error. 
-+ **/ -+static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, -+ u16 reg, u16 *val) -+{ -+ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); -+} -+ -+/** -+ * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation -+ * @hw: pointer to the hardware structure -+ * @addr: I2C bus address to read from -+ * @reg: I2C device register to read from -+ * @val: pointer to location to receive read value -+ * -+ * Returns an error code on error. -+ **/ -+static s32 -+ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, -+ u16 reg, u16 *val) -+{ -+ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); -+} -+ -+/** -+ * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation -+ * @hw: pointer to the hardware structure -+ * @addr: I2C bus address to write to -+ * @reg: I2C device register to write to -+ * @val: value to write -+ * -+ * Returns an error code on error. -+ **/ -+static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, -+ u8 addr, u16 reg, u16 val) -+{ -+ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); -+} -+ -+/** -+ * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation -+ * @hw: pointer to the hardware structure -+ * @addr: I2C bus address to write to -+ * @reg: I2C device register to write to -+ * @val: value to write -+ * -+ * Returns an error code on error. 
-+ **/ -+static s32 -+ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, -+ u8 addr, u16 reg, u16 val) -+{ -+ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); -+} -+ -+/** -+ * ixgbe_fw_phy_activity - Perform an activity on a PHY -+ * @hw: pointer to hardware structure -+ * @activity: activity to perform -+ * @data: Pointer to 4 32-bit words of data -+ */ -+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, -+ u32 (*data)[FW_PHY_ACT_DATA_COUNT]) -+{ -+ union { -+ struct ixgbe_hic_phy_activity_req cmd; -+ struct ixgbe_hic_phy_activity_resp rsp; -+ } hic; -+ u16 retries = FW_PHY_ACT_RETRIES; -+ s32 rc; -+ u32 i; -+ -+ do { -+ memset(&hic, 0, sizeof(hic)); -+ hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD; -+ hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN; -+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; -+ hic.cmd.port_number = hw->bus.lan_id; -+ hic.cmd.activity_id = cpu_to_le16(activity); -+ for (i = 0; i < ARRAY_SIZE(hic.cmd.data); ++i) -+ hic.cmd.data[i] = cpu_to_be32((*data)[i]); -+ -+ rc = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), -+ IXGBE_HI_COMMAND_TIMEOUT, -+ true); -+ if (rc) -+ return rc; -+ if (hic.rsp.hdr.cmd_or_resp.ret_status == -+ FW_CEM_RESP_STATUS_SUCCESS) { -+ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) -+ (*data)[i] = be32_to_cpu(hic.rsp.data[i]); -+ return 0; -+ } -+ usleep_range(20, 30); -+ --retries; -+ } while (retries > 0); -+ -+ return IXGBE_ERR_HOST_INTERFACE_COMMAND; -+} -+ -+static const struct { -+ u16 fw_speed; -+ ixgbe_link_speed phy_speed; -+} ixgbe_fw_map[] = { -+ { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL }, -+ { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL }, -+ { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL }, -+ { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL }, -+ { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL }, -+ { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL }, -+}; -+ -+/** -+ * ixgbe_get_phy_id_fw - Get the phy ID via firmware 
command -+ * @hw: pointer to hardware structure -+ * -+ * Returns error code -+ */ -+static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) -+{ -+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; -+ u16 phy_speeds; -+ u16 phy_id_lo; -+ s32 rc; -+ u16 i; -+ -+ if (hw->phy.id) -+ return 0; -+ -+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info); -+ if (rc) -+ return rc; -+ -+ hw->phy.speeds_supported = 0; -+ phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK; -+ for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { -+ if (phy_speeds & ixgbe_fw_map[i].fw_speed) -+ hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed; -+ } -+ -+ hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK; -+ phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK; -+ hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK; -+ hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; -+ if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) -+ return IXGBE_ERR_PHY_ADDR_INVALID; -+ -+ hw->phy.autoneg_advertised = hw->phy.speeds_supported; -+ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | -+ IXGBE_LINK_SPEED_1GB_FULL; -+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; -+ return 0; -+} -+ -+static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, -+ u32 device_type, u16 *phy_data); -+/** -+ * ixgbe_identify_phy_fw - Get PHY type based on firmware command -+ * @hw: pointer to hardware structure -+ * -+ * Returns error code -+ */ -+static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) -+{ -+ s32 rc; -+ u16 value=0; -+ -+ if (hw->bus.lan_id) -+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; -+ else -+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; -+ -+#if 0 /* Try also to get PHY ID through MDIO by using C22 in read_reg op. -+ * By hilbert -+ */ -+ rc = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, &value); -+ hw_err(hw, "####rc:%x, PHY ID-1:%x\n", rc, value); -+#endif -+ -+ hw->phy.type = ixgbe_phy_fw; -+#if 0 /* We still need read/write ops later, don't NULL it. 
By hilbert */ -+ hw->phy.ops.read_reg = NULL; -+ hw->phy.ops.write_reg = NULL; -+#endif -+ return ixgbe_get_phy_id_fw(hw); -+} -+ -+/** -+ * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY -+ * @hw: pointer to hardware structure -+ * -+ * Returns error code -+ */ -+static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) -+{ -+ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; -+ -+ setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF; -+ return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup); -+} -+ -+/** -+ * ixgbe_setup_fw_link - Setup firmware-controlled PHYs -+ * @hw: pointer to hardware structure -+ */ -+static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) -+{ -+ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; -+ s32 rc; -+ u16 i; -+ -+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) -+ return 0; -+ -+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { -+ hw_err(hw, "rx_pause not valid in strict IEEE mode\n"); -+ return IXGBE_ERR_INVALID_LINK_SETTINGS; -+ } -+ -+ switch (hw->fc.requested_mode) { -+ case ixgbe_fc_full: -+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX << -+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; -+ break; -+ case ixgbe_fc_rx_pause: -+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX << -+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; -+ break; -+ case ixgbe_fc_tx_pause: -+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX << -+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; -+ break; -+ default: -+ break; -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { -+ if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed) -+ setup[0] |= ixgbe_fw_map[i].fw_speed; -+ } -+ setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN; -+ -+ if (hw->phy.eee_speeds_advertised) -+ setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE; -+ -+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup); -+ if (rc) -+ return rc; -+ if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN) -+ return IXGBE_ERR_OVERTEMP; -+ return 0; -+} -+ -+/** -+ * ixgbe_fc_autoneg_fw - Set up flow 
control for FW-controlled PHYs -+ * @hw: pointer to hardware structure -+ * -+ * Called at init time to set up flow control. -+ */ -+static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) -+{ -+ if (hw->fc.requested_mode == ixgbe_fc_default) -+ hw->fc.requested_mode = ixgbe_fc_full; -+ -+ return ixgbe_setup_fw_link(hw); -+} -+ - /** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params - * @hw: pointer to hardware structure - * -@@ -544,41 +875,6 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - return status; - } - --/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface -- * command assuming that the semaphore is already obtained. -- * @hw: pointer to hardware structure -- * @offset: offset of word in the EEPROM to read -- * @data: word read from the EEPROM -- * -- * Reads a 16 bit word from the EEPROM using the hostif. -- **/ --static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, -- u16 *data) --{ -- s32 status; -- struct ixgbe_hic_read_shadow_ram buffer; -- -- buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; -- buffer.hdr.req.buf_lenh = 0; -- buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; -- buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; -- -- /* convert offset from words to bytes */ -- buffer.address = cpu_to_be32(offset * 2); -- /* one word */ -- buffer.length = cpu_to_be16(sizeof(u16)); -- -- status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), -- IXGBE_HI_COMMAND_TIMEOUT, false); -- if (status) -- return status; -- -- *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, -- FW_NVM_DATA_OFFSET); -- -- return 0; --} -- - /** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read -@@ -590,6 +886,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, - static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, - u16 offset, u16 words, u16 *data) 
- { -+ const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; - struct ixgbe_hic_read_shadow_ram buffer; - u32 current_word = 0; - u16 words_to_read; -@@ -597,7 +894,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, - u32 i; - - /* Take semaphore for the entire operation. */ -- status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); -+ status = hw->mac.ops.acquire_swfw_sync(hw, mask); - if (status) { - hw_dbg(hw, "EEPROM read buffer - semaphore failed\n"); - return status; -@@ -620,10 +917,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, - buffer.pad2 = 0; - buffer.pad3 = 0; - -- status = ixgbe_host_interface_command(hw, &buffer, -- sizeof(buffer), -- IXGBE_HI_COMMAND_TIMEOUT, -- false); -+ status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), -+ IXGBE_HI_COMMAND_TIMEOUT); - if (status) { - hw_dbg(hw, "Host interface command failed\n"); - goto out; -@@ -647,7 +942,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, - } - - out: -- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); -+ hw->mac.ops.release_swfw_sync(hw, mask); - return status; - } - -@@ -818,15 +1113,32 @@ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) - **/ - static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) - { -- s32 status = 0; -+ const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; -+ struct ixgbe_hic_read_shadow_ram buffer; -+ s32 status; - -- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { -- status = ixgbe_read_ee_hostif_data_X550(hw, offset, data); -- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); -- } else { -- status = IXGBE_ERR_SWFW_SYNC; -+ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; -+ buffer.hdr.req.buf_lenh = 0; -+ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; -+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; -+ -+ /* convert offset from words to bytes */ -+ buffer.address = cpu_to_be32(offset * 2); -+ /* one word */ -+ 
buffer.length = cpu_to_be16(sizeof(u16)); -+ -+ status = hw->mac.ops.acquire_swfw_sync(hw, mask); -+ if (status) -+ return status; -+ -+ status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), -+ IXGBE_HI_COMMAND_TIMEOUT); -+ if (!status) { -+ *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, -+ FW_NVM_DATA_OFFSET); - } - -+ hw->mac.ops.release_swfw_sync(hw, mask); - return status; - } - -@@ -1130,47 +1442,17 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, - return ret; - } - --/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. -+/** -+ * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration - * @hw: pointer to hardware structure -- * @speed: the link speed to force - * -- * Configures the integrated KR PHY to use iXFI mode. Used to connect an -- * internal and external PHY at a specific speed, without autonegotiation. -+ * iXfI configuration needed for ixgbe_mac_X550EM_x devices. - **/ --static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) -+static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) - { - s32 status; - u32 reg_val; - -- /* Disable AN and force speed to 10G Serial. */ -- status = ixgbe_read_iosf_sb_reg_x550(hw, -- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), -- IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); -- if (status) -- return status; -- -- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; -- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; -- -- /* Select forced link speed for internal PHY. */ -- switch (*speed) { -- case IXGBE_LINK_SPEED_10GB_FULL: -- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; -- break; -- case IXGBE_LINK_SPEED_1GB_FULL: -- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; -- break; -- default: -- /* Other link speeds are not supported by internal KR PHY. 
*/ -- return IXGBE_ERR_LINK_SETUP; -- } -- -- status = ixgbe_write_iosf_sb_reg_x550(hw, -- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), -- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); -- if (status) -- return status; -- - /* Disable training protocol FSM. */ - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), -@@ -1230,20 +1512,111 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); -- if (status) -- return status; -+ return status; -+} - -- /* Toggle port SW reset by AN reset. */ -- status = ixgbe_read_iosf_sb_reg_x550(hw, -+/** -+ * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the -+ * internal PHY -+ * @hw: pointer to hardware structure -+ **/ -+static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) -+{ -+ s32 status; -+ u32 link_ctrl; -+ -+ /* Restart auto-negotiation. 
*/ -+ status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), -- IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl); -+ -+ if (status) { -+ hw_dbg(hw, "Auto-negotiation did not complete\n"); -+ return status; -+ } -+ -+ link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; -+ status = hw->mac.ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl); -+ -+ if (hw->mac.type == ixgbe_mac_x550em_a) { -+ u32 flx_mask_st20; -+ -+ /* Indicate to FW that AN restart has been asserted */ -+ status = hw->mac.ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20); -+ -+ if (status) { -+ hw_dbg(hw, "Auto-negotiation did not complete\n"); -+ return status; -+ } -+ -+ flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART; -+ status = hw->mac.ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20); -+ } -+ -+ return status; -+} -+ -+/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. -+ * @hw: pointer to hardware structure -+ * @speed: the link speed to force -+ * -+ * Configures the integrated KR PHY to use iXFI mode. Used to connect an -+ * internal and external PHY at a specific speed, without autonegotiation. -+ **/ -+static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) -+{ -+ struct ixgbe_mac_info *mac = &hw->mac; -+ s32 status; -+ u32 reg_val; -+ -+ /* iXFI is only supported with X552 */ -+ if (mac->type != ixgbe_mac_X550EM_x) -+ return IXGBE_ERR_LINK_SETUP; -+ -+ /* Disable AN and force speed to 10G Serial. 
*/ -+ status = ixgbe_read_iosf_sb_reg_x550(hw, -+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status) - return status; - -- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; -+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; -+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; -+ -+ /* Select forced link speed for internal PHY. */ -+ switch (*speed) { -+ case IXGBE_LINK_SPEED_10GB_FULL: -+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; -+ break; -+ case IXGBE_LINK_SPEED_1GB_FULL: -+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; -+ break; -+ default: -+ /* Other link speeds are not supported by internal KR PHY. */ -+ return IXGBE_ERR_LINK_SETUP; -+ } -+ - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); -+ if (status) -+ return status; -+ -+ /* Additional configuration needed for x550em_x */ -+ if (hw->mac.type == ixgbe_mac_X550EM_x) { -+ status = ixgbe_setup_ixfi_x550em_x(hw); -+ if (status) -+ return status; -+ } -+ -+ /* Toggle port SW reset by AN reset. */ -+ status = ixgbe_restart_an_internal_phy_x550em(hw); - - return status; - } -@@ -1294,7 +1667,7 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, - __always_unused bool autoneg_wait_to_complete) - { - s32 status; -- u16 slice, value; -+ u16 reg_slice, reg_val; - bool setup_linear = false; - - /* Check if SFP module is supported and linear */ -@@ -1310,71 +1683,68 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, - if (status) - return status; - -- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { -- /* Configure CS4227 LINE side to 10G SR. */ -- slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12); -- value = IXGBE_CS4227_SPEED_10G; -- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, -- slice, value); -- if (status) -- goto i2c_err; -+ /* Configure internal PHY for KR/KX. 
*/ -+ ixgbe_setup_kr_speed_x550em(hw, speed); - -- slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); -- value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; -- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, -- slice, value); -- if (status) -- goto i2c_err; -- -- /* Configure CS4227 for HOST connection rate then type. */ -- slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12); -- value = speed & IXGBE_LINK_SPEED_10GB_FULL ? -- IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G; -- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, -- slice, value); -- if (status) -- goto i2c_err; -+ /* Configure CS4227 LINE side to proper mode. */ -+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); -+ if (setup_linear) -+ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; -+ else -+ reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; - -- slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12); -- if (setup_linear) -- value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; -- else -- value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; -- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, -- slice, value); -- if (status) -- goto i2c_err; -+ status = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, -+ reg_val); - -- /* Setup XFI internal link. */ -- status = ixgbe_setup_ixfi_x550em(hw, &speed); -- if (status) { -- hw_dbg(hw, "setup_ixfi failed with %d\n", status); -- return status; -- } -- } else { -- /* Configure internal PHY for KR/KX. */ -- status = ixgbe_setup_kr_speed_x550em(hw, speed); -- if (status) { -- hw_dbg(hw, "setup_kr_speed failed with %d\n", status); -- return status; -- } -+ return status; -+} - -- /* Configure CS4227 LINE side to proper mode. 
*/ -- slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); -- if (setup_linear) -- value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; -- else -- value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; -- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, -- slice, value); -- if (status) -- goto i2c_err; -+/** -+ * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode -+ * @hw: pointer to hardware structure -+ * @speed: the link speed to force -+ * -+ * Configures the integrated PHY for native SFI mode. Used to connect the -+ * internal PHY directly to an SFP cage, without autonegotiation. -+ **/ -+static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) -+{ -+ struct ixgbe_mac_info *mac = &hw->mac; -+ s32 status; -+ u32 reg_val; -+ -+ /* Disable all AN and force speed to 10G Serial. */ -+ status = mac->ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); -+ if (status) -+ return status; -+ -+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; -+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; -+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; -+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; -+ -+ /* Select forced link speed for internal PHY. */ -+ switch (*speed) { -+ case IXGBE_LINK_SPEED_10GB_FULL: -+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G; -+ break; -+ case IXGBE_LINK_SPEED_1GB_FULL: -+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; -+ break; -+ default: -+ /* Other link speeds are not supported by internal PHY. */ -+ return IXGBE_ERR_LINK_SETUP; - } - -- return 0; -+ status = mac->ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); -+ -+ /* Toggle port SW reset by AN reset. 
*/ -+ status = ixgbe_restart_an_internal_phy_x550em(hw); - --i2c_err: -- hw_dbg(hw, "combined i2c access failed with %d\n", status); - return status; - } - -@@ -1390,45 +1760,39 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed, - { - bool setup_linear = false; - u32 reg_phy_int; -- s32 rc; -+ s32 ret_val; - - /* Check if SFP module is supported and linear */ -- rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); -+ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); - - /* If no SFP module present, then return success. Return success since - * SFP not present error is not excepted in the setup MAC link flow. - */ -- if (rc == IXGBE_ERR_SFP_NOT_PRESENT) -+ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) - return 0; - -- if (!rc) -- return rc; -+ if (ret_val) -+ return ret_val; - -- /* Configure internal PHY for native SFI */ -- rc = hw->mac.ops.read_iosf_sb_reg(hw, -- IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id), -- IXGBE_SB_IOSF_TARGET_KR_PHY, -- ®_phy_int); -- if (rc) -- return rc; -+ /* Configure internal PHY for native SFI based on module type */ -+ ret_val = hw->mac.ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_phy_int); -+ if (ret_val) -+ return ret_val; - -- if (setup_linear) { -- reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING; -- reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR; -- } else { -- reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING; -- reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR; -- } -+ reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA; -+ if (!setup_linear) -+ reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR; - -- rc = hw->mac.ops.write_iosf_sb_reg(hw, -- IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id), -- IXGBE_SB_IOSF_TARGET_KR_PHY, -- reg_phy_int); -- if (rc) -- return rc; -+ ret_val = hw->mac.ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int); -+ if (ret_val) -+ return ret_val; - -- /* Setup XFI/SFI 
internal link */ -- return ixgbe_setup_ixfi_x550em(hw, &speed); -+ /* Setup SFI internal link. */ -+ return ixgbe_setup_sfi_x550a(hw, &speed); - } - - /** -@@ -1444,19 +1808,19 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, - u32 reg_slice, slice_offset; - bool setup_linear = false; - u16 reg_phy_ext; -- s32 rc; -+ s32 ret_val; - - /* Check if SFP module is supported and linear */ -- rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); -+ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); - - /* If no SFP module present, then return success. Return success since - * SFP not present error is not excepted in the setup MAC link flow. - */ -- if (rc == IXGBE_ERR_SFP_NOT_PRESENT) -+ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) - return 0; - -- if (!rc) -- return rc; -+ if (ret_val) -+ return ret_val; - - /* Configure internal PHY for KR/KX. */ - ixgbe_setup_kr_speed_x550em(hw, speed); -@@ -1464,16 +1828,16 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, - if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE) - return IXGBE_ERR_PHY_ADDR_INVALID; - -- /* Get external PHY device id */ -- rc = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB, -- IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); -- if (rc) -- return rc; -+ /* Get external PHY SKU id */ -+ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU, -+ IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); -+ if (ret_val) -+ return ret_val; - - /* When configuring quad port CS4223, the MAC instance is part - * of the slice offset. - */ -- if (reg_phy_ext == IXGBE_CS4223_PHY_ID) -+ if (reg_phy_ext == IXGBE_CS4223_SKU_ID) - slice_offset = (hw->bus.lan_id + - (hw->bus.instance_id << 1)) << 12; - else -@@ -1481,12 +1845,28 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, - - /* Configure CS4227/CS4223 LINE side to proper mode. 
*/ - reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; -+ -+ ret_val = hw->phy.ops.read_reg(hw, reg_slice, -+ IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); -+ if (ret_val) -+ return ret_val; -+ -+ reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) | -+ (IXGBE_CS4227_EDC_MODE_SR << 1)); -+ - if (setup_linear) - reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; - else - reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; -- return hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE, -- reg_phy_ext); -+ -+ ret_val = hw->phy.ops.write_reg(hw, reg_slice, -+ IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext); -+ if (ret_val) -+ return ret_val; -+ -+ /* Flush previous write with a read */ -+ return hw->phy.ops.read_reg(hw, reg_slice, -+ IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); - } - - /** -@@ -1515,8 +1895,10 @@ static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, - else - force_speed = IXGBE_LINK_SPEED_1GB_FULL; - -- /* If internal link mode is XFI, then setup XFI internal link. */ -- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { -+ /* If X552 and internal link mode is XFI, then setup XFI internal link. -+ */ -+ if (hw->mac.type == ixgbe_mac_X550EM_x && -+ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { - status = ixgbe_setup_ixfi_x550em(hw, &force_speed); - - if (status) -@@ -1540,7 +1922,7 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, - bool link_up_wait_to_complete) - { - u32 status; -- u16 autoneg_status; -+ u16 i, autoneg_status; - - if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) - return IXGBE_ERR_CONFIG; -@@ -1552,14 +1934,18 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, - if (status || !(*link_up)) - return status; - -- /* MAC link is up, so check external PHY link. -- * Read this twice back to back to indicate current status. 
-- */ -- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -- &autoneg_status); -- if (status) -- return status; -+ /* MAC link is up, so check external PHY link. -+ * Link status is latching low, and can only be used to detect link -+ * drop, and not the current status of the link without performing -+ * back-to-back reads. -+ */ -+ for (i = 0; i < 2; i++) { -+ status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, -+ &autoneg_status); -+ -+ if (status) -+ return status; -+ } - - /* If external PHY link is not up, then indicate link not up */ - if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) -@@ -1577,7 +1963,7 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, - __always_unused bool autoneg_wait_to_complete) - { - struct ixgbe_mac_info *mac = &hw->mac; -- u32 lval, sval; -+ u32 lval, sval, flx_val; - s32 rc; - - rc = mac->ops.read_iosf_sb_reg(hw, -@@ -1611,12 +1997,183 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, - if (rc) - return rc; - -- lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; -+ rc = mac->ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); -+ if (rc) -+ return rc; -+ -+ rc = mac->ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); -+ if (rc) -+ return rc; -+ -+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; -+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; -+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; -+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; -+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; -+ -+ rc = mac->ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); -+ if (rc) -+ return rc; -+ -+ rc = ixgbe_restart_an_internal_phy_x550em(hw); -+ return rc; -+} -+ -+/** -+ * ixgbe_setup_sgmii_fw - Set up link for sgmii with 
firmware-controlled PHYs -+ * @hw: pointer to hardware structure -+ */ -+static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, -+ bool autoneg_wait) -+{ -+ struct ixgbe_mac_info *mac = &hw->mac; -+ u32 lval, sval, flx_val; -+ s32 rc; -+ -+ rc = mac->ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); -+ if (rc) -+ return rc; -+ -+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; -+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; -+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; -+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; -+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; -+ rc = mac->ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval); -+ if (rc) -+ return rc; -+ -+ rc = mac->ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); -+ if (rc) -+ return rc; -+ -+ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; -+ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; -+ rc = mac->ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, sval); -+ if (rc) -+ return rc; -+ - rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, lval); -+ if (rc) -+ return rc; - -- return rc; -+ rc = mac->ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); -+ if (rc) -+ return rc; -+ -+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; -+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; -+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; -+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; -+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; -+ -+ rc = mac->ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); -+ if (rc) -+ return rc; -+ -+ 
ixgbe_restart_an_internal_phy_x550em(hw); -+ -+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); -+} -+ -+/** -+ * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 -+ * @hw: pointer to hardware structure -+ * -+ * Enable flow control according to IEEE clause 37. -+ */ -+static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) -+{ -+ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; -+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; -+ ixgbe_link_speed speed; -+ bool link_up; -+ -+ /* AN should have completed when the cable was plugged in. -+ * Look for reasons to bail out. Bail out if: -+ * - FC autoneg is disabled, or if -+ * - link is not up. -+ */ -+ if (hw->fc.disable_fc_autoneg) -+ goto out; -+ -+ hw->mac.ops.check_link(hw, &speed, &link_up, false); -+ if (!link_up) -+ goto out; -+ -+ /* Check if auto-negotiation has completed */ -+ status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); -+ if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { -+ status = IXGBE_ERR_FC_NOT_NEGOTIATED; -+ goto out; -+ } -+ -+ /* Negotiate the flow control */ -+ status = ixgbe_negotiate_fc(hw, info[0], info[0], -+ FW_PHY_ACT_GET_LINK_INFO_FC_RX, -+ FW_PHY_ACT_GET_LINK_INFO_FC_TX, -+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, -+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); -+ -+out: -+ if (!status) { -+ hw->fc.fc_was_autonegged = true; -+ } else { -+ hw->fc.fc_was_autonegged = false; -+ hw->fc.current_mode = hw->fc.requested_mode; -+ } -+} -+ -+/** ixgbe_init_mac_link_ops_X550em_a - Init mac link function pointers -+ * @hw: pointer to hardware structure -+ **/ -+static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw) -+{ -+ struct ixgbe_mac_info *mac = &hw->mac; -+ -+ switch (mac->ops.get_media_type(hw)) { -+ case ixgbe_media_type_fiber: -+ mac->ops.setup_fc = NULL; -+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; -+ break; -+ case ixgbe_media_type_copper: -+ if (hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T && -+ hw->device_id 
!= IXGBE_DEV_ID_X550EM_A_1G_T_L) { -+ mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; -+ break; -+ } -+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; -+ mac->ops.setup_fc = ixgbe_fc_autoneg_fw; -+ mac->ops.setup_link = ixgbe_setup_sgmii_fw; -+ mac->ops.check_link = ixgbe_check_mac_link_generic; -+ break; -+ case ixgbe_media_type_backplane: -+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; -+ mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; -+ break; -+ default: -+ break; -+ } - } - - /** ixgbe_init_mac_link_ops_X550em - init mac link function pointers -@@ -1654,10 +2211,12 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) - ixgbe_set_soft_rate_select_speed; - break; - case ixgbe_media_type_copper: -+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) -+ break; - mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; - mac->ops.setup_fc = ixgbe_setup_fc_generic; - mac->ops.check_link = ixgbe_check_link_t_X550em; -- return; -+ break; - case ixgbe_media_type_backplane: - if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || - hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) -@@ -1666,6 +2225,10 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) - default: - break; - } -+ -+ /* Additional modification for X550em_a devices */ -+ if (hw->mac.type == ixgbe_mac_x550em_a) -+ ixgbe_init_mac_link_ops_X550em_a(hw); - } - - /** ixgbe_setup_sfp_modules_X550em - Setup SFP module -@@ -1696,6 +2259,12 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) - { -+ if (hw->phy.type == ixgbe_phy_fw) { -+ *autoneg = true; -+ *speed = hw->phy.speeds_supported; -+ return 0; -+ } -+ - /* SFP */ - if (hw->phy.media_type == ixgbe_media_type_fiber) { - /* CS4227 SFP must not enable auto-negotiation */ -@@ -1714,8 +2283,39 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, - else - *speed = IXGBE_LINK_SPEED_10GB_FULL; - } else { -- *speed = 
IXGBE_LINK_SPEED_10GB_FULL | -- IXGBE_LINK_SPEED_1GB_FULL; -+ switch (hw->phy.type) { -+ case ixgbe_phy_x550em_kx4: -+ *speed = IXGBE_LINK_SPEED_1GB_FULL | -+ IXGBE_LINK_SPEED_2_5GB_FULL | -+ IXGBE_LINK_SPEED_10GB_FULL; -+ break; -+ case ixgbe_phy_x550em_xfi: -+ *speed = IXGBE_LINK_SPEED_1GB_FULL | -+ IXGBE_LINK_SPEED_10GB_FULL; -+ break; -+ case ixgbe_phy_ext_1g_t: -+ case ixgbe_phy_sgmii: -+ *speed = IXGBE_LINK_SPEED_1GB_FULL; -+ break; -+ case ixgbe_phy_x550em_kr: -+ if (hw->mac.type == ixgbe_mac_x550em_a) { -+ /* check different backplane modes */ -+ if (hw->phy.nw_mng_if_sel & -+ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { -+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL; -+ break; -+ } else if (hw->device_id == -+ IXGBE_DEV_ID_X550EM_A_KR_L) { -+ *speed = IXGBE_LINK_SPEED_1GB_FULL; -+ break; -+ } -+ } -+ /* fall through */ -+ default: -+ *speed = IXGBE_LINK_SPEED_10GB_FULL | -+ IXGBE_LINK_SPEED_1GB_FULL; -+ break; -+ } - *autoneg = true; - } - return 0; -@@ -1742,7 +2342,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) - - /* Vendor alarm triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - ®); - - if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN)) -@@ -1750,7 +2350,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) - - /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - ®); - - if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | -@@ -1759,7 +2359,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) - - /* Global alarm triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - ®); - - if (status) -@@ -1774,7 +2374,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct 
ixgbe_hw *hw, bool *lsc) - if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { - /* device fault alarm triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - ®); - if (status) - return status; -@@ -1789,14 +2389,14 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) - - /* Vendor alarm 2 triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); -+ MDIO_MMD_AN, ®); - - if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT)) - return status; - - /* link connect/disconnect event occurred */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); -+ MDIO_MMD_AN, ®); - - if (status) - return status; -@@ -1827,21 +2427,34 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) - status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); - - /* Enable link status change alarm */ -- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); -- if (status) -- return status; - -- reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; -+ /* Enable the LASI interrupts on X552 devices to receive notifications -+ * of the link configurations of the external PHY and correspondingly -+ * support the configuration of the internal iXFI link, since iXFI does -+ * not support auto-negotiation. This is not required for X553 devices -+ * having KR support, which performs auto-negotiations and which is used -+ * as the internal link to the external PHY. Hence adding a check here -+ * to avoid enabling LASI interrupts for X553 devices. 
-+ */ -+ if (hw->mac.type != ixgbe_mac_x550em_a) { -+ status = hw->phy.ops.read_reg(hw, -+ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, -+ MDIO_MMD_AN, ®); -+ if (status) -+ return status; - -- status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg); -- if (status) -- return status; -+ reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; -+ -+ status = hw->phy.ops.write_reg(hw, -+ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, -+ MDIO_MMD_AN, reg); -+ if (status) -+ return status; -+ } - - /* Enable high temperature failure and global fault alarms */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - ®); - if (status) - return status; -@@ -1850,14 +2463,14 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) - IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN); - - status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - reg); - if (status) - return status; - - /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - ®); - if (status) - return status; -@@ -1866,14 +2479,14 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) - IXGBE_MDIO_GLOBAL_ALARM_1_INT); - - status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - reg); - if (status) - return status; - - /* Enable chip-wide vendor alarm */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - ®); - if (status) - return status; -@@ -1881,7 +2494,7 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) - reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN; - - status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, -- 
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - reg); - - return status; -@@ -1945,51 +2558,31 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; - -- /* Restart auto-negotiation. */ -- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; - status = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - -- return status; --} -- --/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY. -- * @hw: pointer to hardware structure -- * -- * Configures the integrated KX4 PHY. -- **/ --static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) --{ -- s32 status; -- u32 reg_val; -- -- status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1, -- IXGBE_SB_IOSF_TARGET_KX4_PCS0 + -- hw->bus.lan_id, ®_val); -- if (status) -- return status; -- -- reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 | -- IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX); -- -- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE; -+ if (hw->mac.type == ixgbe_mac_x550em_a) { -+ /* Set lane mode to KR auto negotiation */ -+ status = hw->mac.ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - -- /* Advertise 10G support. */ -- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) -- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4; -+ if (status) -+ return status; - -- /* Advertise 1G support. */ -- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) -- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX; -+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; -+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; -+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; -+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; -+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; - -- /* Restart auto-negotiation. 
*/ -- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART; -- status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1, -- IXGBE_SB_IOSF_TARGET_KX4_PCS0 + -- hw->bus.lan_id, reg_val); -+ status = hw->mac.ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); -+ } - -- return status; -+ return ixgbe_restart_an_internal_phy_x550em(hw); - } - - /** -@@ -2002,6 +2595,9 @@ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) - return 0; - -+ if (ixgbe_check_reset_blocked(hw)) -+ return 0; -+ - return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); - } - -@@ -2019,14 +2615,12 @@ static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) - *link_up = false; - - /* read this twice back to back to indicate current status */ -- ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -+ ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, - &autoneg_status); - if (ret) - return ret; - -- ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -+ ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, - &autoneg_status); - if (ret) - return ret; -@@ -2057,7 +2651,8 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) - if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) - return IXGBE_ERR_CONFIG; - -- if (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) { -+ if (!(hw->mac.type == ixgbe_mac_X550EM_x && -+ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) { - speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - return ixgbe_setup_kr_speed_x550em(hw, speed); -@@ -2072,7 +2667,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) - return 0; - - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -+ MDIO_MMD_AN, - 
&speed); - if (status) - return status; -@@ -2133,10 +2728,10 @@ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx) - - /* To turn on the LED, set mode to ON. */ - hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); -+ MDIO_MMD_VEND1, &phy_data); - phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK; - hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); -+ MDIO_MMD_VEND1, phy_data); - - return 0; - } -@@ -2155,14 +2750,70 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) - - /* To turn on the LED, set mode to ON. */ - hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); -+ MDIO_MMD_VEND1, &phy_data); - phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK; - hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); -+ MDIO_MMD_VEND1, phy_data); - - return 0; - } - -+/** -+ * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware -+ * @hw: pointer to the HW structure -+ * @maj: driver version major number -+ * @min: driver version minor number -+ * @build: driver version build number -+ * @sub: driver version sub build number -+ * @len: length of driver_ver string -+ * @driver_ver: driver string -+ * -+ * Sends driver version number to firmware through the manageability -+ * block. On success return 0 -+ * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring -+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
-+ **/ -+static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, -+ u8 build, u8 sub, u16 len, -+ const char *driver_ver) -+{ -+ struct ixgbe_hic_drv_info2 fw_cmd; -+ s32 ret_val; -+ int i; -+ -+ if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string))) -+ return IXGBE_ERR_INVALID_ARGUMENT; -+ -+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; -+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; -+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; -+ fw_cmd.port_num = (u8)hw->bus.func; -+ fw_cmd.ver_maj = maj; -+ fw_cmd.ver_min = min; -+ fw_cmd.ver_build = build; -+ fw_cmd.ver_sub = sub; -+ fw_cmd.hdr.checksum = 0; -+ memcpy(fw_cmd.driver_string, driver_ver, len); -+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, -+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); -+ -+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { -+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, -+ sizeof(fw_cmd), -+ IXGBE_HI_COMMAND_TIMEOUT, -+ true); -+ if (ret_val) -+ continue; -+ -+ if (fw_cmd.hdr.cmd_or_resp.ret_status != -+ FW_CEM_RESP_STATUS_SUCCESS) -+ return IXGBE_ERR_HOST_INTERFACE_COMMAND; -+ return 0; -+ } -+ -+ return ret_val; -+} -+ - /** ixgbe_get_lcd_x550em - Determine lowest common denominator - * @hw: pointer to hardware structure - * @lcd_speed: pointer to lowest common link speed -@@ -2179,7 +2830,7 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, - *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; - - status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -+ MDIO_MMD_AN, - &an_lp_status); - if (status) - return status; -@@ -2208,7 +2859,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) - { - bool pause, asm_dir; - u32 reg_val; -- s32 rc; -+ s32 rc = 0; - - /* Validate the requested mode */ - if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { -@@ -2251,33 +2902,122 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) - return IXGBE_ERR_CONFIG; - } - -- if 
(hw->device_id != IXGBE_DEV_ID_X550EM_X_KR && -- hw->device_id != IXGBE_DEV_ID_X550EM_A_KR && -- hw->device_id != IXGBE_DEV_ID_X550EM_A_KR_L) -- return 0; -+ switch (hw->device_id) { -+ case IXGBE_DEV_ID_X550EM_X_KR: -+ case IXGBE_DEV_ID_X550EM_A_KR: -+ case IXGBE_DEV_ID_X550EM_A_KR_L: -+ rc = hw->mac.ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, -+ ®_val); -+ if (rc) -+ return rc; -+ -+ reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | -+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); -+ if (pause) -+ reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; -+ if (asm_dir) -+ reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; -+ rc = hw->mac.ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, -+ reg_val); -+ -+ /* This device does not fully support AN. */ -+ hw->fc.disable_fc_autoneg = true; -+ break; -+ case IXGBE_DEV_ID_X550EM_X_XFI: -+ hw->fc.disable_fc_autoneg = true; -+ break; -+ default: -+ break; -+ } -+ return rc; -+} - -- rc = hw->mac.ops.read_iosf_sb_reg(hw, -- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), -- IXGBE_SB_IOSF_TARGET_KR_PHY, -- ®_val); -- if (rc) -- return rc; -+/** -+ * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37 -+ * @hw: pointer to hardware structure -+ **/ -+static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) -+{ -+ u32 link_s1, lp_an_page_low, an_cntl_1; -+ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; -+ ixgbe_link_speed speed; -+ bool link_up; - -- reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | -- IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); -- if (pause) -- reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; -- if (asm_dir) -- reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; -- rc = hw->mac.ops.write_iosf_sb_reg(hw, -- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), -- IXGBE_SB_IOSF_TARGET_KR_PHY, -- reg_val); -+ /* AN should have completed when the cable was plugged in. -+ * Look for reasons to bail out. Bail out if: -+ * - FC autoneg is disabled, or if -+ * - link is not up. 
-+ */ -+ if (hw->fc.disable_fc_autoneg) { -+ hw_err(hw, "Flow control autoneg is disabled"); -+ goto out; -+ } - -- /* This device does not fully support AN. */ -- hw->fc.disable_fc_autoneg = true; -+ hw->mac.ops.check_link(hw, &speed, &link_up, false); -+ if (!link_up) { -+ hw_err(hw, "The link is down"); -+ goto out; -+ } - -- return rc; -+ /* Check at auto-negotiation has completed */ -+ status = hw->mac.ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_LINK_S1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1); -+ -+ if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) { -+ hw_dbg(hw, "Auto-Negotiation did not complete\n"); -+ status = IXGBE_ERR_FC_NOT_NEGOTIATED; -+ goto out; -+ } -+ -+ /* Read the 10g AN autoc and LP ability registers and resolve -+ * local flow control settings accordingly -+ */ -+ status = hw->mac.ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1); -+ -+ if (status) { -+ hw_dbg(hw, "Auto-Negotiation did not complete\n"); -+ goto out; -+ } -+ -+ status = hw->mac.ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low); -+ -+ if (status) { -+ hw_dbg(hw, "Auto-Negotiation did not complete\n"); -+ goto out; -+ } -+ -+ status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low, -+ IXGBE_KRM_AN_CNTL_1_SYM_PAUSE, -+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE, -+ IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE, -+ IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE); -+ -+out: -+ if (!status) { -+ hw->fc.fc_was_autonegged = true; -+ } else { -+ hw->fc.fc_was_autonegged = false; -+ hw->fc.current_mode = hw->fc.requested_mode; -+ } -+} -+ -+/** -+ * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings -+ * @hw: pointer to hardware structure -+ **/ -+static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw) -+{ -+ hw->fc.fc_was_autonegged = false; -+ hw->fc.current_mode = hw->fc.requested_mode; - } - - /** ixgbe_enter_lplu_x550em - Transition to 
low power states -@@ -2326,7 +3066,7 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) - return ixgbe_set_copper_phy_power(hw, false); - - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -+ MDIO_MMD_AN, - &speed); - if (status) - return status; -@@ -2348,20 +3088,20 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) - - /* Clear AN completed indication */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -+ MDIO_MMD_AN, - &autoneg_reg); - if (status) - return status; - -- status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -+ status = hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, -+ MDIO_MMD_AN, - &an_10g_cntl_reg); - if (status) - return status; - - status = hw->phy.ops.read_reg(hw, - IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -+ MDIO_MMD_AN, - &autoneg_reg); - if (status) - return status; -@@ -2378,6 +3118,50 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) - } - - /** -+ * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs -+ * @hw: pointer to hardware structure -+ */ -+static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) -+{ -+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; -+ s32 rc; -+ -+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) -+ return 0; -+ -+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store); -+ if (rc) -+ return rc; -+ memset(store, 0, sizeof(store)); -+ -+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store); -+ if (rc) -+ return rc; -+ -+ return ixgbe_setup_fw_link(hw); -+} -+ -+/** -+ * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp -+ * @hw: pointer to hardware structure -+ */ -+static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) -+{ -+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; -+ s32 rc; -+ -+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store); -+ if 
(rc) -+ return rc; -+ -+ if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) { -+ ixgbe_shutdown_fw_phy(hw); -+ return IXGBE_ERR_OVERTEMP; -+ } -+ return 0; -+} -+ -+/** - * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register - * @hw: pointer to hardware structure - * -@@ -2398,6 +3182,18 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) - hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel & - IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> - IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; -+#if 1 /* Since by Intel FW(LEK8),LAN controller 1 default set port 0 use phy address 0 -+ * and port 1 use phy address 1, we swap it for Porsche2 platform. -+ * By hilbert. -+ */ -+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { -+ /*hw_err(hw, "####swap phy address used for different lan id in LAN conroller-1\n");*/ -+ hw->phy.mdio.prtad = (hw->bus.lan_id == 0) ? (1) : (0); -+ /*hw_err(hw, "####lan id: %d, phy address:%d\n", -+ hw->bus.lan_id, -+ hw->phy.mdio.prtad);*/ -+ } -+#endif - } - } - -@@ -2433,7 +3229,7 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) - /* Set functions pointers based on phy type */ - switch (hw->phy.type) { - case ixgbe_phy_x550em_kx4: -- phy->ops.setup_link = ixgbe_setup_kx4_x550em; -+ phy->ops.setup_link = NULL; - phy->ops.read_reg = ixgbe_read_phy_reg_x550em; - phy->ops.write_reg = ixgbe_write_phy_reg_x550em; - break; -@@ -2442,6 +3238,12 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) - phy->ops.read_reg = ixgbe_read_phy_reg_x550em; - phy->ops.write_reg = ixgbe_write_phy_reg_x550em; - break; -+ case ixgbe_phy_x550em_xfi: -+ /* link is managed by HW */ -+ phy->ops.setup_link = NULL; -+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em; -+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em; -+ break; - case ixgbe_phy_x550em_ext_t: - /* Save NW management interface connected on board. 
This is used - * to determine internal PHY mode -@@ -2463,6 +3265,19 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) - phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; - phy->ops.reset = ixgbe_reset_phy_t_X550em; - break; -+ case ixgbe_phy_sgmii: -+ phy->ops.setup_link = NULL; -+ break; -+ case ixgbe_phy_fw: -+ phy->ops.setup_link = ixgbe_setup_fw_link; -+ phy->ops.reset = ixgbe_reset_phy_fw; -+ break; -+ case ixgbe_phy_ext_1g_t: -+ phy->ops.setup_link = NULL; -+ phy->ops.read_reg = NULL; -+ phy->ops.write_reg = NULL; -+ phy->ops.reset = NULL; -+ break; - default: - break; - } -@@ -2488,6 +3303,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) - /* Fallthrough */ - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_X_KX4: -+ case IXGBE_DEV_ID_X550EM_X_XFI: - case IXGBE_DEV_ID_X550EM_A_KR: - case IXGBE_DEV_ID_X550EM_A_KR_L: - media_type = ixgbe_media_type_backplane; -@@ -2500,6 +3316,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) - case IXGBE_DEV_ID_X550EM_X_1G_T: - case IXGBE_DEV_ID_X550EM_X_10G_T: - case IXGBE_DEV_ID_X550EM_A_10G_T: -+ case IXGBE_DEV_ID_X550EM_A_1G_T: -+ case IXGBE_DEV_ID_X550EM_A_1G_T_L: - media_type = ixgbe_media_type_copper; - break; - default: -@@ -2519,7 +3337,7 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) - - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_TX_VENDOR_ALARMS_3, -- IXGBE_MDIO_PMA_PMD_DEV_TYPE, -+ MDIO_MMD_PMAPMD, - ®); - if (status) - return status; -@@ -2530,7 +3348,7 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) - if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_GLOBAL_RES_PR_10, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - ®); - if (status) - return status; -@@ -2539,7 +3357,7 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) - - status = hw->phy.ops.write_reg(hw, - IXGBE_MDIO_GLOBAL_RES_PR_10, -- 
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - reg); - if (status) - return status; -@@ -2567,6 +3385,13 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) - hlreg0 &= ~IXGBE_HLREG0_MDCSPD; - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); - break; -+ case IXGBE_DEV_ID_X550EM_A_1G_T: -+ case IXGBE_DEV_ID_X550EM_A_1G_T_L: -+ /* Select fast MDIO clock speed for these devices */ -+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); -+ hlreg0 |= IXGBE_HLREG0_MDCSPD; -+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); -+ break; - default: - break; - } -@@ -2586,6 +3411,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) - u32 ctrl = 0; - u32 i; - bool link_up = false; -+ u32 swfw_mask = hw->phy.phy_semaphore_mask; - - /* Call adapter stop to disable Tx/Rx and clear interrupts */ - status = hw->mac.ops.stop_adapter(hw); -@@ -2613,6 +3439,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) - hw->phy.sfp_setup_needed = false; - } - -+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) -+ return status; -+ - /* Reset PHY */ - if (!hw->phy.reset_disable && hw->phy.ops.reset) - hw->phy.ops.reset(hw); -@@ -2631,9 +3460,16 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) - ctrl = IXGBE_CTRL_RST; - } - -+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); -+ if (status) { -+ hw_dbg(hw, "semaphore failed with %d", status); -+ return IXGBE_ERR_SWFW_SYNC; -+ } -+ - ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); - IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); - IXGBE_WRITE_FLUSH(hw); -+ hw->mac.ops.release_swfw_sync(hw, swfw_mask); - usleep_range(1000, 1200); - - /* Poll for reset bit to self-clear meaning reset is complete */ -@@ -2728,6 +3564,90 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, - } - - /** -+ * ixgbe_setup_fc_backplane_x550em_a - Set up flow control -+ * @hw: pointer to hardware structure -+ * -+ * Called at init time to set up flow control. 
-+ **/ -+static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) -+{ -+ s32 status = 0; -+ u32 an_cntl = 0; -+ -+ /* Validate the requested mode */ -+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { -+ hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); -+ return IXGBE_ERR_INVALID_LINK_SETTINGS; -+ } -+ -+ if (hw->fc.requested_mode == ixgbe_fc_default) -+ hw->fc.requested_mode = ixgbe_fc_full; -+ -+ /* Set up the 1G and 10G flow control advertisement registers so the -+ * HW will be able to do FC autoneg once the cable is plugged in. If -+ * we link at 10G, the 1G advertisement is harmless and vice versa. -+ */ -+ status = hw->mac.ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl); -+ -+ if (status) { -+ hw_dbg(hw, "Auto-Negotiation did not complete\n"); -+ return status; -+ } -+ -+ /* The possible values of fc.requested_mode are: -+ * 0: Flow control is completely disabled -+ * 1: Rx flow control is enabled (we can receive pause frames, -+ * but not send pause frames). -+ * 2: Tx flow control is enabled (we can send pause frames but -+ * we do not support receiving pause frames). -+ * 3: Both Rx and Tx flow control (symmetric) are enabled. -+ * other: Invalid. -+ */ -+ switch (hw->fc.requested_mode) { -+ case ixgbe_fc_none: -+ /* Flow control completely disabled by software override. */ -+ an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | -+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); -+ break; -+ case ixgbe_fc_tx_pause: -+ /* Tx Flow control is enabled, and Rx Flow control is -+ * disabled by software override. -+ */ -+ an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; -+ an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; -+ break; -+ case ixgbe_fc_rx_pause: -+ /* Rx Flow control is enabled and Tx Flow control is -+ * disabled by software override. 
Since there really -+ * isn't a way to advertise that we are capable of RX -+ * Pause ONLY, we will advertise that we support both -+ * symmetric and asymmetric Rx PAUSE, as such we fall -+ * through to the fc_full statement. Later, we will -+ * disable the adapter's ability to send PAUSE frames. -+ */ -+ case ixgbe_fc_full: -+ /* Flow control (both Rx and Tx) is enabled by SW override. */ -+ an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | -+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; -+ break; -+ default: -+ hw_err(hw, "Flow control param set incorrectly\n"); -+ return IXGBE_ERR_CONFIG; -+ } -+ -+ status = hw->mac.ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl); -+ -+ /* Restart auto-negotiation. */ -+ status = ixgbe_restart_an_internal_phy_x550em(hw); -+ -+ return status; -+} -+ -+/** - * ixgbe_set_mux - Set mux for port 1 access with CS4227 - * @hw: pointer to hardware structure - * @state: set mux if 1, clear if 0 -@@ -2881,7 +3801,13 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - if (hw->mac.ops.acquire_swfw_sync(hw, mask)) - return IXGBE_ERR_SWFW_SYNC; - -+#if 0 /* To use C22 MDI access function created by our own. 
-+ * By hilbert -+ */ - status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data); -+#else -+ status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, phy_data); -+#endif - hw->mac.ops.release_swfw_sync(hw, mask); - - return status; -@@ -2914,7 +3840,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - .clear_vfta = &ixgbe_clear_vfta_generic, \ - .set_vfta = &ixgbe_set_vfta_generic, \ - .fc_enable = &ixgbe_fc_enable_generic, \ -- .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, \ -+ .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_x550, \ - .init_uta_tables = &ixgbe_init_uta_tables_generic, \ - .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ - .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ -@@ -2933,6 +3859,7 @@ static const struct ixgbe_mac_operations mac_ops_X550 = { - X550_COMMON_MAC - .led_on = ixgbe_led_on_generic, - .led_off = ixgbe_led_off_generic, -+ .init_led_link_act = ixgbe_init_led_link_act_generic, - .reset_hw = &ixgbe_reset_hw_X540, - .get_media_type = &ixgbe_get_media_type_X540, - .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, -@@ -2947,12 +3874,14 @@ static const struct ixgbe_mac_operations mac_ops_X550 = { - .prot_autoc_read = prot_autoc_read_generic, - .prot_autoc_write = prot_autoc_write_generic, - .setup_fc = ixgbe_setup_fc_generic, -+ .fc_autoneg = ixgbe_fc_autoneg, - }; - - static const struct ixgbe_mac_operations mac_ops_X550EM_x = { - X550_COMMON_MAC - .led_on = ixgbe_led_on_t_x550em, - .led_off = ixgbe_led_off_t_x550em, -+ .init_led_link_act = ixgbe_init_led_link_act_generic, - .reset_hw = &ixgbe_reset_hw_X550em, - .get_media_type = &ixgbe_get_media_type_X550em, - .get_san_mac_addr = NULL, -@@ -2965,6 +3894,29 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = { - .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, - .init_swfw_sync = &ixgbe_init_swfw_sync_X540, - .setup_fc = NULL, /* defined later */ -+ .fc_autoneg = ixgbe_fc_autoneg, -+ .read_iosf_sb_reg = 
ixgbe_read_iosf_sb_reg_x550, -+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, -+}; -+ -+static const struct ixgbe_mac_operations mac_ops_X550EM_x_fw = { -+ X550_COMMON_MAC -+ .led_on = NULL, -+ .led_off = NULL, -+ .init_led_link_act = NULL, -+ .reset_hw = &ixgbe_reset_hw_X550em, -+ .get_media_type = &ixgbe_get_media_type_X550em, -+ .get_san_mac_addr = NULL, -+ .get_wwn_prefix = NULL, -+ .setup_link = &ixgbe_setup_mac_link_X540, -+ .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, -+ .get_bus_info = &ixgbe_get_bus_info_X550em, -+ .setup_sfp = ixgbe_setup_sfp_modules_X550em, -+ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, -+ .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, -+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540, -+ .setup_fc = NULL, -+ .fc_autoneg = ixgbe_fc_autoneg, - .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550, - .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, - }; -@@ -2973,6 +3925,28 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = { - X550_COMMON_MAC - .led_on = ixgbe_led_on_t_x550em, - .led_off = ixgbe_led_off_t_x550em, -+ .init_led_link_act = ixgbe_init_led_link_act_generic, -+ .reset_hw = ixgbe_reset_hw_X550em, -+ .get_media_type = ixgbe_get_media_type_X550em, -+ .get_san_mac_addr = NULL, -+ .get_wwn_prefix = NULL, -+ .setup_link = &ixgbe_setup_mac_link_X540, -+ .get_link_capabilities = ixgbe_get_link_capabilities_X550em, -+ .get_bus_info = ixgbe_get_bus_info_X550em, -+ .setup_sfp = ixgbe_setup_sfp_modules_X550em, -+ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, -+ .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, -+ .setup_fc = ixgbe_setup_fc_x550em, -+ .fc_autoneg = ixgbe_fc_autoneg, -+ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, -+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, -+}; -+ -+static struct ixgbe_mac_operations mac_ops_x550em_a_fw = { -+ X550_COMMON_MAC -+ .led_on = ixgbe_led_on_generic, -+ .led_off = ixgbe_led_off_generic, -+ .init_led_link_act = 
ixgbe_init_led_link_act_generic, - .reset_hw = ixgbe_reset_hw_X550em, - .get_media_type = ixgbe_get_media_type_X550em, - .get_san_mac_addr = NULL, -@@ -2984,6 +3958,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = { - .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, - .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, - .setup_fc = ixgbe_setup_fc_x550em, -+ .fc_autoneg = ixgbe_fc_autoneg, - .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, - .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, - }; -@@ -3017,12 +3992,11 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { - .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ - .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ - .setup_link = &ixgbe_setup_phy_link_generic, \ -- .set_phy_power = NULL, \ -- .check_overtemp = &ixgbe_tn_check_overtemp, \ -- .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, -+ .set_phy_power = NULL, - - static const struct ixgbe_phy_operations phy_ops_X550 = { - X550_COMMON_PHY -+ .check_overtemp = &ixgbe_tn_check_overtemp, - .init = NULL, - .identify = &ixgbe_identify_phy_generic, - .read_reg = &ixgbe_read_phy_reg_generic, -@@ -3031,19 +4005,27 @@ static const struct ixgbe_phy_operations phy_ops_X550 = { - - static const struct ixgbe_phy_operations phy_ops_X550EM_x = { - X550_COMMON_PHY -+ .check_overtemp = &ixgbe_tn_check_overtemp, - .init = &ixgbe_init_phy_ops_X550em, - .identify = &ixgbe_identify_phy_x550em, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, -- .read_i2c_combined = &ixgbe_read_i2c_combined_generic, -- .write_i2c_combined = &ixgbe_write_i2c_combined_generic, -- .read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, -- .write_i2c_combined_unlocked = -- &ixgbe_write_i2c_combined_generic_unlocked, -+}; -+ -+static const struct ixgbe_phy_operations phy_ops_x550em_x_fw = { -+ X550_COMMON_PHY -+ .check_overtemp = NULL, -+ .init = ixgbe_init_phy_ops_X550em, -+ 
.identify = ixgbe_identify_phy_x550em, -+ .read_reg = NULL, -+ .write_reg = NULL, -+ .read_reg_mdi = NULL, -+ .write_reg_mdi = NULL, - }; - - static const struct ixgbe_phy_operations phy_ops_x550em_a = { - X550_COMMON_PHY -+ .check_overtemp = &ixgbe_tn_check_overtemp, - .init = &ixgbe_init_phy_ops_X550em, - .identify = &ixgbe_identify_phy_x550em, - .read_reg = &ixgbe_read_phy_reg_x550a, -@@ -3052,6 +4034,31 @@ static const struct ixgbe_phy_operations phy_ops_x550em_a = { - .write_reg_mdi = &ixgbe_write_phy_reg_mdi, - }; - -+static const struct ixgbe_phy_operations phy_ops_x550em_a_fw = { -+ X550_COMMON_PHY -+ .check_overtemp = ixgbe_check_overtemp_fw, -+ .init = ixgbe_init_phy_ops_X550em, -+ .identify = ixgbe_identify_phy_fw, -+#if 0 /* Declare C22 MDI directly access functions. By hilbert */ -+ .read_reg = NULL, -+ .write_reg = NULL, -+ .read_reg_mdi = NULL, -+ .write_reg_mdi = NULL, -+#else -+ .read_reg = &ixgbe_read_phy_reg_x550a, -+ .write_reg = &ixgbe_write_phy_reg_x550a, -+ .read_reg_mdi = &ixgbe_read_phy_reg_mdio, -+ .write_reg_mdi = &ixgbe_write_phy_reg_mdio, -+#endif -+}; -+ -+static const struct ixgbe_link_operations link_ops_x550em_x = { -+ .read_link = &ixgbe_read_i2c_combined_generic, -+ .read_link_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, -+ .write_link = &ixgbe_write_i2c_combined_generic, -+ .write_link_unlocked = &ixgbe_write_i2c_combined_generic_unlocked, -+}; -+ - static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(X550) - }; -@@ -3082,14 +4089,35 @@ const struct ixgbe_info ixgbe_X550EM_x_info = { - .phy_ops = &phy_ops_X550EM_x, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_X550EM_x, -+ .link_ops = &link_ops_x550em_x, -+}; -+ -+const struct ixgbe_info ixgbe_x550em_x_fw_info = { -+ .mac = ixgbe_mac_X550EM_x, -+ .get_invariants = ixgbe_get_invariants_X550_x_fw, -+ .mac_ops = &mac_ops_X550EM_x_fw, -+ .eeprom_ops = &eeprom_ops_X550EM_x, -+ .phy_ops = &phy_ops_x550em_x_fw, -+ .mbx_ops = &mbx_ops_generic, 
-+ .mvals = ixgbe_mvals_X550EM_x, - }; - - const struct ixgbe_info ixgbe_x550em_a_info = { - .mac = ixgbe_mac_x550em_a, -- .get_invariants = &ixgbe_get_invariants_X550_x, -+ .get_invariants = &ixgbe_get_invariants_X550_a, - .mac_ops = &mac_ops_x550em_a, - .eeprom_ops = &eeprom_ops_X550EM_x, - .phy_ops = &phy_ops_x550em_a, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_x550em_a, - }; -+ -+const struct ixgbe_info ixgbe_x550em_a_fw_info = { -+ .mac = ixgbe_mac_x550em_a, -+ .get_invariants = ixgbe_get_invariants_X550_a_fw, -+ .mac_ops = &mac_ops_x550em_a_fw, -+ .eeprom_ops = &eeprom_ops_X550EM_x, -+ .phy_ops = &phy_ops_x550em_a_fw, -+ .mbx_ops = &mbx_ops_generic, -+ .mvals = ixgbe_mvals_x550em_a, -+}; --- -2.7.4 - From ae20550163aa694afe01b1c2f3f32ef3422ad311 Mon Sep 17 00:00:00 2001 From: PeterLin Date: Thu, 11 Apr 2019 16:46:39 +0800 Subject: [PATCH 11/20] add Intel ixgbe driver patch for pegatron fn-6254-dn-f --- ...gbe-driver-for-pegatron-fn-6254-dn-f.patch | 4666 +++++++++++++++++ 1 file changed, 4666 insertions(+) create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/kernel_patch/0001-update-Intel-ixgbe-driver-for-pegatron-fn-6254-dn-f.patch diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/kernel_patch/0001-update-Intel-ixgbe-driver-for-pegatron-fn-6254-dn-f.patch b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/kernel_patch/0001-update-Intel-ixgbe-driver-for-pegatron-fn-6254-dn-f.patch new file mode 100644 index 000000000000..0ffce8ae2ecf --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/kernel_patch/0001-update-Intel-ixgbe-driver-for-pegatron-fn-6254-dn-f.patch @@ -0,0 +1,4666 @@ +From f55d2dcb51f86f58f43cf563045fe6c4dfd590e0 Mon Sep 17 00:00:00 2001 +From: PeterLin +Date: Thu, 11 Apr 2019 14:21:33 +0800 +Subject: [PATCH] update Intel ixgbe driver for pegatron fn-6254-dn-f + +--- + drivers/net/ethernet/intel/ixgbe/ixgbe.h | 10 + + 
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 28 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 15 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 439 ++++-- + drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 7 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 103 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 85 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 407 +++--- + drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 27 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 153 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 20 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 1668 +++++++++++++++++----- + 12 files changed, 2282 insertions(+), 680 deletions(-) + +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h +index b06e32d..255ec3b 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h +@@ -89,6 +89,7 @@ + + /* Supported Rx Buffer Sizes */ + #define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ ++#define IXGBE_RXBUFFER_1536 1536 + #define IXGBE_RXBUFFER_2K 2048 + #define IXGBE_RXBUFFER_3K 3072 + #define IXGBE_RXBUFFER_4K 4096 +@@ -661,6 +662,9 @@ struct ixgbe_adapter { + #define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) + #define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12) + #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) ++#define IXGBE_FLAG2_EEE_CAPABLE BIT(14) ++#define IXGBE_FLAG2_EEE_ENABLED BIT(15) ++#define IXGBE_FLAG2_RX_LEGACY BIT(16) + + /* Tx fast path data */ + int num_tx_queues; +@@ -861,7 +865,9 @@ enum ixgbe_boards { + board_X540, + board_X550, + board_X550EM_x, ++ board_x550em_x_fw, + board_x550em_a, ++ board_x550em_a_fw, + }; + + extern const struct ixgbe_info ixgbe_82598_info; +@@ -869,7 +875,9 @@ extern const struct ixgbe_info ixgbe_82599_info; + extern const struct ixgbe_info ixgbe_X540_info; + extern const struct ixgbe_info ixgbe_X550_info; + extern const struct ixgbe_info ixgbe_X550EM_x_info; ++extern const 
struct ixgbe_info ixgbe_x550em_x_fw_info; + extern const struct ixgbe_info ixgbe_x550em_a_info; ++extern const struct ixgbe_info ixgbe_x550em_a_fw_info; + #ifdef CONFIG_IXGBE_DCB + extern const struct dcbnl_rtnl_ops dcbnl_ops; + #endif +@@ -1027,4 +1035,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, + struct ixgbe_ring *tx_ring); + u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); + void ixgbe_store_reta(struct ixgbe_adapter *adapter); ++s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, ++ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); + #endif /* _IXGBE_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +index fb51be7..8a32eb7 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +@@ -139,8 +139,6 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) + case ixgbe_phy_tn: + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; + phy->ops.check_link = &ixgbe_check_phy_link_tnx; +- phy->ops.get_firmware_version = +- &ixgbe_get_phy_firmware_version_tnx; + break; + case ixgbe_phy_nl: + phy->ops.reset = &ixgbe_reset_phy_nl; +@@ -177,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) + **/ + static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) + { +-#ifndef CONFIG_SPARC +- u32 regval; +- u32 i; +-#endif + s32 ret_val; + + ret_val = ixgbe_start_hw_generic(hw); +- +-#ifndef CONFIG_SPARC +- /* Disable relaxed ordering */ +- for (i = 0; ((i < hw->mac.max_tx_queues) && +- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { +- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); +- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; +- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); +- } +- +- for (i = 0; ((i < hw->mac.max_rx_queues) && +- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { +- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); +- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | +- 
IXGBE_DCA_RXCTRL_HEAD_WRO_EN); +- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); +- } +-#endif + if (ret_val) + return ret_val; + +@@ -367,7 +343,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) + } + + /* Negotiate the fc mode to use */ +- ixgbe_fc_autoneg(hw); ++ hw->mac.ops.fc_autoneg(hw); + + /* Disable any previous flow control settings */ + fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); +@@ -1179,6 +1155,7 @@ static const struct ixgbe_mac_operations mac_ops_82598 = { + .get_link_capabilities = &ixgbe_get_link_capabilities_82598, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, ++ .init_led_link_act = ixgbe_init_led_link_act_generic, + .blink_led_start = &ixgbe_blink_led_start_generic, + .blink_led_stop = &ixgbe_blink_led_stop_generic, + .set_rar = &ixgbe_set_rar_generic, +@@ -1193,6 +1170,7 @@ static const struct ixgbe_mac_operations mac_ops_82598 = { + .set_vfta = &ixgbe_set_vfta_82598, + .fc_enable = &ixgbe_fc_enable_82598, + .setup_fc = ixgbe_setup_fc_generic, ++ .fc_autoneg = ixgbe_fc_autoneg, + .set_fw_drv_ver = NULL, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, + .release_swfw_sync = &ixgbe_release_swfw_sync, +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +index 63b2500..d602637 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +@@ -331,8 +331,6 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) + case ixgbe_phy_tn: + phy->ops.check_link = &ixgbe_check_phy_link_tnx; + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; +- phy->ops.get_firmware_version = +- &ixgbe_get_phy_firmware_version_tnx; + break; + default: + break; +@@ -1451,7 +1449,7 @@ do { \ + * @atr_input: input bitstream to compute the hash on + * @input_mask: mask for the input bitstream + * +- * This function serves two main purposes. First it applys the input_mask ++ * This function serves two main purposes. 
First it applies the input_mask + * to the atr_input resulting in a cleaned up atr_input data stream. + * Secondly it computes the hash and stores it in the bkt_hash field at + * the end of the input byte stream. This way it will be available for +@@ -1591,15 +1589,17 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + + switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) { + case 0x0000: +- /* mask VLAN ID, fall through to mask VLAN priority */ ++ /* mask VLAN ID */ + fdirm |= IXGBE_FDIRM_VLANID; ++ /* fall through */ + case 0x0FFF: + /* mask VLAN priority */ + fdirm |= IXGBE_FDIRM_VLANP; + break; + case 0xE000: +- /* mask VLAN ID only, fall through */ ++ /* mask VLAN ID only */ + fdirm |= IXGBE_FDIRM_VLANID; ++ /* fall through */ + case 0xEFFF: + /* no VLAN fields masked */ + break; +@@ -1610,8 +1610,9 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + + switch (input_mask->formatted.flex_bytes & 0xFFFF) { + case 0x0000: +- /* Mask Flex Bytes, fall through */ ++ /* Mask Flex Bytes */ + fdirm |= IXGBE_FDIRM_FLEX; ++ /* fall through */ + case 0xFFFF: + break; + default: +@@ -2204,6 +2205,7 @@ static const struct ixgbe_mac_operations mac_ops_82599 = { + .get_link_capabilities = &ixgbe_get_link_capabilities_82599, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, ++ .init_led_link_act = ixgbe_init_led_link_act_generic, + .blink_led_start = &ixgbe_blink_led_start_generic, + .blink_led_stop = &ixgbe_blink_led_stop_generic, + .set_rar = &ixgbe_set_rar_generic, +@@ -2219,6 +2221,7 @@ static const struct ixgbe_mac_operations mac_ops_82599 = { + .set_vfta = &ixgbe_set_vfta_generic, + .fc_enable = &ixgbe_fc_enable_generic, + .setup_fc = ixgbe_setup_fc_generic, ++ .fc_autoneg = ixgbe_fc_autoneg, + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, + .init_uta_tables = &ixgbe_init_uta_tables_generic, + .setup_sfp = &ixgbe_setup_sfp_modules_82599, +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +index ad33622..fd055cc 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +@@ -79,16 +79,28 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) + + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: +- hw->mac.ops.check_link(hw, &speed, &link_up, false); +- /* if link is down, assume supported */ +- if (link_up) +- supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? ++ /* flow control autoneg black list */ ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_A_SFP: ++ case IXGBE_DEV_ID_X550EM_A_SFP_N: ++ supported = false; ++ break; ++ default: ++ hw->mac.ops.check_link(hw, &speed, &link_up, false); ++ /* if link is down, assume supported */ ++ if (link_up) ++ supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? + true : false; +- else +- supported = true; ++ else ++ supported = true; ++ } ++ + break; + case ixgbe_media_type_backplane: +- supported = true; ++ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) ++ supported = false; ++ else ++ supported = true; + break; + case ixgbe_media_type_copper: + /* only some copper devices support flow control autoneg */ +@@ -100,6 +112,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) + case IXGBE_DEV_ID_X550T1: + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: + supported = true; + break; + default: +@@ -109,6 +123,10 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) + break; + } + ++ if (!supported) ++ hw_dbg(hw, "Device %x does not support flow control autoneg\n", ++ hw->device_id); ++ + return supported; + } + +@@ -153,7 +171,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) + if (ret_val) + return ret_val; + +- /* only backplane uses autoc so fall though */ ++ /* fall through - only backplane uses autoc */ + case ixgbe_media_type_fiber: + reg = IXGBE_READ_REG(hw, 
IXGBE_PCS1GANA); + +@@ -279,6 +297,10 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) + s32 ret_val; + u32 ctrl_ext; + u16 device_caps; ++#if 1 //by hilbert ++ s32 rc; ++ u16 regVal=0; ++#endif + + /* Set the media type */ + hw->phy.media_type = hw->mac.ops.get_media_type(hw); +@@ -298,10 +320,12 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + IXGBE_WRITE_FLUSH(hw); + +- /* Setup flow control */ +- ret_val = hw->mac.ops.setup_fc(hw); +- if (ret_val) +- return ret_val; ++ /* Setup flow control if method for doing so */ ++ if (hw->mac.ops.setup_fc) { ++ ret_val = hw->mac.ops.setup_fc(hw); ++ if (ret_val) ++ return ret_val; ++ } + + /* Cashe bit indicating need for crosstalk fix */ + switch (hw->mac.type) { +@@ -322,6 +346,67 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + ++#if 1 /* To modify speed LED polarity and configure led on only for speed 1G in M88E1512 ++ * for Porsche2 platform. 
By hilbert ++ * From 88E1512 datasheet: ++ * Page register: 0x16 ++ * LED functon control register: 0x10 in page 3 ++ * LED polarity control register: 0x11 in page 3 ++ */ ++ ++ if (hw->mac.type == ixgbe_mac_x550em_a && ++ (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { ++ /* For M88E1512, to select page 3 in register 0x16 */ ++ regVal = 0x03; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++#if 0 //for debug ++ /* For M88E1512, read from register 0x16 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x16, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "phy register read failed, rc:%x\n", rc); ++ } ++ hw_err(hw, "####read phy register 0x16 again, value:%x\n", regVal); ++#endif ++ /* For M88E1512, read from page 3, register 0x11 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x11, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "led polarity register read failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write to page 3 register 0x11 with polarity bit set */ ++ regVal |= 0x01; ++ rc = hw->phy.ops.write_reg(hw, 0x11, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "led polarity register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, read from page 3, register 16 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "led function control register read failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write to page 3 register 16 with only 1000M led on */ ++ regVal = (regVal & 0xFFF0) | 0x0007; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write page 22 back to default 0 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ } 
++#endif + return 0; + } + +@@ -346,25 +431,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) + } + IXGBE_WRITE_FLUSH(hw); + +-#ifndef CONFIG_SPARC +- /* Disable relaxed ordering */ +- for (i = 0; i < hw->mac.max_tx_queues; i++) { +- u32 regval; +- +- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); +- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; +- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); +- } +- +- for (i = 0; i < hw->mac.max_rx_queues; i++) { +- u32 regval; +- +- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); +- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | +- IXGBE_DCA_RXCTRL_HEAD_WRO_EN); +- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); +- } +-#endif + return 0; + } + +@@ -390,6 +456,10 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) + status = hw->mac.ops.start_hw(hw); + } + ++ /* Initialize the LED link active for LED blink support */ ++ if (hw->mac.ops.init_led_link_act) ++ hw->mac.ops.init_led_link_act(hw); ++ + return status; + } + +@@ -773,22 +843,100 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) + } + + /** ++ * ixgbe_init_led_link_act_generic - Store the LED index link/activity. ++ * @hw: pointer to hardware structure ++ * ++ * Store the index for the link active LED. This will be used to support ++ * blinking the LED. ++ **/ ++s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ u32 led_reg, led_mode; ++ u16 i; ++ ++ led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ++ ++ /* Get LED link active from the LEDCTL register */ ++ for (i = 0; i < 4; i++) { ++ led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i); ++ ++ if ((led_mode & IXGBE_LED_MODE_MASK_BASE) == ++ IXGBE_LED_LINK_ACTIVE) { ++ mac->led_link_act = i; ++ return 0; ++ } ++ } ++ ++ /* If LEDCTL register does not have the LED link active set, then use ++ * known MAC defaults. 
++ */ ++ switch (hw->mac.type) { ++ case ixgbe_mac_x550em_a: ++ mac->led_link_act = 0; ++ break; ++ case ixgbe_mac_X550EM_x: ++ mac->led_link_act = 1; ++ break; ++ default: ++ mac->led_link_act = 2; ++ } ++ ++ return 0; ++} ++ ++/** + * ixgbe_led_on_generic - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn on + **/ + s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) + { +- u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); +- +- if (index > 3) +- return IXGBE_ERR_PARAM; +- +- /* To turn on the LED, set mode to ON. */ +- led_reg &= ~IXGBE_LED_MODE_MASK(index); +- led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); +- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); +- IXGBE_WRITE_FLUSH(hw); ++ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ++ s32 rc; ++ u16 regVal; ++ ++ /* following led behavior was modified by hilbert, ++ * to force led on through C22 MDI command. ++ */ ++ if (hw->mac.type == ixgbe_mac_x550em_a) { ++ /* For M88E1512, to select page 3 in register 22 */ ++ regVal = 0x03; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, read from page 3, register 16 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "led function control register read failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write to page 3 register 16 with force led on */ ++ regVal = (regVal & 0xFF00) | 0x0099; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write page 22 back to default 0 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ } else { ++ if (index > 3) ++ return IXGBE_ERR_PARAM; ++ ++ /* To 
turn on the LED, set mode to ON. */ ++ led_reg &= ~IXGBE_LED_MODE_MASK(index); ++ led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); ++ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); ++ IXGBE_WRITE_FLUSH(hw); ++ } + + return 0; + } +@@ -801,15 +949,50 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) + s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) + { + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); +- +- if (index > 3) +- return IXGBE_ERR_PARAM; +- +- /* To turn off the LED, set mode to OFF. */ +- led_reg &= ~IXGBE_LED_MODE_MASK(index); +- led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); +- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); +- IXGBE_WRITE_FLUSH(hw); ++ s32 rc; ++ u16 regVal; ++ ++ /* following led behavior was modified by hilbert, ++ * to force led on through C22 MDI command. ++ */ ++ if (hw->mac.type == ixgbe_mac_x550em_a) { ++ /* For M88E1512, to select page 3 in register 22 */ ++ regVal = 0x03; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, read from page 3, register 16 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "led function control register read failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write to page 3 register 16 with force led on */ ++ regVal = (regVal & 0xFF00) | 0x0088; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write page 22 back to default 0 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ } else { ++ if (index > 3) ++ return IXGBE_ERR_PARAM; ++ ++ /* To turn off the LED, set mode to OFF. 
*/ ++ led_reg &= ~IXGBE_LED_MODE_MASK(index); ++ led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); ++ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); ++ IXGBE_WRITE_FLUSH(hw); ++ } + + return 0; + } +@@ -2127,7 +2310,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) + } + + /* Negotiate the fc mode to use */ +- ixgbe_fc_autoneg(hw); ++ hw->mac.ops.fc_autoneg(hw); + + /* Disable any previous flow control settings */ + mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); +@@ -2231,8 +2414,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) + * Find the intersection between advertised settings and link partner's + * advertised settings + **/ +-static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, +- u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) ++s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, ++ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) + { + if ((!(adv_reg)) || (!(lp_reg))) + return IXGBE_ERR_FC_NOT_NEGOTIATED; +@@ -3334,6 +3517,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + else + *speed = IXGBE_LINK_SPEED_100_FULL; + break; ++ case IXGBE_LINKS_SPEED_10_X550EM_A: ++ *speed = IXGBE_LINK_SPEED_UNKNOWN; ++ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || ++ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { ++ *speed = IXGBE_LINK_SPEED_10_FULL; ++ } ++ break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } +@@ -3491,7 +3681,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, + rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; + for (; i < (num_pb / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); +- /* Fall through to configure remaining packet buffers */ ++ /* fall through - configure remaining packet buffers */ + case (PBA_STRATEGY_EQUAL): + /* Divide the remaining Rx packet buffer evenly among the TCs */ + rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; +@@ -3530,7 +3720,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, + * Calculates the checksum 
for some buffer on a specified length. The + * checksum calculated is returned. + **/ +-static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) ++u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) + { + u32 i; + u8 sum = 0; +@@ -3545,43 +3735,29 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) + } + + /** +- * ixgbe_host_interface_command - Issue command to manageability block ++ * ixgbe_hic_unlocked - Issue command to manageability block unlocked + * @hw: pointer to the HW structure +- * @buffer: contains the command to write and where the return status will +- * be placed ++ * @buffer: command to write and where the return status will be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion +- * @return_data: read and return data from the buffer (true) or not (false) +- * Needed because FW structures are big endian and decoding of +- * these fields can be 8 bit or 16 bit based on command. Decoding +- * is not easily understood without making a table of commands. +- * So we will leave this up to the caller to read back the data +- * in these cases. + * +- * Communicates with the manageability block. On success return 0 +- * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. ++ * Communicates with the manageability block. On success return 0 ++ * else returns semaphore error when encountering an error acquiring ++ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. ++ * ++ * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held ++ * by the caller. 
+ **/ +-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, +- u32 length, u32 timeout, +- bool return_data) ++s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, ++ u32 timeout) + { +- u32 hdr_size = sizeof(struct ixgbe_hic_hdr); +- u32 hicr, i, bi, fwsts; +- u16 buf_len, dword_len; +- union { +- struct ixgbe_hic_hdr hdr; +- u32 u32arr[1]; +- } *bp = buffer; +- s32 status; ++ u32 hicr, i, fwsts; ++ u16 dword_len; + + if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } +- /* Take management host interface semaphore */ +- status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); +- if (status) +- return status; + + /* Set bit 9 of FWSTS clearing FW reset indication */ + fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); +@@ -3591,15 +3767,13 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, + hicr = IXGBE_READ_REG(hw, IXGBE_HICR); + if (!(hicr & IXGBE_HICR_EN)) { + hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); +- status = IXGBE_ERR_HOST_INTERFACE_COMMAND; +- goto rel_out; ++ return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if (length % sizeof(u32)) { + hw_dbg(hw, "Buffer length failure, not aligned to dword"); +- status = IXGBE_ERR_INVALID_ARGUMENT; +- goto rel_out; ++ return IXGBE_ERR_INVALID_ARGUMENT; + } + + dword_len = length >> 2; +@@ -3609,7 +3783,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, + */ + for (i = 0; i < dword_len; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, +- i, cpu_to_le32(bp->u32arr[i])); ++ i, cpu_to_le32(buffer[i])); + + /* Setting this bit tells the ARC that a new command is pending. */ + IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); +@@ -3623,11 +3797,54 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, + + /* Check command successful completion. 
*/ + if ((timeout && i == timeout) || +- !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { +- hw_dbg(hw, "Command has failed with no status valid.\n"); +- status = IXGBE_ERR_HOST_INTERFACE_COMMAND; +- goto rel_out; ++ !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) ++ return IXGBE_ERR_HOST_INTERFACE_COMMAND; ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_host_interface_command - Issue command to manageability block ++ * @hw: pointer to the HW structure ++ * @buffer: contains the command to write and where the return status will ++ * be placed ++ * @length: length of buffer, must be multiple of 4 bytes ++ * @timeout: time in ms to wait for command completion ++ * @return_data: read and return data from the buffer (true) or not (false) ++ * Needed because FW structures are big endian and decoding of ++ * these fields can be 8 bit or 16 bit based on command. Decoding ++ * is not easily understood without making a table of commands. ++ * So we will leave this up to the caller to read back the data ++ * in these cases. ++ * ++ * Communicates with the manageability block. On success return 0 ++ * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. 
++ **/ ++s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, ++ u32 length, u32 timeout, ++ bool return_data) ++{ ++ u32 hdr_size = sizeof(struct ixgbe_hic_hdr); ++ union { ++ struct ixgbe_hic_hdr hdr; ++ u32 u32arr[1]; ++ } *bp = buffer; ++ u16 buf_len, dword_len; ++ s32 status; ++ u32 bi; ++ ++ if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { ++ hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); ++ return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } ++ /* Take management host interface semaphore */ ++ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); ++ if (status) ++ return status; ++ ++ status = ixgbe_hic_unlocked(hw, buffer, length, timeout); ++ if (status) ++ goto rel_out; + + if (!return_data) + goto rel_out; +@@ -3674,6 +3891,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number ++ * @len: length of driver_ver string ++ * @driver_ver: driver string + * + * Sends driver version number to firmware through the manageability + * block. On success return 0 +@@ -3681,7 +3900,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
+ **/ + s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, +- u8 build, u8 sub) ++ u8 build, u8 sub, __always_unused u16 len, ++ __always_unused const char *driver_ver) + { + struct ixgbe_hic_drv_info fw_cmd; + int i; +@@ -4033,15 +4253,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + speedcnt++; + highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; + +- /* If we already have link at this speed, just jump out */ +- status = hw->mac.ops.check_link(hw, &link_speed, &link_up, +- false); +- if (status) +- return status; +- +- if (link_speed == IXGBE_LINK_SPEED_10GB_FULL && link_up) +- goto out; +- + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: +@@ -4093,15 +4304,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; + +- /* If we already have link at this speed, just jump out */ +- status = hw->mac.ops.check_link(hw, &link_speed, &link_up, +- false); +- if (status) +- return status; +- +- if (link_speed == IXGBE_LINK_SPEED_1GB_FULL && link_up) +- goto out; +- + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: +@@ -4208,4 +4410,23 @@ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, + hw_dbg(hw, "Failed to write Rx Rate Select RS0\n"); + return; + } ++ ++ /* Set RS1 */ ++ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, ++ IXGBE_I2C_EEPROM_DEV_ADDR2, ++ &eeprom_data); ++ if (status) { ++ hw_dbg(hw, "Failed to read Rx Rate Select RS1\n"); ++ return; ++ } ++ ++ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; ++ ++ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, ++ IXGBE_I2C_EEPROM_DEV_ADDR2, ++ eeprom_data); ++ if (status) { ++ hw_dbg(hw, "Failed to write Rx Rate Select RS1\n"); ++ return; ++ } + } +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +index 6d4c260..e083732 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +@@ -49,6 +49,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); + + s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); + s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); ++s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw); + + s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); + s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +@@ -110,9 +111,13 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); + void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); + s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); + s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, +- u8 build, u8 ver); ++ u8 build, u8 ver, u16 len, const char *str); ++u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); + s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, + u32 timeout, bool return_data); ++s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout); ++s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, ++ u32 (*data)[FW_PHY_ACT_DATA_COUNT]); + void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); + bool ixgbe_mng_present(struct ixgbe_hw *hw); + bool ixgbe_mng_enabled(struct ixgbe_hw *hw); +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +index a137e06..6b23b74 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +@@ -172,6 +172,7 @@ static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw) + case IXGBE_DEV_ID_82598_BX: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_X550EM_X_KR: ++ case IXGBE_DEV_ID_X550EM_X_XFI: + return SUPPORTED_10000baseKR_Full; + default: + 
return SUPPORTED_10000baseKX4_Full | +@@ -237,6 +238,7 @@ static int ixgbe_get_settings(struct net_device *netdev, + case ixgbe_phy_tn: + case ixgbe_phy_aq: + case ixgbe_phy_x550em_ext_t: ++ case ixgbe_phy_fw: + case ixgbe_phy_cu_unknown: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; +@@ -394,6 +396,9 @@ static int ixgbe_set_settings(struct net_device *netdev, + if (ecmd->advertising & ADVERTISED_100baseT_Full) + advertised |= IXGBE_LINK_SPEED_100_FULL; + ++ if (ecmd->advertising & ADVERTISED_10baseT_Full) ++ advertised |= IXGBE_LINK_SPEED_10_FULL; ++ + if (old == advertised) + return err; + /* this sets the link speed and restarts auto-neg */ +@@ -491,6 +496,59 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; ++ ++ /* 2018/11/14 pega-julia modified start */ ++ /* Purpose : Add for light OOB LED static. */ ++ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u16 regVal; ++ s32 rc; ++ ++ /* For M88E1512, write 3 in (page 0,register 22)[Page Address Register] to goto page 3 */ ++ regVal = 0x03; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ ++ /* For M88E1512, read from (page 3, register 16)[LED Function Control Register] */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); ++ /*hw_err(hw, "[Pega Debug] : current register value = 0x%x\n", regVal);*/ ++ if (rc) ++ hw_err(hw, "led function control register read failed, rc:%x\n", rc); ++ ++ if (data == 0) /* Turn off OOB LED. */ ++ { ++ /* For M88E1512, write to (page 3, register 16) with force led off */ ++ regVal = (regVal & 0xFF00) | 0x0088; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ else if (data == 1) /* Turn on OOB LED. 
*/ ++ { ++ /* For M88E1512, write to (page 3, register 16) with force led on */ ++ regVal = (regVal & 0xFF00) | 0x0099; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ else /* Switch OOB LED back to normal. */ ++ { ++ /* For M88E1512, set led back to nornmal in (page 3, register 16). */ ++ regVal = (regVal & 0xFF00) | 0x0017; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write 0 in (page 0, register 22) to back to page 0 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ ++ /* 2018/11/14 pega-julia modified end */ + } + + static int ixgbe_get_regs_len(struct net_device *netdev) +@@ -2219,22 +2277,61 @@ static int ixgbe_set_phys_id(struct net_device *netdev, + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + ++ /* Modified by hilbert for C22 MDI directly access */ ++ s32 rc; ++ u16 regVal; ++ /* Modified by hilbert done */ ++ ++ if (!hw->mac.ops.led_on || !hw->mac.ops.led_off) ++ return -EOPNOTSUPP; ++ + switch (state) { + case ETHTOOL_ID_ACTIVE: + adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + return 2; + + case ETHTOOL_ID_ON: +- hw->mac.ops.led_on(hw, hw->bus.func); ++ hw->mac.ops.led_on(hw, hw->mac.led_link_act); + break; + + case ETHTOOL_ID_OFF: +- hw->mac.ops.led_off(hw, hw->bus.func); ++ hw->mac.ops.led_off(hw, hw->mac.led_link_act); + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ +- IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); ++ /* Modified by hilbert for C22 MDI directly access */ ++ if (hw->mac.type == ixgbe_mac_x550em_a) { ++ /* For M88E1512, to select page 3 in register 22 */ ++ regVal = 0x03; ++ rc = 
hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, read from page 3, register 16 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "led function control register read failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write to page 3 register 16 with force led on */ ++ regVal = (regVal & 0xFF00) | 0x0017; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write page 22 back to default 0 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ } else { ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); ++ } + break; + } + +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +index a5428b6..66753f1 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +@@ -84,7 +84,9 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { + [board_X540] = &ixgbe_X540_info, + [board_X550] = &ixgbe_X550_info, + [board_X550EM_x] = &ixgbe_X550EM_x_info, ++ [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info, + [board_x550em_a] = &ixgbe_x550em_a_info, ++ [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info, + }; + + /* ixgbe_pci_tbl - PCI Device ID Table +@@ -129,9 +131,11 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, + {PCI_VDEVICE(INTEL, 
IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, +@@ -139,6 +143,8 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw }, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw }, + /* required last entry */ + {0, } + }; +@@ -179,6 +185,7 @@ MODULE_VERSION(DRV_VERSION); + static struct workqueue_struct *ixgbe_wq; + + static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); ++static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); + + static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, + u32 reg, u16 *value) +@@ -374,7 +381,7 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) + if (ixgbe_removed(reg_addr)) + return IXGBE_FAILED_READ_REG; + if (unlikely(hw->phy.nw_mng_if_sel & +- IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) { ++ IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) { + struct ixgbe_adapter *adapter; + int i; + +@@ -2446,6 +2453,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr = adapter->interrupt_event; ++ s32 rc; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; +@@ -2484,6 +2492,12 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) + return; + + break; ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ rc = hw->phy.ops.check_overtemp(hw); ++ if (rc != 
IXGBE_ERR_OVERTEMP) ++ return; ++ break; + default: + if (adapter->hw.mac.type >= ixgbe_mac_X540) + return; +@@ -2530,6 +2544,18 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) + return; + } + return; ++ case ixgbe_mac_x550em_a: ++ if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) { ++ adapter->interrupt_event = eicr; ++ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; ++ ixgbe_service_event_schedule(adapter); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ++ IXGBE_EICR_GPI_SDP0_X550EM_a); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR, ++ IXGBE_EICR_GPI_SDP0_X550EM_a); ++ } ++ return; ++ case ixgbe_mac_X550: + case ixgbe_mac_X540: + if (!(eicr & IXGBE_EICR_TS)) + return; +@@ -5035,7 +5061,7 @@ static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) + static void ixgbe_configure(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; +- ++ + ixgbe_configure_pb(adapter); + #ifdef CONFIG_IXGBE_DCB + ixgbe_configure_dcb(adapter); +@@ -5045,10 +5071,9 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) + * the VLVF registers will not be populated + */ + ixgbe_configure_virtualization(adapter); +- + ixgbe_set_rx_mode(adapter->netdev); + ixgbe_restore_vlan(adapter); +- ++ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: +@@ -5075,7 +5100,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) + default: + break; + } +- + #ifdef CONFIG_IXGBE_DCA + /* configure DCA */ + if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) +@@ -5291,6 +5315,8 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) + + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); ++ if (adapter->hw.phy.type == ixgbe_phy_fw) ++ ixgbe_watchdog_link_is_down(adapter); + ixgbe_down(adapter); + /* + * If SR-IOV enabled then wait a bit before bringing the adapter +@@ -5706,6 +5732,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) + break; + case ixgbe_mac_x550em_a: + 
adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE; ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; ++ break; ++ default: ++ break; ++ } + /* fall through */ + case ixgbe_mac_X550EM_x: + #ifdef CONFIG_IXGBE_DCB +@@ -5719,6 +5753,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) + #endif /* IXGBE_FCOE */ + /* Fall Through */ + case ixgbe_mac_X550: ++ if (hw->mac.type == ixgbe_mac_X550) ++ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + #ifdef CONFIG_IXGBE_DCA + adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; + #endif +@@ -6093,29 +6129,28 @@ int ixgbe_open(struct net_device *netdev) + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int err, queues; +- ++ + /* disallow open during test */ + if (test_bit(__IXGBE_TESTING, &adapter->state)) + return -EBUSY; +- ++ + netif_carrier_off(netdev); +- ++ + /* allocate transmit descriptors */ + err = ixgbe_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; +- ++ + /* allocate receive descriptors */ + err = ixgbe_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; +- ++ + ixgbe_configure(adapter); +- + err = ixgbe_request_irq(adapter); + if (err) + goto err_req_irq; +- ++ + /* Notify the stack of the actual queue counts. 
*/ + if (adapter->num_rx_pools > 1) + queues = adapter->num_rx_queues_per_pool; +@@ -6791,6 +6826,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) + case IXGBE_LINK_SPEED_100_FULL: + speed_str = "100 Mbps"; + break; ++ case IXGBE_LINK_SPEED_10_FULL: ++ speed_str = "10 Mbps"; ++ break; + default: + speed_str = "unknown speed"; + break; +@@ -8013,6 +8051,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) + return ixgbe_ptp_set_ts_config(adapter, req); + case SIOCGHWTSTAMP: + return ixgbe_ptp_get_ts_config(adapter, req); ++ case SIOCGMIIPHY: ++ if (!adapter->hw.phy.ops.read_reg) ++ return -EOPNOTSUPP; ++ /* fall through */ + default: + return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); + } +@@ -9480,6 +9522,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + hw->mac.ops = *ii->mac_ops; + hw->mac.type = ii->mac; + hw->mvals = ii->mvals; ++ if (ii->link_ops) ++ hw->link.ops = *ii->link_ops; + + /* EEPROM */ + hw->eeprom.ops = *ii->eeprom_ops; +@@ -9747,7 +9791,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + "representative who provided you with this " + "hardware.\n"); + } +- strcpy(netdev->name, "eth%d"); ++ ++ /*2019/04/11, change OOB from eth2 to eth0, for pegatron fn-6524-dn-f, Peter5_Lin*/ ++ if(!strcmp("0000:04:00.0", pci_name(pdev))) ++ strcpy(netdev->name, "eth0"); ++ else if(!strcmp("0000:04:00.1", pci_name(pdev))) ++ strcpy(netdev->name, "eth1"); ++ else if(!strcmp("0000:03:00.0", pci_name(pdev))) ++ strcpy(netdev->name, "eth2"); ++ else if(!strcmp("0000:03:00.1", pci_name(pdev))) ++ strcpy(netdev->name, "eth3"); ++ + err = register_netdev(netdev); + if (err) + goto err_register; +@@ -9777,8 +9831,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + * since os does not support feature + */ + if (hw->mac.ops.set_fw_drv_ver) +- hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, +- 0xFF); ++ 
hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, ++ sizeof(ixgbe_driver_version) - 1, ++ ixgbe_driver_version); + + /* add san mac addr to netdev */ + ixgbe_add_sanmac_netdev(netdev); +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +index b17464e..d914b40 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +@@ -109,8 +109,8 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) + * + * Returns an error code on error. + */ +-static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, +- u16 reg, u16 *val, bool lock) ++s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, ++ u16 reg, u16 *val, bool lock) + { + u32 swfw_mask = hw->phy.phy_semaphore_mask; + int max_retry = 3; +@@ -178,36 +178,6 @@ static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, + } + + /** +- * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation +- * @hw: pointer to the hardware structure +- * @addr: I2C bus address to read from +- * @reg: I2C device register to read from +- * @val: pointer to location to receive read value +- * +- * Returns an error code on error. +- */ +-s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, +- u16 reg, u16 *val) +-{ +- return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); +-} +- +-/** +- * ixgbe_read_i2c_combined_generic_unlocked - Unlocked I2C read combined +- * @hw: pointer to the hardware structure +- * @addr: I2C bus address to read from +- * @reg: I2C device register to read from +- * @val: pointer to location to receive read value +- * +- * Returns an error code on error. 
+- */ +-s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, +- u16 reg, u16 *val) +-{ +- return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); +-} +- +-/** + * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to +@@ -217,8 +187,8 @@ s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, + * + * Returns an error code on error. + */ +-static s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, +- u16 reg, u16 val, bool lock) ++s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, ++ u16 reg, u16 val, bool lock) + { + u32 swfw_mask = hw->phy.phy_semaphore_mask; + int max_retry = 1; +@@ -273,33 +243,41 @@ static s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, + } + + /** +- * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation +- * @hw: pointer to the hardware structure +- * @addr: I2C bus address to write to +- * @reg: I2C device register to write to +- * @val: value to write ++ * ixgbe_probe_phy - Probe a single address for a PHY ++ * @hw: pointer to hardware structure ++ * @phy_addr: PHY address to probe + * +- * Returns an error code on error. +- */ +-s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, +- u8 addr, u16 reg, u16 val) ++ * Returns true if PHY found ++ **/ ++static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr) + { +- return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); +-} ++ u16 ext_ability = 0; + +-/** +- * ixgbe_write_i2c_combined_generic_unlocked - Unlocked I2C write combined +- * @hw: pointer to the hardware structure +- * @addr: I2C bus address to write to +- * @reg: I2C device register to write to +- * @val: value to write +- * +- * Returns an error code on error. 
+- */ +-s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, +- u8 addr, u16 reg, u16 val) +-{ +- return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); ++ hw->phy.mdio.prtad = phy_addr; ++ if (mdio45_probe(&hw->phy.mdio, phy_addr) != 0) { ++ return false; ++ } ++ ++ if (ixgbe_get_phy_id(hw)) { ++ return false; ++ } ++ ++ hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); ++ ++ if (hw->phy.type == ixgbe_phy_unknown) { ++ hw->phy.ops.read_reg(hw, ++ MDIO_PMA_EXTABLE, ++ MDIO_MMD_PMAPMD, ++ &ext_ability); ++ if (ext_ability & ++ (MDIO_PMA_EXTABLE_10GBT | ++ MDIO_PMA_EXTABLE_1000BT)) ++ hw->phy.type = ixgbe_phy_cu_unknown; ++ else ++ hw->phy.type = ixgbe_phy_generic; ++ } ++ ++ return true; + } + + /** +@@ -311,7 +289,7 @@ s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, + s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) + { + u32 phy_addr; +- u16 ext_ability = 0; ++ u32 status = IXGBE_ERR_PHY_ADDR_INVALID; + + if (!hw->phy.phy_semaphore_mask) { + if (hw->bus.lan_id) +@@ -320,37 +298,34 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + } + +- if (hw->phy.type == ixgbe_phy_unknown) { +- for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { +- hw->phy.mdio.prtad = phy_addr; +- if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) { +- ixgbe_get_phy_id(hw); +- hw->phy.type = +- ixgbe_get_phy_type_from_id(hw->phy.id); +- +- if (hw->phy.type == ixgbe_phy_unknown) { +- hw->phy.ops.read_reg(hw, +- MDIO_PMA_EXTABLE, +- MDIO_MMD_PMAPMD, +- &ext_ability); +- if (ext_ability & +- (MDIO_PMA_EXTABLE_10GBT | +- MDIO_PMA_EXTABLE_1000BT)) +- hw->phy.type = +- ixgbe_phy_cu_unknown; +- else +- hw->phy.type = +- ixgbe_phy_generic; +- } ++ if (hw->phy.type != ixgbe_phy_unknown) ++ return 0; + +- return 0; +- } ++ if (hw->phy.nw_mng_if_sel) { ++ phy_addr = (hw->phy.nw_mng_if_sel & ++ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> ++ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; ++ if 
(ixgbe_probe_phy(hw, phy_addr)) ++ return 0; ++ else ++ return IXGBE_ERR_PHY_ADDR_INVALID; ++ } ++ ++ for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { ++ if (ixgbe_probe_phy(hw, phy_addr)) { ++ status = 0; ++ break; + } +- /* indicate no PHY found */ +- hw->phy.mdio.prtad = MDIO_PRTAD_NONE; +- return IXGBE_ERR_PHY_ADDR_INVALID; + } +- return 0; ++ ++ /* Certain media types do not have a phy so an address will not ++ * be found and the code will take this path. Caller has to ++ * decide if it is an error or not. ++ */ ++ if (status) ++ hw->phy.mdio.prtad = MDIO_PRTAD_NONE; ++ ++ return status; + } + + /** +@@ -416,7 +391,8 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) + case TN1010_PHY_ID: + phy_type = ixgbe_phy_tn; + break; +- case X550_PHY_ID: ++ case X550_PHY_ID2: ++ case X550_PHY_ID3: + case X540_PHY_ID: + phy_type = ixgbe_phy_aq; + break; +@@ -427,6 +403,7 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) + phy_type = ixgbe_phy_nl; + break; + case X557_PHY_ID: ++ case X557_PHY_ID2: + phy_type = ixgbe_phy_x550em_ext_t; + break; + default: +@@ -477,11 +454,27 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) + */ + for (i = 0; i < 30; i++) { + msleep(100); +- hw->phy.ops.read_reg(hw, MDIO_CTRL1, +- MDIO_MMD_PHYXS, &ctrl); +- if (!(ctrl & MDIO_CTRL1_RESET)) { +- udelay(2); +- break; ++ if (hw->phy.type == ixgbe_phy_x550em_ext_t) { ++ status = hw->phy.ops.read_reg(hw, ++ IXGBE_MDIO_TX_VENDOR_ALARMS_3, ++ MDIO_MMD_PMAPMD, &ctrl); ++ if (status) ++ return status; ++ ++ if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { ++ udelay(2); ++ break; ++ } ++ } else { ++ status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, ++ MDIO_MMD_PHYXS, &ctrl); ++ if (status) ++ return status; ++ ++ if (!(ctrl & MDIO_CTRL1_RESET)) { ++ udelay(2); ++ break; ++ } + } + } + +@@ -494,6 +487,98 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) + } + + /** ++ * ixgbe_read_phy_mdio - Reads a value from a specified PHY register without ++ * 
the SWFW lock. This Clasue 22 API is patched by Hilbert ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit address of PHY register to read ++ * @phy_data: Pointer to read data from PHY register ++ **/ ++s32 ixgbe_read_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, ++ u16 *phy_data) ++{ ++ u32 i, data, command; ++ ++ /* Setup and write the read command */ ++ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | ++ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | ++ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC | ++ IXGBE_MSCA_MDI_COMMAND; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); ++ ++ /* Check every 10 usec to see if the address cycle completed. ++ * The MDI Command bit will clear when the operation is ++ * complete ++ */ ++ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { ++ udelay(10); ++ ++ command = IXGBE_READ_REG(hw, IXGBE_MSCA); ++ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) ++ break; ++ } ++ ++ ++ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { ++ hw_dbg(hw, "PHY address command did not complete.\n"); ++ return IXGBE_ERR_PHY; ++ } ++ ++ /* Read operation is complete. Get the data ++ * from MSRWD ++ */ ++ data = IXGBE_READ_REG(hw, IXGBE_MSRWD); ++ data >>= IXGBE_MSRWD_READ_DATA_SHIFT; ++ *phy_data = (u16)(data); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_write_phy_reg_mdio - Writes a value to specified PHY register ++ * without SWFW lock. 
This Clause 22 API is patched by Hilbert ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit PHY register to write ++ * @device_type: 5 bit device type ++ * @phy_data: Data to write to the PHY register ++ **/ ++s32 ixgbe_write_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 phy_data) ++{ ++ u32 i, command; ++ ++ /* Put the data in the MDI single read and write data register*/ ++ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); ++ ++ /* Setup and write the write command */ ++ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | ++ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | ++ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | ++ IXGBE_MSCA_MDI_COMMAND; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); ++ ++ /* ++ * Check every 10 usec to see if the address cycle completed. ++ * The MDI Command bit will clear when the operation is ++ * complete ++ */ ++ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { ++ udelay(10); ++ ++ command = IXGBE_READ_REG(hw, IXGBE_MSCA); ++ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) ++ break; ++ } ++ ++ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { ++ hw_dbg(hw, "PHY write cmd didn't complete\n"); ++ return IXGBE_ERR_PHY; ++ } ++ ++ return 0; ++} ++/** + * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without + * the SWFW lock + * @hw: pointer to hardware structure +@@ -705,53 +790,52 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) + + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + +- if (speed & IXGBE_LINK_SPEED_10GB_FULL) { +- /* Set or unset auto-negotiation 10G advertisement */ +- hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, +- MDIO_MMD_AN, +- &autoneg_reg); ++ /* Set or unset auto-negotiation 10G advertisement */ ++ hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg); + +- autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; +- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) +- autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; ++ 
autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; ++ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) && ++ (speed & IXGBE_LINK_SPEED_10GB_FULL)) ++ autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + +- hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, +- MDIO_MMD_AN, +- autoneg_reg); +- } +- +- if (speed & IXGBE_LINK_SPEED_1GB_FULL) { +- /* Set or unset auto-negotiation 1G advertisement */ +- hw->phy.ops.read_reg(hw, +- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, +- MDIO_MMD_AN, +- &autoneg_reg); ++ hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg); + +- autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; +- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) +- autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; ++ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, ++ MDIO_MMD_AN, &autoneg_reg); + +- hw->phy.ops.write_reg(hw, +- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, +- MDIO_MMD_AN, +- autoneg_reg); ++ if (hw->mac.type == ixgbe_mac_X550) { ++ /* Set or unset auto-negotiation 5G advertisement */ ++ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE; ++ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) && ++ (speed & IXGBE_LINK_SPEED_5GB_FULL)) ++ autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE; ++ ++ /* Set or unset auto-negotiation 2.5G advertisement */ ++ autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE; ++ if ((hw->phy.autoneg_advertised & ++ IXGBE_LINK_SPEED_2_5GB_FULL) && ++ (speed & IXGBE_LINK_SPEED_2_5GB_FULL)) ++ autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE; + } + +- if (speed & IXGBE_LINK_SPEED_100_FULL) { +- /* Set or unset auto-negotiation 100M advertisement */ +- hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, +- MDIO_MMD_AN, +- &autoneg_reg); ++ /* Set or unset auto-negotiation 1G advertisement */ ++ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; ++ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) && ++ (speed & IXGBE_LINK_SPEED_1GB_FULL)) ++ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; + +- autoneg_reg &= 
~(ADVERTISE_100FULL | +- ADVERTISE_100HALF); +- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) +- autoneg_reg |= ADVERTISE_100FULL; ++ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, ++ MDIO_MMD_AN, autoneg_reg); + +- hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, +- MDIO_MMD_AN, +- autoneg_reg); +- } ++ /* Set or unset auto-negotiation 100M advertisement */ ++ hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg); ++ ++ autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF); ++ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) && ++ (speed & IXGBE_LINK_SPEED_100_FULL)) ++ autoneg_reg |= ADVERTISE_100FULL; ++ ++ hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg); + + /* Blocked by MNG FW so don't reset PHY */ + if (ixgbe_check_reset_blocked(hw)) +@@ -778,9 +862,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) + { +- +- /* +- * Clear autoneg_advertised and set new values based on input link ++ /* Clear autoneg_advertised and set new values based on input link + * speed. 
+ */ + hw->phy.autoneg_advertised = 0; +@@ -788,14 +870,24 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + ++ if (speed & IXGBE_LINK_SPEED_5GB_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL; ++ ++ if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; ++ + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (speed & IXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + ++ if (speed & IXGBE_LINK_SPEED_10_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL; ++ + /* Setup link based on the new speed settings */ +- hw->phy.ops.setup_link(hw); ++ if (hw->phy.ops.setup_link) ++ hw->phy.ops.setup_link(hw); + + return 0; + } +@@ -830,6 +922,7 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; + break; + case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_x550em_a: + hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; + break; + default: +@@ -986,40 +1079,6 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) + } + + /** +- * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version +- * @hw: pointer to hardware structure +- * @firmware_version: pointer to the PHY Firmware Version +- **/ +-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, +- u16 *firmware_version) +-{ +- s32 status; +- +- status = hw->phy.ops.read_reg(hw, TNX_FW_REV, +- MDIO_MMD_VEND1, +- firmware_version); +- +- return status; +-} +- +-/** +- * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version +- * @hw: pointer to hardware structure +- * @firmware_version: pointer to the PHY Firmware Version +- **/ +-s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, +- u16 *firmware_version) +-{ +- s32 status; +- +- status 
= hw->phy.ops.read_reg(hw, AQ_FW_REV, +- MDIO_MMD_VEND1, +- firmware_version); +- +- return status; +-} +- +-/** + * ixgbe_reset_phy_nl - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +@@ -2398,9 +2457,7 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) + if (!on && ixgbe_mng_present(hw)) + return 0; + +- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, +- ®); ++ status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, ®); + if (status) + return status; + +@@ -2412,8 +2469,6 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) + reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; + } + +- status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, +- reg); ++ status = hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, reg); + return status; + } +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +index cc735ec..e9f94ee 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +@@ -84,8 +84,9 @@ + #define IXGBE_CS4227_GLOBAL_ID_LSB 0 + #define IXGBE_CS4227_GLOBAL_ID_MSB 1 + #define IXGBE_CS4227_SCRATCH 2 +-#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */ +-#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */ ++#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F ++#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */ ++#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */ + #define IXGBE_CS4227_RESET_PENDING 0x1357 + #define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 + #define IXGBE_CS4227_RETRIES 15 +@@ -154,6 +155,12 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); + s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); ++#if 1 //by hilbert ++s32 ixgbe_read_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 
*phy_data); ++s32 ixgbe_write_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 phy_data); ++#endif + s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); + s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, +@@ -168,10 +175,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up); + s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); +-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, +- u16 *firmware_version); +-s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, +- u16 *firmware_version); + + s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); + s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); +@@ -195,12 +198,8 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); + s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 eeprom_data); +-s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, +- u16 reg, u16 *val); +-s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, +- u16 reg, u16 *val); +-s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, +- u16 reg, u16 val); +-s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, +- u16 reg, u16 val); ++s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, ++ u16 *val, bool lock); ++s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, ++ u16 val, bool lock); + #endif /* _IXGBE_PHY_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +index 31d82e3..531990b 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +@@ -85,6 +85,7 @@ + #define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC + #define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD + #define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE ++#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0 + 
#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 + #define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 + #define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 +@@ -92,6 +93,8 @@ + #define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 + #define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 + #define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE ++#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 ++#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 + + /* VF Device IDs */ + #define IXGBE_DEV_ID_82599_VF 0x10ED +@@ -1393,8 +1396,10 @@ struct ixgbe_thermal_sensor_data { + #define TN1010_PHY_ID 0x00A19410 + #define TNX_FW_REV 0xB + #define X540_PHY_ID 0x01540200 +-#define X550_PHY_ID 0x01540220 ++#define X550_PHY_ID2 0x01540223 ++#define X550_PHY_ID3 0x01540221 + #define X557_PHY_ID 0x01540240 ++#define X557_PHY_ID2 0x01540250 + #define QT2022_PHY_ID 0x0043A400 + #define ATH_PHY_ID 0x03429050 + #define AQ_FW_REV 0x20 +@@ -1513,6 +1518,8 @@ enum { + #define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) + + /* VMOLR bitmasks */ ++#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */ ++#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */ + #define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ + #define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ + #define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ +@@ -1928,6 +1935,7 @@ enum { + #define IXGBE_LINKS_SPEED_10G_82599 0x30000000 + #define IXGBE_LINKS_SPEED_1G_82599 0x20000000 + #define IXGBE_LINKS_SPEED_100_82599 0x10000000 ++#define IXGBE_LINKS_SPEED_10_X550EM_A 0 + #define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ + #define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ + +@@ -2633,6 +2641,7 @@ enum ixgbe_fdir_pballoc_type { + #define FW_CEM_UNUSED_VER 0x0 + #define FW_CEM_MAX_RETRIES 3 + #define FW_CEM_RESP_STATUS_SUCCESS 0x1 ++#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */ + #define FW_READ_SHADOW_RAM_CMD 0x31 + #define FW_READ_SHADOW_RAM_LEN 0x6 + #define FW_WRITE_SHADOW_RAM_CMD 0x33 +@@ -2658,6 +2667,59 @@ enum 
ixgbe_fdir_pballoc_type { + #define FW_INT_PHY_REQ_LEN 10 + #define FW_INT_PHY_REQ_READ 0 + #define FW_INT_PHY_REQ_WRITE 1 ++#define FW_PHY_ACT_REQ_CMD 5 ++#define FW_PHY_ACT_DATA_COUNT 4 ++#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT) ++#define FW_PHY_ACT_INIT_PHY 1 ++#define FW_PHY_ACT_SETUP_LINK 2 ++#define FW_PHY_ACT_LINK_SPEED_10 BIT(0) ++#define FW_PHY_ACT_LINK_SPEED_100 BIT(1) ++#define FW_PHY_ACT_LINK_SPEED_1G BIT(2) ++#define FW_PHY_ACT_LINK_SPEED_2_5G BIT(3) ++#define FW_PHY_ACT_LINK_SPEED_5G BIT(4) ++#define FW_PHY_ACT_LINK_SPEED_10G BIT(5) ++#define FW_PHY_ACT_LINK_SPEED_20G BIT(6) ++#define FW_PHY_ACT_LINK_SPEED_25G BIT(7) ++#define FW_PHY_ACT_LINK_SPEED_40G BIT(8) ++#define FW_PHY_ACT_LINK_SPEED_50G BIT(9) ++#define FW_PHY_ACT_LINK_SPEED_100G BIT(10) ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16 ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3 << \ ++ HW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u ++#define FW_PHY_ACT_SETUP_LINK_LP BIT(18) ++#define FW_PHY_ACT_SETUP_LINK_HP BIT(19) ++#define FW_PHY_ACT_SETUP_LINK_EEE BIT(20) ++#define FW_PHY_ACT_SETUP_LINK_AN BIT(22) ++#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN BIT(0) ++#define FW_PHY_ACT_GET_LINK_INFO 3 ++#define FW_PHY_ACT_GET_LINK_INFO_EEE BIT(19) ++#define FW_PHY_ACT_GET_LINK_INFO_FC_TX BIT(20) ++#define FW_PHY_ACT_GET_LINK_INFO_FC_RX BIT(21) ++#define FW_PHY_ACT_GET_LINK_INFO_POWER BIT(22) ++#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE BIT(24) ++#define FW_PHY_ACT_GET_LINK_INFO_TEMP BIT(25) ++#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX BIT(28) ++#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX BIT(29) ++#define FW_PHY_ACT_FORCE_LINK_DOWN 4 ++#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF BIT(0) ++#define FW_PHY_ACT_PHY_SW_RESET 5 ++#define FW_PHY_ACT_PHY_HW_RESET 6 ++#define FW_PHY_ACT_GET_PHY_INFO 7 ++#define FW_PHY_ACT_UD_2 0x1002 
++#define FW_PHY_ACT_UD_2_10G_KR_EEE BIT(6) ++#define FW_PHY_ACT_UD_2_10G_KX4_EEE BIT(5) ++#define FW_PHY_ACT_UD_2_1G_KX_EEE BIT(4) ++#define FW_PHY_ACT_UD_2_10G_T_EEE BIT(3) ++#define FW_PHY_ACT_UD_2_1G_T_EEE BIT(2) ++#define FW_PHY_ACT_UD_2_100M_TX_EEE BIT(1) ++#define FW_PHY_ACT_RETRIES 50 ++#define FW_PHY_INFO_SPEED_MASK 0xFFFu ++#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u ++#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu + + /* Host Interface Command Structures */ + struct ixgbe_hic_hdr { +@@ -2700,6 +2762,16 @@ struct ixgbe_hic_drv_info { + u16 pad2; /* end spacing to ensure length is mult. of dword2 */ + }; + ++struct ixgbe_hic_drv_info2 { ++ struct ixgbe_hic_hdr hdr; ++ u8 port_num; ++ u8 ver_sub; ++ u8 ver_build; ++ u8 ver_min; ++ u8 ver_maj; ++ char driver_string[FW_CEM_DRIVER_VERSION_SIZE]; ++}; ++ + /* These need to be dword aligned */ + struct ixgbe_hic_read_shadow_ram { + union ixgbe_hic_hdr2 hdr; +@@ -2748,6 +2820,19 @@ struct ixgbe_hic_internal_phy_resp { + __be32 read_data; + }; + ++struct ixgbe_hic_phy_activity_req { ++ struct ixgbe_hic_hdr hdr; ++ u8 port_number; ++ u8 pad; ++ __le16 activity_id; ++ __be32 data[FW_PHY_ACT_DATA_COUNT]; ++}; ++ ++struct ixgbe_hic_phy_activity_resp { ++ struct ixgbe_hic_hdr hdr; ++ __be32 data[FW_PHY_ACT_DATA_COUNT]; ++}; ++ + /* Transmit Descriptor - Advanced */ + union ixgbe_adv_tx_desc { + struct { +@@ -2863,6 +2948,7 @@ typedef u32 ixgbe_autoneg_advertised; + /* Link speed */ + typedef u32 ixgbe_link_speed; + #define IXGBE_LINK_SPEED_UNKNOWN 0 ++#define IXGBE_LINK_SPEED_10_FULL 0x0002 + #define IXGBE_LINK_SPEED_100_FULL 0x0008 + #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 + #define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 +@@ -3059,7 +3145,9 @@ enum ixgbe_phy_type { + ixgbe_phy_aq, + ixgbe_phy_x550em_kr, + ixgbe_phy_x550em_kx4, ++ ixgbe_phy_x550em_xfi, + ixgbe_phy_x550em_ext_t, ++ ixgbe_phy_ext_1g_t, + ixgbe_phy_cu_unknown, + ixgbe_phy_qt, + ixgbe_phy_xaui, +@@ -3078,6 +3166,7 @@ enum ixgbe_phy_type { + ixgbe_phy_qsfp_unknown, 
+ ixgbe_phy_sfp_unsupported, + ixgbe_phy_sgmii, ++ ixgbe_phy_fw, + ixgbe_phy_generic + }; + +@@ -3352,6 +3441,7 @@ struct ixgbe_mac_operations { + s32 (*led_off)(struct ixgbe_hw *, u32); + s32 (*blink_led_start)(struct ixgbe_hw *, u32); + s32 (*blink_led_stop)(struct ixgbe_hw *, u32); ++ s32 (*init_led_link_act)(struct ixgbe_hw *); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); +@@ -3372,9 +3462,11 @@ struct ixgbe_mac_operations { + /* Flow Control */ + s32 (*fc_enable)(struct ixgbe_hw *); + s32 (*setup_fc)(struct ixgbe_hw *); ++ void (*fc_autoneg)(struct ixgbe_hw *); + + /* Manageability interface */ +- s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); ++ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16, ++ const char *); + s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); + s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + void (*disable_rx)(struct ixgbe_hw *hw); +@@ -3416,10 +3508,24 @@ struct ixgbe_phy_operations { + s32 (*set_phy_power)(struct ixgbe_hw *, bool on); + s32 (*enter_lplu)(struct ixgbe_hw *); + s32 (*handle_lasi)(struct ixgbe_hw *hw); +- s32 (*read_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, +- u16 *value); +- s32 (*write_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, +- u16 value); ++ s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, ++ u8 *value); ++ s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, ++ u8 value); ++}; ++ ++struct ixgbe_link_operations { ++ s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); ++ s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, ++ u16 *val); ++ s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); ++ s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, ++ u16 val); ++}; ++ ++struct ixgbe_link_info { ++ struct ixgbe_link_operations ops; ++ u8 addr; + }; + + struct ixgbe_eeprom_info { +@@ -3462,6 +3568,7 @@ struct ixgbe_mac_info { + 
u8 san_mac_rar_index; + struct ixgbe_thermal_sensor_data thermal_sensor_data; + bool set_lben; ++ u8 led_link_act; + }; + + struct ixgbe_phy_info { +@@ -3477,6 +3584,8 @@ struct ixgbe_phy_info { + bool reset_disable; + ixgbe_autoneg_advertised autoneg_advertised; + ixgbe_link_speed speeds_supported; ++ ixgbe_link_speed eee_speeds_supported; ++ ixgbe_link_speed eee_speeds_advertised; + enum ixgbe_smart_speed smart_speed; + bool smart_speed_active; + bool multispeed_fiber; +@@ -3523,6 +3632,7 @@ struct ixgbe_hw { + struct ixgbe_addr_filter_info addr_ctrl; + struct ixgbe_fc_info fc; + struct ixgbe_phy_info phy; ++ struct ixgbe_link_info link; + struct ixgbe_eeprom_info eeprom; + struct ixgbe_bus_info bus; + struct ixgbe_mbx_info mbx; +@@ -3546,6 +3656,7 @@ struct ixgbe_info { + const struct ixgbe_eeprom_operations *eeprom_ops; + const struct ixgbe_phy_operations *phy_ops; + const struct ixgbe_mbx_operations *mbx_ops; ++ const struct ixgbe_link_operations *link_ops; + const u32 *mvals; + }; + +@@ -3593,17 +3704,35 @@ struct ixgbe_info { + #define IXGBE_FUSES0_REV_MASK (3u << 6) + + #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) ++#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200) + #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) + #define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) + #define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248) + #define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0) ++#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C) + #define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) + #define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) + #define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00) + #define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054) + #define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) + #define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 
0x9A00 : 0x5A00) + ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR (0x2 << 20) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN BIT(25) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN BIT(26) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN BIT(27) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10M ~(0x7 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M BIT(28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G (0x2 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G (0x3 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN (0x4 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G (0x7 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK (0x7 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART BIT(31) ++ + #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9) + #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11) + +@@ -3618,6 +3747,7 @@ struct ixgbe_info { + #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18) + #define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24) + #define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26) ++#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE BIT(28) + #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29) + #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART BIT(31) + +@@ -3627,6 +3757,8 @@ struct ixgbe_info { + #define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0) + #define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1) + ++#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE BIT(10) ++#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE BIT(11) + #define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12) + #define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19) + +@@ -3675,8 +3807,13 @@ struct ixgbe_info { + + #define IXGBE_NW_MNG_IF_SEL 0x00011178 + #define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1) +-#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23) +-#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) ++#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M BIT(17) ++#define 
IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M BIT(18) ++#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G BIT(19) ++#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G BIT(20) ++#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G BIT(21) ++#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE BIT(25) ++#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) /* X552 only */ + #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 + #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ + (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +index f2b1d48..6ea0d6a 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +@@ -95,6 +95,7 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) + { + s32 status; + u32 ctrl, i; ++ u32 swfw_mask = hw->phy.phy_semaphore_mask; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); +@@ -105,10 +106,17 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) + ixgbe_clear_tx_pending(hw); + + mac_reset_top: ++ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); ++ if (status) { ++ hw_dbg(hw, "semaphore failed with %d", status); ++ return IXGBE_ERR_SWFW_SYNC; ++ } ++ + ctrl = IXGBE_CTRL_RST; + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); + usleep_range(1000, 1200); + + /* Poll for reset bit to self-clear indicating reset is complete */ +@@ -780,8 +788,10 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) + ixgbe_link_speed speed; + bool link_up; + +- /* +- * Link should be up in order for the blink bit in the LED control ++ if (index > 3) ++ return IXGBE_ERR_PARAM; ++ ++ /* Link should be up in order for the blink bit in the LED control + * register to work. Force link and speed in the MAC if link is down. + * This will be reversed when we stop the blinking. 
+ */ +@@ -814,6 +824,9 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) + u32 macc_reg; + u32 ledctl_reg; + ++ if (index > 3) ++ return IXGBE_ERR_PARAM; ++ + /* Restore the LED to its default value. */ + ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); +@@ -851,6 +864,7 @@ static const struct ixgbe_mac_operations mac_ops_X540 = { + .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, ++ .init_led_link_act = ixgbe_init_led_link_act_generic, + .blink_led_start = &ixgbe_blink_led_start_X540, + .blink_led_stop = &ixgbe_blink_led_stop_X540, + .set_rar = &ixgbe_set_rar_generic, +@@ -866,6 +880,7 @@ static const struct ixgbe_mac_operations mac_ops_X540 = { + .set_vfta = &ixgbe_set_vfta_generic, + .fc_enable = &ixgbe_fc_enable_generic, + .setup_fc = ixgbe_setup_fc_generic, ++ .fc_autoneg = ixgbe_fc_autoneg, + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, + .init_uta_tables = &ixgbe_init_uta_tables_generic, + .setup_sfp = NULL, +@@ -911,7 +926,6 @@ static const struct ixgbe_phy_operations phy_ops_X540 = { + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, + .check_overtemp = &ixgbe_tn_check_overtemp, + .set_phy_power = &ixgbe_set_copper_phy_power, +- .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, + }; + + static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +index 77a60aa..3236248 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +@@ -28,11 +28,15 @@ + + static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed); + static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *); ++static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *); ++static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *); ++static 
s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *); + + static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) + { + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; ++ struct ixgbe_link_info *link = &hw->link; + + /* Start with X540 invariants, since so simular */ + ixgbe_get_invariants_X540(hw); +@@ -40,6 +44,46 @@ static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) + if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) + phy->ops.set_phy_power = NULL; + ++ link->addr = IXGBE_CS4227; ++ ++ return 0; ++} ++ ++static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_phy_info *phy = &hw->phy; ++ ++ /* Start with X540 invariants, since so similar */ ++ ixgbe_get_invariants_X540(hw); ++ ++ phy->ops.set_phy_power = NULL; ++ ++ return 0; ++} ++ ++static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ struct ixgbe_phy_info *phy = &hw->phy; ++ ++ /* Start with X540 invariants, since so simular */ ++ ixgbe_get_invariants_X540(hw); ++ ++ if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) ++ phy->ops.set_phy_power = NULL; ++ ++ return 0; ++} ++ ++static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_phy_info *phy = &hw->phy; ++ ++ /* Start with X540 invariants, since so similar */ ++ ixgbe_get_invariants_X540(hw); ++ ++ phy->ops.set_phy_power = NULL; ++ + return 0; + } + +@@ -69,8 +113,7 @@ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) + */ + static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) + { +- return hw->phy.ops.read_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, +- value); ++ return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value); + } + + /** +@@ -83,8 +126,7 @@ static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) + */ + static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) + { +- return 
hw->phy.ops.write_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, +- value); ++ return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value); + } + + /** +@@ -290,6 +332,9 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) + case IXGBE_DEV_ID_X550EM_X_KX4: + hw->phy.type = ixgbe_phy_x550em_kx4; + break; ++ case IXGBE_DEV_ID_X550EM_X_XFI: ++ hw->phy.type = ixgbe_phy_x550em_xfi; ++ break; + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: +@@ -301,9 +346,21 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + /* Fallthrough */ +- case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + return ixgbe_identify_phy_generic(hw); ++ case IXGBE_DEV_ID_X550EM_X_1G_T: ++ hw->phy.type = ixgbe_phy_ext_1g_t; ++ break; ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ hw->phy.type = ixgbe_phy_fw; ++ hw->phy.ops.read_reg = NULL; ++ hw->phy.ops.write_reg = NULL; ++ if (hw->bus.lan_id) ++ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; ++ else ++ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; ++ break; + default: + break; + } +@@ -322,6 +379,280 @@ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + return IXGBE_NOT_IMPLEMENTED; + } + ++/** ++ * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation ++ * @hw: pointer to the hardware structure ++ * @addr: I2C bus address to read from ++ * @reg: I2C device register to read from ++ * @val: pointer to location to receive read value ++ * ++ * Returns an error code on error. 
++ **/ ++static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, ++ u16 reg, u16 *val) ++{ ++ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); ++} ++ ++/** ++ * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation ++ * @hw: pointer to the hardware structure ++ * @addr: I2C bus address to read from ++ * @reg: I2C device register to read from ++ * @val: pointer to location to receive read value ++ * ++ * Returns an error code on error. ++ **/ ++static s32 ++ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, ++ u16 reg, u16 *val) ++{ ++ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); ++} ++ ++/** ++ * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation ++ * @hw: pointer to the hardware structure ++ * @addr: I2C bus address to write to ++ * @reg: I2C device register to write to ++ * @val: value to write ++ * ++ * Returns an error code on error. ++ **/ ++static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, ++ u8 addr, u16 reg, u16 val) ++{ ++ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); ++} ++ ++/** ++ * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation ++ * @hw: pointer to the hardware structure ++ * @addr: I2C bus address to write to ++ * @reg: I2C device register to write to ++ * @val: value to write ++ * ++ * Returns an error code on error. 
++ **/ ++static s32 ++ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, ++ u8 addr, u16 reg, u16 val) ++{ ++ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); ++} ++ ++/** ++ * ixgbe_fw_phy_activity - Perform an activity on a PHY ++ * @hw: pointer to hardware structure ++ * @activity: activity to perform ++ * @data: Pointer to 4 32-bit words of data ++ */ ++s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, ++ u32 (*data)[FW_PHY_ACT_DATA_COUNT]) ++{ ++ union { ++ struct ixgbe_hic_phy_activity_req cmd; ++ struct ixgbe_hic_phy_activity_resp rsp; ++ } hic; ++ u16 retries = FW_PHY_ACT_RETRIES; ++ s32 rc; ++ u32 i; ++ ++ do { ++ memset(&hic, 0, sizeof(hic)); ++ hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD; ++ hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN; ++ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; ++ hic.cmd.port_number = hw->bus.lan_id; ++ hic.cmd.activity_id = cpu_to_le16(activity); ++ for (i = 0; i < ARRAY_SIZE(hic.cmd.data); ++i) ++ hic.cmd.data[i] = cpu_to_be32((*data)[i]); ++ ++ rc = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), ++ IXGBE_HI_COMMAND_TIMEOUT, ++ true); ++ if (rc) ++ return rc; ++ if (hic.rsp.hdr.cmd_or_resp.ret_status == ++ FW_CEM_RESP_STATUS_SUCCESS) { ++ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) ++ (*data)[i] = be32_to_cpu(hic.rsp.data[i]); ++ return 0; ++ } ++ usleep_range(20, 30); ++ --retries; ++ } while (retries > 0); ++ ++ return IXGBE_ERR_HOST_INTERFACE_COMMAND; ++} ++ ++static const struct { ++ u16 fw_speed; ++ ixgbe_link_speed phy_speed; ++} ixgbe_fw_map[] = { ++ { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL }, ++ { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL }, ++ { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL }, ++ { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL }, ++ { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL }, ++ { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL }, ++}; ++ ++/** ++ * ixgbe_get_phy_id_fw - Get the phy ID via firmware 
command ++ * @hw: pointer to hardware structure ++ * ++ * Returns error code ++ */ ++static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) ++{ ++ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ u16 phy_speeds; ++ u16 phy_id_lo; ++ s32 rc; ++ u16 i; ++ ++ if (hw->phy.id) ++ return 0; ++ ++ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info); ++ if (rc) ++ return rc; ++ ++ hw->phy.speeds_supported = 0; ++ phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK; ++ for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { ++ if (phy_speeds & ixgbe_fw_map[i].fw_speed) ++ hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed; ++ } ++ ++ hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK; ++ phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK; ++ hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK; ++ hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; ++ if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) ++ return IXGBE_ERR_PHY_ADDR_INVALID; ++ ++ hw->phy.autoneg_advertised = hw->phy.speeds_supported; ++ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | ++ IXGBE_LINK_SPEED_1GB_FULL; ++ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; ++ return 0; ++} ++ ++static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 *phy_data); ++/** ++ * ixgbe_identify_phy_fw - Get PHY type based on firmware command ++ * @hw: pointer to hardware structure ++ * ++ * Returns error code ++ */ ++static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) ++{ ++ s32 rc; ++ u16 value=0; ++ ++ if (hw->bus.lan_id) ++ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; ++ else ++ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; ++ ++#if 0 /* Try also to get PHY ID through MDIO by using C22 in read_reg op. ++ * By hilbert ++ */ ++ rc = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, &value); ++ hw_err(hw, "####rc:%x, PHY ID-1:%x\n", rc, value); ++#endif ++ ++ hw->phy.type = ixgbe_phy_fw; ++#if 0 /* We still need read/write ops later, don't NULL it. 
By hilbert */ ++ hw->phy.ops.read_reg = NULL; ++ hw->phy.ops.write_reg = NULL; ++#endif ++ return ixgbe_get_phy_id_fw(hw); ++} ++ ++/** ++ * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY ++ * @hw: pointer to hardware structure ++ * ++ * Returns error code ++ */ ++static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) ++{ ++ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ ++ setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF; ++ return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup); ++} ++ ++/** ++ * ixgbe_setup_fw_link - Setup firmware-controlled PHYs ++ * @hw: pointer to hardware structure ++ */ ++static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) ++{ ++ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ s32 rc; ++ u16 i; ++ ++ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) ++ return 0; ++ ++ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { ++ hw_err(hw, "rx_pause not valid in strict IEEE mode\n"); ++ return IXGBE_ERR_INVALID_LINK_SETTINGS; ++ } ++ ++ switch (hw->fc.requested_mode) { ++ case ixgbe_fc_full: ++ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX << ++ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; ++ break; ++ case ixgbe_fc_rx_pause: ++ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX << ++ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; ++ break; ++ case ixgbe_fc_tx_pause: ++ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX << ++ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; ++ break; ++ default: ++ break; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { ++ if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed) ++ setup[0] |= ixgbe_fw_map[i].fw_speed; ++ } ++ setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN; ++ ++ if (hw->phy.eee_speeds_advertised) ++ setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE; ++ ++ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup); ++ if (rc) ++ return rc; ++ if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN) ++ return IXGBE_ERR_OVERTEMP; ++ return 0; ++} ++ ++/** ++ * ixgbe_fc_autoneg_fw - Set up flow 
control for FW-controlled PHYs ++ * @hw: pointer to hardware structure ++ * ++ * Called at init time to set up flow control. ++ */ ++static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) ++{ ++ if (hw->fc.requested_mode == ixgbe_fc_default) ++ hw->fc.requested_mode = ixgbe_fc_full; ++ ++ return ixgbe_setup_fw_link(hw); ++} ++ + /** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params + * @hw: pointer to hardware structure + * +@@ -544,41 +875,6 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + return status; + } + +-/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface +- * command assuming that the semaphore is already obtained. +- * @hw: pointer to hardware structure +- * @offset: offset of word in the EEPROM to read +- * @data: word read from the EEPROM +- * +- * Reads a 16 bit word from the EEPROM using the hostif. +- **/ +-static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, +- u16 *data) +-{ +- s32 status; +- struct ixgbe_hic_read_shadow_ram buffer; +- +- buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; +- buffer.hdr.req.buf_lenh = 0; +- buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; +- buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; +- +- /* convert offset from words to bytes */ +- buffer.address = cpu_to_be32(offset * 2); +- /* one word */ +- buffer.length = cpu_to_be16(sizeof(u16)); +- +- status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), +- IXGBE_HI_COMMAND_TIMEOUT, false); +- if (status) +- return status; +- +- *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, +- FW_NVM_DATA_OFFSET); +- +- return 0; +-} +- + /** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read +@@ -590,6 +886,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) 
+ { ++ const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; + struct ixgbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; +@@ -597,7 +894,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u32 i; + + /* Take semaphore for the entire operation. */ +- status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); ++ status = hw->mac.ops.acquire_swfw_sync(hw, mask); + if (status) { + hw_dbg(hw, "EEPROM read buffer - semaphore failed\n"); + return status; +@@ -620,10 +917,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + buffer.pad2 = 0; + buffer.pad3 = 0; + +- status = ixgbe_host_interface_command(hw, &buffer, +- sizeof(buffer), +- IXGBE_HI_COMMAND_TIMEOUT, +- false); ++ status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), ++ IXGBE_HI_COMMAND_TIMEOUT); + if (status) { + hw_dbg(hw, "Host interface command failed\n"); + goto out; +@@ -647,7 +942,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + } + + out: +- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); ++ hw->mac.ops.release_swfw_sync(hw, mask); + return status; + } + +@@ -818,15 +1113,32 @@ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) + **/ + static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) + { +- s32 status = 0; ++ const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; ++ struct ixgbe_hic_read_shadow_ram buffer; ++ s32 status; + +- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { +- status = ixgbe_read_ee_hostif_data_X550(hw, offset, data); +- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); +- } else { +- status = IXGBE_ERR_SWFW_SYNC; ++ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; ++ buffer.hdr.req.buf_lenh = 0; ++ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; ++ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; ++ ++ /* convert offset from words to bytes */ ++ buffer.address = cpu_to_be32(offset * 2); ++ /* one word */ ++ 
buffer.length = cpu_to_be16(sizeof(u16)); ++ ++ status = hw->mac.ops.acquire_swfw_sync(hw, mask); ++ if (status) ++ return status; ++ ++ status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), ++ IXGBE_HI_COMMAND_TIMEOUT); ++ if (!status) { ++ *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, ++ FW_NVM_DATA_OFFSET); + } + ++ hw->mac.ops.release_swfw_sync(hw, mask); + return status; + } + +@@ -1130,47 +1442,17 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + return ret; + } + +-/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. ++/** ++ * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration + * @hw: pointer to hardware structure +- * @speed: the link speed to force + * +- * Configures the integrated KR PHY to use iXFI mode. Used to connect an +- * internal and external PHY at a specific speed, without autonegotiation. ++ * iXfI configuration needed for ixgbe_mac_X550EM_x devices. + **/ +-static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) ++static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) + { + s32 status; + u32 reg_val; + +- /* Disable AN and force speed to 10G Serial. */ +- status = ixgbe_read_iosf_sb_reg_x550(hw, +- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), +- IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); +- if (status) +- return status; +- +- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; +- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; +- +- /* Select forced link speed for internal PHY. */ +- switch (*speed) { +- case IXGBE_LINK_SPEED_10GB_FULL: +- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; +- break; +- case IXGBE_LINK_SPEED_1GB_FULL: +- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; +- break; +- default: +- /* Other link speeds are not supported by internal KR PHY. 
*/ +- return IXGBE_ERR_LINK_SETUP; +- } +- +- status = ixgbe_write_iosf_sb_reg_x550(hw, +- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), +- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); +- if (status) +- return status; +- + /* Disable training protocol FSM. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), +@@ -1230,20 +1512,111 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); +- if (status) +- return status; ++ return status; ++} + +- /* Toggle port SW reset by AN reset. */ +- status = ixgbe_read_iosf_sb_reg_x550(hw, ++/** ++ * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the ++ * internal PHY ++ * @hw: pointer to hardware structure ++ **/ ++static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) ++{ ++ s32 status; ++ u32 link_ctrl; ++ ++ /* Restart auto-negotiation. 
*/ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), +- IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl); ++ ++ if (status) { ++ hw_dbg(hw, "Auto-negotiation did not complete\n"); ++ return status; ++ } ++ ++ link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl); ++ ++ if (hw->mac.type == ixgbe_mac_x550em_a) { ++ u32 flx_mask_st20; ++ ++ /* Indicate to FW that AN restart has been asserted */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20); ++ ++ if (status) { ++ hw_dbg(hw, "Auto-negotiation did not complete\n"); ++ return status; ++ } ++ ++ flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART; ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20); ++ } ++ ++ return status; ++} ++ ++/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. ++ * @hw: pointer to hardware structure ++ * @speed: the link speed to force ++ * ++ * Configures the integrated KR PHY to use iXFI mode. Used to connect an ++ * internal and external PHY at a specific speed, without autonegotiation. ++ **/ ++static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ s32 status; ++ u32 reg_val; ++ ++ /* iXFI is only supported with X552 */ ++ if (mac->type != ixgbe_mac_X550EM_x) ++ return IXGBE_ERR_LINK_SETUP; ++ ++ /* Disable AN and force speed to 10G Serial. 
*/ ++ status = ixgbe_read_iosf_sb_reg_x550(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + +- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; ++ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; ++ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; ++ ++ /* Select forced link speed for internal PHY. */ ++ switch (*speed) { ++ case IXGBE_LINK_SPEED_10GB_FULL: ++ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; ++ break; ++ case IXGBE_LINK_SPEED_1GB_FULL: ++ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; ++ break; ++ default: ++ /* Other link speeds are not supported by internal KR PHY. */ ++ return IXGBE_ERR_LINK_SETUP; ++ } ++ + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ if (status) ++ return status; ++ ++ /* Additional configuration needed for x550em_x */ ++ if (hw->mac.type == ixgbe_mac_X550EM_x) { ++ status = ixgbe_setup_ixfi_x550em_x(hw); ++ if (status) ++ return status; ++ } ++ ++ /* Toggle port SW reset by AN reset. */ ++ status = ixgbe_restart_an_internal_phy_x550em(hw); + + return status; + } +@@ -1294,7 +1667,7 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + __always_unused bool autoneg_wait_to_complete) + { + s32 status; +- u16 slice, value; ++ u16 reg_slice, reg_val; + bool setup_linear = false; + + /* Check if SFP module is supported and linear */ +@@ -1310,71 +1683,68 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + if (status) + return status; + +- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { +- /* Configure CS4227 LINE side to 10G SR. */ +- slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12); +- value = IXGBE_CS4227_SPEED_10G; +- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, +- slice, value); +- if (status) +- goto i2c_err; ++ /* Configure internal PHY for KR/KX. 
*/ ++ ixgbe_setup_kr_speed_x550em(hw, speed); + +- slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); +- value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; +- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, +- slice, value); +- if (status) +- goto i2c_err; +- +- /* Configure CS4227 for HOST connection rate then type. */ +- slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12); +- value = speed & IXGBE_LINK_SPEED_10GB_FULL ? +- IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G; +- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, +- slice, value); +- if (status) +- goto i2c_err; ++ /* Configure CS4227 LINE side to proper mode. */ ++ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); ++ if (setup_linear) ++ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; ++ else ++ reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + +- slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12); +- if (setup_linear) +- value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; +- else +- value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; +- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, +- slice, value); +- if (status) +- goto i2c_err; ++ status = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, ++ reg_val); + +- /* Setup XFI internal link. */ +- status = ixgbe_setup_ixfi_x550em(hw, &speed); +- if (status) { +- hw_dbg(hw, "setup_ixfi failed with %d\n", status); +- return status; +- } +- } else { +- /* Configure internal PHY for KR/KX. */ +- status = ixgbe_setup_kr_speed_x550em(hw, speed); +- if (status) { +- hw_dbg(hw, "setup_kr_speed failed with %d\n", status); +- return status; +- } ++ return status; ++} + +- /* Configure CS4227 LINE side to proper mode. 
*/ +- slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); +- if (setup_linear) +- value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; +- else +- value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; +- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, +- slice, value); +- if (status) +- goto i2c_err; ++/** ++ * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode ++ * @hw: pointer to hardware structure ++ * @speed: the link speed to force ++ * ++ * Configures the integrated PHY for native SFI mode. Used to connect the ++ * internal PHY directly to an SFP cage, without autonegotiation. ++ **/ ++static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ s32 status; ++ u32 reg_val; ++ ++ /* Disable all AN and force speed to 10G Serial. */ ++ status = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ if (status) ++ return status; ++ ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; ++ ++ /* Select forced link speed for internal PHY. */ ++ switch (*speed) { ++ case IXGBE_LINK_SPEED_10GB_FULL: ++ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G; ++ break; ++ case IXGBE_LINK_SPEED_1GB_FULL: ++ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; ++ break; ++ default: ++ /* Other link speeds are not supported by internal PHY. */ ++ return IXGBE_ERR_LINK_SETUP; + } + +- return 0; ++ status = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ ++ /* Toggle port SW reset by AN reset. 
*/ ++ status = ixgbe_restart_an_internal_phy_x550em(hw); + +-i2c_err: +- hw_dbg(hw, "combined i2c access failed with %d\n", status); + return status; + } + +@@ -1390,45 +1760,39 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed, + { + bool setup_linear = false; + u32 reg_phy_int; +- s32 rc; ++ s32 ret_val; + + /* Check if SFP module is supported and linear */ +- rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); ++ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * SFP not present error is not excepted in the setup MAC link flow. + */ +- if (rc == IXGBE_ERR_SFP_NOT_PRESENT) ++ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) + return 0; + +- if (!rc) +- return rc; ++ if (ret_val) ++ return ret_val; + +- /* Configure internal PHY for native SFI */ +- rc = hw->mac.ops.read_iosf_sb_reg(hw, +- IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id), +- IXGBE_SB_IOSF_TARGET_KR_PHY, +- ®_phy_int); +- if (rc) +- return rc; ++ /* Configure internal PHY for native SFI based on module type */ ++ ret_val = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_phy_int); ++ if (ret_val) ++ return ret_val; + +- if (setup_linear) { +- reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING; +- reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR; +- } else { +- reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING; +- reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR; +- } ++ reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA; ++ if (!setup_linear) ++ reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR; + +- rc = hw->mac.ops.write_iosf_sb_reg(hw, +- IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id), +- IXGBE_SB_IOSF_TARGET_KR_PHY, +- reg_phy_int); +- if (rc) +- return rc; ++ ret_val = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int); ++ if (ret_val) ++ return ret_val; + +- /* Setup XFI/SFI 
internal link */ +- return ixgbe_setup_ixfi_x550em(hw, &speed); ++ /* Setup SFI internal link. */ ++ return ixgbe_setup_sfi_x550a(hw, &speed); + } + + /** +@@ -1444,19 +1808,19 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, + u32 reg_slice, slice_offset; + bool setup_linear = false; + u16 reg_phy_ext; +- s32 rc; ++ s32 ret_val; + + /* Check if SFP module is supported and linear */ +- rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); ++ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * SFP not present error is not excepted in the setup MAC link flow. + */ +- if (rc == IXGBE_ERR_SFP_NOT_PRESENT) ++ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) + return 0; + +- if (!rc) +- return rc; ++ if (ret_val) ++ return ret_val; + + /* Configure internal PHY for KR/KX. */ + ixgbe_setup_kr_speed_x550em(hw, speed); +@@ -1464,16 +1828,16 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, + if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE) + return IXGBE_ERR_PHY_ADDR_INVALID; + +- /* Get external PHY device id */ +- rc = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB, +- IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); +- if (rc) +- return rc; ++ /* Get external PHY SKU id */ ++ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU, ++ IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); ++ if (ret_val) ++ return ret_val; + + /* When configuring quad port CS4223, the MAC instance is part + * of the slice offset. + */ +- if (reg_phy_ext == IXGBE_CS4223_PHY_ID) ++ if (reg_phy_ext == IXGBE_CS4223_SKU_ID) + slice_offset = (hw->bus.lan_id + + (hw->bus.instance_id << 1)) << 12; + else +@@ -1481,12 +1845,28 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, + + /* Configure CS4227/CS4223 LINE side to proper mode. 
*/ + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; ++ ++ ret_val = hw->phy.ops.read_reg(hw, reg_slice, ++ IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); ++ if (ret_val) ++ return ret_val; ++ ++ reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) | ++ (IXGBE_CS4227_EDC_MODE_SR << 1)); ++ + if (setup_linear) + reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; + else + reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; +- return hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE, +- reg_phy_ext); ++ ++ ret_val = hw->phy.ops.write_reg(hw, reg_slice, ++ IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext); ++ if (ret_val) ++ return ret_val; ++ ++ /* Flush previous write with a read */ ++ return hw->phy.ops.read_reg(hw, reg_slice, ++ IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); + } + + /** +@@ -1515,8 +1895,10 @@ static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, + else + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + +- /* If internal link mode is XFI, then setup XFI internal link. */ +- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { ++ /* If X552 and internal link mode is XFI, then setup XFI internal link. ++ */ ++ if (hw->mac.type == ixgbe_mac_X550EM_x && ++ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + status = ixgbe_setup_ixfi_x550em(hw, &force_speed); + + if (status) +@@ -1540,7 +1922,7 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, + bool link_up_wait_to_complete) + { + u32 status; +- u16 autoneg_status; ++ u16 i, autoneg_status; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; +@@ -1552,14 +1934,18 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, + if (status || !(*link_up)) + return status; + +- /* MAC link is up, so check external PHY link. +- * Read this twice back to back to indicate current status. 
+- */ +- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, +- &autoneg_status); +- if (status) +- return status; ++ /* MAC link is up, so check external PHY link. ++ * Link status is latching low, and can only be used to detect link ++ * drop, and not the current status of the link without performing ++ * back-to-back reads. ++ */ ++ for (i = 0; i < 2; i++) { ++ status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, ++ &autoneg_status); ++ ++ if (status) ++ return status; ++ } + + /* If external PHY link is not up, then indicate link not up */ + if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) +@@ -1577,7 +1963,7 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) + { + struct ixgbe_mac_info *mac = &hw->mac; +- u32 lval, sval; ++ u32 lval, sval, flx_val; + s32 rc; + + rc = mac->ops.read_iosf_sb_reg(hw, +@@ -1611,12 +1997,183 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, + if (rc) + return rc; + +- lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; ++ rc = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); ++ if (rc) ++ return rc; ++ ++ rc = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); ++ if (rc) ++ return rc; ++ ++ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; ++ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; ++ ++ rc = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); ++ if (rc) ++ return rc; ++ ++ rc = ixgbe_restart_an_internal_phy_x550em(hw); ++ return rc; ++} ++ ++/** ++ * ixgbe_setup_sgmii_fw - Set up link for sgmii with 
firmware-controlled PHYs ++ * @hw: pointer to hardware structure ++ */ ++static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, ++ bool autoneg_wait) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ u32 lval, sval, flx_val; ++ s32 rc; ++ ++ rc = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); ++ if (rc) ++ return rc; ++ ++ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; ++ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; ++ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; ++ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; ++ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; ++ rc = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, lval); ++ if (rc) ++ return rc; ++ ++ rc = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); ++ if (rc) ++ return rc; ++ ++ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; ++ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; ++ rc = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, sval); ++ if (rc) ++ return rc; ++ + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); ++ if (rc) ++ return rc; + +- return rc; ++ rc = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); ++ if (rc) ++ return rc; ++ ++ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; ++ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; ++ ++ rc = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); ++ if (rc) ++ return rc; ++ ++ 
ixgbe_restart_an_internal_phy_x550em(hw); ++ ++ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); ++} ++ ++/** ++ * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 ++ * @hw: pointer to hardware structure ++ * ++ * Enable flow control according to IEEE clause 37. ++ */ ++static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) ++{ ++ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; ++ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ ixgbe_link_speed speed; ++ bool link_up; ++ ++ /* AN should have completed when the cable was plugged in. ++ * Look for reasons to bail out. Bail out if: ++ * - FC autoneg is disabled, or if ++ * - link is not up. ++ */ ++ if (hw->fc.disable_fc_autoneg) ++ goto out; ++ ++ hw->mac.ops.check_link(hw, &speed, &link_up, false); ++ if (!link_up) ++ goto out; ++ ++ /* Check if auto-negotiation has completed */ ++ status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); ++ if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { ++ status = IXGBE_ERR_FC_NOT_NEGOTIATED; ++ goto out; ++ } ++ ++ /* Negotiate the flow control */ ++ status = ixgbe_negotiate_fc(hw, info[0], info[0], ++ FW_PHY_ACT_GET_LINK_INFO_FC_RX, ++ FW_PHY_ACT_GET_LINK_INFO_FC_TX, ++ FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, ++ FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); ++ ++out: ++ if (!status) { ++ hw->fc.fc_was_autonegged = true; ++ } else { ++ hw->fc.fc_was_autonegged = false; ++ hw->fc.current_mode = hw->fc.requested_mode; ++ } ++} ++ ++/** ixgbe_init_mac_link_ops_X550em_a - Init mac link function pointers ++ * @hw: pointer to hardware structure ++ **/ ++static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ ++ switch (mac->ops.get_media_type(hw)) { ++ case ixgbe_media_type_fiber: ++ mac->ops.setup_fc = NULL; ++ mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; ++ break; ++ case ixgbe_media_type_copper: ++ if (hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T && ++ hw->device_id 
!= IXGBE_DEV_ID_X550EM_A_1G_T_L) { ++ mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; ++ break; ++ } ++ mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; ++ mac->ops.setup_fc = ixgbe_fc_autoneg_fw; ++ mac->ops.setup_link = ixgbe_setup_sgmii_fw; ++ mac->ops.check_link = ixgbe_check_mac_link_generic; ++ break; ++ case ixgbe_media_type_backplane: ++ mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; ++ mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; ++ break; ++ default: ++ break; ++ } + } + + /** ixgbe_init_mac_link_ops_X550em - init mac link function pointers +@@ -1654,10 +2211,12 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) + ixgbe_set_soft_rate_select_speed; + break; + case ixgbe_media_type_copper: ++ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) ++ break; + mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + mac->ops.setup_fc = ixgbe_setup_fc_generic; + mac->ops.check_link = ixgbe_check_link_t_X550em; +- return; ++ break; + case ixgbe_media_type_backplane: + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || + hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) +@@ -1666,6 +2225,10 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) + default: + break; + } ++ ++ /* Additional modification for X550em_a devices */ ++ if (hw->mac.type == ixgbe_mac_x550em_a) ++ ixgbe_init_mac_link_ops_X550em_a(hw); + } + + /** ixgbe_setup_sfp_modules_X550em - Setup SFP module +@@ -1696,6 +2259,12 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) + { ++ if (hw->phy.type == ixgbe_phy_fw) { ++ *autoneg = true; ++ *speed = hw->phy.speeds_supported; ++ return 0; ++ } ++ + /* SFP */ + if (hw->phy.media_type == ixgbe_media_type_fiber) { + /* CS4227 SFP must not enable auto-negotiation */ +@@ -1714,8 +2283,39 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + } else { +- *speed = 
IXGBE_LINK_SPEED_10GB_FULL | +- IXGBE_LINK_SPEED_1GB_FULL; ++ switch (hw->phy.type) { ++ case ixgbe_phy_x550em_kx4: ++ *speed = IXGBE_LINK_SPEED_1GB_FULL | ++ IXGBE_LINK_SPEED_2_5GB_FULL | ++ IXGBE_LINK_SPEED_10GB_FULL; ++ break; ++ case ixgbe_phy_x550em_xfi: ++ *speed = IXGBE_LINK_SPEED_1GB_FULL | ++ IXGBE_LINK_SPEED_10GB_FULL; ++ break; ++ case ixgbe_phy_ext_1g_t: ++ case ixgbe_phy_sgmii: ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ break; ++ case ixgbe_phy_x550em_kr: ++ if (hw->mac.type == ixgbe_mac_x550em_a) { ++ /* check different backplane modes */ ++ if (hw->phy.nw_mng_if_sel & ++ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { ++ *speed = IXGBE_LINK_SPEED_2_5GB_FULL; ++ break; ++ } else if (hw->device_id == ++ IXGBE_DEV_ID_X550EM_A_KR_L) { ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ break; ++ } ++ } ++ /* fall through */ ++ default: ++ *speed = IXGBE_LINK_SPEED_10GB_FULL | ++ IXGBE_LINK_SPEED_1GB_FULL; ++ break; ++ } + *autoneg = true; + } + return 0; +@@ -1742,7 +2342,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) + + /* Vendor alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + ®); + + if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN)) +@@ -1750,7 +2350,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) + + /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + ®); + + if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | +@@ -1759,7 +2359,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) + + /* Global alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + ®); + + if (status) +@@ -1774,7 +2374,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct 
ixgbe_hw *hw, bool *lsc) + if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { + /* device fault alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + ®); + if (status) + return status; +@@ -1789,14 +2389,14 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) + + /* Vendor alarm 2 triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); ++ MDIO_MMD_AN, ®); + + if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT)) + return status; + + /* link connect/disconnect event occurred */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); ++ MDIO_MMD_AN, ®); + + if (status) + return status; +@@ -1827,21 +2427,34 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) + status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); + + /* Enable link status change alarm */ +- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); +- if (status) +- return status; + +- reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; ++ /* Enable the LASI interrupts on X552 devices to receive notifications ++ * of the link configurations of the external PHY and correspondingly ++ * support the configuration of the internal iXFI link, since iXFI does ++ * not support auto-negotiation. This is not required for X553 devices ++ * having KR support, which performs auto-negotiations and which is used ++ * as the internal link to the external PHY. Hence adding a check here ++ * to avoid enabling LASI interrupts for X553 devices. 
++ */ ++ if (hw->mac.type != ixgbe_mac_x550em_a) { ++ status = hw->phy.ops.read_reg(hw, ++ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, ++ MDIO_MMD_AN, ®); ++ if (status) ++ return status; + +- status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg); +- if (status) +- return status; ++ reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; ++ ++ status = hw->phy.ops.write_reg(hw, ++ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, ++ MDIO_MMD_AN, reg); ++ if (status) ++ return status; ++ } + + /* Enable high temperature failure and global fault alarms */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + ®); + if (status) + return status; +@@ -1850,14 +2463,14 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) + IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN); + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + reg); + if (status) + return status; + + /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + ®); + if (status) + return status; +@@ -1866,14 +2479,14 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) + IXGBE_MDIO_GLOBAL_ALARM_1_INT); + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + reg); + if (status) + return status; + + /* Enable chip-wide vendor alarm */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + ®); + if (status) + return status; +@@ -1881,7 +2494,7 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) + reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN; + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, +- 
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + reg); + + return status; +@@ -1945,51 +2558,31 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; + +- /* Restart auto-negotiation. */ +- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + +- return status; +-} +- +-/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY. +- * @hw: pointer to hardware structure +- * +- * Configures the integrated KX4 PHY. +- **/ +-static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) +-{ +- s32 status; +- u32 reg_val; +- +- status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1, +- IXGBE_SB_IOSF_TARGET_KX4_PCS0 + +- hw->bus.lan_id, ®_val); +- if (status) +- return status; +- +- reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 | +- IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX); +- +- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE; ++ if (hw->mac.type == ixgbe_mac_x550em_a) { ++ /* Set lane mode to KR auto negotiation */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + +- /* Advertise 10G support. */ +- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) +- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4; ++ if (status) ++ return status; + +- /* Advertise 1G support. */ +- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) +- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; ++ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; ++ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + +- /* Restart auto-negotiation. 
*/ +- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART; +- status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1, +- IXGBE_SB_IOSF_TARGET_KX4_PCS0 + +- hw->bus.lan_id, reg_val); ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ } + +- return status; ++ return ixgbe_restart_an_internal_phy_x550em(hw); + } + + /** +@@ -2002,6 +2595,9 @@ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) + return 0; + ++ if (ixgbe_check_reset_blocked(hw)) ++ return 0; ++ + return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); + } + +@@ -2019,14 +2615,12 @@ static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) + *link_up = false; + + /* read this twice back to back to indicate current status */ +- ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, + &autoneg_status); + if (ret) + return ret; + +- ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, + &autoneg_status); + if (ret) + return ret; +@@ -2057,7 +2651,8 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; + +- if (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) { ++ if (!(hw->mac.type == ixgbe_mac_X550EM_x && ++ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) { + speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + return ixgbe_setup_kr_speed_x550em(hw, speed); +@@ -2072,7 +2667,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) + return 0; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ MDIO_MMD_AN, + 
&speed); + if (status) + return status; +@@ -2133,10 +2728,10 @@ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx) + + /* To turn on the LED, set mode to ON. */ + hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); ++ MDIO_MMD_VEND1, &phy_data); + phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK; + hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); ++ MDIO_MMD_VEND1, phy_data); + + return 0; + } +@@ -2155,14 +2750,70 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) + + /* To turn on the LED, set mode to ON. */ + hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); ++ MDIO_MMD_VEND1, &phy_data); + phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK; + hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); ++ MDIO_MMD_VEND1, phy_data); + + return 0; + } + ++/** ++ * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware ++ * @hw: pointer to the HW structure ++ * @maj: driver version major number ++ * @min: driver version minor number ++ * @build: driver version build number ++ * @sub: driver version sub build number ++ * @len: length of driver_ver string ++ * @driver_ver: driver string ++ * ++ * Sends driver version number to firmware through the manageability ++ * block. On success return 0 ++ * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring ++ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
++ **/ ++static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, ++ u8 build, u8 sub, u16 len, ++ const char *driver_ver) ++{ ++ struct ixgbe_hic_drv_info2 fw_cmd; ++ s32 ret_val; ++ int i; ++ ++ if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string))) ++ return IXGBE_ERR_INVALID_ARGUMENT; ++ ++ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; ++ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; ++ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; ++ fw_cmd.port_num = (u8)hw->bus.func; ++ fw_cmd.ver_maj = maj; ++ fw_cmd.ver_min = min; ++ fw_cmd.ver_build = build; ++ fw_cmd.ver_sub = sub; ++ fw_cmd.hdr.checksum = 0; ++ memcpy(fw_cmd.driver_string, driver_ver, len); ++ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, ++ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); ++ ++ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { ++ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, ++ sizeof(fw_cmd), ++ IXGBE_HI_COMMAND_TIMEOUT, ++ true); ++ if (ret_val) ++ continue; ++ ++ if (fw_cmd.hdr.cmd_or_resp.ret_status != ++ FW_CEM_RESP_STATUS_SUCCESS) ++ return IXGBE_ERR_HOST_INTERFACE_COMMAND; ++ return 0; ++ } ++ ++ return ret_val; ++} ++ + /** ixgbe_get_lcd_x550em - Determine lowest common denominator + * @hw: pointer to hardware structure + * @lcd_speed: pointer to lowest common link speed +@@ -2179,7 +2830,7 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, + *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; + + status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ MDIO_MMD_AN, + &an_lp_status); + if (status) + return status; +@@ -2208,7 +2859,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) + { + bool pause, asm_dir; + u32 reg_val; +- s32 rc; ++ s32 rc = 0; + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { +@@ -2251,33 +2902,122 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) + return IXGBE_ERR_CONFIG; + } + +- if 
(hw->device_id != IXGBE_DEV_ID_X550EM_X_KR && +- hw->device_id != IXGBE_DEV_ID_X550EM_A_KR && +- hw->device_id != IXGBE_DEV_ID_X550EM_A_KR_L) +- return 0; ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_X_KR: ++ case IXGBE_DEV_ID_X550EM_A_KR: ++ case IXGBE_DEV_ID_X550EM_A_KR_L: ++ rc = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ++ ®_val); ++ if (rc) ++ return rc; ++ ++ reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | ++ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); ++ if (pause) ++ reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; ++ if (asm_dir) ++ reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; ++ rc = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ++ reg_val); ++ ++ /* This device does not fully support AN. */ ++ hw->fc.disable_fc_autoneg = true; ++ break; ++ case IXGBE_DEV_ID_X550EM_X_XFI: ++ hw->fc.disable_fc_autoneg = true; ++ break; ++ default: ++ break; ++ } ++ return rc; ++} + +- rc = hw->mac.ops.read_iosf_sb_reg(hw, +- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), +- IXGBE_SB_IOSF_TARGET_KR_PHY, +- ®_val); +- if (rc) +- return rc; ++/** ++ * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37 ++ * @hw: pointer to hardware structure ++ **/ ++static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) ++{ ++ u32 link_s1, lp_an_page_low, an_cntl_1; ++ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; ++ ixgbe_link_speed speed; ++ bool link_up; + +- reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | +- IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); +- if (pause) +- reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; +- if (asm_dir) +- reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; +- rc = hw->mac.ops.write_iosf_sb_reg(hw, +- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), +- IXGBE_SB_IOSF_TARGET_KR_PHY, +- reg_val); ++ /* AN should have completed when the cable was plugged in. ++ * Look for reasons to bail out. Bail out if: ++ * - FC autoneg is disabled, or if ++ * - link is not up. 
++ */ ++ if (hw->fc.disable_fc_autoneg) { ++ hw_err(hw, "Flow control autoneg is disabled"); ++ goto out; ++ } + +- /* This device does not fully support AN. */ +- hw->fc.disable_fc_autoneg = true; ++ hw->mac.ops.check_link(hw, &speed, &link_up, false); ++ if (!link_up) { ++ hw_err(hw, "The link is down"); ++ goto out; ++ } + +- return rc; ++ /* Check at auto-negotiation has completed */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_S1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1); ++ ++ if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) { ++ hw_dbg(hw, "Auto-Negotiation did not complete\n"); ++ status = IXGBE_ERR_FC_NOT_NEGOTIATED; ++ goto out; ++ } ++ ++ /* Read the 10g AN autoc and LP ability registers and resolve ++ * local flow control settings accordingly ++ */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1); ++ ++ if (status) { ++ hw_dbg(hw, "Auto-Negotiation did not complete\n"); ++ goto out; ++ } ++ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low); ++ ++ if (status) { ++ hw_dbg(hw, "Auto-Negotiation did not complete\n"); ++ goto out; ++ } ++ ++ status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low, ++ IXGBE_KRM_AN_CNTL_1_SYM_PAUSE, ++ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE, ++ IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE, ++ IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE); ++ ++out: ++ if (!status) { ++ hw->fc.fc_was_autonegged = true; ++ } else { ++ hw->fc.fc_was_autonegged = false; ++ hw->fc.current_mode = hw->fc.requested_mode; ++ } ++} ++ ++/** ++ * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings ++ * @hw: pointer to hardware structure ++ **/ ++static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw) ++{ ++ hw->fc.fc_was_autonegged = false; ++ hw->fc.current_mode = hw->fc.requested_mode; + } + + /** ixgbe_enter_lplu_x550em - Transition to 
low power states +@@ -2326,7 +3066,7 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) + return ixgbe_set_copper_phy_power(hw, false); + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ MDIO_MMD_AN, + &speed); + if (status) + return status; +@@ -2348,20 +3088,20 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) + + /* Clear AN completed indication */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ MDIO_MMD_AN, + &autoneg_reg); + if (status) + return status; + +- status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ status = hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, ++ MDIO_MMD_AN, + &an_10g_cntl_reg); + if (status) + return status; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, +- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ MDIO_MMD_AN, + &autoneg_reg); + if (status) + return status; +@@ -2378,6 +3118,50 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) + } + + /** ++ * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs ++ * @hw: pointer to hardware structure ++ */ ++static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) ++{ ++ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ s32 rc; ++ ++ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) ++ return 0; ++ ++ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store); ++ if (rc) ++ return rc; ++ memset(store, 0, sizeof(store)); ++ ++ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store); ++ if (rc) ++ return rc; ++ ++ return ixgbe_setup_fw_link(hw); ++} ++ ++/** ++ * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp ++ * @hw: pointer to hardware structure ++ */ ++static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) ++{ ++ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ s32 rc; ++ ++ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store); ++ if 
(rc) ++ return rc; ++ ++ if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) { ++ ixgbe_shutdown_fw_phy(hw); ++ return IXGBE_ERR_OVERTEMP; ++ } ++ return 0; ++} ++ ++/** + * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register + * @hw: pointer to hardware structure + * +@@ -2398,6 +3182,18 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) + hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; ++#if 1 /* Since by Intel FW(LEK8),LAN controller 1 default set port 0 use phy address 0 ++ * and port 1 use phy address 1, we swap it for Porsche2 platform. ++ * By hilbert. ++ */ ++ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { ++ /*hw_err(hw, "####swap phy address used for different lan id in LAN conroller-1\n");*/ ++ hw->phy.mdio.prtad = (hw->bus.lan_id == 0) ? (1) : (0); ++ /*hw_err(hw, "####lan id: %d, phy address:%d\n", ++ hw->bus.lan_id, ++ hw->phy.mdio.prtad);*/ ++ } ++#endif + } + } + +@@ -2433,7 +3229,7 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) + /* Set functions pointers based on phy type */ + switch (hw->phy.type) { + case ixgbe_phy_x550em_kx4: +- phy->ops.setup_link = ixgbe_setup_kx4_x550em; ++ phy->ops.setup_link = NULL; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; +@@ -2442,6 +3238,12 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; ++ case ixgbe_phy_x550em_xfi: ++ /* link is managed by HW */ ++ phy->ops.setup_link = NULL; ++ phy->ops.read_reg = ixgbe_read_phy_reg_x550em; ++ phy->ops.write_reg = ixgbe_write_phy_reg_x550em; ++ break; + case ixgbe_phy_x550em_ext_t: + /* Save NW management interface connected on board. 
This is used + * to determine internal PHY mode +@@ -2463,6 +3265,19 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) + phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; + phy->ops.reset = ixgbe_reset_phy_t_X550em; + break; ++ case ixgbe_phy_sgmii: ++ phy->ops.setup_link = NULL; ++ break; ++ case ixgbe_phy_fw: ++ phy->ops.setup_link = ixgbe_setup_fw_link; ++ phy->ops.reset = ixgbe_reset_phy_fw; ++ break; ++ case ixgbe_phy_ext_1g_t: ++ phy->ops.setup_link = NULL; ++ phy->ops.read_reg = NULL; ++ phy->ops.write_reg = NULL; ++ phy->ops.reset = NULL; ++ break; + default: + break; + } +@@ -2488,6 +3303,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) + /* Fallthrough */ + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_KX4: ++ case IXGBE_DEV_ID_X550EM_X_XFI: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + media_type = ixgbe_media_type_backplane; +@@ -2500,6 +3316,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: + media_type = ixgbe_media_type_copper; + break; + default: +@@ -2519,7 +3337,7 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) + + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, +- IXGBE_MDIO_PMA_PMD_DEV_TYPE, ++ MDIO_MMD_PMAPMD, + ®); + if (status) + return status; +@@ -2530,7 +3348,7 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) + if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, +- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + ®); + if (status) + return status; +@@ -2539,7 +3357,7 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) + + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, +- 
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ MDIO_MMD_VEND1, + reg); + if (status) + return status; +@@ -2567,6 +3385,13 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) + hlreg0 &= ~IXGBE_HLREG0_MDCSPD; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + break; ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ /* Select fast MDIO clock speed for these devices */ ++ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); ++ hlreg0 |= IXGBE_HLREG0_MDCSPD; ++ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); ++ break; + default: + break; + } +@@ -2586,6 +3411,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) + u32 ctrl = 0; + u32 i; + bool link_up = false; ++ u32 swfw_mask = hw->phy.phy_semaphore_mask; + + /* Call adapter stop to disable Tx/Rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); +@@ -2613,6 +3439,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) + hw->phy.sfp_setup_needed = false; + } + ++ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) ++ return status; ++ + /* Reset PHY */ + if (!hw->phy.reset_disable && hw->phy.ops.reset) + hw->phy.ops.reset(hw); +@@ -2631,9 +3460,16 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) + ctrl = IXGBE_CTRL_RST; + } + ++ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); ++ if (status) { ++ hw_dbg(hw, "semaphore failed with %d", status); ++ return IXGBE_ERR_SWFW_SYNC; ++ } ++ + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); + usleep_range(1000, 1200); + + /* Poll for reset bit to self-clear meaning reset is complete */ +@@ -2728,6 +3564,90 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, + } + + /** ++ * ixgbe_setup_fc_backplane_x550em_a - Set up flow control ++ * @hw: pointer to hardware structure ++ * ++ * Called at init time to set up flow control. 
++ **/ ++static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) ++{ ++ s32 status = 0; ++ u32 an_cntl = 0; ++ ++ /* Validate the requested mode */ ++ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { ++ hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); ++ return IXGBE_ERR_INVALID_LINK_SETTINGS; ++ } ++ ++ if (hw->fc.requested_mode == ixgbe_fc_default) ++ hw->fc.requested_mode = ixgbe_fc_full; ++ ++ /* Set up the 1G and 10G flow control advertisement registers so the ++ * HW will be able to do FC autoneg once the cable is plugged in. If ++ * we link at 10G, the 1G advertisement is harmless and vice versa. ++ */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl); ++ ++ if (status) { ++ hw_dbg(hw, "Auto-Negotiation did not complete\n"); ++ return status; ++ } ++ ++ /* The possible values of fc.requested_mode are: ++ * 0: Flow control is completely disabled ++ * 1: Rx flow control is enabled (we can receive pause frames, ++ * but not send pause frames). ++ * 2: Tx flow control is enabled (we can send pause frames but ++ * we do not support receiving pause frames). ++ * 3: Both Rx and Tx flow control (symmetric) are enabled. ++ * other: Invalid. ++ */ ++ switch (hw->fc.requested_mode) { ++ case ixgbe_fc_none: ++ /* Flow control completely disabled by software override. */ ++ an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | ++ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); ++ break; ++ case ixgbe_fc_tx_pause: ++ /* Tx Flow control is enabled, and Rx Flow control is ++ * disabled by software override. ++ */ ++ an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; ++ an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; ++ break; ++ case ixgbe_fc_rx_pause: ++ /* Rx Flow control is enabled and Tx Flow control is ++ * disabled by software override. 
Since there really ++ * isn't a way to advertise that we are capable of RX ++ * Pause ONLY, we will advertise that we support both ++ * symmetric and asymmetric Rx PAUSE, as such we fall ++ * through to the fc_full statement. Later, we will ++ * disable the adapter's ability to send PAUSE frames. ++ */ ++ case ixgbe_fc_full: ++ /* Flow control (both Rx and Tx) is enabled by SW override. */ ++ an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | ++ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; ++ break; ++ default: ++ hw_err(hw, "Flow control param set incorrectly\n"); ++ return IXGBE_ERR_CONFIG; ++ } ++ ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl); ++ ++ /* Restart auto-negotiation. */ ++ status = ixgbe_restart_an_internal_phy_x550em(hw); ++ ++ return status; ++} ++ ++/** + * ixgbe_set_mux - Set mux for port 1 access with CS4227 + * @hw: pointer to hardware structure + * @state: set mux if 1, clear if 0 +@@ -2881,7 +3801,13 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + if (hw->mac.ops.acquire_swfw_sync(hw, mask)) + return IXGBE_ERR_SWFW_SYNC; + ++#if 0 /* To use C22 MDI access function created by our own. 
++ * By hilbert ++ */ + status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data); ++#else ++ status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, phy_data); ++#endif + hw->mac.ops.release_swfw_sync(hw, mask); + + return status; +@@ -2914,7 +3840,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + .clear_vfta = &ixgbe_clear_vfta_generic, \ + .set_vfta = &ixgbe_set_vfta_generic, \ + .fc_enable = &ixgbe_fc_enable_generic, \ +- .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, \ ++ .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_x550, \ + .init_uta_tables = &ixgbe_init_uta_tables_generic, \ + .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ + .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ +@@ -2933,6 +3859,7 @@ static const struct ixgbe_mac_operations mac_ops_X550 = { + X550_COMMON_MAC + .led_on = ixgbe_led_on_generic, + .led_off = ixgbe_led_off_generic, ++ .init_led_link_act = ixgbe_init_led_link_act_generic, + .reset_hw = &ixgbe_reset_hw_X540, + .get_media_type = &ixgbe_get_media_type_X540, + .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, +@@ -2947,12 +3874,14 @@ static const struct ixgbe_mac_operations mac_ops_X550 = { + .prot_autoc_read = prot_autoc_read_generic, + .prot_autoc_write = prot_autoc_write_generic, + .setup_fc = ixgbe_setup_fc_generic, ++ .fc_autoneg = ixgbe_fc_autoneg, + }; + + static const struct ixgbe_mac_operations mac_ops_X550EM_x = { + X550_COMMON_MAC + .led_on = ixgbe_led_on_t_x550em, + .led_off = ixgbe_led_off_t_x550em, ++ .init_led_link_act = ixgbe_init_led_link_act_generic, + .reset_hw = &ixgbe_reset_hw_X550em, + .get_media_type = &ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, +@@ -2965,6 +3894,29 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = { + .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, + .init_swfw_sync = &ixgbe_init_swfw_sync_X540, + .setup_fc = NULL, /* defined later */ ++ .fc_autoneg = ixgbe_fc_autoneg, ++ .read_iosf_sb_reg = 
ixgbe_read_iosf_sb_reg_x550, ++ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, ++}; ++ ++static const struct ixgbe_mac_operations mac_ops_X550EM_x_fw = { ++ X550_COMMON_MAC ++ .led_on = NULL, ++ .led_off = NULL, ++ .init_led_link_act = NULL, ++ .reset_hw = &ixgbe_reset_hw_X550em, ++ .get_media_type = &ixgbe_get_media_type_X550em, ++ .get_san_mac_addr = NULL, ++ .get_wwn_prefix = NULL, ++ .setup_link = &ixgbe_setup_mac_link_X540, ++ .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, ++ .get_bus_info = &ixgbe_get_bus_info_X550em, ++ .setup_sfp = ixgbe_setup_sfp_modules_X550em, ++ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, ++ .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, ++ .init_swfw_sync = &ixgbe_init_swfw_sync_X540, ++ .setup_fc = NULL, ++ .fc_autoneg = ixgbe_fc_autoneg, + .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550, + .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, + }; +@@ -2973,6 +3925,28 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = { + X550_COMMON_MAC + .led_on = ixgbe_led_on_t_x550em, + .led_off = ixgbe_led_off_t_x550em, ++ .init_led_link_act = ixgbe_init_led_link_act_generic, ++ .reset_hw = ixgbe_reset_hw_X550em, ++ .get_media_type = ixgbe_get_media_type_X550em, ++ .get_san_mac_addr = NULL, ++ .get_wwn_prefix = NULL, ++ .setup_link = &ixgbe_setup_mac_link_X540, ++ .get_link_capabilities = ixgbe_get_link_capabilities_X550em, ++ .get_bus_info = ixgbe_get_bus_info_X550em, ++ .setup_sfp = ixgbe_setup_sfp_modules_X550em, ++ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, ++ .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, ++ .setup_fc = ixgbe_setup_fc_x550em, ++ .fc_autoneg = ixgbe_fc_autoneg, ++ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, ++ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, ++}; ++ ++static struct ixgbe_mac_operations mac_ops_x550em_a_fw = { ++ X550_COMMON_MAC ++ .led_on = ixgbe_led_on_generic, ++ .led_off = ixgbe_led_off_generic, ++ .init_led_link_act = 
ixgbe_init_led_link_act_generic, + .reset_hw = ixgbe_reset_hw_X550em, + .get_media_type = ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, +@@ -2984,6 +3958,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = { + .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, + .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, + .setup_fc = ixgbe_setup_fc_x550em, ++ .fc_autoneg = ixgbe_fc_autoneg, + .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, + .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, + }; +@@ -3017,12 +3992,11 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ + .setup_link = &ixgbe_setup_phy_link_generic, \ +- .set_phy_power = NULL, \ +- .check_overtemp = &ixgbe_tn_check_overtemp, \ +- .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, ++ .set_phy_power = NULL, + + static const struct ixgbe_phy_operations phy_ops_X550 = { + X550_COMMON_PHY ++ .check_overtemp = &ixgbe_tn_check_overtemp, + .init = NULL, + .identify = &ixgbe_identify_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, +@@ -3031,19 +4005,27 @@ static const struct ixgbe_phy_operations phy_ops_X550 = { + + static const struct ixgbe_phy_operations phy_ops_X550EM_x = { + X550_COMMON_PHY ++ .check_overtemp = &ixgbe_tn_check_overtemp, + .init = &ixgbe_init_phy_ops_X550em, + .identify = &ixgbe_identify_phy_x550em, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, +- .read_i2c_combined = &ixgbe_read_i2c_combined_generic, +- .write_i2c_combined = &ixgbe_write_i2c_combined_generic, +- .read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, +- .write_i2c_combined_unlocked = +- &ixgbe_write_i2c_combined_generic_unlocked, ++}; ++ ++static const struct ixgbe_phy_operations phy_ops_x550em_x_fw = { ++ X550_COMMON_PHY ++ .check_overtemp = NULL, ++ .init = ixgbe_init_phy_ops_X550em, ++ 
.identify = ixgbe_identify_phy_x550em, ++ .read_reg = NULL, ++ .write_reg = NULL, ++ .read_reg_mdi = NULL, ++ .write_reg_mdi = NULL, + }; + + static const struct ixgbe_phy_operations phy_ops_x550em_a = { + X550_COMMON_PHY ++ .check_overtemp = &ixgbe_tn_check_overtemp, + .init = &ixgbe_init_phy_ops_X550em, + .identify = &ixgbe_identify_phy_x550em, + .read_reg = &ixgbe_read_phy_reg_x550a, +@@ -3052,6 +4034,31 @@ static const struct ixgbe_phy_operations phy_ops_x550em_a = { + .write_reg_mdi = &ixgbe_write_phy_reg_mdi, + }; + ++static const struct ixgbe_phy_operations phy_ops_x550em_a_fw = { ++ X550_COMMON_PHY ++ .check_overtemp = ixgbe_check_overtemp_fw, ++ .init = ixgbe_init_phy_ops_X550em, ++ .identify = ixgbe_identify_phy_fw, ++#if 0 /* Declare C22 MDI directly access functions. By hilbert */ ++ .read_reg = NULL, ++ .write_reg = NULL, ++ .read_reg_mdi = NULL, ++ .write_reg_mdi = NULL, ++#else ++ .read_reg = &ixgbe_read_phy_reg_x550a, ++ .write_reg = &ixgbe_write_phy_reg_x550a, ++ .read_reg_mdi = &ixgbe_read_phy_reg_mdio, ++ .write_reg_mdi = &ixgbe_write_phy_reg_mdio, ++#endif ++}; ++ ++static const struct ixgbe_link_operations link_ops_x550em_x = { ++ .read_link = &ixgbe_read_i2c_combined_generic, ++ .read_link_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, ++ .write_link = &ixgbe_write_i2c_combined_generic, ++ .write_link_unlocked = &ixgbe_write_i2c_combined_generic_unlocked, ++}; ++ + static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(X550) + }; +@@ -3082,14 +4089,35 @@ const struct ixgbe_info ixgbe_X550EM_x_info = { + .phy_ops = &phy_ops_X550EM_x, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_X550EM_x, ++ .link_ops = &link_ops_x550em_x, ++}; ++ ++const struct ixgbe_info ixgbe_x550em_x_fw_info = { ++ .mac = ixgbe_mac_X550EM_x, ++ .get_invariants = ixgbe_get_invariants_X550_x_fw, ++ .mac_ops = &mac_ops_X550EM_x_fw, ++ .eeprom_ops = &eeprom_ops_X550EM_x, ++ .phy_ops = &phy_ops_x550em_x_fw, ++ .mbx_ops = &mbx_ops_generic, 
++ .mvals = ixgbe_mvals_X550EM_x, + }; + + const struct ixgbe_info ixgbe_x550em_a_info = { + .mac = ixgbe_mac_x550em_a, +- .get_invariants = &ixgbe_get_invariants_X550_x, ++ .get_invariants = &ixgbe_get_invariants_X550_a, + .mac_ops = &mac_ops_x550em_a, + .eeprom_ops = &eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_x550em_a, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_x550em_a, + }; ++ ++const struct ixgbe_info ixgbe_x550em_a_fw_info = { ++ .mac = ixgbe_mac_x550em_a, ++ .get_invariants = ixgbe_get_invariants_X550_a_fw, ++ .mac_ops = &mac_ops_x550em_a_fw, ++ .eeprom_ops = &eeprom_ops_X550EM_x, ++ .phy_ops = &phy_ops_x550em_a_fw, ++ .mbx_ops = &mbx_ops_generic, ++ .mvals = ixgbe_mvals_x550em_a, ++}; +-- +2.7.4 + From c5cf09b2c34d1b010d02b57bd66ee9b715604fca Mon Sep 17 00:00:00 2001 From: PeterLin Date: Tue, 30 Apr 2019 15:38:55 +0800 Subject: [PATCH 12/20] update sdk config file --- .../fn-6254-dn-f/tau-fn-6254-dn-f.dsh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/fn-6254-dn-f/tau-fn-6254-dn-f.dsh b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/fn-6254-dn-f/tau-fn-6254-dn-f.dsh index efe3bbf04e88..98a0c8f4912e 100755 --- a/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/fn-6254-dn-f/tau-fn-6254-dn-f.dsh +++ b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/fn-6254-dn-f/tau-fn-6254-dn-f.dsh @@ -630,4 +630,5 @@ port set property portlist=129-130 medium-type=kr port set property portlist=0-53 fec=disable port set adver portlist=129-130 speed-10g-kr port set property portlist=129-130 an=enable -port set property portlist=0-53,129-130 admin=enable +port set property portlist=0-53 admin=disable +port set property portlist=129-130 admin=enable From dd7ea361c94da61411d100dd85028273e499f3c6 Mon Sep 17 00:00:00 2001 From: PeterLin Date: Thu, 2 May 2019 17:17:44 +0800 Subject: [PATCH 13/20] remove ixgbe kernel patch --- ...gbe-driver-for-pegatron-fn-6254-dn-f.patch | 4666 ----------------- 1 file 
changed, 4666 deletions(-) delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/kernel_patch/0001-update-Intel-ixgbe-driver-for-pegatron-fn-6254-dn-f.patch diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/kernel_patch/0001-update-Intel-ixgbe-driver-for-pegatron-fn-6254-dn-f.patch b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/kernel_patch/0001-update-Intel-ixgbe-driver-for-pegatron-fn-6254-dn-f.patch deleted file mode 100644 index 0ffce8ae2ecf..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/kernel_patch/0001-update-Intel-ixgbe-driver-for-pegatron-fn-6254-dn-f.patch +++ /dev/null @@ -1,4666 +0,0 @@ -From f55d2dcb51f86f58f43cf563045fe6c4dfd590e0 Mon Sep 17 00:00:00 2001 -From: PeterLin -Date: Thu, 11 Apr 2019 14:21:33 +0800 -Subject: [PATCH] update Intel ixgbe driver for pegatron fn-6254-dn-f - ---- - drivers/net/ethernet/intel/ixgbe/ixgbe.h | 10 + - drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 28 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 15 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 439 ++++-- - drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 7 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 103 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 85 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 407 +++--- - drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 27 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 153 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 20 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 1668 +++++++++++++++++----- - 12 files changed, 2282 insertions(+), 680 deletions(-) - -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h -index b06e32d..255ec3b 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h -@@ -89,6 +89,7 @@ - - /* Supported Rx Buffer Sizes */ - #define IXGBE_RXBUFFER_256 256 /* Used for 
skb receive header */ -+#define IXGBE_RXBUFFER_1536 1536 - #define IXGBE_RXBUFFER_2K 2048 - #define IXGBE_RXBUFFER_3K 3072 - #define IXGBE_RXBUFFER_4K 4096 -@@ -661,6 +662,9 @@ struct ixgbe_adapter { - #define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) - #define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12) - #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) -+#define IXGBE_FLAG2_EEE_CAPABLE BIT(14) -+#define IXGBE_FLAG2_EEE_ENABLED BIT(15) -+#define IXGBE_FLAG2_RX_LEGACY BIT(16) - - /* Tx fast path data */ - int num_tx_queues; -@@ -861,7 +865,9 @@ enum ixgbe_boards { - board_X540, - board_X550, - board_X550EM_x, -+ board_x550em_x_fw, - board_x550em_a, -+ board_x550em_a_fw, - }; - - extern const struct ixgbe_info ixgbe_82598_info; -@@ -869,7 +875,9 @@ extern const struct ixgbe_info ixgbe_82599_info; - extern const struct ixgbe_info ixgbe_X540_info; - extern const struct ixgbe_info ixgbe_X550_info; - extern const struct ixgbe_info ixgbe_X550EM_x_info; -+extern const struct ixgbe_info ixgbe_x550em_x_fw_info; - extern const struct ixgbe_info ixgbe_x550em_a_info; -+extern const struct ixgbe_info ixgbe_x550em_a_fw_info; - #ifdef CONFIG_IXGBE_DCB - extern const struct dcbnl_rtnl_ops dcbnl_ops; - #endif -@@ -1027,4 +1035,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, - struct ixgbe_ring *tx_ring); - u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); - void ixgbe_store_reta(struct ixgbe_adapter *adapter); -+s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, -+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); - #endif /* _IXGBE_H_ */ -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c -index fb51be7..8a32eb7 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c -@@ -139,8 +139,6 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) - case ixgbe_phy_tn: - phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; - phy->ops.check_link = 
&ixgbe_check_phy_link_tnx; -- phy->ops.get_firmware_version = -- &ixgbe_get_phy_firmware_version_tnx; - break; - case ixgbe_phy_nl: - phy->ops.reset = &ixgbe_reset_phy_nl; -@@ -177,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) - **/ - static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) - { --#ifndef CONFIG_SPARC -- u32 regval; -- u32 i; --#endif - s32 ret_val; - - ret_val = ixgbe_start_hw_generic(hw); -- --#ifndef CONFIG_SPARC -- /* Disable relaxed ordering */ -- for (i = 0; ((i < hw->mac.max_tx_queues) && -- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { -- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); -- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; -- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); -- } -- -- for (i = 0; ((i < hw->mac.max_rx_queues) && -- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { -- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); -- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | -- IXGBE_DCA_RXCTRL_HEAD_WRO_EN); -- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); -- } --#endif - if (ret_val) - return ret_val; - -@@ -367,7 +343,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) - } - - /* Negotiate the fc mode to use */ -- ixgbe_fc_autoneg(hw); -+ hw->mac.ops.fc_autoneg(hw); - - /* Disable any previous flow control settings */ - fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); -@@ -1179,6 +1155,7 @@ static const struct ixgbe_mac_operations mac_ops_82598 = { - .get_link_capabilities = &ixgbe_get_link_capabilities_82598, - .led_on = &ixgbe_led_on_generic, - .led_off = &ixgbe_led_off_generic, -+ .init_led_link_act = ixgbe_init_led_link_act_generic, - .blink_led_start = &ixgbe_blink_led_start_generic, - .blink_led_stop = &ixgbe_blink_led_stop_generic, - .set_rar = &ixgbe_set_rar_generic, -@@ -1193,6 +1170,7 @@ static const struct ixgbe_mac_operations mac_ops_82598 = { - .set_vfta = &ixgbe_set_vfta_82598, - .fc_enable = &ixgbe_fc_enable_82598, - .setup_fc = ixgbe_setup_fc_generic, -+ .fc_autoneg = ixgbe_fc_autoneg, - 
.set_fw_drv_ver = NULL, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, - .release_swfw_sync = &ixgbe_release_swfw_sync, -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c -index 63b2500..d602637 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c -@@ -331,8 +331,6 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) - case ixgbe_phy_tn: - phy->ops.check_link = &ixgbe_check_phy_link_tnx; - phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; -- phy->ops.get_firmware_version = -- &ixgbe_get_phy_firmware_version_tnx; - break; - default: - break; -@@ -1451,7 +1449,7 @@ do { \ - * @atr_input: input bitstream to compute the hash on - * @input_mask: mask for the input bitstream - * -- * This function serves two main purposes. First it applys the input_mask -+ * This function serves two main purposes. First it applies the input_mask - * to the atr_input resulting in a cleaned up atr_input data stream. - * Secondly it computes the hash and stores it in the bkt_hash field at - * the end of the input byte stream. 
This way it will be available for -@@ -1591,15 +1589,17 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, - - switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) { - case 0x0000: -- /* mask VLAN ID, fall through to mask VLAN priority */ -+ /* mask VLAN ID */ - fdirm |= IXGBE_FDIRM_VLANID; -+ /* fall through */ - case 0x0FFF: - /* mask VLAN priority */ - fdirm |= IXGBE_FDIRM_VLANP; - break; - case 0xE000: -- /* mask VLAN ID only, fall through */ -+ /* mask VLAN ID only */ - fdirm |= IXGBE_FDIRM_VLANID; -+ /* fall through */ - case 0xEFFF: - /* no VLAN fields masked */ - break; -@@ -1610,8 +1610,9 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, - - switch (input_mask->formatted.flex_bytes & 0xFFFF) { - case 0x0000: -- /* Mask Flex Bytes, fall through */ -+ /* Mask Flex Bytes */ - fdirm |= IXGBE_FDIRM_FLEX; -+ /* fall through */ - case 0xFFFF: - break; - default: -@@ -2204,6 +2205,7 @@ static const struct ixgbe_mac_operations mac_ops_82599 = { - .get_link_capabilities = &ixgbe_get_link_capabilities_82599, - .led_on = &ixgbe_led_on_generic, - .led_off = &ixgbe_led_off_generic, -+ .init_led_link_act = ixgbe_init_led_link_act_generic, - .blink_led_start = &ixgbe_blink_led_start_generic, - .blink_led_stop = &ixgbe_blink_led_stop_generic, - .set_rar = &ixgbe_set_rar_generic, -@@ -2219,6 +2221,7 @@ static const struct ixgbe_mac_operations mac_ops_82599 = { - .set_vfta = &ixgbe_set_vfta_generic, - .fc_enable = &ixgbe_fc_enable_generic, - .setup_fc = ixgbe_setup_fc_generic, -+ .fc_autoneg = ixgbe_fc_autoneg, - .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, - .init_uta_tables = &ixgbe_init_uta_tables_generic, - .setup_sfp = &ixgbe_setup_sfp_modules_82599, -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c -index ad33622..fd055cc 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c -@@ -79,16 +79,28 @@ bool 
ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) - - switch (hw->phy.media_type) { - case ixgbe_media_type_fiber: -- hw->mac.ops.check_link(hw, &speed, &link_up, false); -- /* if link is down, assume supported */ -- if (link_up) -- supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? -+ /* flow control autoneg black list */ -+ switch (hw->device_id) { -+ case IXGBE_DEV_ID_X550EM_A_SFP: -+ case IXGBE_DEV_ID_X550EM_A_SFP_N: -+ supported = false; -+ break; -+ default: -+ hw->mac.ops.check_link(hw, &speed, &link_up, false); -+ /* if link is down, assume supported */ -+ if (link_up) -+ supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? - true : false; -- else -- supported = true; -+ else -+ supported = true; -+ } -+ - break; - case ixgbe_media_type_backplane: -- supported = true; -+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) -+ supported = false; -+ else -+ supported = true; - break; - case ixgbe_media_type_copper: - /* only some copper devices support flow control autoneg */ -@@ -100,6 +112,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) - case IXGBE_DEV_ID_X550T1: - case IXGBE_DEV_ID_X550EM_X_10G_T: - case IXGBE_DEV_ID_X550EM_A_10G_T: -+ case IXGBE_DEV_ID_X550EM_A_1G_T: -+ case IXGBE_DEV_ID_X550EM_A_1G_T_L: - supported = true; - break; - default: -@@ -109,6 +123,10 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) - break; - } - -+ if (!supported) -+ hw_dbg(hw, "Device %x does not support flow control autoneg\n", -+ hw->device_id); -+ - return supported; - } - -@@ -153,7 +171,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) - if (ret_val) - return ret_val; - -- /* only backplane uses autoc so fall though */ -+ /* fall through - only backplane uses autoc */ - case ixgbe_media_type_fiber: - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - -@@ -279,6 +297,10 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) - s32 ret_val; - u32 ctrl_ext; - u16 device_caps; -+#if 1 //by hilbert -+ s32 rc; -+ u16 regVal=0; -+#endif - - /* Set the media type 
*/ - hw->phy.media_type = hw->mac.ops.get_media_type(hw); -@@ -298,10 +320,12 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) - IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); - IXGBE_WRITE_FLUSH(hw); - -- /* Setup flow control */ -- ret_val = hw->mac.ops.setup_fc(hw); -- if (ret_val) -- return ret_val; -+ /* Setup flow control if method for doing so */ -+ if (hw->mac.ops.setup_fc) { -+ ret_val = hw->mac.ops.setup_fc(hw); -+ if (ret_val) -+ return ret_val; -+ } - - /* Cashe bit indicating need for crosstalk fix */ - switch (hw->mac.type) { -@@ -322,6 +346,67 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) - /* Clear adapter stopped flag */ - hw->adapter_stopped = false; - -+#if 1 /* To modify speed LED polarity and configure led on only for speed 1G in M88E1512 -+ * for Porsche2 platform. By hilbert -+ * From 88E1512 datasheet: -+ * Page register: 0x16 -+ * LED functon control register: 0x10 in page 3 -+ * LED polarity control register: 0x11 in page 3 -+ */ -+ -+ if (hw->mac.type == ixgbe_mac_x550em_a && -+ (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { -+ /* For M88E1512, to select page 3 in register 0x16 */ -+ regVal = 0x03; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ } -+#if 0 //for debug -+ /* For M88E1512, read from register 0x16 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.read_reg(hw, 0x16, MDIO_MMD_PMAPMD, ®Val); -+ if (rc) { -+ hw_err(hw, "phy register read failed, rc:%x\n", rc); -+ } -+ hw_err(hw, "####read phy register 0x16 again, value:%x\n", regVal); -+#endif -+ /* For M88E1512, read from page 3, register 0x11 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.read_reg(hw, 0x11, MDIO_MMD_PMAPMD, ®Val); -+ if (rc) { -+ hw_err(hw, "led polarity register read failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write to page 3 register 0x11 with polarity bit set */ -+ regVal |= 0x01; -+ rc = hw->phy.ops.write_reg(hw, 0x11, MDIO_MMD_PMAPMD, regVal); -+ if 
(rc) { -+ hw_err(hw, "led polarity register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, read from page 3, register 16 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); -+ if (rc) { -+ hw_err(hw, "led function control register read failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write to page 3 register 16 with only 1000M led on */ -+ regVal = (regVal & 0xFFF0) | 0x0007; -+ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "led function control register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write page 22 back to default 0 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ } -+ } -+#endif - return 0; - } - -@@ -346,25 +431,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) - } - IXGBE_WRITE_FLUSH(hw); - --#ifndef CONFIG_SPARC -- /* Disable relaxed ordering */ -- for (i = 0; i < hw->mac.max_tx_queues; i++) { -- u32 regval; -- -- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); -- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; -- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); -- } -- -- for (i = 0; i < hw->mac.max_rx_queues; i++) { -- u32 regval; -- -- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); -- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | -- IXGBE_DCA_RXCTRL_HEAD_WRO_EN); -- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); -- } --#endif - return 0; - } - -@@ -390,6 +456,10 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) - status = hw->mac.ops.start_hw(hw); - } - -+ /* Initialize the LED link active for LED blink support */ -+ if (hw->mac.ops.init_led_link_act) -+ hw->mac.ops.init_led_link_act(hw); -+ - return status; - } - -@@ -773,22 +843,100 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) - } - - /** -+ * ixgbe_init_led_link_act_generic - Store the LED index link/activity. 
-+ * @hw: pointer to hardware structure -+ * -+ * Store the index for the link active LED. This will be used to support -+ * blinking the LED. -+ **/ -+s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) -+{ -+ struct ixgbe_mac_info *mac = &hw->mac; -+ u32 led_reg, led_mode; -+ u16 i; -+ -+ led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); -+ -+ /* Get LED link active from the LEDCTL register */ -+ for (i = 0; i < 4; i++) { -+ led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i); -+ -+ if ((led_mode & IXGBE_LED_MODE_MASK_BASE) == -+ IXGBE_LED_LINK_ACTIVE) { -+ mac->led_link_act = i; -+ return 0; -+ } -+ } -+ -+ /* If LEDCTL register does not have the LED link active set, then use -+ * known MAC defaults. -+ */ -+ switch (hw->mac.type) { -+ case ixgbe_mac_x550em_a: -+ mac->led_link_act = 0; -+ break; -+ case ixgbe_mac_X550EM_x: -+ mac->led_link_act = 1; -+ break; -+ default: -+ mac->led_link_act = 2; -+ } -+ -+ return 0; -+} -+ -+/** - * ixgbe_led_on_generic - Turns on the software controllable LEDs. - * @hw: pointer to hardware structure - * @index: led number to turn on - **/ - s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) - { -- u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); -- -- if (index > 3) -- return IXGBE_ERR_PARAM; -- -- /* To turn on the LED, set mode to ON. */ -- led_reg &= ~IXGBE_LED_MODE_MASK(index); -- led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); -- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); -- IXGBE_WRITE_FLUSH(hw); -+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); -+ s32 rc; -+ u16 regVal; -+ -+ /* following led behavior was modified by hilbert, -+ * to force led on through C22 MDI command. 
-+ */ -+ if (hw->mac.type == ixgbe_mac_x550em_a) { -+ /* For M88E1512, to select page 3 in register 22 */ -+ regVal = 0x03; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, read from page 3, register 16 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); -+ if (rc) { -+ hw_err(hw, "led function control register read failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write to page 3 register 16 with force led on */ -+ regVal = (regVal & 0xFF00) | 0x0099; -+ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "led function control register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write page 22 back to default 0 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ } -+ } else { -+ if (index > 3) -+ return IXGBE_ERR_PARAM; -+ -+ /* To turn on the LED, set mode to ON. */ -+ led_reg &= ~IXGBE_LED_MODE_MASK(index); -+ led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); -+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); -+ IXGBE_WRITE_FLUSH(hw); -+ } - - return 0; - } -@@ -801,15 +949,50 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) - s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) - { - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); -- -- if (index > 3) -- return IXGBE_ERR_PARAM; -- -- /* To turn off the LED, set mode to OFF. */ -- led_reg &= ~IXGBE_LED_MODE_MASK(index); -- led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); -- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); -- IXGBE_WRITE_FLUSH(hw); -+ s32 rc; -+ u16 regVal; -+ -+ /* following led behavior was modified by hilbert, -+ * to force led on through C22 MDI command. 
-+ */ -+ if (hw->mac.type == ixgbe_mac_x550em_a) { -+ /* For M88E1512, to select page 3 in register 22 */ -+ regVal = 0x03; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, read from page 3, register 16 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); -+ if (rc) { -+ hw_err(hw, "led function control register read failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write to page 3 register 16 with force led on */ -+ regVal = (regVal & 0xFF00) | 0x0088; -+ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "led function control register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write page 22 back to default 0 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ } -+ } else { -+ if (index > 3) -+ return IXGBE_ERR_PARAM; -+ -+ /* To turn off the LED, set mode to OFF. 
*/ -+ led_reg &= ~IXGBE_LED_MODE_MASK(index); -+ led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); -+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); -+ IXGBE_WRITE_FLUSH(hw); -+ } - - return 0; - } -@@ -2127,7 +2310,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) - } - - /* Negotiate the fc mode to use */ -- ixgbe_fc_autoneg(hw); -+ hw->mac.ops.fc_autoneg(hw); - - /* Disable any previous flow control settings */ - mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); -@@ -2231,8 +2414,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) - * Find the intersection between advertised settings and link partner's - * advertised settings - **/ --static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, -- u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) -+s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, -+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) - { - if ((!(adv_reg)) || (!(lp_reg))) - return IXGBE_ERR_FC_NOT_NEGOTIATED; -@@ -3334,6 +3517,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - else - *speed = IXGBE_LINK_SPEED_100_FULL; - break; -+ case IXGBE_LINKS_SPEED_10_X550EM_A: -+ *speed = IXGBE_LINK_SPEED_UNKNOWN; -+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || -+ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { -+ *speed = IXGBE_LINK_SPEED_10_FULL; -+ } -+ break; - default: - *speed = IXGBE_LINK_SPEED_UNKNOWN; - } -@@ -3491,7 +3681,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, - rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; - for (; i < (num_pb / 2); i++) - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); -- /* Fall through to configure remaining packet buffers */ -+ /* fall through - configure remaining packet buffers */ - case (PBA_STRATEGY_EQUAL): - /* Divide the remaining Rx packet buffer evenly among the TCs */ - rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; -@@ -3530,7 +3720,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, - * Calculates the checksum 
for some buffer on a specified length. The - * checksum calculated is returned. - **/ --static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) -+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) - { - u32 i; - u8 sum = 0; -@@ -3545,43 +3735,29 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) - } - - /** -- * ixgbe_host_interface_command - Issue command to manageability block -+ * ixgbe_hic_unlocked - Issue command to manageability block unlocked - * @hw: pointer to the HW structure -- * @buffer: contains the command to write and where the return status will -- * be placed -+ * @buffer: command to write and where the return status will be placed - * @length: length of buffer, must be multiple of 4 bytes - * @timeout: time in ms to wait for command completion -- * @return_data: read and return data from the buffer (true) or not (false) -- * Needed because FW structures are big endian and decoding of -- * these fields can be 8 bit or 16 bit based on command. Decoding -- * is not easily understood without making a table of commands. -- * So we will leave this up to the caller to read back the data -- * in these cases. - * -- * Communicates with the manageability block. On success return 0 -- * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. -+ * Communicates with the manageability block. On success return 0 -+ * else returns semaphore error when encountering an error acquiring -+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. -+ * -+ * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held -+ * by the caller. 
- **/ --s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, -- u32 length, u32 timeout, -- bool return_data) -+s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, -+ u32 timeout) - { -- u32 hdr_size = sizeof(struct ixgbe_hic_hdr); -- u32 hicr, i, bi, fwsts; -- u16 buf_len, dword_len; -- union { -- struct ixgbe_hic_hdr hdr; -- u32 u32arr[1]; -- } *bp = buffer; -- s32 status; -+ u32 hicr, i, fwsts; -+ u16 dword_len; - - if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { - hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; - } -- /* Take management host interface semaphore */ -- status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); -- if (status) -- return status; - - /* Set bit 9 of FWSTS clearing FW reset indication */ - fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); -@@ -3591,15 +3767,13 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, - hicr = IXGBE_READ_REG(hw, IXGBE_HICR); - if (!(hicr & IXGBE_HICR_EN)) { - hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); -- status = IXGBE_ERR_HOST_INTERFACE_COMMAND; -- goto rel_out; -+ return IXGBE_ERR_HOST_INTERFACE_COMMAND; - } - - /* Calculate length in DWORDs. We must be DWORD aligned */ - if (length % sizeof(u32)) { - hw_dbg(hw, "Buffer length failure, not aligned to dword"); -- status = IXGBE_ERR_INVALID_ARGUMENT; -- goto rel_out; -+ return IXGBE_ERR_INVALID_ARGUMENT; - } - - dword_len = length >> 2; -@@ -3609,7 +3783,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, - */ - for (i = 0; i < dword_len; i++) - IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, -- i, cpu_to_le32(bp->u32arr[i])); -+ i, cpu_to_le32(buffer[i])); - - /* Setting this bit tells the ARC that a new command is pending. */ - IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); -@@ -3623,11 +3797,54 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, - - /* Check command successful completion. 
*/ - if ((timeout && i == timeout) || -- !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { -- hw_dbg(hw, "Command has failed with no status valid.\n"); -- status = IXGBE_ERR_HOST_INTERFACE_COMMAND; -- goto rel_out; -+ !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) -+ return IXGBE_ERR_HOST_INTERFACE_COMMAND; -+ -+ return 0; -+} -+ -+/** -+ * ixgbe_host_interface_command - Issue command to manageability block -+ * @hw: pointer to the HW structure -+ * @buffer: contains the command to write and where the return status will -+ * be placed -+ * @length: length of buffer, must be multiple of 4 bytes -+ * @timeout: time in ms to wait for command completion -+ * @return_data: read and return data from the buffer (true) or not (false) -+ * Needed because FW structures are big endian and decoding of -+ * these fields can be 8 bit or 16 bit based on command. Decoding -+ * is not easily understood without making a table of commands. -+ * So we will leave this up to the caller to read back the data -+ * in these cases. -+ * -+ * Communicates with the manageability block. On success return 0 -+ * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. 
-+ **/ -+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, -+ u32 length, u32 timeout, -+ bool return_data) -+{ -+ u32 hdr_size = sizeof(struct ixgbe_hic_hdr); -+ union { -+ struct ixgbe_hic_hdr hdr; -+ u32 u32arr[1]; -+ } *bp = buffer; -+ u16 buf_len, dword_len; -+ s32 status; -+ u32 bi; -+ -+ if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { -+ hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); -+ return IXGBE_ERR_HOST_INTERFACE_COMMAND; - } -+ /* Take management host interface semaphore */ -+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); -+ if (status) -+ return status; -+ -+ status = ixgbe_hic_unlocked(hw, buffer, length, timeout); -+ if (status) -+ goto rel_out; - - if (!return_data) - goto rel_out; -@@ -3674,6 +3891,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, - * @min: driver version minor number - * @build: driver version build number - * @sub: driver version sub build number -+ * @len: length of driver_ver string -+ * @driver_ver: driver string - * - * Sends driver version number to firmware through the manageability - * block. On success return 0 -@@ -3681,7 +3900,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, - * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
- **/ - s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, -- u8 build, u8 sub) -+ u8 build, u8 sub, __always_unused u16 len, -+ __always_unused const char *driver_ver) - { - struct ixgbe_hic_drv_info fw_cmd; - int i; -@@ -4033,15 +4253,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, - speedcnt++; - highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; - -- /* If we already have link at this speed, just jump out */ -- status = hw->mac.ops.check_link(hw, &link_speed, &link_up, -- false); -- if (status) -- return status; -- -- if (link_speed == IXGBE_LINK_SPEED_10GB_FULL && link_up) -- goto out; -- - /* Set the module link speed */ - switch (hw->phy.media_type) { - case ixgbe_media_type_fiber: -@@ -4093,15 +4304,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, - if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) - highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; - -- /* If we already have link at this speed, just jump out */ -- status = hw->mac.ops.check_link(hw, &link_speed, &link_up, -- false); -- if (status) -- return status; -- -- if (link_speed == IXGBE_LINK_SPEED_1GB_FULL && link_up) -- goto out; -- - /* Set the module link speed */ - switch (hw->phy.media_type) { - case ixgbe_media_type_fiber: -@@ -4208,4 +4410,23 @@ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, - hw_dbg(hw, "Failed to write Rx Rate Select RS0\n"); - return; - } -+ -+ /* Set RS1 */ -+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, -+ IXGBE_I2C_EEPROM_DEV_ADDR2, -+ &eeprom_data); -+ if (status) { -+ hw_dbg(hw, "Failed to read Rx Rate Select RS1\n"); -+ return; -+ } -+ -+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; -+ -+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, -+ IXGBE_I2C_EEPROM_DEV_ADDR2, -+ eeprom_data); -+ if (status) { -+ hw_dbg(hw, "Failed to write Rx Rate Select RS1\n"); -+ return; -+ } - } -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h -index 6d4c260..e083732 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h -@@ -49,6 +49,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); - - s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); - s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); -+s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw); - - s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); - s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); -@@ -110,9 +111,13 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); - void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); - s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); - s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, -- u8 build, u8 ver); -+ u8 build, u8 ver, u16 len, const char *str); -+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); - s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, - u32 timeout, bool return_data); -+s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout); -+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, -+ u32 (*data)[FW_PHY_ACT_DATA_COUNT]); - void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); - bool ixgbe_mng_present(struct ixgbe_hw *hw); - bool ixgbe_mng_enabled(struct ixgbe_hw *hw); -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c -index a137e06..6b23b74 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c -@@ -172,6 +172,7 @@ static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw) - case IXGBE_DEV_ID_82598_BX: - case IXGBE_DEV_ID_82599_KR: - case IXGBE_DEV_ID_X550EM_X_KR: -+ case IXGBE_DEV_ID_X550EM_X_XFI: - return SUPPORTED_10000baseKR_Full; - default: - 
return SUPPORTED_10000baseKX4_Full | -@@ -237,6 +238,7 @@ static int ixgbe_get_settings(struct net_device *netdev, - case ixgbe_phy_tn: - case ixgbe_phy_aq: - case ixgbe_phy_x550em_ext_t: -+ case ixgbe_phy_fw: - case ixgbe_phy_cu_unknown: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; -@@ -394,6 +396,9 @@ static int ixgbe_set_settings(struct net_device *netdev, - if (ecmd->advertising & ADVERTISED_100baseT_Full) - advertised |= IXGBE_LINK_SPEED_100_FULL; - -+ if (ecmd->advertising & ADVERTISED_10baseT_Full) -+ advertised |= IXGBE_LINK_SPEED_10_FULL; -+ - if (old == advertised) - return err; - /* this sets the link speed and restarts auto-neg */ -@@ -491,6 +496,59 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) - { - struct ixgbe_adapter *adapter = netdev_priv(netdev); - adapter->msg_enable = data; -+ -+ /* 2018/11/14 pega-julia modified start */ -+ /* Purpose : Add for light OOB LED static. */ -+ -+ struct ixgbe_hw *hw = &adapter->hw; -+ u16 regVal; -+ s32 rc; -+ -+ /* For M88E1512, write 3 in (page 0,register 22)[Page Address Register] to goto page 3 */ -+ regVal = 0x03; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ -+ /* For M88E1512, read from (page 3, register 16)[LED Function Control Register] */ -+ regVal = 0x00; -+ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); -+ /*hw_err(hw, "[Pega Debug] : current register value = 0x%x\n", regVal);*/ -+ if (rc) -+ hw_err(hw, "led function control register read failed, rc:%x\n", rc); -+ -+ if (data == 0) /* Turn off OOB LED. */ -+ { -+ /* For M88E1512, write to (page 3, register 16) with force led off */ -+ regVal = (regVal & 0xFF00) | 0x0088; -+ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); -+ if (rc) -+ hw_err(hw, "led function control register write failed, rc:%x\n", rc); -+ } -+ else if (data == 1) /* Turn on OOB LED. 
*/ -+ { -+ /* For M88E1512, write to (page 3, register 16) with force led on */ -+ regVal = (regVal & 0xFF00) | 0x0099; -+ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); -+ if (rc) -+ hw_err(hw, "led function control register write failed, rc:%x\n", rc); -+ } -+ else /* Switch OOB LED back to normal. */ -+ { -+ /* For M88E1512, set led back to nornmal in (page 3, register 16). */ -+ regVal = (regVal & 0xFF00) | 0x0017; -+ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); -+ if (rc) -+ hw_err(hw, "led function control register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write 0 in (page 0, register 22) to back to page 0 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ -+ /* 2018/11/14 pega-julia modified end */ - } - - static int ixgbe_get_regs_len(struct net_device *netdev) -@@ -2219,22 +2277,61 @@ static int ixgbe_set_phys_id(struct net_device *netdev, - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - -+ /* Modified by hilbert for C22 MDI directly access */ -+ s32 rc; -+ u16 regVal; -+ /* Modified by hilbert done */ -+ -+ if (!hw->mac.ops.led_on || !hw->mac.ops.led_off) -+ return -EOPNOTSUPP; -+ - switch (state) { - case ETHTOOL_ID_ACTIVE: - adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - return 2; - - case ETHTOOL_ID_ON: -- hw->mac.ops.led_on(hw, hw->bus.func); -+ hw->mac.ops.led_on(hw, hw->mac.led_link_act); - break; - - case ETHTOOL_ID_OFF: -- hw->mac.ops.led_off(hw, hw->bus.func); -+ hw->mac.ops.led_off(hw, hw->mac.led_link_act); - break; - - case ETHTOOL_ID_INACTIVE: - /* Restore LED settings */ -- IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); -+ /* Modified by hilbert for C22 MDI directly access */ -+ if (hw->mac.type == ixgbe_mac_x550em_a) { -+ /* For M88E1512, to select page 3 in register 22 */ -+ regVal = 0x03; -+ rc = 
hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, read from page 3, register 16 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); -+ if (rc) { -+ hw_err(hw, "led function control register read failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write to page 3 register 16 with force led on */ -+ regVal = (regVal & 0xFF00) | 0x0017; -+ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "led function control register write failed, rc:%x\n", rc); -+ } -+ -+ /* For M88E1512, write page 22 back to default 0 */ -+ regVal = 0x00; -+ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); -+ if (rc) { -+ hw_err(hw, "page register write failed, rc:%x\n", rc); -+ } -+ } else { -+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); -+ } - break; - } - -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c -index a5428b6..66753f1 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c -@@ -84,7 +84,9 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { - [board_X540] = &ixgbe_X540_info, - [board_X550] = &ixgbe_X550_info, - [board_X550EM_x] = &ixgbe_X550EM_x_info, -+ [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info, - [board_x550em_a] = &ixgbe_x550em_a_info, -+ [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info, - }; - - /* ixgbe_pci_tbl - PCI Device ID Table -@@ -129,9 +131,11 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, -+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, - {PCI_VDEVICE(INTEL, 
IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, -+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, -@@ -139,6 +143,8 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, -+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw }, -+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw }, - /* required last entry */ - {0, } - }; -@@ -179,6 +185,7 @@ MODULE_VERSION(DRV_VERSION); - static struct workqueue_struct *ixgbe_wq; - - static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); -+static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); - - static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, - u32 reg, u16 *value) -@@ -374,7 +381,7 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) - if (ixgbe_removed(reg_addr)) - return IXGBE_FAILED_READ_REG; - if (unlikely(hw->phy.nw_mng_if_sel & -- IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) { -+ IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) { - struct ixgbe_adapter *adapter; - int i; - -@@ -2446,6 +2453,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) - { - struct ixgbe_hw *hw = &adapter->hw; - u32 eicr = adapter->interrupt_event; -+ s32 rc; - - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return; -@@ -2484,6 +2492,12 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) - return; - - break; -+ case IXGBE_DEV_ID_X550EM_A_1G_T: -+ case IXGBE_DEV_ID_X550EM_A_1G_T_L: -+ rc = hw->phy.ops.check_overtemp(hw); -+ if (rc != 
IXGBE_ERR_OVERTEMP) -+ return; -+ break; - default: - if (adapter->hw.mac.type >= ixgbe_mac_X540) - return; -@@ -2530,6 +2544,18 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) - return; - } - return; -+ case ixgbe_mac_x550em_a: -+ if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) { -+ adapter->interrupt_event = eicr; -+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; -+ ixgbe_service_event_schedule(adapter); -+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, -+ IXGBE_EICR_GPI_SDP0_X550EM_a); -+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR, -+ IXGBE_EICR_GPI_SDP0_X550EM_a); -+ } -+ return; -+ case ixgbe_mac_X550: - case ixgbe_mac_X540: - if (!(eicr & IXGBE_EICR_TS)) - return; -@@ -5035,7 +5061,7 @@ static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) - static void ixgbe_configure(struct ixgbe_adapter *adapter) - { - struct ixgbe_hw *hw = &adapter->hw; -- -+ - ixgbe_configure_pb(adapter); - #ifdef CONFIG_IXGBE_DCB - ixgbe_configure_dcb(adapter); -@@ -5045,10 +5071,9 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) - * the VLVF registers will not be populated - */ - ixgbe_configure_virtualization(adapter); -- - ixgbe_set_rx_mode(adapter->netdev); - ixgbe_restore_vlan(adapter); -- -+ - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: -@@ -5075,7 +5100,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) - default: - break; - } -- - #ifdef CONFIG_IXGBE_DCA - /* configure DCA */ - if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) -@@ -5291,6 +5315,8 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) - - while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) - usleep_range(1000, 2000); -+ if (adapter->hw.phy.type == ixgbe_phy_fw) -+ ixgbe_watchdog_link_is_down(adapter); - ixgbe_down(adapter); - /* - * If SR-IOV enabled then wait a bit before bringing the adapter -@@ -5706,6 +5732,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) - break; - case ixgbe_mac_x550em_a: - 
adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE; -+ switch (hw->device_id) { -+ case IXGBE_DEV_ID_X550EM_A_1G_T: -+ case IXGBE_DEV_ID_X550EM_A_1G_T_L: -+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; -+ break; -+ default: -+ break; -+ } - /* fall through */ - case ixgbe_mac_X550EM_x: - #ifdef CONFIG_IXGBE_DCB -@@ -5719,6 +5753,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) - #endif /* IXGBE_FCOE */ - /* Fall Through */ - case ixgbe_mac_X550: -+ if (hw->mac.type == ixgbe_mac_X550) -+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; - #ifdef CONFIG_IXGBE_DCA - adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; - #endif -@@ -6093,29 +6129,28 @@ int ixgbe_open(struct net_device *netdev) - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int err, queues; -- -+ - /* disallow open during test */ - if (test_bit(__IXGBE_TESTING, &adapter->state)) - return -EBUSY; -- -+ - netif_carrier_off(netdev); -- -+ - /* allocate transmit descriptors */ - err = ixgbe_setup_all_tx_resources(adapter); - if (err) - goto err_setup_tx; -- -+ - /* allocate receive descriptors */ - err = ixgbe_setup_all_rx_resources(adapter); - if (err) - goto err_setup_rx; -- -+ - ixgbe_configure(adapter); -- - err = ixgbe_request_irq(adapter); - if (err) - goto err_req_irq; -- -+ - /* Notify the stack of the actual queue counts. 
*/ - if (adapter->num_rx_pools > 1) - queues = adapter->num_rx_queues_per_pool; -@@ -6791,6 +6826,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) - case IXGBE_LINK_SPEED_100_FULL: - speed_str = "100 Mbps"; - break; -+ case IXGBE_LINK_SPEED_10_FULL: -+ speed_str = "10 Mbps"; -+ break; - default: - speed_str = "unknown speed"; - break; -@@ -8013,6 +8051,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) - return ixgbe_ptp_set_ts_config(adapter, req); - case SIOCGHWTSTAMP: - return ixgbe_ptp_get_ts_config(adapter, req); -+ case SIOCGMIIPHY: -+ if (!adapter->hw.phy.ops.read_reg) -+ return -EOPNOTSUPP; -+ /* fall through */ - default: - return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); - } -@@ -9480,6 +9522,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) - hw->mac.ops = *ii->mac_ops; - hw->mac.type = ii->mac; - hw->mvals = ii->mvals; -+ if (ii->link_ops) -+ hw->link.ops = *ii->link_ops; - - /* EEPROM */ - hw->eeprom.ops = *ii->eeprom_ops; -@@ -9747,7 +9791,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) - "representative who provided you with this " - "hardware.\n"); - } -- strcpy(netdev->name, "eth%d"); -+ -+ /*2019/04/11, change OOB from eth2 to eth0, for pegatron fn-6524-dn-f, Peter5_Lin*/ -+ if(!strcmp("0000:04:00.0", pci_name(pdev))) -+ strcpy(netdev->name, "eth0"); -+ else if(!strcmp("0000:04:00.1", pci_name(pdev))) -+ strcpy(netdev->name, "eth1"); -+ else if(!strcmp("0000:03:00.0", pci_name(pdev))) -+ strcpy(netdev->name, "eth2"); -+ else if(!strcmp("0000:03:00.1", pci_name(pdev))) -+ strcpy(netdev->name, "eth3"); -+ - err = register_netdev(netdev); - if (err) - goto err_register; -@@ -9777,8 +9831,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) - * since os does not support feature - */ - if (hw->mac.ops.set_fw_drv_ver) -- hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, -- 0xFF); -+ 
hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, -+ sizeof(ixgbe_driver_version) - 1, -+ ixgbe_driver_version); - - /* add san mac addr to netdev */ - ixgbe_add_sanmac_netdev(netdev); -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c -index b17464e..d914b40 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c -@@ -109,8 +109,8 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) - * - * Returns an error code on error. - */ --static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, -- u16 reg, u16 *val, bool lock) -+s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, -+ u16 reg, u16 *val, bool lock) - { - u32 swfw_mask = hw->phy.phy_semaphore_mask; - int max_retry = 3; -@@ -178,36 +178,6 @@ static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, - } - - /** -- * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation -- * @hw: pointer to the hardware structure -- * @addr: I2C bus address to read from -- * @reg: I2C device register to read from -- * @val: pointer to location to receive read value -- * -- * Returns an error code on error. -- */ --s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, -- u16 reg, u16 *val) --{ -- return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); --} -- --/** -- * ixgbe_read_i2c_combined_generic_unlocked - Unlocked I2C read combined -- * @hw: pointer to the hardware structure -- * @addr: I2C bus address to read from -- * @reg: I2C device register to read from -- * @val: pointer to location to receive read value -- * -- * Returns an error code on error. 
-- */ --s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, -- u16 reg, u16 *val) --{ -- return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); --} -- --/** - * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to write to -@@ -217,8 +187,8 @@ s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, - * - * Returns an error code on error. - */ --static s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, -- u16 reg, u16 val, bool lock) -+s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, -+ u16 reg, u16 val, bool lock) - { - u32 swfw_mask = hw->phy.phy_semaphore_mask; - int max_retry = 1; -@@ -273,33 +243,41 @@ static s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, - } - - /** -- * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation -- * @hw: pointer to the hardware structure -- * @addr: I2C bus address to write to -- * @reg: I2C device register to write to -- * @val: value to write -+ * ixgbe_probe_phy - Probe a single address for a PHY -+ * @hw: pointer to hardware structure -+ * @phy_addr: PHY address to probe - * -- * Returns an error code on error. -- */ --s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, -- u8 addr, u16 reg, u16 val) -+ * Returns true if PHY found -+ **/ -+static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr) - { -- return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); --} -+ u16 ext_ability = 0; - --/** -- * ixgbe_write_i2c_combined_generic_unlocked - Unlocked I2C write combined -- * @hw: pointer to the hardware structure -- * @addr: I2C bus address to write to -- * @reg: I2C device register to write to -- * @val: value to write -- * -- * Returns an error code on error. 
-- */ --s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, -- u8 addr, u16 reg, u16 val) --{ -- return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); -+ hw->phy.mdio.prtad = phy_addr; -+ if (mdio45_probe(&hw->phy.mdio, phy_addr) != 0) { -+ return false; -+ } -+ -+ if (ixgbe_get_phy_id(hw)) { -+ return false; -+ } -+ -+ hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); -+ -+ if (hw->phy.type == ixgbe_phy_unknown) { -+ hw->phy.ops.read_reg(hw, -+ MDIO_PMA_EXTABLE, -+ MDIO_MMD_PMAPMD, -+ &ext_ability); -+ if (ext_ability & -+ (MDIO_PMA_EXTABLE_10GBT | -+ MDIO_PMA_EXTABLE_1000BT)) -+ hw->phy.type = ixgbe_phy_cu_unknown; -+ else -+ hw->phy.type = ixgbe_phy_generic; -+ } -+ -+ return true; - } - - /** -@@ -311,7 +289,7 @@ s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, - s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) - { - u32 phy_addr; -- u16 ext_ability = 0; -+ u32 status = IXGBE_ERR_PHY_ADDR_INVALID; - - if (!hw->phy.phy_semaphore_mask) { - if (hw->bus.lan_id) -@@ -320,37 +298,34 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) - hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; - } - -- if (hw->phy.type == ixgbe_phy_unknown) { -- for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { -- hw->phy.mdio.prtad = phy_addr; -- if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) { -- ixgbe_get_phy_id(hw); -- hw->phy.type = -- ixgbe_get_phy_type_from_id(hw->phy.id); -- -- if (hw->phy.type == ixgbe_phy_unknown) { -- hw->phy.ops.read_reg(hw, -- MDIO_PMA_EXTABLE, -- MDIO_MMD_PMAPMD, -- &ext_ability); -- if (ext_ability & -- (MDIO_PMA_EXTABLE_10GBT | -- MDIO_PMA_EXTABLE_1000BT)) -- hw->phy.type = -- ixgbe_phy_cu_unknown; -- else -- hw->phy.type = -- ixgbe_phy_generic; -- } -+ if (hw->phy.type != ixgbe_phy_unknown) -+ return 0; - -- return 0; -- } -+ if (hw->phy.nw_mng_if_sel) { -+ phy_addr = (hw->phy.nw_mng_if_sel & -+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> -+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; -+ if 
(ixgbe_probe_phy(hw, phy_addr)) -+ return 0; -+ else -+ return IXGBE_ERR_PHY_ADDR_INVALID; -+ } -+ -+ for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { -+ if (ixgbe_probe_phy(hw, phy_addr)) { -+ status = 0; -+ break; - } -- /* indicate no PHY found */ -- hw->phy.mdio.prtad = MDIO_PRTAD_NONE; -- return IXGBE_ERR_PHY_ADDR_INVALID; - } -- return 0; -+ -+ /* Certain media types do not have a phy so an address will not -+ * be found and the code will take this path. Caller has to -+ * decide if it is an error or not. -+ */ -+ if (status) -+ hw->phy.mdio.prtad = MDIO_PRTAD_NONE; -+ -+ return status; - } - - /** -@@ -416,7 +391,8 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) - case TN1010_PHY_ID: - phy_type = ixgbe_phy_tn; - break; -- case X550_PHY_ID: -+ case X550_PHY_ID2: -+ case X550_PHY_ID3: - case X540_PHY_ID: - phy_type = ixgbe_phy_aq; - break; -@@ -427,6 +403,7 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) - phy_type = ixgbe_phy_nl; - break; - case X557_PHY_ID: -+ case X557_PHY_ID2: - phy_type = ixgbe_phy_x550em_ext_t; - break; - default: -@@ -477,11 +454,27 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) - */ - for (i = 0; i < 30; i++) { - msleep(100); -- hw->phy.ops.read_reg(hw, MDIO_CTRL1, -- MDIO_MMD_PHYXS, &ctrl); -- if (!(ctrl & MDIO_CTRL1_RESET)) { -- udelay(2); -- break; -+ if (hw->phy.type == ixgbe_phy_x550em_ext_t) { -+ status = hw->phy.ops.read_reg(hw, -+ IXGBE_MDIO_TX_VENDOR_ALARMS_3, -+ MDIO_MMD_PMAPMD, &ctrl); -+ if (status) -+ return status; -+ -+ if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { -+ udelay(2); -+ break; -+ } -+ } else { -+ status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, -+ MDIO_MMD_PHYXS, &ctrl); -+ if (status) -+ return status; -+ -+ if (!(ctrl & MDIO_CTRL1_RESET)) { -+ udelay(2); -+ break; -+ } - } - } - -@@ -494,6 +487,98 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) - } - - /** -+ * ixgbe_read_phy_mdio - Reads a value from a specified PHY register without -+ * 
the SWFW lock. This Clasue 22 API is patched by Hilbert -+ * @hw: pointer to hardware structure -+ * @reg_addr: 32 bit address of PHY register to read -+ * @phy_data: Pointer to read data from PHY register -+ **/ -+s32 ixgbe_read_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, -+ u16 *phy_data) -+{ -+ u32 i, data, command; -+ -+ /* Setup and write the read command */ -+ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | -+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | -+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC | -+ IXGBE_MSCA_MDI_COMMAND; -+ -+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); -+ -+ /* Check every 10 usec to see if the address cycle completed. -+ * The MDI Command bit will clear when the operation is -+ * complete -+ */ -+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { -+ udelay(10); -+ -+ command = IXGBE_READ_REG(hw, IXGBE_MSCA); -+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) -+ break; -+ } -+ -+ -+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { -+ hw_dbg(hw, "PHY address command did not complete.\n"); -+ return IXGBE_ERR_PHY; -+ } -+ -+ /* Read operation is complete. Get the data -+ * from MSRWD -+ */ -+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD); -+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT; -+ *phy_data = (u16)(data); -+ -+ return 0; -+} -+ -+/** -+ * ixgbe_write_phy_reg_mdio - Writes a value to specified PHY register -+ * without SWFW lock. 
This Clause 22 API is patched by Hilbert -+ * @hw: pointer to hardware structure -+ * @reg_addr: 32 bit PHY register to write -+ * @device_type: 5 bit device type -+ * @phy_data: Data to write to the PHY register -+ **/ -+s32 ixgbe_write_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, -+ u32 device_type, u16 phy_data) -+{ -+ u32 i, command; -+ -+ /* Put the data in the MDI single read and write data register*/ -+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); -+ -+ /* Setup and write the write command */ -+ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | -+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | -+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | -+ IXGBE_MSCA_MDI_COMMAND; -+ -+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); -+ -+ /* -+ * Check every 10 usec to see if the address cycle completed. -+ * The MDI Command bit will clear when the operation is -+ * complete -+ */ -+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { -+ udelay(10); -+ -+ command = IXGBE_READ_REG(hw, IXGBE_MSCA); -+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) -+ break; -+ } -+ -+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { -+ hw_dbg(hw, "PHY write cmd didn't complete\n"); -+ return IXGBE_ERR_PHY; -+ } -+ -+ return 0; -+} -+/** - * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without - * the SWFW lock - * @hw: pointer to hardware structure -@@ -705,53 +790,52 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) - - ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); - -- if (speed & IXGBE_LINK_SPEED_10GB_FULL) { -- /* Set or unset auto-negotiation 10G advertisement */ -- hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, -- MDIO_MMD_AN, -- &autoneg_reg); -+ /* Set or unset auto-negotiation 10G advertisement */ -+ hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg); - -- autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; -- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) -- autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; -+ 
autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; -+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) && -+ (speed & IXGBE_LINK_SPEED_10GB_FULL)) -+ autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; - -- hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, -- MDIO_MMD_AN, -- autoneg_reg); -- } -- -- if (speed & IXGBE_LINK_SPEED_1GB_FULL) { -- /* Set or unset auto-negotiation 1G advertisement */ -- hw->phy.ops.read_reg(hw, -- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, -- MDIO_MMD_AN, -- &autoneg_reg); -+ hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg); - -- autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; -- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) -- autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; -+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, -+ MDIO_MMD_AN, &autoneg_reg); - -- hw->phy.ops.write_reg(hw, -- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, -- MDIO_MMD_AN, -- autoneg_reg); -+ if (hw->mac.type == ixgbe_mac_X550) { -+ /* Set or unset auto-negotiation 5G advertisement */ -+ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE; -+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) && -+ (speed & IXGBE_LINK_SPEED_5GB_FULL)) -+ autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE; -+ -+ /* Set or unset auto-negotiation 2.5G advertisement */ -+ autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE; -+ if ((hw->phy.autoneg_advertised & -+ IXGBE_LINK_SPEED_2_5GB_FULL) && -+ (speed & IXGBE_LINK_SPEED_2_5GB_FULL)) -+ autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE; - } - -- if (speed & IXGBE_LINK_SPEED_100_FULL) { -- /* Set or unset auto-negotiation 100M advertisement */ -- hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, -- MDIO_MMD_AN, -- &autoneg_reg); -+ /* Set or unset auto-negotiation 1G advertisement */ -+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; -+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) && -+ (speed & IXGBE_LINK_SPEED_1GB_FULL)) -+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; - -- autoneg_reg &= 
~(ADVERTISE_100FULL | -- ADVERTISE_100HALF); -- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) -- autoneg_reg |= ADVERTISE_100FULL; -+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, -+ MDIO_MMD_AN, autoneg_reg); - -- hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, -- MDIO_MMD_AN, -- autoneg_reg); -- } -+ /* Set or unset auto-negotiation 100M advertisement */ -+ hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg); -+ -+ autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF); -+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) && -+ (speed & IXGBE_LINK_SPEED_100_FULL)) -+ autoneg_reg |= ADVERTISE_100FULL; -+ -+ hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg); - - /* Blocked by MNG FW so don't reset PHY */ - if (ixgbe_check_reset_blocked(hw)) -@@ -778,9 +862,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) - { -- -- /* -- * Clear autoneg_advertised and set new values based on input link -+ /* Clear autoneg_advertised and set new values based on input link - * speed. 
- */ - hw->phy.autoneg_advertised = 0; -@@ -788,14 +870,24 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; - -+ if (speed & IXGBE_LINK_SPEED_5GB_FULL) -+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL; -+ -+ if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) -+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; -+ - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; - - if (speed & IXGBE_LINK_SPEED_100_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; - -+ if (speed & IXGBE_LINK_SPEED_10_FULL) -+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL; -+ - /* Setup link based on the new speed settings */ -- hw->phy.ops.setup_link(hw); -+ if (hw->phy.ops.setup_link) -+ hw->phy.ops.setup_link(hw); - - return 0; - } -@@ -830,6 +922,7 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) - hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; - break; - case ixgbe_mac_X550EM_x: -+ case ixgbe_mac_x550em_a: - hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; - break; - default: -@@ -986,40 +1079,6 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) - } - - /** -- * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version -- * @hw: pointer to hardware structure -- * @firmware_version: pointer to the PHY Firmware Version -- **/ --s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, -- u16 *firmware_version) --{ -- s32 status; -- -- status = hw->phy.ops.read_reg(hw, TNX_FW_REV, -- MDIO_MMD_VEND1, -- firmware_version); -- -- return status; --} -- --/** -- * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version -- * @hw: pointer to hardware structure -- * @firmware_version: pointer to the PHY Firmware Version -- **/ --s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, -- u16 *firmware_version) --{ -- s32 status; -- -- status 
= hw->phy.ops.read_reg(hw, AQ_FW_REV, -- MDIO_MMD_VEND1, -- firmware_version); -- -- return status; --} -- --/** - * ixgbe_reset_phy_nl - Performs a PHY reset - * @hw: pointer to hardware structure - **/ -@@ -2398,9 +2457,7 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) - if (!on && ixgbe_mng_present(hw)) - return 0; - -- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -- ®); -+ status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, ®); - if (status) - return status; - -@@ -2412,8 +2469,6 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) - reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; - } - -- status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -- reg); -+ status = hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, reg); - return status; - } -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h -index cc735ec..e9f94ee 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h -@@ -84,8 +84,9 @@ - #define IXGBE_CS4227_GLOBAL_ID_LSB 0 - #define IXGBE_CS4227_GLOBAL_ID_MSB 1 - #define IXGBE_CS4227_SCRATCH 2 --#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */ --#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */ -+#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F -+#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */ -+#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */ - #define IXGBE_CS4227_RESET_PENDING 0x1357 - #define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 - #define IXGBE_CS4227_RETRIES 15 -@@ -154,6 +155,12 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data); - s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data); -+#if 1 //by hilbert -+s32 ixgbe_read_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, -+ u32 device_type, u16 
*phy_data); -+s32 ixgbe_write_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, -+ u32 device_type, u16 phy_data); -+#endif - s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); - s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, - ixgbe_link_speed speed, -@@ -168,10 +175,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up); - s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); --s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, -- u16 *firmware_version); --s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, -- u16 *firmware_version); - - s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); - s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); -@@ -195,12 +198,8 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 *sff8472_data); - s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 eeprom_data); --s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, -- u16 reg, u16 *val); --s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, -- u16 reg, u16 *val); --s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, -- u16 reg, u16 val); --s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, -- u16 reg, u16 val); -+s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, -+ u16 *val, bool lock); -+s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, -+ u16 val, bool lock); - #endif /* _IXGBE_PHY_H_ */ -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h -index 31d82e3..531990b 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h -@@ -85,6 +85,7 @@ - #define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC - #define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD - #define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE -+#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0 - 
#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 - #define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 - #define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 -@@ -92,6 +93,8 @@ - #define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 - #define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 - #define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE -+#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 -+#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 - - /* VF Device IDs */ - #define IXGBE_DEV_ID_82599_VF 0x10ED -@@ -1393,8 +1396,10 @@ struct ixgbe_thermal_sensor_data { - #define TN1010_PHY_ID 0x00A19410 - #define TNX_FW_REV 0xB - #define X540_PHY_ID 0x01540200 --#define X550_PHY_ID 0x01540220 -+#define X550_PHY_ID2 0x01540223 -+#define X550_PHY_ID3 0x01540221 - #define X557_PHY_ID 0x01540240 -+#define X557_PHY_ID2 0x01540250 - #define QT2022_PHY_ID 0x0043A400 - #define ATH_PHY_ID 0x03429050 - #define AQ_FW_REV 0x20 -@@ -1513,6 +1518,8 @@ enum { - #define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) - - /* VMOLR bitmasks */ -+#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */ -+#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */ - #define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ - #define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ - #define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ -@@ -1928,6 +1935,7 @@ enum { - #define IXGBE_LINKS_SPEED_10G_82599 0x30000000 - #define IXGBE_LINKS_SPEED_1G_82599 0x20000000 - #define IXGBE_LINKS_SPEED_100_82599 0x10000000 -+#define IXGBE_LINKS_SPEED_10_X550EM_A 0 - #define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ - #define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ - -@@ -2633,6 +2641,7 @@ enum ixgbe_fdir_pballoc_type { - #define FW_CEM_UNUSED_VER 0x0 - #define FW_CEM_MAX_RETRIES 3 - #define FW_CEM_RESP_STATUS_SUCCESS 0x1 -+#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */ - #define FW_READ_SHADOW_RAM_CMD 0x31 - #define FW_READ_SHADOW_RAM_LEN 0x6 - #define FW_WRITE_SHADOW_RAM_CMD 0x33 -@@ -2658,6 +2667,59 @@ enum 
ixgbe_fdir_pballoc_type { - #define FW_INT_PHY_REQ_LEN 10 - #define FW_INT_PHY_REQ_READ 0 - #define FW_INT_PHY_REQ_WRITE 1 -+#define FW_PHY_ACT_REQ_CMD 5 -+#define FW_PHY_ACT_DATA_COUNT 4 -+#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT) -+#define FW_PHY_ACT_INIT_PHY 1 -+#define FW_PHY_ACT_SETUP_LINK 2 -+#define FW_PHY_ACT_LINK_SPEED_10 BIT(0) -+#define FW_PHY_ACT_LINK_SPEED_100 BIT(1) -+#define FW_PHY_ACT_LINK_SPEED_1G BIT(2) -+#define FW_PHY_ACT_LINK_SPEED_2_5G BIT(3) -+#define FW_PHY_ACT_LINK_SPEED_5G BIT(4) -+#define FW_PHY_ACT_LINK_SPEED_10G BIT(5) -+#define FW_PHY_ACT_LINK_SPEED_20G BIT(6) -+#define FW_PHY_ACT_LINK_SPEED_25G BIT(7) -+#define FW_PHY_ACT_LINK_SPEED_40G BIT(8) -+#define FW_PHY_ACT_LINK_SPEED_50G BIT(9) -+#define FW_PHY_ACT_LINK_SPEED_100G BIT(10) -+#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16 -+#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3 << \ -+ HW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) -+#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u -+#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u -+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u -+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u -+#define FW_PHY_ACT_SETUP_LINK_LP BIT(18) -+#define FW_PHY_ACT_SETUP_LINK_HP BIT(19) -+#define FW_PHY_ACT_SETUP_LINK_EEE BIT(20) -+#define FW_PHY_ACT_SETUP_LINK_AN BIT(22) -+#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN BIT(0) -+#define FW_PHY_ACT_GET_LINK_INFO 3 -+#define FW_PHY_ACT_GET_LINK_INFO_EEE BIT(19) -+#define FW_PHY_ACT_GET_LINK_INFO_FC_TX BIT(20) -+#define FW_PHY_ACT_GET_LINK_INFO_FC_RX BIT(21) -+#define FW_PHY_ACT_GET_LINK_INFO_POWER BIT(22) -+#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE BIT(24) -+#define FW_PHY_ACT_GET_LINK_INFO_TEMP BIT(25) -+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX BIT(28) -+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX BIT(29) -+#define FW_PHY_ACT_FORCE_LINK_DOWN 4 -+#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF BIT(0) -+#define FW_PHY_ACT_PHY_SW_RESET 5 -+#define FW_PHY_ACT_PHY_HW_RESET 6 -+#define FW_PHY_ACT_GET_PHY_INFO 7 -+#define FW_PHY_ACT_UD_2 0x1002 
-+#define FW_PHY_ACT_UD_2_10G_KR_EEE BIT(6) -+#define FW_PHY_ACT_UD_2_10G_KX4_EEE BIT(5) -+#define FW_PHY_ACT_UD_2_1G_KX_EEE BIT(4) -+#define FW_PHY_ACT_UD_2_10G_T_EEE BIT(3) -+#define FW_PHY_ACT_UD_2_1G_T_EEE BIT(2) -+#define FW_PHY_ACT_UD_2_100M_TX_EEE BIT(1) -+#define FW_PHY_ACT_RETRIES 50 -+#define FW_PHY_INFO_SPEED_MASK 0xFFFu -+#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u -+#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu - - /* Host Interface Command Structures */ - struct ixgbe_hic_hdr { -@@ -2700,6 +2762,16 @@ struct ixgbe_hic_drv_info { - u16 pad2; /* end spacing to ensure length is mult. of dword2 */ - }; - -+struct ixgbe_hic_drv_info2 { -+ struct ixgbe_hic_hdr hdr; -+ u8 port_num; -+ u8 ver_sub; -+ u8 ver_build; -+ u8 ver_min; -+ u8 ver_maj; -+ char driver_string[FW_CEM_DRIVER_VERSION_SIZE]; -+}; -+ - /* These need to be dword aligned */ - struct ixgbe_hic_read_shadow_ram { - union ixgbe_hic_hdr2 hdr; -@@ -2748,6 +2820,19 @@ struct ixgbe_hic_internal_phy_resp { - __be32 read_data; - }; - -+struct ixgbe_hic_phy_activity_req { -+ struct ixgbe_hic_hdr hdr; -+ u8 port_number; -+ u8 pad; -+ __le16 activity_id; -+ __be32 data[FW_PHY_ACT_DATA_COUNT]; -+}; -+ -+struct ixgbe_hic_phy_activity_resp { -+ struct ixgbe_hic_hdr hdr; -+ __be32 data[FW_PHY_ACT_DATA_COUNT]; -+}; -+ - /* Transmit Descriptor - Advanced */ - union ixgbe_adv_tx_desc { - struct { -@@ -2863,6 +2948,7 @@ typedef u32 ixgbe_autoneg_advertised; - /* Link speed */ - typedef u32 ixgbe_link_speed; - #define IXGBE_LINK_SPEED_UNKNOWN 0 -+#define IXGBE_LINK_SPEED_10_FULL 0x0002 - #define IXGBE_LINK_SPEED_100_FULL 0x0008 - #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 - #define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 -@@ -3059,7 +3145,9 @@ enum ixgbe_phy_type { - ixgbe_phy_aq, - ixgbe_phy_x550em_kr, - ixgbe_phy_x550em_kx4, -+ ixgbe_phy_x550em_xfi, - ixgbe_phy_x550em_ext_t, -+ ixgbe_phy_ext_1g_t, - ixgbe_phy_cu_unknown, - ixgbe_phy_qt, - ixgbe_phy_xaui, -@@ -3078,6 +3166,7 @@ enum ixgbe_phy_type { - ixgbe_phy_qsfp_unknown, 
- ixgbe_phy_sfp_unsupported, - ixgbe_phy_sgmii, -+ ixgbe_phy_fw, - ixgbe_phy_generic - }; - -@@ -3352,6 +3441,7 @@ struct ixgbe_mac_operations { - s32 (*led_off)(struct ixgbe_hw *, u32); - s32 (*blink_led_start)(struct ixgbe_hw *, u32); - s32 (*blink_led_stop)(struct ixgbe_hw *, u32); -+ s32 (*init_led_link_act)(struct ixgbe_hw *); - - /* RAR, Multicast, VLAN */ - s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); -@@ -3372,9 +3462,11 @@ struct ixgbe_mac_operations { - /* Flow Control */ - s32 (*fc_enable)(struct ixgbe_hw *); - s32 (*setup_fc)(struct ixgbe_hw *); -+ void (*fc_autoneg)(struct ixgbe_hw *); - - /* Manageability interface */ -- s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); -+ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16, -+ const char *); - s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); - s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); - void (*disable_rx)(struct ixgbe_hw *hw); -@@ -3416,10 +3508,24 @@ struct ixgbe_phy_operations { - s32 (*set_phy_power)(struct ixgbe_hw *, bool on); - s32 (*enter_lplu)(struct ixgbe_hw *); - s32 (*handle_lasi)(struct ixgbe_hw *hw); -- s32 (*read_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, -- u16 *value); -- s32 (*write_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, -- u16 value); -+ s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, -+ u8 *value); -+ s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, -+ u8 value); -+}; -+ -+struct ixgbe_link_operations { -+ s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); -+ s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, -+ u16 *val); -+ s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); -+ s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, -+ u16 val); -+}; -+ -+struct ixgbe_link_info { -+ struct ixgbe_link_operations ops; -+ u8 addr; - }; - - struct ixgbe_eeprom_info { -@@ -3462,6 +3568,7 @@ struct ixgbe_mac_info { - 
u8 san_mac_rar_index; - struct ixgbe_thermal_sensor_data thermal_sensor_data; - bool set_lben; -+ u8 led_link_act; - }; - - struct ixgbe_phy_info { -@@ -3477,6 +3584,8 @@ struct ixgbe_phy_info { - bool reset_disable; - ixgbe_autoneg_advertised autoneg_advertised; - ixgbe_link_speed speeds_supported; -+ ixgbe_link_speed eee_speeds_supported; -+ ixgbe_link_speed eee_speeds_advertised; - enum ixgbe_smart_speed smart_speed; - bool smart_speed_active; - bool multispeed_fiber; -@@ -3523,6 +3632,7 @@ struct ixgbe_hw { - struct ixgbe_addr_filter_info addr_ctrl; - struct ixgbe_fc_info fc; - struct ixgbe_phy_info phy; -+ struct ixgbe_link_info link; - struct ixgbe_eeprom_info eeprom; - struct ixgbe_bus_info bus; - struct ixgbe_mbx_info mbx; -@@ -3546,6 +3656,7 @@ struct ixgbe_info { - const struct ixgbe_eeprom_operations *eeprom_ops; - const struct ixgbe_phy_operations *phy_ops; - const struct ixgbe_mbx_operations *mbx_ops; -+ const struct ixgbe_link_operations *link_ops; - const u32 *mvals; - }; - -@@ -3593,17 +3704,35 @@ struct ixgbe_info { - #define IXGBE_FUSES0_REV_MASK (3u << 6) - - #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) -+#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200) - #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) - #define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) - #define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248) - #define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0) -+#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C) - #define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) - #define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) - #define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00) - #define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054) - #define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) - #define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 
0x9A00 : 0x5A00) - -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR (0x2 << 20) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN BIT(25) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN BIT(26) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN BIT(27) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10M ~(0x7 << 28) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M BIT(28) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G (0x2 << 28) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G (0x3 << 28) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN (0x4 << 28) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G (0x7 << 28) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK (0x7 << 28) -+#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART BIT(31) -+ - #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9) - #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11) - -@@ -3618,6 +3747,7 @@ struct ixgbe_info { - #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18) - #define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24) - #define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26) -+#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE BIT(28) - #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29) - #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART BIT(31) - -@@ -3627,6 +3757,8 @@ struct ixgbe_info { - #define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0) - #define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1) - -+#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE BIT(10) -+#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE BIT(11) - #define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12) - #define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19) - -@@ -3675,8 +3807,13 @@ struct ixgbe_info { - - #define IXGBE_NW_MNG_IF_SEL 0x00011178 - #define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1) --#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23) --#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) -+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M BIT(17) -+#define 
IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M BIT(18) -+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G BIT(19) -+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G BIT(20) -+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G BIT(21) -+#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE BIT(25) -+#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) /* X552 only */ - #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 - #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ - (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c -index f2b1d48..6ea0d6a 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c -@@ -95,6 +95,7 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) - { - s32 status; - u32 ctrl, i; -+ u32 swfw_mask = hw->phy.phy_semaphore_mask; - - /* Call adapter stop to disable tx/rx and clear interrupts */ - status = hw->mac.ops.stop_adapter(hw); -@@ -105,10 +106,17 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) - ixgbe_clear_tx_pending(hw); - - mac_reset_top: -+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); -+ if (status) { -+ hw_dbg(hw, "semaphore failed with %d", status); -+ return IXGBE_ERR_SWFW_SYNC; -+ } -+ - ctrl = IXGBE_CTRL_RST; - ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); - IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); - IXGBE_WRITE_FLUSH(hw); -+ hw->mac.ops.release_swfw_sync(hw, swfw_mask); - usleep_range(1000, 1200); - - /* Poll for reset bit to self-clear indicating reset is complete */ -@@ -780,8 +788,10 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) - ixgbe_link_speed speed; - bool link_up; - -- /* -- * Link should be up in order for the blink bit in the LED control -+ if (index > 3) -+ return IXGBE_ERR_PARAM; -+ -+ /* Link should be up in order for the blink bit in the LED control - * register to work. Force link and speed in the MAC if link is down. - * This will be reversed when we stop the blinking. 
- */ -@@ -814,6 +824,9 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) - u32 macc_reg; - u32 ledctl_reg; - -+ if (index > 3) -+ return IXGBE_ERR_PARAM; -+ - /* Restore the LED to its default value. */ - ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); -@@ -851,6 +864,7 @@ static const struct ixgbe_mac_operations mac_ops_X540 = { - .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, - .led_on = &ixgbe_led_on_generic, - .led_off = &ixgbe_led_off_generic, -+ .init_led_link_act = ixgbe_init_led_link_act_generic, - .blink_led_start = &ixgbe_blink_led_start_X540, - .blink_led_stop = &ixgbe_blink_led_stop_X540, - .set_rar = &ixgbe_set_rar_generic, -@@ -866,6 +880,7 @@ static const struct ixgbe_mac_operations mac_ops_X540 = { - .set_vfta = &ixgbe_set_vfta_generic, - .fc_enable = &ixgbe_fc_enable_generic, - .setup_fc = ixgbe_setup_fc_generic, -+ .fc_autoneg = ixgbe_fc_autoneg, - .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, - .init_uta_tables = &ixgbe_init_uta_tables_generic, - .setup_sfp = NULL, -@@ -911,7 +926,6 @@ static const struct ixgbe_phy_operations phy_ops_X540 = { - .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, - .check_overtemp = &ixgbe_tn_check_overtemp, - .set_phy_power = &ixgbe_set_copper_phy_power, -- .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, - }; - - static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c -index 77a60aa..3236248 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c -@@ -28,11 +28,15 @@ - - static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed); - static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *); -+static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *); -+static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *); -+static 
s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *); - - static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) - { - struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_phy_info *phy = &hw->phy; -+ struct ixgbe_link_info *link = &hw->link; - - /* Start with X540 invariants, since so simular */ - ixgbe_get_invariants_X540(hw); -@@ -40,6 +44,46 @@ static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) - if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) - phy->ops.set_phy_power = NULL; - -+ link->addr = IXGBE_CS4227; -+ -+ return 0; -+} -+ -+static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw) -+{ -+ struct ixgbe_phy_info *phy = &hw->phy; -+ -+ /* Start with X540 invariants, since so similar */ -+ ixgbe_get_invariants_X540(hw); -+ -+ phy->ops.set_phy_power = NULL; -+ -+ return 0; -+} -+ -+static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) -+{ -+ struct ixgbe_mac_info *mac = &hw->mac; -+ struct ixgbe_phy_info *phy = &hw->phy; -+ -+ /* Start with X540 invariants, since so simular */ -+ ixgbe_get_invariants_X540(hw); -+ -+ if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) -+ phy->ops.set_phy_power = NULL; -+ -+ return 0; -+} -+ -+static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw) -+{ -+ struct ixgbe_phy_info *phy = &hw->phy; -+ -+ /* Start with X540 invariants, since so similar */ -+ ixgbe_get_invariants_X540(hw); -+ -+ phy->ops.set_phy_power = NULL; -+ - return 0; - } - -@@ -69,8 +113,7 @@ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) - */ - static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) - { -- return hw->phy.ops.read_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, -- value); -+ return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value); - } - - /** -@@ -83,8 +126,7 @@ static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) - */ - static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) - { -- return 
hw->phy.ops.write_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, -- value); -+ return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value); - } - - /** -@@ -290,6 +332,9 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) - case IXGBE_DEV_ID_X550EM_X_KX4: - hw->phy.type = ixgbe_phy_x550em_kx4; - break; -+ case IXGBE_DEV_ID_X550EM_X_XFI: -+ hw->phy.type = ixgbe_phy_x550em_xfi; -+ break; - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_A_KR: - case IXGBE_DEV_ID_X550EM_A_KR_L: -@@ -301,9 +346,21 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) - else - hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; - /* Fallthrough */ -- case IXGBE_DEV_ID_X550EM_X_1G_T: - case IXGBE_DEV_ID_X550EM_X_10G_T: - return ixgbe_identify_phy_generic(hw); -+ case IXGBE_DEV_ID_X550EM_X_1G_T: -+ hw->phy.type = ixgbe_phy_ext_1g_t; -+ break; -+ case IXGBE_DEV_ID_X550EM_A_1G_T: -+ case IXGBE_DEV_ID_X550EM_A_1G_T_L: -+ hw->phy.type = ixgbe_phy_fw; -+ hw->phy.ops.read_reg = NULL; -+ hw->phy.ops.write_reg = NULL; -+ if (hw->bus.lan_id) -+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; -+ else -+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; -+ break; - default: - break; - } -@@ -322,6 +379,280 @@ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, - return IXGBE_NOT_IMPLEMENTED; - } - -+/** -+ * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation -+ * @hw: pointer to the hardware structure -+ * @addr: I2C bus address to read from -+ * @reg: I2C device register to read from -+ * @val: pointer to location to receive read value -+ * -+ * Returns an error code on error. 
-+ **/ -+static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, -+ u16 reg, u16 *val) -+{ -+ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); -+} -+ -+/** -+ * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation -+ * @hw: pointer to the hardware structure -+ * @addr: I2C bus address to read from -+ * @reg: I2C device register to read from -+ * @val: pointer to location to receive read value -+ * -+ * Returns an error code on error. -+ **/ -+static s32 -+ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, -+ u16 reg, u16 *val) -+{ -+ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); -+} -+ -+/** -+ * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation -+ * @hw: pointer to the hardware structure -+ * @addr: I2C bus address to write to -+ * @reg: I2C device register to write to -+ * @val: value to write -+ * -+ * Returns an error code on error. -+ **/ -+static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, -+ u8 addr, u16 reg, u16 val) -+{ -+ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); -+} -+ -+/** -+ * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation -+ * @hw: pointer to the hardware structure -+ * @addr: I2C bus address to write to -+ * @reg: I2C device register to write to -+ * @val: value to write -+ * -+ * Returns an error code on error. 
-+ **/ -+static s32 -+ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, -+ u8 addr, u16 reg, u16 val) -+{ -+ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); -+} -+ -+/** -+ * ixgbe_fw_phy_activity - Perform an activity on a PHY -+ * @hw: pointer to hardware structure -+ * @activity: activity to perform -+ * @data: Pointer to 4 32-bit words of data -+ */ -+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, -+ u32 (*data)[FW_PHY_ACT_DATA_COUNT]) -+{ -+ union { -+ struct ixgbe_hic_phy_activity_req cmd; -+ struct ixgbe_hic_phy_activity_resp rsp; -+ } hic; -+ u16 retries = FW_PHY_ACT_RETRIES; -+ s32 rc; -+ u32 i; -+ -+ do { -+ memset(&hic, 0, sizeof(hic)); -+ hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD; -+ hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN; -+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; -+ hic.cmd.port_number = hw->bus.lan_id; -+ hic.cmd.activity_id = cpu_to_le16(activity); -+ for (i = 0; i < ARRAY_SIZE(hic.cmd.data); ++i) -+ hic.cmd.data[i] = cpu_to_be32((*data)[i]); -+ -+ rc = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), -+ IXGBE_HI_COMMAND_TIMEOUT, -+ true); -+ if (rc) -+ return rc; -+ if (hic.rsp.hdr.cmd_or_resp.ret_status == -+ FW_CEM_RESP_STATUS_SUCCESS) { -+ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) -+ (*data)[i] = be32_to_cpu(hic.rsp.data[i]); -+ return 0; -+ } -+ usleep_range(20, 30); -+ --retries; -+ } while (retries > 0); -+ -+ return IXGBE_ERR_HOST_INTERFACE_COMMAND; -+} -+ -+static const struct { -+ u16 fw_speed; -+ ixgbe_link_speed phy_speed; -+} ixgbe_fw_map[] = { -+ { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL }, -+ { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL }, -+ { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL }, -+ { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL }, -+ { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL }, -+ { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL }, -+}; -+ -+/** -+ * ixgbe_get_phy_id_fw - Get the phy ID via firmware 
command -+ * @hw: pointer to hardware structure -+ * -+ * Returns error code -+ */ -+static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) -+{ -+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; -+ u16 phy_speeds; -+ u16 phy_id_lo; -+ s32 rc; -+ u16 i; -+ -+ if (hw->phy.id) -+ return 0; -+ -+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info); -+ if (rc) -+ return rc; -+ -+ hw->phy.speeds_supported = 0; -+ phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK; -+ for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { -+ if (phy_speeds & ixgbe_fw_map[i].fw_speed) -+ hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed; -+ } -+ -+ hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK; -+ phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK; -+ hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK; -+ hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; -+ if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) -+ return IXGBE_ERR_PHY_ADDR_INVALID; -+ -+ hw->phy.autoneg_advertised = hw->phy.speeds_supported; -+ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | -+ IXGBE_LINK_SPEED_1GB_FULL; -+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; -+ return 0; -+} -+ -+static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, -+ u32 device_type, u16 *phy_data); -+/** -+ * ixgbe_identify_phy_fw - Get PHY type based on firmware command -+ * @hw: pointer to hardware structure -+ * -+ * Returns error code -+ */ -+static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) -+{ -+ s32 rc; -+ u16 value=0; -+ -+ if (hw->bus.lan_id) -+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; -+ else -+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; -+ -+#if 0 /* Try also to get PHY ID through MDIO by using C22 in read_reg op. -+ * By hilbert -+ */ -+ rc = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, &value); -+ hw_err(hw, "####rc:%x, PHY ID-1:%x\n", rc, value); -+#endif -+ -+ hw->phy.type = ixgbe_phy_fw; -+#if 0 /* We still need read/write ops later, don't NULL it. 
By hilbert */ -+ hw->phy.ops.read_reg = NULL; -+ hw->phy.ops.write_reg = NULL; -+#endif -+ return ixgbe_get_phy_id_fw(hw); -+} -+ -+/** -+ * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY -+ * @hw: pointer to hardware structure -+ * -+ * Returns error code -+ */ -+static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) -+{ -+ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; -+ -+ setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF; -+ return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup); -+} -+ -+/** -+ * ixgbe_setup_fw_link - Setup firmware-controlled PHYs -+ * @hw: pointer to hardware structure -+ */ -+static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) -+{ -+ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; -+ s32 rc; -+ u16 i; -+ -+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) -+ return 0; -+ -+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { -+ hw_err(hw, "rx_pause not valid in strict IEEE mode\n"); -+ return IXGBE_ERR_INVALID_LINK_SETTINGS; -+ } -+ -+ switch (hw->fc.requested_mode) { -+ case ixgbe_fc_full: -+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX << -+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; -+ break; -+ case ixgbe_fc_rx_pause: -+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX << -+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; -+ break; -+ case ixgbe_fc_tx_pause: -+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX << -+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; -+ break; -+ default: -+ break; -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { -+ if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed) -+ setup[0] |= ixgbe_fw_map[i].fw_speed; -+ } -+ setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN; -+ -+ if (hw->phy.eee_speeds_advertised) -+ setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE; -+ -+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup); -+ if (rc) -+ return rc; -+ if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN) -+ return IXGBE_ERR_OVERTEMP; -+ return 0; -+} -+ -+/** -+ * ixgbe_fc_autoneg_fw - Set up flow 
control for FW-controlled PHYs -+ * @hw: pointer to hardware structure -+ * -+ * Called at init time to set up flow control. -+ */ -+static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) -+{ -+ if (hw->fc.requested_mode == ixgbe_fc_default) -+ hw->fc.requested_mode = ixgbe_fc_full; -+ -+ return ixgbe_setup_fw_link(hw); -+} -+ - /** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params - * @hw: pointer to hardware structure - * -@@ -544,41 +875,6 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - return status; - } - --/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface -- * command assuming that the semaphore is already obtained. -- * @hw: pointer to hardware structure -- * @offset: offset of word in the EEPROM to read -- * @data: word read from the EEPROM -- * -- * Reads a 16 bit word from the EEPROM using the hostif. -- **/ --static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, -- u16 *data) --{ -- s32 status; -- struct ixgbe_hic_read_shadow_ram buffer; -- -- buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; -- buffer.hdr.req.buf_lenh = 0; -- buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; -- buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; -- -- /* convert offset from words to bytes */ -- buffer.address = cpu_to_be32(offset * 2); -- /* one word */ -- buffer.length = cpu_to_be16(sizeof(u16)); -- -- status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), -- IXGBE_HI_COMMAND_TIMEOUT, false); -- if (status) -- return status; -- -- *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, -- FW_NVM_DATA_OFFSET); -- -- return 0; --} -- - /** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read -@@ -590,6 +886,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, - static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, - u16 offset, u16 words, u16 *data) 
- { -+ const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; - struct ixgbe_hic_read_shadow_ram buffer; - u32 current_word = 0; - u16 words_to_read; -@@ -597,7 +894,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, - u32 i; - - /* Take semaphore for the entire operation. */ -- status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); -+ status = hw->mac.ops.acquire_swfw_sync(hw, mask); - if (status) { - hw_dbg(hw, "EEPROM read buffer - semaphore failed\n"); - return status; -@@ -620,10 +917,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, - buffer.pad2 = 0; - buffer.pad3 = 0; - -- status = ixgbe_host_interface_command(hw, &buffer, -- sizeof(buffer), -- IXGBE_HI_COMMAND_TIMEOUT, -- false); -+ status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), -+ IXGBE_HI_COMMAND_TIMEOUT); - if (status) { - hw_dbg(hw, "Host interface command failed\n"); - goto out; -@@ -647,7 +942,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, - } - - out: -- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); -+ hw->mac.ops.release_swfw_sync(hw, mask); - return status; - } - -@@ -818,15 +1113,32 @@ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) - **/ - static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) - { -- s32 status = 0; -+ const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; -+ struct ixgbe_hic_read_shadow_ram buffer; -+ s32 status; - -- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { -- status = ixgbe_read_ee_hostif_data_X550(hw, offset, data); -- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); -- } else { -- status = IXGBE_ERR_SWFW_SYNC; -+ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; -+ buffer.hdr.req.buf_lenh = 0; -+ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; -+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; -+ -+ /* convert offset from words to bytes */ -+ buffer.address = cpu_to_be32(offset * 2); -+ /* one word */ -+ 
buffer.length = cpu_to_be16(sizeof(u16)); -+ -+ status = hw->mac.ops.acquire_swfw_sync(hw, mask); -+ if (status) -+ return status; -+ -+ status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), -+ IXGBE_HI_COMMAND_TIMEOUT); -+ if (!status) { -+ *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, -+ FW_NVM_DATA_OFFSET); - } - -+ hw->mac.ops.release_swfw_sync(hw, mask); - return status; - } - -@@ -1130,47 +1442,17 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, - return ret; - } - --/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. -+/** -+ * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration - * @hw: pointer to hardware structure -- * @speed: the link speed to force - * -- * Configures the integrated KR PHY to use iXFI mode. Used to connect an -- * internal and external PHY at a specific speed, without autonegotiation. -+ * iXfI configuration needed for ixgbe_mac_X550EM_x devices. - **/ --static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) -+static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) - { - s32 status; - u32 reg_val; - -- /* Disable AN and force speed to 10G Serial. */ -- status = ixgbe_read_iosf_sb_reg_x550(hw, -- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), -- IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); -- if (status) -- return status; -- -- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; -- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; -- -- /* Select forced link speed for internal PHY. */ -- switch (*speed) { -- case IXGBE_LINK_SPEED_10GB_FULL: -- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; -- break; -- case IXGBE_LINK_SPEED_1GB_FULL: -- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; -- break; -- default: -- /* Other link speeds are not supported by internal KR PHY. 
*/ -- return IXGBE_ERR_LINK_SETUP; -- } -- -- status = ixgbe_write_iosf_sb_reg_x550(hw, -- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), -- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); -- if (status) -- return status; -- - /* Disable training protocol FSM. */ - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), -@@ -1230,20 +1512,111 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); -- if (status) -- return status; -+ return status; -+} - -- /* Toggle port SW reset by AN reset. */ -- status = ixgbe_read_iosf_sb_reg_x550(hw, -+/** -+ * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the -+ * internal PHY -+ * @hw: pointer to hardware structure -+ **/ -+static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) -+{ -+ s32 status; -+ u32 link_ctrl; -+ -+ /* Restart auto-negotiation. 
*/ -+ status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), -- IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl); -+ -+ if (status) { -+ hw_dbg(hw, "Auto-negotiation did not complete\n"); -+ return status; -+ } -+ -+ link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; -+ status = hw->mac.ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl); -+ -+ if (hw->mac.type == ixgbe_mac_x550em_a) { -+ u32 flx_mask_st20; -+ -+ /* Indicate to FW that AN restart has been asserted */ -+ status = hw->mac.ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20); -+ -+ if (status) { -+ hw_dbg(hw, "Auto-negotiation did not complete\n"); -+ return status; -+ } -+ -+ flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART; -+ status = hw->mac.ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20); -+ } -+ -+ return status; -+} -+ -+/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. -+ * @hw: pointer to hardware structure -+ * @speed: the link speed to force -+ * -+ * Configures the integrated KR PHY to use iXFI mode. Used to connect an -+ * internal and external PHY at a specific speed, without autonegotiation. -+ **/ -+static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) -+{ -+ struct ixgbe_mac_info *mac = &hw->mac; -+ s32 status; -+ u32 reg_val; -+ -+ /* iXFI is only supported with X552 */ -+ if (mac->type != ixgbe_mac_X550EM_x) -+ return IXGBE_ERR_LINK_SETUP; -+ -+ /* Disable AN and force speed to 10G Serial. 
*/ -+ status = ixgbe_read_iosf_sb_reg_x550(hw, -+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status) - return status; - -- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; -+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; -+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; -+ -+ /* Select forced link speed for internal PHY. */ -+ switch (*speed) { -+ case IXGBE_LINK_SPEED_10GB_FULL: -+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; -+ break; -+ case IXGBE_LINK_SPEED_1GB_FULL: -+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; -+ break; -+ default: -+ /* Other link speeds are not supported by internal KR PHY. */ -+ return IXGBE_ERR_LINK_SETUP; -+ } -+ - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); -+ if (status) -+ return status; -+ -+ /* Additional configuration needed for x550em_x */ -+ if (hw->mac.type == ixgbe_mac_X550EM_x) { -+ status = ixgbe_setup_ixfi_x550em_x(hw); -+ if (status) -+ return status; -+ } -+ -+ /* Toggle port SW reset by AN reset. */ -+ status = ixgbe_restart_an_internal_phy_x550em(hw); - - return status; - } -@@ -1294,7 +1667,7 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, - __always_unused bool autoneg_wait_to_complete) - { - s32 status; -- u16 slice, value; -+ u16 reg_slice, reg_val; - bool setup_linear = false; - - /* Check if SFP module is supported and linear */ -@@ -1310,71 +1683,68 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, - if (status) - return status; - -- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { -- /* Configure CS4227 LINE side to 10G SR. */ -- slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12); -- value = IXGBE_CS4227_SPEED_10G; -- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, -- slice, value); -- if (status) -- goto i2c_err; -+ /* Configure internal PHY for KR/KX. 
*/ -+ ixgbe_setup_kr_speed_x550em(hw, speed); - -- slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); -- value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; -- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, -- slice, value); -- if (status) -- goto i2c_err; -- -- /* Configure CS4227 for HOST connection rate then type. */ -- slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12); -- value = speed & IXGBE_LINK_SPEED_10GB_FULL ? -- IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G; -- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, -- slice, value); -- if (status) -- goto i2c_err; -+ /* Configure CS4227 LINE side to proper mode. */ -+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); -+ if (setup_linear) -+ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; -+ else -+ reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; - -- slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12); -- if (setup_linear) -- value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; -- else -- value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; -- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, -- slice, value); -- if (status) -- goto i2c_err; -+ status = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, -+ reg_val); - -- /* Setup XFI internal link. */ -- status = ixgbe_setup_ixfi_x550em(hw, &speed); -- if (status) { -- hw_dbg(hw, "setup_ixfi failed with %d\n", status); -- return status; -- } -- } else { -- /* Configure internal PHY for KR/KX. */ -- status = ixgbe_setup_kr_speed_x550em(hw, speed); -- if (status) { -- hw_dbg(hw, "setup_kr_speed failed with %d\n", status); -- return status; -- } -+ return status; -+} - -- /* Configure CS4227 LINE side to proper mode. 
*/ -- slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); -- if (setup_linear) -- value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; -- else -- value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; -- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, -- slice, value); -- if (status) -- goto i2c_err; -+/** -+ * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode -+ * @hw: pointer to hardware structure -+ * @speed: the link speed to force -+ * -+ * Configures the integrated PHY for native SFI mode. Used to connect the -+ * internal PHY directly to an SFP cage, without autonegotiation. -+ **/ -+static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) -+{ -+ struct ixgbe_mac_info *mac = &hw->mac; -+ s32 status; -+ u32 reg_val; -+ -+ /* Disable all AN and force speed to 10G Serial. */ -+ status = mac->ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); -+ if (status) -+ return status; -+ -+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; -+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; -+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; -+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; -+ -+ /* Select forced link speed for internal PHY. */ -+ switch (*speed) { -+ case IXGBE_LINK_SPEED_10GB_FULL: -+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G; -+ break; -+ case IXGBE_LINK_SPEED_1GB_FULL: -+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; -+ break; -+ default: -+ /* Other link speeds are not supported by internal PHY. */ -+ return IXGBE_ERR_LINK_SETUP; - } - -- return 0; -+ status = mac->ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); -+ -+ /* Toggle port SW reset by AN reset. 
*/ -+ status = ixgbe_restart_an_internal_phy_x550em(hw); - --i2c_err: -- hw_dbg(hw, "combined i2c access failed with %d\n", status); - return status; - } - -@@ -1390,45 +1760,39 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed, - { - bool setup_linear = false; - u32 reg_phy_int; -- s32 rc; -+ s32 ret_val; - - /* Check if SFP module is supported and linear */ -- rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); -+ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); - - /* If no SFP module present, then return success. Return success since - * SFP not present error is not excepted in the setup MAC link flow. - */ -- if (rc == IXGBE_ERR_SFP_NOT_PRESENT) -+ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) - return 0; - -- if (!rc) -- return rc; -+ if (ret_val) -+ return ret_val; - -- /* Configure internal PHY for native SFI */ -- rc = hw->mac.ops.read_iosf_sb_reg(hw, -- IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id), -- IXGBE_SB_IOSF_TARGET_KR_PHY, -- ®_phy_int); -- if (rc) -- return rc; -+ /* Configure internal PHY for native SFI based on module type */ -+ ret_val = hw->mac.ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_phy_int); -+ if (ret_val) -+ return ret_val; - -- if (setup_linear) { -- reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING; -- reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR; -- } else { -- reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING; -- reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR; -- } -+ reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA; -+ if (!setup_linear) -+ reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR; - -- rc = hw->mac.ops.write_iosf_sb_reg(hw, -- IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id), -- IXGBE_SB_IOSF_TARGET_KR_PHY, -- reg_phy_int); -- if (rc) -- return rc; -+ ret_val = hw->mac.ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int); -+ if (ret_val) -+ return ret_val; - -- /* Setup XFI/SFI 
internal link */ -- return ixgbe_setup_ixfi_x550em(hw, &speed); -+ /* Setup SFI internal link. */ -+ return ixgbe_setup_sfi_x550a(hw, &speed); - } - - /** -@@ -1444,19 +1808,19 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, - u32 reg_slice, slice_offset; - bool setup_linear = false; - u16 reg_phy_ext; -- s32 rc; -+ s32 ret_val; - - /* Check if SFP module is supported and linear */ -- rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); -+ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); - - /* If no SFP module present, then return success. Return success since - * SFP not present error is not excepted in the setup MAC link flow. - */ -- if (rc == IXGBE_ERR_SFP_NOT_PRESENT) -+ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) - return 0; - -- if (!rc) -- return rc; -+ if (ret_val) -+ return ret_val; - - /* Configure internal PHY for KR/KX. */ - ixgbe_setup_kr_speed_x550em(hw, speed); -@@ -1464,16 +1828,16 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, - if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE) - return IXGBE_ERR_PHY_ADDR_INVALID; - -- /* Get external PHY device id */ -- rc = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB, -- IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); -- if (rc) -- return rc; -+ /* Get external PHY SKU id */ -+ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU, -+ IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); -+ if (ret_val) -+ return ret_val; - - /* When configuring quad port CS4223, the MAC instance is part - * of the slice offset. - */ -- if (reg_phy_ext == IXGBE_CS4223_PHY_ID) -+ if (reg_phy_ext == IXGBE_CS4223_SKU_ID) - slice_offset = (hw->bus.lan_id + - (hw->bus.instance_id << 1)) << 12; - else -@@ -1481,12 +1845,28 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, - - /* Configure CS4227/CS4223 LINE side to proper mode. 
*/ - reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; -+ -+ ret_val = hw->phy.ops.read_reg(hw, reg_slice, -+ IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); -+ if (ret_val) -+ return ret_val; -+ -+ reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) | -+ (IXGBE_CS4227_EDC_MODE_SR << 1)); -+ - if (setup_linear) - reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; - else - reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; -- return hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE, -- reg_phy_ext); -+ -+ ret_val = hw->phy.ops.write_reg(hw, reg_slice, -+ IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext); -+ if (ret_val) -+ return ret_val; -+ -+ /* Flush previous write with a read */ -+ return hw->phy.ops.read_reg(hw, reg_slice, -+ IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); - } - - /** -@@ -1515,8 +1895,10 @@ static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, - else - force_speed = IXGBE_LINK_SPEED_1GB_FULL; - -- /* If internal link mode is XFI, then setup XFI internal link. */ -- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { -+ /* If X552 and internal link mode is XFI, then setup XFI internal link. -+ */ -+ if (hw->mac.type == ixgbe_mac_X550EM_x && -+ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { - status = ixgbe_setup_ixfi_x550em(hw, &force_speed); - - if (status) -@@ -1540,7 +1922,7 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, - bool link_up_wait_to_complete) - { - u32 status; -- u16 autoneg_status; -+ u16 i, autoneg_status; - - if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) - return IXGBE_ERR_CONFIG; -@@ -1552,14 +1934,18 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, - if (status || !(*link_up)) - return status; - -- /* MAC link is up, so check external PHY link. -- * Read this twice back to back to indicate current status. 
-- */ -- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -- &autoneg_status); -- if (status) -- return status; -+ /* MAC link is up, so check external PHY link. -+ * Link status is latching low, and can only be used to detect link -+ * drop, and not the current status of the link without performing -+ * back-to-back reads. -+ */ -+ for (i = 0; i < 2; i++) { -+ status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, -+ &autoneg_status); -+ -+ if (status) -+ return status; -+ } - - /* If external PHY link is not up, then indicate link not up */ - if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) -@@ -1577,7 +1963,7 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, - __always_unused bool autoneg_wait_to_complete) - { - struct ixgbe_mac_info *mac = &hw->mac; -- u32 lval, sval; -+ u32 lval, sval, flx_val; - s32 rc; - - rc = mac->ops.read_iosf_sb_reg(hw, -@@ -1611,12 +1997,183 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, - if (rc) - return rc; - -- lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; -+ rc = mac->ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); -+ if (rc) -+ return rc; -+ -+ rc = mac->ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); -+ if (rc) -+ return rc; -+ -+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; -+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; -+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; -+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; -+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; -+ -+ rc = mac->ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); -+ if (rc) -+ return rc; -+ -+ rc = ixgbe_restart_an_internal_phy_x550em(hw); -+ return rc; -+} -+ -+/** -+ * ixgbe_setup_sgmii_fw - Set up link for sgmii with 
firmware-controlled PHYs -+ * @hw: pointer to hardware structure -+ */ -+static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, -+ bool autoneg_wait) -+{ -+ struct ixgbe_mac_info *mac = &hw->mac; -+ u32 lval, sval, flx_val; -+ s32 rc; -+ -+ rc = mac->ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); -+ if (rc) -+ return rc; -+ -+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; -+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; -+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; -+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; -+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; -+ rc = mac->ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval); -+ if (rc) -+ return rc; -+ -+ rc = mac->ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); -+ if (rc) -+ return rc; -+ -+ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; -+ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; -+ rc = mac->ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, sval); -+ if (rc) -+ return rc; -+ - rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, lval); -+ if (rc) -+ return rc; - -- return rc; -+ rc = mac->ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); -+ if (rc) -+ return rc; -+ -+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; -+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; -+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; -+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; -+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; -+ -+ rc = mac->ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); -+ if (rc) -+ return rc; -+ -+ 
ixgbe_restart_an_internal_phy_x550em(hw); -+ -+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); -+} -+ -+/** -+ * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 -+ * @hw: pointer to hardware structure -+ * -+ * Enable flow control according to IEEE clause 37. -+ */ -+static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) -+{ -+ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; -+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; -+ ixgbe_link_speed speed; -+ bool link_up; -+ -+ /* AN should have completed when the cable was plugged in. -+ * Look for reasons to bail out. Bail out if: -+ * - FC autoneg is disabled, or if -+ * - link is not up. -+ */ -+ if (hw->fc.disable_fc_autoneg) -+ goto out; -+ -+ hw->mac.ops.check_link(hw, &speed, &link_up, false); -+ if (!link_up) -+ goto out; -+ -+ /* Check if auto-negotiation has completed */ -+ status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); -+ if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { -+ status = IXGBE_ERR_FC_NOT_NEGOTIATED; -+ goto out; -+ } -+ -+ /* Negotiate the flow control */ -+ status = ixgbe_negotiate_fc(hw, info[0], info[0], -+ FW_PHY_ACT_GET_LINK_INFO_FC_RX, -+ FW_PHY_ACT_GET_LINK_INFO_FC_TX, -+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, -+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); -+ -+out: -+ if (!status) { -+ hw->fc.fc_was_autonegged = true; -+ } else { -+ hw->fc.fc_was_autonegged = false; -+ hw->fc.current_mode = hw->fc.requested_mode; -+ } -+} -+ -+/** ixgbe_init_mac_link_ops_X550em_a - Init mac link function pointers -+ * @hw: pointer to hardware structure -+ **/ -+static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw) -+{ -+ struct ixgbe_mac_info *mac = &hw->mac; -+ -+ switch (mac->ops.get_media_type(hw)) { -+ case ixgbe_media_type_fiber: -+ mac->ops.setup_fc = NULL; -+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; -+ break; -+ case ixgbe_media_type_copper: -+ if (hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T && -+ hw->device_id 
!= IXGBE_DEV_ID_X550EM_A_1G_T_L) { -+ mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; -+ break; -+ } -+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; -+ mac->ops.setup_fc = ixgbe_fc_autoneg_fw; -+ mac->ops.setup_link = ixgbe_setup_sgmii_fw; -+ mac->ops.check_link = ixgbe_check_mac_link_generic; -+ break; -+ case ixgbe_media_type_backplane: -+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; -+ mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; -+ break; -+ default: -+ break; -+ } - } - - /** ixgbe_init_mac_link_ops_X550em - init mac link function pointers -@@ -1654,10 +2211,12 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) - ixgbe_set_soft_rate_select_speed; - break; - case ixgbe_media_type_copper: -+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) -+ break; - mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; - mac->ops.setup_fc = ixgbe_setup_fc_generic; - mac->ops.check_link = ixgbe_check_link_t_X550em; -- return; -+ break; - case ixgbe_media_type_backplane: - if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || - hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) -@@ -1666,6 +2225,10 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) - default: - break; - } -+ -+ /* Additional modification for X550em_a devices */ -+ if (hw->mac.type == ixgbe_mac_x550em_a) -+ ixgbe_init_mac_link_ops_X550em_a(hw); - } - - /** ixgbe_setup_sfp_modules_X550em - Setup SFP module -@@ -1696,6 +2259,12 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) - { -+ if (hw->phy.type == ixgbe_phy_fw) { -+ *autoneg = true; -+ *speed = hw->phy.speeds_supported; -+ return 0; -+ } -+ - /* SFP */ - if (hw->phy.media_type == ixgbe_media_type_fiber) { - /* CS4227 SFP must not enable auto-negotiation */ -@@ -1714,8 +2283,39 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, - else - *speed = IXGBE_LINK_SPEED_10GB_FULL; - } else { -- *speed = 
IXGBE_LINK_SPEED_10GB_FULL | -- IXGBE_LINK_SPEED_1GB_FULL; -+ switch (hw->phy.type) { -+ case ixgbe_phy_x550em_kx4: -+ *speed = IXGBE_LINK_SPEED_1GB_FULL | -+ IXGBE_LINK_SPEED_2_5GB_FULL | -+ IXGBE_LINK_SPEED_10GB_FULL; -+ break; -+ case ixgbe_phy_x550em_xfi: -+ *speed = IXGBE_LINK_SPEED_1GB_FULL | -+ IXGBE_LINK_SPEED_10GB_FULL; -+ break; -+ case ixgbe_phy_ext_1g_t: -+ case ixgbe_phy_sgmii: -+ *speed = IXGBE_LINK_SPEED_1GB_FULL; -+ break; -+ case ixgbe_phy_x550em_kr: -+ if (hw->mac.type == ixgbe_mac_x550em_a) { -+ /* check different backplane modes */ -+ if (hw->phy.nw_mng_if_sel & -+ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { -+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL; -+ break; -+ } else if (hw->device_id == -+ IXGBE_DEV_ID_X550EM_A_KR_L) { -+ *speed = IXGBE_LINK_SPEED_1GB_FULL; -+ break; -+ } -+ } -+ /* fall through */ -+ default: -+ *speed = IXGBE_LINK_SPEED_10GB_FULL | -+ IXGBE_LINK_SPEED_1GB_FULL; -+ break; -+ } - *autoneg = true; - } - return 0; -@@ -1742,7 +2342,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) - - /* Vendor alarm triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - ®); - - if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN)) -@@ -1750,7 +2350,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) - - /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - ®); - - if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | -@@ -1759,7 +2359,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) - - /* Global alarm triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - ®); - - if (status) -@@ -1774,7 +2374,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct 
ixgbe_hw *hw, bool *lsc) - if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { - /* device fault alarm triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - ®); - if (status) - return status; -@@ -1789,14 +2389,14 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) - - /* Vendor alarm 2 triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); -+ MDIO_MMD_AN, ®); - - if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT)) - return status; - - /* link connect/disconnect event occurred */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); -+ MDIO_MMD_AN, ®); - - if (status) - return status; -@@ -1827,21 +2427,34 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) - status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); - - /* Enable link status change alarm */ -- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); -- if (status) -- return status; - -- reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; -+ /* Enable the LASI interrupts on X552 devices to receive notifications -+ * of the link configurations of the external PHY and correspondingly -+ * support the configuration of the internal iXFI link, since iXFI does -+ * not support auto-negotiation. This is not required for X553 devices -+ * having KR support, which performs auto-negotiations and which is used -+ * as the internal link to the external PHY. Hence adding a check here -+ * to avoid enabling LASI interrupts for X553 devices. 
-+ */ -+ if (hw->mac.type != ixgbe_mac_x550em_a) { -+ status = hw->phy.ops.read_reg(hw, -+ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, -+ MDIO_MMD_AN, ®); -+ if (status) -+ return status; - -- status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg); -- if (status) -- return status; -+ reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; -+ -+ status = hw->phy.ops.write_reg(hw, -+ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, -+ MDIO_MMD_AN, reg); -+ if (status) -+ return status; -+ } - - /* Enable high temperature failure and global fault alarms */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - ®); - if (status) - return status; -@@ -1850,14 +2463,14 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) - IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN); - - status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - reg); - if (status) - return status; - - /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - ®); - if (status) - return status; -@@ -1866,14 +2479,14 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) - IXGBE_MDIO_GLOBAL_ALARM_1_INT); - - status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - reg); - if (status) - return status; - - /* Enable chip-wide vendor alarm */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - ®); - if (status) - return status; -@@ -1881,7 +2494,7 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) - reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN; - - status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, -- 
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - reg); - - return status; -@@ -1945,51 +2558,31 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; - -- /* Restart auto-negotiation. */ -- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; - status = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - -- return status; --} -- --/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY. -- * @hw: pointer to hardware structure -- * -- * Configures the integrated KX4 PHY. -- **/ --static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) --{ -- s32 status; -- u32 reg_val; -- -- status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1, -- IXGBE_SB_IOSF_TARGET_KX4_PCS0 + -- hw->bus.lan_id, ®_val); -- if (status) -- return status; -- -- reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 | -- IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX); -- -- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE; -+ if (hw->mac.type == ixgbe_mac_x550em_a) { -+ /* Set lane mode to KR auto negotiation */ -+ status = hw->mac.ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - -- /* Advertise 10G support. */ -- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) -- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4; -+ if (status) -+ return status; - -- /* Advertise 1G support. */ -- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) -- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX; -+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; -+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; -+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; -+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; -+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; - -- /* Restart auto-negotiation. 
*/ -- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART; -- status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1, -- IXGBE_SB_IOSF_TARGET_KX4_PCS0 + -- hw->bus.lan_id, reg_val); -+ status = hw->mac.ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); -+ } - -- return status; -+ return ixgbe_restart_an_internal_phy_x550em(hw); - } - - /** -@@ -2002,6 +2595,9 @@ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) - return 0; - -+ if (ixgbe_check_reset_blocked(hw)) -+ return 0; -+ - return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); - } - -@@ -2019,14 +2615,12 @@ static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) - *link_up = false; - - /* read this twice back to back to indicate current status */ -- ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -+ ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, - &autoneg_status); - if (ret) - return ret; - -- ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -+ ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, - &autoneg_status); - if (ret) - return ret; -@@ -2057,7 +2651,8 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) - if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) - return IXGBE_ERR_CONFIG; - -- if (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) { -+ if (!(hw->mac.type == ixgbe_mac_X550EM_x && -+ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) { - speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - return ixgbe_setup_kr_speed_x550em(hw, speed); -@@ -2072,7 +2667,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) - return 0; - - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -+ MDIO_MMD_AN, - 
&speed); - if (status) - return status; -@@ -2133,10 +2728,10 @@ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx) - - /* To turn on the LED, set mode to ON. */ - hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); -+ MDIO_MMD_VEND1, &phy_data); - phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK; - hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); -+ MDIO_MMD_VEND1, phy_data); - - return 0; - } -@@ -2155,14 +2750,70 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) - - /* To turn on the LED, set mode to ON. */ - hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); -+ MDIO_MMD_VEND1, &phy_data); - phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK; - hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); -+ MDIO_MMD_VEND1, phy_data); - - return 0; - } - -+/** -+ * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware -+ * @hw: pointer to the HW structure -+ * @maj: driver version major number -+ * @min: driver version minor number -+ * @build: driver version build number -+ * @sub: driver version sub build number -+ * @len: length of driver_ver string -+ * @driver_ver: driver string -+ * -+ * Sends driver version number to firmware through the manageability -+ * block. On success return 0 -+ * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring -+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
-+ **/ -+static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, -+ u8 build, u8 sub, u16 len, -+ const char *driver_ver) -+{ -+ struct ixgbe_hic_drv_info2 fw_cmd; -+ s32 ret_val; -+ int i; -+ -+ if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string))) -+ return IXGBE_ERR_INVALID_ARGUMENT; -+ -+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; -+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; -+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; -+ fw_cmd.port_num = (u8)hw->bus.func; -+ fw_cmd.ver_maj = maj; -+ fw_cmd.ver_min = min; -+ fw_cmd.ver_build = build; -+ fw_cmd.ver_sub = sub; -+ fw_cmd.hdr.checksum = 0; -+ memcpy(fw_cmd.driver_string, driver_ver, len); -+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, -+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); -+ -+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { -+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, -+ sizeof(fw_cmd), -+ IXGBE_HI_COMMAND_TIMEOUT, -+ true); -+ if (ret_val) -+ continue; -+ -+ if (fw_cmd.hdr.cmd_or_resp.ret_status != -+ FW_CEM_RESP_STATUS_SUCCESS) -+ return IXGBE_ERR_HOST_INTERFACE_COMMAND; -+ return 0; -+ } -+ -+ return ret_val; -+} -+ - /** ixgbe_get_lcd_x550em - Determine lowest common denominator - * @hw: pointer to hardware structure - * @lcd_speed: pointer to lowest common link speed -@@ -2179,7 +2830,7 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, - *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; - - status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -+ MDIO_MMD_AN, - &an_lp_status); - if (status) - return status; -@@ -2208,7 +2859,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) - { - bool pause, asm_dir; - u32 reg_val; -- s32 rc; -+ s32 rc = 0; - - /* Validate the requested mode */ - if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { -@@ -2251,33 +2902,122 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) - return IXGBE_ERR_CONFIG; - } - -- if 
(hw->device_id != IXGBE_DEV_ID_X550EM_X_KR && -- hw->device_id != IXGBE_DEV_ID_X550EM_A_KR && -- hw->device_id != IXGBE_DEV_ID_X550EM_A_KR_L) -- return 0; -+ switch (hw->device_id) { -+ case IXGBE_DEV_ID_X550EM_X_KR: -+ case IXGBE_DEV_ID_X550EM_A_KR: -+ case IXGBE_DEV_ID_X550EM_A_KR_L: -+ rc = hw->mac.ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, -+ ®_val); -+ if (rc) -+ return rc; -+ -+ reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | -+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); -+ if (pause) -+ reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; -+ if (asm_dir) -+ reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; -+ rc = hw->mac.ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, -+ reg_val); -+ -+ /* This device does not fully support AN. */ -+ hw->fc.disable_fc_autoneg = true; -+ break; -+ case IXGBE_DEV_ID_X550EM_X_XFI: -+ hw->fc.disable_fc_autoneg = true; -+ break; -+ default: -+ break; -+ } -+ return rc; -+} - -- rc = hw->mac.ops.read_iosf_sb_reg(hw, -- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), -- IXGBE_SB_IOSF_TARGET_KR_PHY, -- ®_val); -- if (rc) -- return rc; -+/** -+ * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37 -+ * @hw: pointer to hardware structure -+ **/ -+static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) -+{ -+ u32 link_s1, lp_an_page_low, an_cntl_1; -+ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; -+ ixgbe_link_speed speed; -+ bool link_up; - -- reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | -- IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); -- if (pause) -- reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; -- if (asm_dir) -- reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; -- rc = hw->mac.ops.write_iosf_sb_reg(hw, -- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), -- IXGBE_SB_IOSF_TARGET_KR_PHY, -- reg_val); -+ /* AN should have completed when the cable was plugged in. -+ * Look for reasons to bail out. Bail out if: -+ * - FC autoneg is disabled, or if -+ * - link is not up. 
-+ */ -+ if (hw->fc.disable_fc_autoneg) { -+ hw_err(hw, "Flow control autoneg is disabled"); -+ goto out; -+ } - -- /* This device does not fully support AN. */ -- hw->fc.disable_fc_autoneg = true; -+ hw->mac.ops.check_link(hw, &speed, &link_up, false); -+ if (!link_up) { -+ hw_err(hw, "The link is down"); -+ goto out; -+ } - -- return rc; -+ /* Check at auto-negotiation has completed */ -+ status = hw->mac.ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_LINK_S1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1); -+ -+ if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) { -+ hw_dbg(hw, "Auto-Negotiation did not complete\n"); -+ status = IXGBE_ERR_FC_NOT_NEGOTIATED; -+ goto out; -+ } -+ -+ /* Read the 10g AN autoc and LP ability registers and resolve -+ * local flow control settings accordingly -+ */ -+ status = hw->mac.ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1); -+ -+ if (status) { -+ hw_dbg(hw, "Auto-Negotiation did not complete\n"); -+ goto out; -+ } -+ -+ status = hw->mac.ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low); -+ -+ if (status) { -+ hw_dbg(hw, "Auto-Negotiation did not complete\n"); -+ goto out; -+ } -+ -+ status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low, -+ IXGBE_KRM_AN_CNTL_1_SYM_PAUSE, -+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE, -+ IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE, -+ IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE); -+ -+out: -+ if (!status) { -+ hw->fc.fc_was_autonegged = true; -+ } else { -+ hw->fc.fc_was_autonegged = false; -+ hw->fc.current_mode = hw->fc.requested_mode; -+ } -+} -+ -+/** -+ * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings -+ * @hw: pointer to hardware structure -+ **/ -+static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw) -+{ -+ hw->fc.fc_was_autonegged = false; -+ hw->fc.current_mode = hw->fc.requested_mode; - } - - /** ixgbe_enter_lplu_x550em - Transition to 
low power states -@@ -2326,7 +3066,7 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) - return ixgbe_set_copper_phy_power(hw, false); - - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -+ MDIO_MMD_AN, - &speed); - if (status) - return status; -@@ -2348,20 +3088,20 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) - - /* Clear AN completed indication */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -+ MDIO_MMD_AN, - &autoneg_reg); - if (status) - return status; - -- status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -+ status = hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, -+ MDIO_MMD_AN, - &an_10g_cntl_reg); - if (status) - return status; - - status = hw->phy.ops.read_reg(hw, - IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, -- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, -+ MDIO_MMD_AN, - &autoneg_reg); - if (status) - return status; -@@ -2378,6 +3118,50 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) - } - - /** -+ * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs -+ * @hw: pointer to hardware structure -+ */ -+static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) -+{ -+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; -+ s32 rc; -+ -+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) -+ return 0; -+ -+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store); -+ if (rc) -+ return rc; -+ memset(store, 0, sizeof(store)); -+ -+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store); -+ if (rc) -+ return rc; -+ -+ return ixgbe_setup_fw_link(hw); -+} -+ -+/** -+ * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp -+ * @hw: pointer to hardware structure -+ */ -+static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) -+{ -+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; -+ s32 rc; -+ -+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store); -+ if 
(rc) -+ return rc; -+ -+ if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) { -+ ixgbe_shutdown_fw_phy(hw); -+ return IXGBE_ERR_OVERTEMP; -+ } -+ return 0; -+} -+ -+/** - * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register - * @hw: pointer to hardware structure - * -@@ -2398,6 +3182,18 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) - hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel & - IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> - IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; -+#if 1 /* Since by Intel FW(LEK8),LAN controller 1 default set port 0 use phy address 0 -+ * and port 1 use phy address 1, we swap it for Porsche2 platform. -+ * By hilbert. -+ */ -+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { -+ /*hw_err(hw, "####swap phy address used for different lan id in LAN conroller-1\n");*/ -+ hw->phy.mdio.prtad = (hw->bus.lan_id == 0) ? (1) : (0); -+ /*hw_err(hw, "####lan id: %d, phy address:%d\n", -+ hw->bus.lan_id, -+ hw->phy.mdio.prtad);*/ -+ } -+#endif - } - } - -@@ -2433,7 +3229,7 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) - /* Set functions pointers based on phy type */ - switch (hw->phy.type) { - case ixgbe_phy_x550em_kx4: -- phy->ops.setup_link = ixgbe_setup_kx4_x550em; -+ phy->ops.setup_link = NULL; - phy->ops.read_reg = ixgbe_read_phy_reg_x550em; - phy->ops.write_reg = ixgbe_write_phy_reg_x550em; - break; -@@ -2442,6 +3238,12 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) - phy->ops.read_reg = ixgbe_read_phy_reg_x550em; - phy->ops.write_reg = ixgbe_write_phy_reg_x550em; - break; -+ case ixgbe_phy_x550em_xfi: -+ /* link is managed by HW */ -+ phy->ops.setup_link = NULL; -+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em; -+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em; -+ break; - case ixgbe_phy_x550em_ext_t: - /* Save NW management interface connected on board. 
This is used - * to determine internal PHY mode -@@ -2463,6 +3265,19 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) - phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; - phy->ops.reset = ixgbe_reset_phy_t_X550em; - break; -+ case ixgbe_phy_sgmii: -+ phy->ops.setup_link = NULL; -+ break; -+ case ixgbe_phy_fw: -+ phy->ops.setup_link = ixgbe_setup_fw_link; -+ phy->ops.reset = ixgbe_reset_phy_fw; -+ break; -+ case ixgbe_phy_ext_1g_t: -+ phy->ops.setup_link = NULL; -+ phy->ops.read_reg = NULL; -+ phy->ops.write_reg = NULL; -+ phy->ops.reset = NULL; -+ break; - default: - break; - } -@@ -2488,6 +3303,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) - /* Fallthrough */ - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_X_KX4: -+ case IXGBE_DEV_ID_X550EM_X_XFI: - case IXGBE_DEV_ID_X550EM_A_KR: - case IXGBE_DEV_ID_X550EM_A_KR_L: - media_type = ixgbe_media_type_backplane; -@@ -2500,6 +3316,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) - case IXGBE_DEV_ID_X550EM_X_1G_T: - case IXGBE_DEV_ID_X550EM_X_10G_T: - case IXGBE_DEV_ID_X550EM_A_10G_T: -+ case IXGBE_DEV_ID_X550EM_A_1G_T: -+ case IXGBE_DEV_ID_X550EM_A_1G_T_L: - media_type = ixgbe_media_type_copper; - break; - default: -@@ -2519,7 +3337,7 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) - - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_TX_VENDOR_ALARMS_3, -- IXGBE_MDIO_PMA_PMD_DEV_TYPE, -+ MDIO_MMD_PMAPMD, - ®); - if (status) - return status; -@@ -2530,7 +3348,7 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) - if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_GLOBAL_RES_PR_10, -- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - ®); - if (status) - return status; -@@ -2539,7 +3357,7 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) - - status = hw->phy.ops.write_reg(hw, - IXGBE_MDIO_GLOBAL_RES_PR_10, -- 
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, -+ MDIO_MMD_VEND1, - reg); - if (status) - return status; -@@ -2567,6 +3385,13 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) - hlreg0 &= ~IXGBE_HLREG0_MDCSPD; - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); - break; -+ case IXGBE_DEV_ID_X550EM_A_1G_T: -+ case IXGBE_DEV_ID_X550EM_A_1G_T_L: -+ /* Select fast MDIO clock speed for these devices */ -+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); -+ hlreg0 |= IXGBE_HLREG0_MDCSPD; -+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); -+ break; - default: - break; - } -@@ -2586,6 +3411,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) - u32 ctrl = 0; - u32 i; - bool link_up = false; -+ u32 swfw_mask = hw->phy.phy_semaphore_mask; - - /* Call adapter stop to disable Tx/Rx and clear interrupts */ - status = hw->mac.ops.stop_adapter(hw); -@@ -2613,6 +3439,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) - hw->phy.sfp_setup_needed = false; - } - -+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) -+ return status; -+ - /* Reset PHY */ - if (!hw->phy.reset_disable && hw->phy.ops.reset) - hw->phy.ops.reset(hw); -@@ -2631,9 +3460,16 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) - ctrl = IXGBE_CTRL_RST; - } - -+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); -+ if (status) { -+ hw_dbg(hw, "semaphore failed with %d", status); -+ return IXGBE_ERR_SWFW_SYNC; -+ } -+ - ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); - IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); - IXGBE_WRITE_FLUSH(hw); -+ hw->mac.ops.release_swfw_sync(hw, swfw_mask); - usleep_range(1000, 1200); - - /* Poll for reset bit to self-clear meaning reset is complete */ -@@ -2728,6 +3564,90 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, - } - - /** -+ * ixgbe_setup_fc_backplane_x550em_a - Set up flow control -+ * @hw: pointer to hardware structure -+ * -+ * Called at init time to set up flow control. 
-+ **/ -+static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) -+{ -+ s32 status = 0; -+ u32 an_cntl = 0; -+ -+ /* Validate the requested mode */ -+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { -+ hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); -+ return IXGBE_ERR_INVALID_LINK_SETTINGS; -+ } -+ -+ if (hw->fc.requested_mode == ixgbe_fc_default) -+ hw->fc.requested_mode = ixgbe_fc_full; -+ -+ /* Set up the 1G and 10G flow control advertisement registers so the -+ * HW will be able to do FC autoneg once the cable is plugged in. If -+ * we link at 10G, the 1G advertisement is harmless and vice versa. -+ */ -+ status = hw->mac.ops.read_iosf_sb_reg(hw, -+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl); -+ -+ if (status) { -+ hw_dbg(hw, "Auto-Negotiation did not complete\n"); -+ return status; -+ } -+ -+ /* The possible values of fc.requested_mode are: -+ * 0: Flow control is completely disabled -+ * 1: Rx flow control is enabled (we can receive pause frames, -+ * but not send pause frames). -+ * 2: Tx flow control is enabled (we can send pause frames but -+ * we do not support receiving pause frames). -+ * 3: Both Rx and Tx flow control (symmetric) are enabled. -+ * other: Invalid. -+ */ -+ switch (hw->fc.requested_mode) { -+ case ixgbe_fc_none: -+ /* Flow control completely disabled by software override. */ -+ an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | -+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); -+ break; -+ case ixgbe_fc_tx_pause: -+ /* Tx Flow control is enabled, and Rx Flow control is -+ * disabled by software override. -+ */ -+ an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; -+ an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; -+ break; -+ case ixgbe_fc_rx_pause: -+ /* Rx Flow control is enabled and Tx Flow control is -+ * disabled by software override. 
Since there really -+ * isn't a way to advertise that we are capable of RX -+ * Pause ONLY, we will advertise that we support both -+ * symmetric and asymmetric Rx PAUSE, as such we fall -+ * through to the fc_full statement. Later, we will -+ * disable the adapter's ability to send PAUSE frames. -+ */ -+ case ixgbe_fc_full: -+ /* Flow control (both Rx and Tx) is enabled by SW override. */ -+ an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | -+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; -+ break; -+ default: -+ hw_err(hw, "Flow control param set incorrectly\n"); -+ return IXGBE_ERR_CONFIG; -+ } -+ -+ status = hw->mac.ops.write_iosf_sb_reg(hw, -+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), -+ IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl); -+ -+ /* Restart auto-negotiation. */ -+ status = ixgbe_restart_an_internal_phy_x550em(hw); -+ -+ return status; -+} -+ -+/** - * ixgbe_set_mux - Set mux for port 1 access with CS4227 - * @hw: pointer to hardware structure - * @state: set mux if 1, clear if 0 -@@ -2881,7 +3801,13 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - if (hw->mac.ops.acquire_swfw_sync(hw, mask)) - return IXGBE_ERR_SWFW_SYNC; - -+#if 0 /* To use C22 MDI access function created by our own. 
-+ * By hilbert -+ */ - status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data); -+#else -+ status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, phy_data); -+#endif - hw->mac.ops.release_swfw_sync(hw, mask); - - return status; -@@ -2914,7 +3840,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - .clear_vfta = &ixgbe_clear_vfta_generic, \ - .set_vfta = &ixgbe_set_vfta_generic, \ - .fc_enable = &ixgbe_fc_enable_generic, \ -- .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, \ -+ .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_x550, \ - .init_uta_tables = &ixgbe_init_uta_tables_generic, \ - .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ - .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ -@@ -2933,6 +3859,7 @@ static const struct ixgbe_mac_operations mac_ops_X550 = { - X550_COMMON_MAC - .led_on = ixgbe_led_on_generic, - .led_off = ixgbe_led_off_generic, -+ .init_led_link_act = ixgbe_init_led_link_act_generic, - .reset_hw = &ixgbe_reset_hw_X540, - .get_media_type = &ixgbe_get_media_type_X540, - .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, -@@ -2947,12 +3874,14 @@ static const struct ixgbe_mac_operations mac_ops_X550 = { - .prot_autoc_read = prot_autoc_read_generic, - .prot_autoc_write = prot_autoc_write_generic, - .setup_fc = ixgbe_setup_fc_generic, -+ .fc_autoneg = ixgbe_fc_autoneg, - }; - - static const struct ixgbe_mac_operations mac_ops_X550EM_x = { - X550_COMMON_MAC - .led_on = ixgbe_led_on_t_x550em, - .led_off = ixgbe_led_off_t_x550em, -+ .init_led_link_act = ixgbe_init_led_link_act_generic, - .reset_hw = &ixgbe_reset_hw_X550em, - .get_media_type = &ixgbe_get_media_type_X550em, - .get_san_mac_addr = NULL, -@@ -2965,6 +3894,29 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = { - .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, - .init_swfw_sync = &ixgbe_init_swfw_sync_X540, - .setup_fc = NULL, /* defined later */ -+ .fc_autoneg = ixgbe_fc_autoneg, -+ .read_iosf_sb_reg = 
ixgbe_read_iosf_sb_reg_x550, -+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, -+}; -+ -+static const struct ixgbe_mac_operations mac_ops_X550EM_x_fw = { -+ X550_COMMON_MAC -+ .led_on = NULL, -+ .led_off = NULL, -+ .init_led_link_act = NULL, -+ .reset_hw = &ixgbe_reset_hw_X550em, -+ .get_media_type = &ixgbe_get_media_type_X550em, -+ .get_san_mac_addr = NULL, -+ .get_wwn_prefix = NULL, -+ .setup_link = &ixgbe_setup_mac_link_X540, -+ .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, -+ .get_bus_info = &ixgbe_get_bus_info_X550em, -+ .setup_sfp = ixgbe_setup_sfp_modules_X550em, -+ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, -+ .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, -+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540, -+ .setup_fc = NULL, -+ .fc_autoneg = ixgbe_fc_autoneg, - .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550, - .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, - }; -@@ -2973,6 +3925,28 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = { - X550_COMMON_MAC - .led_on = ixgbe_led_on_t_x550em, - .led_off = ixgbe_led_off_t_x550em, -+ .init_led_link_act = ixgbe_init_led_link_act_generic, -+ .reset_hw = ixgbe_reset_hw_X550em, -+ .get_media_type = ixgbe_get_media_type_X550em, -+ .get_san_mac_addr = NULL, -+ .get_wwn_prefix = NULL, -+ .setup_link = &ixgbe_setup_mac_link_X540, -+ .get_link_capabilities = ixgbe_get_link_capabilities_X550em, -+ .get_bus_info = ixgbe_get_bus_info_X550em, -+ .setup_sfp = ixgbe_setup_sfp_modules_X550em, -+ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, -+ .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, -+ .setup_fc = ixgbe_setup_fc_x550em, -+ .fc_autoneg = ixgbe_fc_autoneg, -+ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, -+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, -+}; -+ -+static struct ixgbe_mac_operations mac_ops_x550em_a_fw = { -+ X550_COMMON_MAC -+ .led_on = ixgbe_led_on_generic, -+ .led_off = ixgbe_led_off_generic, -+ .init_led_link_act = 
ixgbe_init_led_link_act_generic, - .reset_hw = ixgbe_reset_hw_X550em, - .get_media_type = ixgbe_get_media_type_X550em, - .get_san_mac_addr = NULL, -@@ -2984,6 +3958,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = { - .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, - .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, - .setup_fc = ixgbe_setup_fc_x550em, -+ .fc_autoneg = ixgbe_fc_autoneg, - .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, - .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, - }; -@@ -3017,12 +3992,11 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { - .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ - .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ - .setup_link = &ixgbe_setup_phy_link_generic, \ -- .set_phy_power = NULL, \ -- .check_overtemp = &ixgbe_tn_check_overtemp, \ -- .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, -+ .set_phy_power = NULL, - - static const struct ixgbe_phy_operations phy_ops_X550 = { - X550_COMMON_PHY -+ .check_overtemp = &ixgbe_tn_check_overtemp, - .init = NULL, - .identify = &ixgbe_identify_phy_generic, - .read_reg = &ixgbe_read_phy_reg_generic, -@@ -3031,19 +4005,27 @@ static const struct ixgbe_phy_operations phy_ops_X550 = { - - static const struct ixgbe_phy_operations phy_ops_X550EM_x = { - X550_COMMON_PHY -+ .check_overtemp = &ixgbe_tn_check_overtemp, - .init = &ixgbe_init_phy_ops_X550em, - .identify = &ixgbe_identify_phy_x550em, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, -- .read_i2c_combined = &ixgbe_read_i2c_combined_generic, -- .write_i2c_combined = &ixgbe_write_i2c_combined_generic, -- .read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, -- .write_i2c_combined_unlocked = -- &ixgbe_write_i2c_combined_generic_unlocked, -+}; -+ -+static const struct ixgbe_phy_operations phy_ops_x550em_x_fw = { -+ X550_COMMON_PHY -+ .check_overtemp = NULL, -+ .init = ixgbe_init_phy_ops_X550em, -+ 
.identify = ixgbe_identify_phy_x550em, -+ .read_reg = NULL, -+ .write_reg = NULL, -+ .read_reg_mdi = NULL, -+ .write_reg_mdi = NULL, - }; - - static const struct ixgbe_phy_operations phy_ops_x550em_a = { - X550_COMMON_PHY -+ .check_overtemp = &ixgbe_tn_check_overtemp, - .init = &ixgbe_init_phy_ops_X550em, - .identify = &ixgbe_identify_phy_x550em, - .read_reg = &ixgbe_read_phy_reg_x550a, -@@ -3052,6 +4034,31 @@ static const struct ixgbe_phy_operations phy_ops_x550em_a = { - .write_reg_mdi = &ixgbe_write_phy_reg_mdi, - }; - -+static const struct ixgbe_phy_operations phy_ops_x550em_a_fw = { -+ X550_COMMON_PHY -+ .check_overtemp = ixgbe_check_overtemp_fw, -+ .init = ixgbe_init_phy_ops_X550em, -+ .identify = ixgbe_identify_phy_fw, -+#if 0 /* Declare C22 MDI directly access functions. By hilbert */ -+ .read_reg = NULL, -+ .write_reg = NULL, -+ .read_reg_mdi = NULL, -+ .write_reg_mdi = NULL, -+#else -+ .read_reg = &ixgbe_read_phy_reg_x550a, -+ .write_reg = &ixgbe_write_phy_reg_x550a, -+ .read_reg_mdi = &ixgbe_read_phy_reg_mdio, -+ .write_reg_mdi = &ixgbe_write_phy_reg_mdio, -+#endif -+}; -+ -+static const struct ixgbe_link_operations link_ops_x550em_x = { -+ .read_link = &ixgbe_read_i2c_combined_generic, -+ .read_link_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, -+ .write_link = &ixgbe_write_i2c_combined_generic, -+ .write_link_unlocked = &ixgbe_write_i2c_combined_generic_unlocked, -+}; -+ - static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(X550) - }; -@@ -3082,14 +4089,35 @@ const struct ixgbe_info ixgbe_X550EM_x_info = { - .phy_ops = &phy_ops_X550EM_x, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_X550EM_x, -+ .link_ops = &link_ops_x550em_x, -+}; -+ -+const struct ixgbe_info ixgbe_x550em_x_fw_info = { -+ .mac = ixgbe_mac_X550EM_x, -+ .get_invariants = ixgbe_get_invariants_X550_x_fw, -+ .mac_ops = &mac_ops_X550EM_x_fw, -+ .eeprom_ops = &eeprom_ops_X550EM_x, -+ .phy_ops = &phy_ops_x550em_x_fw, -+ .mbx_ops = &mbx_ops_generic, 
-+ .mvals = ixgbe_mvals_X550EM_x, - }; - - const struct ixgbe_info ixgbe_x550em_a_info = { - .mac = ixgbe_mac_x550em_a, -- .get_invariants = &ixgbe_get_invariants_X550_x, -+ .get_invariants = &ixgbe_get_invariants_X550_a, - .mac_ops = &mac_ops_x550em_a, - .eeprom_ops = &eeprom_ops_X550EM_x, - .phy_ops = &phy_ops_x550em_a, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_x550em_a, - }; -+ -+const struct ixgbe_info ixgbe_x550em_a_fw_info = { -+ .mac = ixgbe_mac_x550em_a, -+ .get_invariants = ixgbe_get_invariants_X550_a_fw, -+ .mac_ops = &mac_ops_x550em_a_fw, -+ .eeprom_ops = &eeprom_ops_X550EM_x, -+ .phy_ops = &phy_ops_x550em_a_fw, -+ .mbx_ops = &mbx_ops_generic, -+ .mvals = ixgbe_mvals_x550em_a, -+}; --- -2.7.4 - From 03f759f50d3245bed4e40c025445c74614489e05 Mon Sep 17 00:00:00 2001 From: PeterLin Date: Fri, 21 Jun 2019 14:49:57 +0800 Subject: [PATCH 14/20] 1. add ixgbe driver for fn-6254-dn-f 2. add Pega fn-6254-dn-f & porsche project to nephos 3. modify HWMON driver 4. add platform status checking service (PSU/FAN) --- .../installer.conf | 1 + platform/nephos/one-image.mk | 11 +- platform/nephos/platform-modules-pegatron.mk | 4 +- platform/nephos/rules.mk | 5 +- .../common/modules/pegatron_hwmon_mcu.c | 149 +- .../debian/rules | 7 +- .../fn-6254-dn-f/modules/Makefile | 2 +- .../pegatron_fn_6254_dn_f_ixgbe/Makefile | 44 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe.h | 1040 ++ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.c | 1220 ++ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.c | 2277 ++++ .../ixgbe_common.c | 4432 +++++++ .../ixgbe_common.h | 224 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.c | 410 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.h | 171 + .../ixgbe_dcb_82598.c | 288 + .../ixgbe_dcb_82598.h | 97 + .../ixgbe_dcb_82599.c | 369 + .../ixgbe_dcb_82599.h | 125 + .../ixgbe_dcb_nl.c | 809 ++ .../ixgbe_debugfs.c | 276 + .../ixgbe_ethtool.c | 3379 +++++ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.c | 1080 ++ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.h | 88 + 
.../pegatron_fn_6254_dn_f_ixgbe/ixgbe_lib.c | 1228 ++ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_main.c | 10254 ++++++++++++++++ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.c | 460 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.h | 128 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_model.h | 121 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.c | 2474 ++++ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.h | 205 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_ptp.c | 1343 ++ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.c | 1608 +++ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.h | 73 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_sysfs.c | 230 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_type.h | 3820 ++++++ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.c | 943 ++ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.h | 40 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.c | 4123 +++++++ .../modules/pegatron_fn_6254_dn_f_psu.c | 329 + .../fn-6254-dn-f/scripts/sensors | 4 +- .../fn_6254_dn_f-platform-init.service | 2 +- .../fn_6254_dn_f-platform-status.service | 12 + ...rs.py => pegatron_fn_6254_dn_f_sensors.py} | 18 +- .../utils/pegatron_fn_6254_dn_f_status.py | 164 + .../utils/pegatron_fn_6254_dn_f_util.py | 130 +- 46 files changed, 44092 insertions(+), 125 deletions(-) create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/Makefile create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.h 
create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_nl.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_debugfs.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ethtool.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_lib.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_main.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.h create mode 100644 
platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_model.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ptp.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sysfs.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_type.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_psu.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-status.service rename platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/{fn_6254_dn_f_sensors.py => pegatron_fn_6254_dn_f_sensors.py} (86%) create mode 100755 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_status.py diff --git a/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/installer.conf 
b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/installer.conf index 925a32fc0c3a..41dcfd39d2f8 100755 --- a/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/installer.conf +++ b/device/pegatron/x86_64-pegatron_fn_6254_dn_f-r0/installer.conf @@ -1,3 +1,4 @@ CONSOLE_PORT=0x3f8 CONSOLE_DEV=0 CONSOLE_SPEED=115200 +ONIE_PLATFORM_EXTRA_CMDLINE_LINUX="module_blacklist=ixgbe" diff --git a/platform/nephos/one-image.mk b/platform/nephos/one-image.mk index e3a62c0afd67..d2524b9a5a98 100644 --- a/platform/nephos/one-image.mk +++ b/platform/nephos/one-image.mk @@ -5,15 +5,10 @@ $(SONIC_ONE_IMAGE)_MACHINE = nephos $(SONIC_ONE_IMAGE)_IMAGE_TYPE = onie $(SONIC_ONE_IMAGE)_INSTALLS += $(NEPHOS_MODULE) $(SONIC_ONE_IMAGE)_LAZY_INSTALLS += $(INGRASYS_S9130_32X_PLATFORM_MODULE) \ -<<<<<<< HEAD - $(INGRASYS_S9230_64X_PLATFORM_MODULE) \ - $(ACCTON_AS7116_54X_PLATFORM_MODULE) \ - $(PEGATRON_PORSCHE_PLATFORM_MODULE) \ - $(PEGATRON_FN_6254_DN_F_PLATFORM_MODULE) -======= $(INGRASYS_S9230_64X_PLATFORM_MODULE) \ $(ACCTON_AS7116_54X_PLATFORM_MODULE) \ - $(CIG_CS6436_56P_PLATFORM_MODULE) ->>>>>>> e680892f390d2135e8741894aee62db464c32022 + $(CIG_CS6436_56P_PLATFORM_MODULE) \ + $(PEGATRON_PORSCHE_PLATFORM_MODULE) \ + $(PEGATRON_FN_6254_DN_F_PLATFORM_MODULE) $(SONIC_ONE_IMAGE)_DOCKERS += $(SONIC_INSTALL_DOCKER_IMAGES) SONIC_INSTALLERS += $(SONIC_ONE_IMAGE) diff --git a/platform/nephos/platform-modules-pegatron.mk b/platform/nephos/platform-modules-pegatron.mk index ac5bd16cf9c7..ef32c9cbc0e8 100755 --- a/platform/nephos/platform-modules-pegatron.mk +++ b/platform/nephos/platform-modules-pegatron.mk @@ -1,7 +1,7 @@ # Pegatron Platform modules -PEGATRON_PORSCHE_PLATFORM_MODULE_VERSION = 0.1 -PEGATRON_FN_6254_DN_F_PLATFORM_MODULE_VERSION = 0.1 +PEGATRON_PORSCHE_PLATFORM_MODULE_VERSION = 0.0.1 +PEGATRON_FN_6254_DN_F_PLATFORM_MODULE_VERSION = 1.0.0 export PEGATRON_PORSCHE_PLATFORM_MODULE_VERSION export PEGATRON_FN_6254_DN_F_PLATFORM_MODULE_VERSION diff --git a/platform/nephos/rules.mk 
b/platform/nephos/rules.mk index 892850239f54..2a02abbc4202 100644 --- a/platform/nephos/rules.mk +++ b/platform/nephos/rules.mk @@ -1,12 +1,9 @@ include $(PLATFORM_PATH)/sai.mk -<<<<<<< HEAD -include $(PLATFORM_PATH)/platform-modules-pegatron.mk -======= include $(PLATFORM_PATH)/nephos-modules.mk ->>>>>>> e680892f390d2135e8741894aee62db464c32022 include $(PLATFORM_PATH)/platform-modules-ingrasys.mk include $(PLATFORM_PATH)/platform-modules-accton.mk include $(PLATFORM_PATH)/platform-modules-cig.mk +include $(PLATFORM_PATH)/platform-modules-pegatron.mk include $(PLATFORM_PATH)/docker-syncd-nephos.mk include $(PLATFORM_PATH)/docker-syncd-nephos-rpc.mk include $(PLATFORM_PATH)/one-image.mk diff --git a/platform/nephos/sonic-platform-modules-pegatron/common/modules/pegatron_hwmon_mcu.c b/platform/nephos/sonic-platform-modules-pegatron/common/modules/pegatron_hwmon_mcu.c index 01b64bce9019..556ff050e3e3 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/common/modules/pegatron_hwmon_mcu.c +++ b/platform/nephos/sonic-platform-modules-pegatron/common/modules/pegatron_hwmon_mcu.c @@ -42,10 +42,10 @@ #define FAN_ENABLE_COMMAND 0x21 #define FAN_LED_SETTO_MANUAL_COMMAND 0x30 #define FAN_LED_SETTO_AUTO_COMMAND 0x31 -#define FAN_LED_GREENON_COMMAND 0x40 -#define FAN_LED_GREENOFF_COMMAND 0x41 -#define FAN_LED_AMBERON_COMMAND 0x50 -#define FAN_LED_AMBEROFF_COMMAND 0x51 +#define FAN_LED_GREENOFF_COMMAND 0x40 +#define FAN_LED_GREENON_COMMAND 0x41 +#define FAN_LED_AMBEROFF_COMMAND 0x50 +#define FAN_LED_AMBERON_COMMAND 0x51 #define SMART_FAN_ENABLE_BIT 0 #define SMART_FAN_SETTING_ENABLE_BIT 0 #define SA56004X_REMOTE_TEMP_ALERT_BIT 4 @@ -57,12 +57,14 @@ enum fan_alert { - FAN_OUTER_RPM_OVER_ALERT_BIT = 0, - FAN_OUTER_RPM_UNDER_ALERT_BIT, + FAN_WRONG_AIRFLOW = 0, + FAN_OUTER_RPM_OVER_ALERT_BIT, + FAN_OUTER_RPM_UNDER_ALERT_BIT, + FAN_OUTER_RPM_ZERO_ALERT_BIT, FAN_INNER_RPM_OVER_ALERT_BIT, FAN_INNER_RPM_UNDER_ALERT_BIT, - FAN_CONNECT_ALERT_BIT, - FAN_DISCONNECT_ALERT_BIT, + 
FAN_INNER_RPM_ZERO_ALERT_BIT, + FAN_NOTCONNECT_ALERT_BIT, }; enum fan_status @@ -130,8 +132,9 @@ enum hwmon_mcu_register MONITOR_ADC_VOLTAGE_REG = 96, - LM_0X49_TEMP_REG = 112, - LM_0X48_TEMP_REG, + LM_0X48_TEMP_REG = 112, + LM_0X49_TEMP_REG, + LM_0X4A_TEMP_REG, SA56004X_LOCAL_TEMP_REG, SA56004X_REMOTE_TEMP_REG, @@ -723,9 +726,10 @@ static ssize_t get_fan_enable(struct device *dev, struct device_attribute *da, static ssize_t set_fan_enable(struct device *dev, struct device_attribute *da, const char *buf, size_t count) { + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct i2c_client *client = to_i2c_client(dev); u8 data = 0; - u8 reg = SF_PWM_MID_REG; + u8 reg = FAN1_STATUS_REG + attr->index; long val = 0; if (kstrtol(buf, 10, &val)) @@ -762,9 +766,10 @@ static ssize_t get_fan_led_auto(struct device *dev, struct device_attribute *da, static ssize_t set_fan_led_auto(struct device *dev, struct device_attribute *da, const char *buf, size_t count) { + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct i2c_client *client = to_i2c_client(dev); u8 data = 0; - u8 reg = SF_PWM_MID_REG; + u8 reg = FAN1_STATUS_REG + attr->index; long val = 0; if (kstrtol(buf, 10, &val)) @@ -801,9 +806,10 @@ static ssize_t get_fan_led_green(struct device *dev, struct device_attribute *da static ssize_t set_fan_led_green(struct device *dev, struct device_attribute *da, const char *buf, size_t count) { + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct i2c_client *client = to_i2c_client(dev); u8 data = 0; - u8 reg = SF_PWM_MID_REG; + u8 reg = FAN1_STATUS_REG + attr->index; long val = 0; if (kstrtol(buf, 10, &val)) @@ -840,9 +846,10 @@ static ssize_t get_fan_led_amber(struct device *dev, struct device_attribute *da static ssize_t set_fan_led_amber(struct device *dev, struct device_attribute *da, const char *buf, size_t count) { + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct i2c_client *client = to_i2c_client(dev); 
u8 data = 0; - u8 reg = SF_PWM_MID_REG; + u8 reg = FAN1_STATUS_REG + attr->index; long val = 0; if (kstrtol(buf, 10, &val)) @@ -915,7 +922,22 @@ static ssize_t get_temp_alert(struct device *dev, struct device_attribute *da, data = pega_hwmon_mcu_read(client, reg); DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); - GET_BIT(data, SA56004X_REMOTE_TEMP_ALERT_BIT + attr->index, val); + GET_BIT(data, attr->index, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t get_fan_wrong_airflow_alert(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = FAN1_ALERT_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, FAN_WRONG_AIRFLOW, val); return sprintf(buf, "%d\n", val); } @@ -950,6 +972,21 @@ static ssize_t get_fan_outerRPMUnder_alert(struct device *dev, struct device_att return sprintf(buf, "%d\n", val); } +static ssize_t get_fan_outerRPMZero_alert(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, val = 0; + u8 reg = FAN1_ALERT_REG + attr->index; + + data = pega_hwmon_mcu_read(client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, FAN_OUTER_RPM_ZERO_ALERT_BIT, val); + + return sprintf(buf, "%d\n", val); +} + static ssize_t get_fan_innerRPMOver_alert(struct device *dev, struct device_attribute *da, char *buf) { @@ -980,7 +1017,7 @@ static ssize_t get_fan_innerRPMUnder_alert(struct device *dev, struct device_att return sprintf(buf, "%d\n", val); } -static ssize_t get_fan_connect_alert(struct device *dev, 
struct device_attribute *da, +static ssize_t get_fan_innerRPMZero_alert(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); @@ -990,12 +1027,12 @@ static ssize_t get_fan_connect_alert(struct device *dev, struct device_attribute data = pega_hwmon_mcu_read(client, reg); DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); - GET_BIT(data, FAN_CONNECT_ALERT_BIT, val); + GET_BIT(data, FAN_INNER_RPM_ZERO_ALERT_BIT, val); return sprintf(buf, "%d\n", val); } -static ssize_t get_fan_disconnect_alert(struct device *dev, struct device_attribute *da, +static ssize_t get_fan_notconnect_alert(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); @@ -1005,7 +1042,7 @@ static ssize_t get_fan_disconnect_alert(struct device *dev, struct device_attrib data = pega_hwmon_mcu_read(client, reg); DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); - GET_BIT(data, FAN_DISCONNECT_ALERT_BIT, val); + GET_BIT(data, FAN_NOTCONNECT_ALERT_BIT, val); return sprintf(buf, "%d\n", val); } @@ -1074,7 +1111,7 @@ static ssize_t get_adc_vol(struct device *dev, struct device_attribute *da, data = pega_hwmon_mcu_read(client, reg); DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); - return sprintf(buf, "%d.%02d\n", data/1000, (data/10)%12); + return sprintf(buf, "%d.%02d\n", data/1000, data%1000); } static ssize_t get_hwmon_temp(struct device *dev, struct device_attribute *da, @@ -1083,7 +1120,7 @@ static ssize_t get_hwmon_temp(struct device *dev, struct device_attribute *da, struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct i2c_client *client = to_i2c_client(dev); u8 data = 0; - u8 reg = LM_0X49_TEMP_REG + attr->index; + u8 reg = LM_0X48_TEMP_REG + attr->index; data = pega_hwmon_mcu_read(client, 
reg); DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); @@ -1099,12 +1136,14 @@ static ssize_t get_hwmon_temp(struct device *dev, struct device_attribute *da, static SENSOR_DEVICE_ATTR(fan##_num##_led_green, S_IRUGO | S_IWUSR, get_fan_led_green, set_fan_led_green, _num-1); \ static SENSOR_DEVICE_ATTR(fan##_num##_led_amber, S_IRUGO | S_IWUSR, get_fan_led_amber, set_fan_led_amber, _num-1); \ static SENSOR_DEVICE_ATTR(fan##_num##_status_alert, S_IRUGO, get_fan_status_alert, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(fan##_num##_wrongAirflow_alert, S_IRUGO, get_fan_wrong_airflow_alert, NULL, _num-1); \ static SENSOR_DEVICE_ATTR(fan##_num##_outerRPMOver_alert, S_IRUGO, get_fan_outerRPMOver_alert, NULL, _num-1); \ static SENSOR_DEVICE_ATTR(fan##_num##_outerRPMUnder_alert, S_IRUGO, get_fan_outerRPMUnder_alert, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(fan##_num##_outerRPMZero_alert, S_IRUGO, get_fan_outerRPMZero_alert, NULL, _num-1); \ static SENSOR_DEVICE_ATTR(fan##_num##_innerRPMOver_alert, S_IRUGO, get_fan_innerRPMOver_alert, NULL, _num-1); \ static SENSOR_DEVICE_ATTR(fan##_num##_innerRPMUnder_alert, S_IRUGO, get_fan_innerRPMUnder_alert, NULL, _num-1); \ - static SENSOR_DEVICE_ATTR(fan##_num##_connect_alert, S_IRUGO, get_fan_connect_alert, NULL, _num-1); \ - static SENSOR_DEVICE_ATTR(fan##_num##_disconnect_alert, S_IRUGO, get_fan_disconnect_alert, NULL, _num-1) + static SENSOR_DEVICE_ATTR(fan##_num##_innerRPMZero_alert, S_IRUGO, get_fan_innerRPMZero_alert, NULL, _num-1); \ + static SENSOR_DEVICE_ATTR(fan##_num##_notconnect_alert, S_IRUGO, get_fan_notconnect_alert, NULL, _num-1); SET_FAN_ATTR(1);SET_FAN_ATTR(2);SET_FAN_ATTR(3);SET_FAN_ATTR(4);SET_FAN_ATTR(5); @@ -1135,10 +1174,12 @@ static SENSOR_DEVICE_ATTR(smartFan_max_pwm, S_IRUGO | S_IWUSR, get_smartFan_max static SENSOR_DEVICE_ATTR(smartFan_mid_pwm, S_IRUGO | S_IWUSR, get_smartFan_mid_pwm, set_smartFan_mid_pwm, 0); static SENSOR_DEVICE_ATTR(smartFan_min_pwm, 
S_IRUGO | S_IWUSR, get_smartFan_min_pwm, set_smartFan_min_pwm, 0); -static SENSOR_DEVICE_ATTR(lm75_49_temp_alert, S_IRUGO, get_temp_alert, NULL, 3); -static SENSOR_DEVICE_ATTR(lm75_48_temp_alert, S_IRUGO, get_temp_alert, NULL, 2); -static SENSOR_DEVICE_ATTR(SA56004X_Ltemp_alert, S_IRUGO, get_temp_alert, NULL, 1); -static SENSOR_DEVICE_ATTR(SA56004X_Rtemp_alert, S_IRUGO, get_temp_alert, NULL, 0); +static SENSOR_DEVICE_ATTR(lm75_48_temp_alert, S_IRUGO, get_temp_alert, NULL, 5); +static SENSOR_DEVICE_ATTR(lm75_49_temp_alert, S_IRUGO, get_temp_alert, NULL, 4); +static SENSOR_DEVICE_ATTR(lm75_4a_temp_alert, S_IRUGO, get_temp_alert, NULL, 3); +static SENSOR_DEVICE_ATTR(sa56004x_Ltemp_alert, S_IRUGO, get_temp_alert, NULL, 2); +static SENSOR_DEVICE_ATTR(sa56004x_Rtemp_alert, S_IRUGO, get_temp_alert, NULL, 1); +static SENSOR_DEVICE_ATTR(fanBoard_alert, S_IRUGO, get_temp_alert, NULL, 0); static SENSOR_DEVICE_ATTR(i2c_fb_timeout, S_IRUGO, get_i2c_timeout, NULL, 0); static SENSOR_DEVICE_ATTR(i2c_remote_timeout, S_IRUGO, get_i2c_timeout, NULL, 1); @@ -1147,10 +1188,11 @@ static SENSOR_DEVICE_ATTR(i2c_lm75_48_timeout, S_IRUGO, get_i2c_timeout, NULL, static SENSOR_DEVICE_ATTR(i2c_lm75_49_timeout, S_IRUGO, get_i2c_timeout, NULL, 4); static SENSOR_DEVICE_ATTR(alert_mode, S_IRUGO | S_IWUSR, get_alert_mode, set_alert_mode, 0); -static SENSOR_DEVICE_ATTR(lm75_49_temp, S_IRUGO, get_hwmon_temp, NULL, 0); -static SENSOR_DEVICE_ATTR(lm75_48_temp, S_IRUGO, get_hwmon_temp, NULL, 1); -static SENSOR_DEVICE_ATTR(SA56004_local_temp, S_IRUGO, get_hwmon_temp, NULL, 2); -static SENSOR_DEVICE_ATTR(SA56004_remote_temp, S_IRUGO, get_hwmon_temp, NULL, 3); +static SENSOR_DEVICE_ATTR(lm75_48_temp, S_IRUGO, get_hwmon_temp, NULL, 0); +static SENSOR_DEVICE_ATTR(lm75_49_temp, S_IRUGO, get_hwmon_temp, NULL, 1); +static SENSOR_DEVICE_ATTR(lm75_4a_temp, S_IRUGO, get_hwmon_temp, NULL, 2); +static SENSOR_DEVICE_ATTR(sa56004x_local_temp, S_IRUGO, get_hwmon_temp, NULL, 3); +static 
SENSOR_DEVICE_ATTR(sa56004x_remote_temp, S_IRUGO, get_hwmon_temp, NULL, 4); static struct attribute *pega_hwmon_mcu_attributes[] = { &sensor_dev_attr_mb_fw_upgrade.dev_attr.attr, @@ -1241,8 +1283,16 @@ static struct attribute *pega_hwmon_mcu_attributes[] = { &sensor_dev_attr_lm75_48_temp_alert.dev_attr.attr, &sensor_dev_attr_lm75_49_temp_alert.dev_attr.attr, - &sensor_dev_attr_SA56004X_Ltemp_alert.dev_attr.attr, - &sensor_dev_attr_SA56004X_Rtemp_alert.dev_attr.attr, + &sensor_dev_attr_lm75_4a_temp_alert.dev_attr.attr, + &sensor_dev_attr_sa56004x_Ltemp_alert.dev_attr.attr, + &sensor_dev_attr_sa56004x_Rtemp_alert.dev_attr.attr, + &sensor_dev_attr_fanBoard_alert.dev_attr.attr, + + &sensor_dev_attr_fan1_wrongAirflow_alert.dev_attr.attr, + &sensor_dev_attr_fan2_wrongAirflow_alert.dev_attr.attr, + &sensor_dev_attr_fan3_wrongAirflow_alert.dev_attr.attr, + &sensor_dev_attr_fan4_wrongAirflow_alert.dev_attr.attr, + &sensor_dev_attr_fan5_wrongAirflow_alert.dev_attr.attr, &sensor_dev_attr_fan1_outerRPMOver_alert.dev_attr.attr, &sensor_dev_attr_fan2_outerRPMOver_alert.dev_attr.attr, @@ -1256,6 +1306,12 @@ static struct attribute *pega_hwmon_mcu_attributes[] = { &sensor_dev_attr_fan4_outerRPMUnder_alert.dev_attr.attr, &sensor_dev_attr_fan5_outerRPMUnder_alert.dev_attr.attr, + &sensor_dev_attr_fan1_outerRPMZero_alert.dev_attr.attr, + &sensor_dev_attr_fan2_outerRPMZero_alert.dev_attr.attr, + &sensor_dev_attr_fan3_outerRPMZero_alert.dev_attr.attr, + &sensor_dev_attr_fan4_outerRPMZero_alert.dev_attr.attr, + &sensor_dev_attr_fan5_outerRPMZero_alert.dev_attr.attr, + &sensor_dev_attr_fan1_innerRPMOver_alert.dev_attr.attr, &sensor_dev_attr_fan2_innerRPMOver_alert.dev_attr.attr, &sensor_dev_attr_fan3_innerRPMOver_alert.dev_attr.attr, @@ -1268,17 +1324,17 @@ static struct attribute *pega_hwmon_mcu_attributes[] = { &sensor_dev_attr_fan4_innerRPMUnder_alert.dev_attr.attr, &sensor_dev_attr_fan5_innerRPMUnder_alert.dev_attr.attr, - &sensor_dev_attr_fan1_connect_alert.dev_attr.attr, - 
&sensor_dev_attr_fan2_connect_alert.dev_attr.attr, - &sensor_dev_attr_fan3_connect_alert.dev_attr.attr, - &sensor_dev_attr_fan4_connect_alert.dev_attr.attr, - &sensor_dev_attr_fan5_connect_alert.dev_attr.attr, + &sensor_dev_attr_fan1_innerRPMZero_alert.dev_attr.attr, + &sensor_dev_attr_fan2_innerRPMZero_alert.dev_attr.attr, + &sensor_dev_attr_fan3_innerRPMZero_alert.dev_attr.attr, + &sensor_dev_attr_fan4_innerRPMZero_alert.dev_attr.attr, + &sensor_dev_attr_fan5_innerRPMZero_alert.dev_attr.attr, - &sensor_dev_attr_fan1_disconnect_alert.dev_attr.attr, - &sensor_dev_attr_fan2_disconnect_alert.dev_attr.attr, - &sensor_dev_attr_fan3_disconnect_alert.dev_attr.attr, - &sensor_dev_attr_fan4_disconnect_alert.dev_attr.attr, - &sensor_dev_attr_fan5_disconnect_alert.dev_attr.attr, + &sensor_dev_attr_fan1_notconnect_alert.dev_attr.attr, + &sensor_dev_attr_fan2_notconnect_alert.dev_attr.attr, + &sensor_dev_attr_fan3_notconnect_alert.dev_attr.attr, + &sensor_dev_attr_fan4_notconnect_alert.dev_attr.attr, + &sensor_dev_attr_fan5_notconnect_alert.dev_attr.attr, &sensor_dev_attr_i2c_fb_timeout.dev_attr.attr, &sensor_dev_attr_i2c_remote_timeout.dev_attr.attr, @@ -1296,10 +1352,11 @@ static struct attribute *pega_hwmon_mcu_attributes[] = { &sensor_dev_attr_ADC7_vol.dev_attr.attr, &sensor_dev_attr_ADC8_vol.dev_attr.attr, - &sensor_dev_attr_lm75_49_temp.dev_attr.attr, &sensor_dev_attr_lm75_48_temp.dev_attr.attr, - &sensor_dev_attr_SA56004_local_temp.dev_attr.attr, - &sensor_dev_attr_SA56004_remote_temp.dev_attr.attr, + &sensor_dev_attr_lm75_49_temp.dev_attr.attr, + &sensor_dev_attr_lm75_4a_temp.dev_attr.attr, + &sensor_dev_attr_sa56004x_local_temp.dev_attr.attr, + &sensor_dev_attr_sa56004x_remote_temp.dev_attr.attr, NULL }; diff --git a/platform/nephos/sonic-platform-modules-pegatron/debian/rules b/platform/nephos/sonic-platform-modules-pegatron/debian/rules index 9fbb702c47a2..906bbc9ad981 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/debian/rules +++ 
b/platform/nephos/sonic-platform-modules-pegatron/debian/rules @@ -35,10 +35,8 @@ clean: dh_clean build: - #make modules -C $(KERNEL_SRC)/build M=$(MODULE_SRC) (for mod in $(MODULE_DIRS); do \ make modules -C $(KERNEL_SRC)/build M=$(MOD_SRC_DIR)/$${mod}/modules; \ - #$(PYTHON) $${mod}/setup.py build; \ done) binary: binary-arch binary-indep @@ -67,8 +65,11 @@ binary-indep: cp $(MOD_SRC_DIR)/$${mod}/$(UTILS_DIR)/* debian/$(PACKAGE_PRE_NAME)-$${mod}/usr/local/bin/; \ cp $(MOD_SRC_DIR)/$${mod}/$(SERVICE_DIR)/*.service debian/$(PACKAGE_PRE_NAME)-$${mod}/lib/systemd/system/; \ cp $(MOD_SRC_DIR)/$${mod}/$(SCRIPTS_DIR)/* debian/$(PACKAGE_PRE_NAME)-$${mod}/usr/bin/; \ - #$(PYTHON) $${mod}/setup.py install --root=$(MOD_SRC_DIR)/debian/$(PACKAGE_PRE_NAME)-$${mod} --install-layout=deb; \ done) + + # For fn-6254-dn-f ixgbe driver + cp $(MOD_SRC_DIR)/fn-6254-dn-f/$(MODULE_DIR)/pegatron_fn_6254_dn_f_ixgbe/pegatron_fn_6254_dn_f_ixgbe.ko debian/$(PACKAGE_PRE_NAME)-fn-6254-dn-f/$(KERNEL_SRC)/$(INSTALL_MOD_DIR); \ + # Resuming debhelper scripts dh_testroot dh_install diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/Makefile b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/Makefile index 9a4cb381bda8..0a75155f171f 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/Makefile +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/Makefile @@ -1 +1 @@ -obj-m:=pegatron_fn_6254_dn_f_cpld.o pegatron_hwmon_mcu.o pegatron_fn_6254_dn_f_sfp.o +obj-m += pegatron_fn_6254_dn_f_cpld.o pegatron_hwmon_mcu.o pegatron_fn_6254_dn_f_sfp.o pegatron_fn_6254_dn_f_psu.o pegatron_fn_6254_dn_f_ixgbe/ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/Makefile b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/Makefile new file mode 100644 index 000000000000..2e4a507e6b1e --- /dev/null +++ 
b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/Makefile @@ -0,0 +1,44 @@ +################################################################################ +# +# Intel 10 Gigabit PCI Express Linux driver +# Copyright(c) 1999 - 2013 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# Linux NICS +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) 10GbE PCI Express ethernet driver +# + +obj-m += pegatron_fn_6254_dn_f_ixgbe.o + +pegatron_fn_6254_dn_f_ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ + ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ + ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o + +pegatron_fn_6254_dn_f_ixgbe-y += ixgbe_dcb.o ixgbe_dcb_82598.o \ + ixgbe_dcb_82599.o ixgbe_dcb_nl.o + +pegatron_fn_6254_dn_f_ixgbe-y += ixgbe_sysfs.o +pegatron_fn_6254_dn_f_ixgbe-y += ixgbe_debugfs.o +pegatron_fn_6254_dn_f_ixgbe-y += ixgbe_fcoe.o diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe.h new file mode 100644 index 000000000000..255ec3b9c021 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe.h @@ -0,0 +1,1040 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2016 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_H_ +#define _IXGBE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "ixgbe_type.h" +#include "ixgbe_common.h" +#include "ixgbe_dcb.h" +#if IS_ENABLED(CONFIG_FCOE) +#define IXGBE_FCOE +#include "ixgbe_fcoe.h" +#endif /* IS_ENABLED(CONFIG_FCOE) */ +#ifdef CONFIG_IXGBE_DCA +#include +#endif + +#include + +#ifdef CONFIG_NET_RX_BUSY_POLL +#define BP_EXTENDED_STATS +#endif +/* common prefix used by pr_<> macros */ +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +/* TX/RX descriptor defines */ +#define IXGBE_DEFAULT_TXD 512 +#define IXGBE_DEFAULT_TX_WORK 256 +#define IXGBE_MAX_TXD 4096 +#define IXGBE_MIN_TXD 64 + +#if (PAGE_SIZE < 8192) +#define IXGBE_DEFAULT_RXD 512 +#else +#define IXGBE_DEFAULT_RXD 128 +#endif +#define IXGBE_MAX_RXD 4096 +#define IXGBE_MIN_RXD 64 + +#define IXGBE_ETH_P_LLDP 0x88CC + +/* flow control */ +#define IXGBE_MIN_FCRTL 0x40 +#define IXGBE_MAX_FCRTL 0x7FF80 +#define IXGBE_MIN_FCRTH 0x600 +#define IXGBE_MAX_FCRTH 0x7FFF0 +#define IXGBE_DEFAULT_FCPAUSE 0xFFFF +#define IXGBE_MIN_FCPAUSE 0 +#define IXGBE_MAX_FCPAUSE 0xFFFF + +/* Supported Rx Buffer Sizes */ +#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ +#define IXGBE_RXBUFFER_1536 1536 +#define IXGBE_RXBUFFER_2K 2048 +#define IXGBE_RXBUFFER_3K 3072 +#define IXGBE_RXBUFFER_4K 4096 +#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ + +/* + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, + * this adds up to 448 bytes of extra 
data. + * + * Since netdev_alloc_skb now allocates a page fragment we can use a value + * of 256 and the resultant skb will have a truesize of 960 or less. + */ +#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +enum ixgbe_tx_flags { + /* cmd_type flags */ + IXGBE_TX_FLAGS_HW_VLAN = 0x01, + IXGBE_TX_FLAGS_TSO = 0x02, + IXGBE_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IXGBE_TX_FLAGS_CC = 0x08, + IXGBE_TX_FLAGS_IPV4 = 0x10, + IXGBE_TX_FLAGS_CSUM = 0x20, + + /* software defined flags */ + IXGBE_TX_FLAGS_SW_VLAN = 0x40, + IXGBE_TX_FLAGS_FCOE = 0x80, +}; + +/* VLAN info */ +#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 + +#define IXGBE_MAX_VF_MC_ENTRIES 30 +#define IXGBE_MAX_VF_FUNCTIONS 64 +#define IXGBE_MAX_VFTA_ENTRIES 128 +#define MAX_EMULATION_MAC_ADDRS 16 +#define IXGBE_MAX_PF_MACVLANS 15 +#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) +#define IXGBE_82599_VF_DEVICE_ID 0x10ED +#define IXGBE_X540_VF_DEVICE_ID 0x1515 + +struct vf_data_storage { + struct pci_dev *vfdev; + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + bool clear_to_send; + bool pf_set_mac; + u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ + u16 pf_qos; + u16 tx_rate; + u8 spoofchk_enabled; + bool rss_query_enabled; + u8 trusted; + int xcast_mode; + unsigned int vf_api; +}; + +enum ixgbevf_xcast_modes { + IXGBEVF_XCAST_MODE_NONE = 0, + IXGBEVF_XCAST_MODE_MULTI, + IXGBEVF_XCAST_MODE_ALLMULTI, +}; + +struct vf_macvlans { + struct list_head l; + int vf; + bool free; + bool is_macvlan; + u8 vf_macvlan[ETH_ALEN]; +}; + +#define IXGBE_MAX_TXD_PWR 14 +#define IXGBE_MAX_DATA_PER_TXD (1u << IXGBE_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct ixgbe_tx_buffer { + union ixgbe_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct ixgbe_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + struct page *page; + unsigned int page_offset; +}; + +struct ixgbe_queue_stats { + u64 packets; + u64 bytes; +#ifdef BP_EXTENDED_STATS + u64 yields; + u64 misses; + u64 cleaned; +#endif /* BP_EXTENDED_STATS */ +}; + +struct ixgbe_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct ixgbe_rx_queue_stats { + u64 rsc_count; + u64 rsc_flush; + u64 non_eop_descs; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 csum_err; +}; + +#define IXGBE_TS_HDR_LEN 8 + +enum ixgbe_ring_state_t { + __IXGBE_TX_FDIR_INIT_DONE, + __IXGBE_TX_XPS_INIT_DONE, + __IXGBE_TX_DETECT_HANG, + __IXGBE_HANG_CHECK_ARMED, + __IXGBE_RX_RSC_ENABLED, + __IXGBE_RX_CSUM_UDP_ZERO_ERR, + __IXGBE_RX_FCOE, +}; + +struct ixgbe_fwd_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct net_device *netdev; + struct ixgbe_adapter *real_adapter; + unsigned int tx_base_queue; + unsigned int 
rx_base_queue; + int pool; +}; + +#define check_for_tx_hang(ring) \ + test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) +#define ring_is_rsc_enabled(ring) \ + test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +#define set_ring_rsc_enabled(ring) \ + set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +#define clear_ring_rsc_enabled(ring) \ + clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +struct ixgbe_ring { + struct ixgbe_ring *next; /* pointer to next ring in q_vector */ + struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct device *dev; /* device for DMA mapping */ + struct ixgbe_fwd_adapter *l2_accel_priv; + void *desc; /* descriptor ring memory */ + union { + struct ixgbe_tx_buffer *tx_buffer_info; + struct ixgbe_rx_buffer *rx_buffer_info; + }; + unsigned long state; + u8 __iomem *tail; + dma_addr_t dma; /* phys. 
address of descriptor ring */ + unsigned int size; /* length in bytes */ + + u16 count; /* amount of descriptors */ + + u8 queue_index; /* needed for multiqueue queue management */ + u8 reg_idx; /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ + u16 next_to_use; + u16 next_to_clean; + + unsigned long last_rx_timestamp; + + union { + u16 next_to_alloc; + struct { + u8 atr_sample_rate; + u8 atr_count; + }; + }; + + u8 dcb_tc; + struct ixgbe_queue_stats stats; + struct u64_stats_sync syncp; + union { + struct ixgbe_tx_queue_stats tx_stats; + struct ixgbe_rx_queue_stats rx_stats; + }; +} ____cacheline_internodealigned_in_smp; + +enum ixgbe_ring_f_enum { + RING_F_NONE = 0, + RING_F_VMDQ, /* SR-IOV uses the same ring feature */ + RING_F_RSS, + RING_F_FDIR, +#ifdef IXGBE_FCOE + RING_F_FCOE, +#endif /* IXGBE_FCOE */ + + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + +#define IXGBE_MAX_RSS_INDICES 16 +#define IXGBE_MAX_RSS_INDICES_X550 63 +#define IXGBE_MAX_VMDQ_INDICES 64 +#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */ +#define IXGBE_MAX_FCOE_INDICES 8 +#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define IXGBE_MAX_L2A_QUEUES 4 +#define IXGBE_BAD_L2A_QUEUE 3 +#define IXGBE_MAX_MACVLANS 31 +#define IXGBE_MAX_DCBMACVLANS 8 + +struct ixgbe_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +} ____cacheline_internodealigned_in_smp; + +#define IXGBE_82599_VMDQ_8Q_MASK 0x78 +#define IXGBE_82599_VMDQ_4Q_MASK 0x7C +#define IXGBE_82599_VMDQ_2Q_MASK 0x7E + +/* + * FCoE requires that all Rx buffers be over 2200 bytes in length. Since + * this is twice the size of a half page we need to double the page order + * for FCoE enabled Rx queues. 
+ */ +static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring) +{ +#ifdef IXGBE_FCOE + if (test_bit(__IXGBE_RX_FCOE, &ring->state)) + return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K : + IXGBE_RXBUFFER_3K; +#endif + return IXGBE_RXBUFFER_2K; +} + +static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) +{ +#ifdef IXGBE_FCOE + if (test_bit(__IXGBE_RX_FCOE, &ring->state)) + return (PAGE_SIZE < 8192) ? 1 : 0; +#endif + return 0; +} +#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring)) + +struct ixgbe_ring_container { + struct ixgbe_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +/* iterator for handling rings in ring container */ +#define ixgbe_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ + ? 8 : 1) +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS + +/* MAX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. 
+ */ +struct ixgbe_q_vector { + struct ixgbe_adapter *adapter; +#ifdef CONFIG_IXGBE_DCA + int cpu; /* CPU for DCA */ +#endif + u16 v_idx; /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ + u16 itr; /* Interrupt throttle rate written to EITR */ + struct ixgbe_ring_container rx, tx; + + struct napi_struct napi; + cpumask_t affinity_mask; + int numa_node; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + +#ifdef CONFIG_NET_RX_BUSY_POLL + atomic_t state; +#endif /* CONFIG_NET_RX_BUSY_POLL */ + + /* for dynamic allocation of rings associated with this q_vector */ + struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +#ifdef CONFIG_NET_RX_BUSY_POLL +enum ixgbe_qv_state_t { + IXGBE_QV_STATE_IDLE = 0, + IXGBE_QV_STATE_NAPI, + IXGBE_QV_STATE_POLL, + IXGBE_QV_STATE_DISABLE +}; + +static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) +{ + /* reset state to idle */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); +} + +/* called from the device poll routine to get ownership of a q_vector */ +static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, + IXGBE_QV_STATE_NAPI); +#ifdef BP_EXTENDED_STATS + if (rc != IXGBE_QV_STATE_IDLE) + q_vector->tx.ring->stats.yields++; +#endif + + return rc == IXGBE_QV_STATE_IDLE; +} + +/* returns true is someone tried to get the qv while napi had it */ +static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) +{ + WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI); + + /* flush any outstanding Rx frames */ + if (q_vector->napi.gro_list) + napi_gro_flush(&q_vector->napi, false); + + /* reset state to idle */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); +} + +/* called from ixgbe_low_latency_poll() */ +static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector 
*q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, + IXGBE_QV_STATE_POLL); +#ifdef BP_EXTENDED_STATS + if (rc != IXGBE_QV_STATE_IDLE) + q_vector->rx.ring->stats.yields++; +#endif + return rc == IXGBE_QV_STATE_IDLE; +} + +/* returns true if someone tried to get the qv while it was locked */ +static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) +{ + WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL); + + /* reset state to idle */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) +{ + return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL; +} + +/* false if QV is currently owned */ +static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, + IXGBE_QV_STATE_DISABLE); + + return rc == IXGBE_QV_STATE_IDLE; +} + +#else /* CONFIG_NET_RX_BUSY_POLL */ +static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) +{ +} + +static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) +{ + return true; +} + +static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) +{ + return false; +} + +static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) +{ + return false; +} + +static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) +{ + return false; +} + +static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) +{ + return false; +} + +static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) +{ + return true; +} + +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +#ifdef CONFIG_IXGBE_HWMON + +#define IXGBE_HWMON_TYPE_LOC 0 +#define IXGBE_HWMON_TYPE_TEMP 1 +#define IXGBE_HWMON_TYPE_CAUTION 2 +#define IXGBE_HWMON_TYPE_MAX 3 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct ixgbe_hw *hw; + struct 
ixgbe_thermal_diode_data *sensor; + char name[12]; +}; + +struct hwmon_buff { + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1]; + struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4]; + unsigned int n_hwmon; +}; +#endif /* CONFIG_IXGBE_HWMON */ + +/* + * microsecond values for various ITR rates shifted by 2 to fit itr register + * with the first 3 bits reserved 0 + */ +#define IXGBE_MIN_RSC_ITR 24 +#define IXGBE_100K_ITR 40 +#define IXGBE_20K_ITR 200 +#define IXGBE_12K_ITR 336 + +/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */ +static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +#define IXGBE_RX_DESC(R, i) \ + (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) +#define IXGBE_TX_DESC(R, i) \ + (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) +#define IXGBE_TX_CTXTDESC(R, i) \ + (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) + +#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ +#ifdef IXGBE_FCOE +/* Use 3K as the baby jumbo frame size for FCoE */ +#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 +#endif /* IXGBE_FCOE */ + +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR) + +#define MAX_MSIX_VECTORS_82599 64 +#define MAX_Q_VECTORS_82599 64 +#define MAX_MSIX_VECTORS_82598 18 +#define MAX_Q_VECTORS_82598 16 + +struct ixgbe_mac_addr { + u8 addr[ETH_ALEN]; + u16 pool; + u16 state; /* bitmask */ +}; + +#define IXGBE_MAC_STATE_DEFAULT 0x1 +#define IXGBE_MAC_STATE_MODIFIED 0x2 +#define IXGBE_MAC_STATE_IN_USE 0x4 + +#define MAX_Q_VECTORS MAX_Q_VECTORS_82599 +#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599 + +#define 
MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) + +/* default to trying for four seconds */ +#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) +#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ + +/* board specific private data structure */ +struct ixgbe_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + unsigned long state; + + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. + */ + u32 flags; +#define IXGBE_FLAG_MSI_ENABLED BIT(1) +#define IXGBE_FLAG_MSIX_ENABLED BIT(3) +#define IXGBE_FLAG_RX_1BUF_CAPABLE BIT(4) +#define IXGBE_FLAG_RX_PS_CAPABLE BIT(5) +#define IXGBE_FLAG_RX_PS_ENABLED BIT(6) +#define IXGBE_FLAG_DCA_ENABLED BIT(8) +#define IXGBE_FLAG_DCA_CAPABLE BIT(9) +#define IXGBE_FLAG_IMIR_ENABLED BIT(10) +#define IXGBE_FLAG_MQ_CAPABLE BIT(11) +#define IXGBE_FLAG_DCB_ENABLED BIT(12) +#define IXGBE_FLAG_VMDQ_CAPABLE BIT(13) +#define IXGBE_FLAG_VMDQ_ENABLED BIT(14) +#define IXGBE_FLAG_FAN_FAIL_CAPABLE BIT(15) +#define IXGBE_FLAG_NEED_LINK_UPDATE BIT(16) +#define IXGBE_FLAG_NEED_LINK_CONFIG BIT(17) +#define IXGBE_FLAG_FDIR_HASH_CAPABLE BIT(18) +#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE BIT(19) +#define IXGBE_FLAG_FCOE_CAPABLE BIT(20) +#define IXGBE_FLAG_FCOE_ENABLED BIT(21) +#define IXGBE_FLAG_SRIOV_CAPABLE BIT(22) +#define IXGBE_FLAG_SRIOV_ENABLED BIT(23) +#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24) +#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25) +#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26) +#define IXGBE_FLAG_DCB_CAPABLE BIT(27) +#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE BIT(28) + + u32 flags2; +#define IXGBE_FLAG2_RSC_CAPABLE BIT(0) +#define IXGBE_FLAG2_RSC_ENABLED BIT(1) +#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE BIT(2) +#define IXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(3) +#define IXGBE_FLAG2_SEARCH_FOR_SFP BIT(4) +#define IXGBE_FLAG2_SFP_NEEDS_RESET BIT(5) +#define 
IXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(7) +#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8) +#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9) +#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10) +#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) +#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12) +#define IXGBE_FLAG2_VLAN_PROMISC BIT(13) +#define IXGBE_FLAG2_EEE_CAPABLE BIT(14) +#define IXGBE_FLAG2_EEE_ENABLED BIT(15) +#define IXGBE_FLAG2_RX_LEGACY BIT(16) + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr_setting; + u16 tx_work_limit; + + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + + /* Port number used to identify VXLAN traffic */ + __be16 vxlan_port; + __be16 geneve_port; + + /* TX */ + struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; + + u64 restart_queue; + u64 lsc_int; + u32 tx_timeout_count; + + /* RX */ + struct ixgbe_ring *rx_ring[MAX_RX_QUEUES]; + int num_rx_pools; /* == num_rx_queues in 82598 */ + int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */ + u64 hw_csum_rx_error; + u64 hw_rx_no_dma_resources; + u64 rsc_total_count; + u64 rsc_total_flush; + u64 non_eop_descs; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + + struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS]; + + /* DCB parameters */ + struct ieee_pfc *ixgbe_ieee_pfc; + struct ieee_ets *ixgbe_ieee_ets; + struct ixgbe_dcb_config dcb_cfg; + struct ixgbe_dcb_config temp_dcb_cfg; + u8 dcb_set_bitmap; + u8 dcbx_cap; + enum ixgbe_fc_mode last_lfc_mode; + + int num_q_vectors; /* current number of q_vectors for device */ + int max_q_vectors; /* true count of q_vectors for device */ + struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + struct msix_entry *msix_entries; + + u32 test_icr; + struct ixgbe_ring test_tx_ring; + struct ixgbe_ring test_rx_ring; + + /* structs defined in ixgbe_hw.h */ + struct ixgbe_hw hw; + u16 msg_enable; + struct ixgbe_hw_stats stats; + + u64 tx_busy; + unsigned int tx_ring_count; + unsigned int rx_ring_count; + + 
u32 link_speed; + bool link_up; + unsigned long sfp_poll_time; + unsigned long link_check_timeout; + + struct timer_list service_timer; + struct work_struct service_task; + + struct hlist_head fdir_filter_list; + unsigned long fdir_overflow; /* number of times ATR was backed off */ + union ixgbe_atr_input fdir_mask; + int fdir_filter_count; + u32 fdir_pballoc; + u32 atr_sample_rate; + spinlock_t fdir_perfect_lock; + +#ifdef IXGBE_FCOE + struct ixgbe_fcoe fcoe; +#endif /* IXGBE_FCOE */ + u8 __iomem *io_addr; /* Mainly for iounmap use */ + u32 wol; + + u16 bridge_mode; + + u16 eeprom_verh; + u16 eeprom_verl; + u16 eeprom_cap; + + u32 interrupt_event; + u32 led_reg; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_overflow_check; + unsigned long last_rx_ptp_check; + unsigned long last_rx_timestamp; + spinlock_t tmreg_lock; + struct cyclecounter hw_cc; + struct timecounter hw_tc; + u32 base_incval; + u32 tx_hwtstamp_timeouts; + u32 rx_hwtstamp_cleared; + void (*ptp_setup_sdp)(struct ixgbe_adapter *); + + /* SR-IOV */ + DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); + unsigned int num_vfs; + struct vf_data_storage *vfinfo; + int vf_rate_link_speed; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; + + u32 timer_event_accumulator; + u32 vferr_refcount; + struct ixgbe_mac_addr *mac_table; + struct kobject *info_kobj; +#ifdef CONFIG_IXGBE_HWMON + struct hwmon_buff *ixgbe_hwmon_buff; +#endif /* CONFIG_IXGBE_HWMON */ +#ifdef CONFIG_DEBUG_FS + struct dentry *ixgbe_dbg_adapter; +#endif /*CONFIG_DEBUG_FS*/ + + u8 default_up; + unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ + +#define IXGBE_MAX_LINK_HANDLE 10 + struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE]; + unsigned long tables; + +/* maximum number of RETA entries among all devices supported by ixgbe + * 
driver: currently it's x550 device in non-SRIOV mode + */ +#define IXGBE_MAX_RETA_ENTRIES 512 + u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES]; + +#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ + u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)]; +}; + +static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) +{ + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + return IXGBE_MAX_RSS_INDICES; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + return IXGBE_MAX_RSS_INDICES_X550; + default: + return 0; + } +} + +struct ixgbe_fdir_filter { + struct hlist_node fdir_node; + union ixgbe_atr_input filter; + u16 sw_idx; + u64 action; +}; + +enum ixgbe_state_t { + __IXGBE_TESTING, + __IXGBE_RESETTING, + __IXGBE_DOWN, + __IXGBE_DISABLED, + __IXGBE_REMOVING, + __IXGBE_SERVICE_SCHED, + __IXGBE_SERVICE_INITED, + __IXGBE_IN_SFP_INIT, + __IXGBE_PTP_RUNNING, + __IXGBE_PTP_TX_IN_PROGRESS, + __IXGBE_RESET_REQUESTED, +}; + +struct ixgbe_cb { + union { /* Union defining head/tail partner */ + struct sk_buff *head; + struct sk_buff *tail; + }; + dma_addr_t dma; + u16 append_cnt; + bool page_released; +}; +#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb) + +enum ixgbe_boards { + board_82598, + board_82599, + board_X540, + board_X550, + board_X550EM_x, + board_x550em_x_fw, + board_x550em_a, + board_x550em_a_fw, +}; + +extern const struct ixgbe_info ixgbe_82598_info; +extern const struct ixgbe_info ixgbe_82599_info; +extern const struct ixgbe_info ixgbe_X540_info; +extern const struct ixgbe_info ixgbe_X550_info; +extern const struct ixgbe_info ixgbe_X550EM_x_info; +extern const struct ixgbe_info ixgbe_x550em_x_fw_info; +extern const struct ixgbe_info ixgbe_x550em_a_info; +extern const struct ixgbe_info ixgbe_x550em_a_fw_info; +#ifdef CONFIG_IXGBE_DCB +extern const struct dcbnl_rtnl_ops dcbnl_ops; +#endif + +extern char ixgbe_driver_name[]; +extern const char 
ixgbe_driver_version[]; +#ifdef IXGBE_FCOE +extern char ixgbe_default_device_descr[]; +#endif /* IXGBE_FCOE */ + +int ixgbe_open(struct net_device *netdev); +int ixgbe_close(struct net_device *netdev); +void ixgbe_up(struct ixgbe_adapter *adapter); +void ixgbe_down(struct ixgbe_adapter *adapter); +void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); +void ixgbe_reset(struct ixgbe_adapter *adapter); +void ixgbe_set_ethtool_ops(struct net_device *netdev); +int ixgbe_setup_rx_resources(struct ixgbe_ring *); +int ixgbe_setup_tx_resources(struct ixgbe_ring *); +void ixgbe_free_rx_resources(struct ixgbe_ring *); +void ixgbe_free_tx_resources(struct ixgbe_ring *); +void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); +void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); +void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *); +void ixgbe_update_stats(struct ixgbe_adapter *adapter); +int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); +bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + u16 subdevice_id); +#ifdef CONFIG_PCI_IOV +void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); +#endif +int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, + const u8 *addr, u16 queue); +int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, + const u8 *addr, u16 queue); +void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid); +void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *, + struct ixgbe_ring *); +void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, + struct ixgbe_tx_buffer *); +void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); +void ixgbe_write_eitr(struct ixgbe_q_vector *); +int ixgbe_poll(struct napi_struct *napi, int budget); +int ethtool_ioctl(struct ifreq *ifr); +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); +s32 
ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl); +s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common, + u8 queue); +s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input_mask); +s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id, u8 queue); +s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id); +void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + union ixgbe_atr_input *mask); +int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ixgbe_fdir_filter *input, + u16 sw_idx); +void ixgbe_set_rx_mode(struct net_device *netdev); +#ifdef CONFIG_IXGBE_DCB +void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); +#endif +int ixgbe_setup_tc(struct net_device *dev, u8 tc); +void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); +void ixgbe_do_reset(struct net_device *netdev); +#ifdef CONFIG_IXGBE_HWMON +void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter); +int ixgbe_sysfs_init(struct ixgbe_adapter *adapter); +#endif /* CONFIG_IXGBE_HWMON */ +#ifdef IXGBE_FCOE +void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); +int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, + u8 *hdr_len); +int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, + union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb); +int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc); +int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc); +int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); +int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter); +void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter 
*adapter); +int ixgbe_fcoe_enable(struct net_device *netdev); +int ixgbe_fcoe_disable(struct net_device *netdev); +#ifdef CONFIG_IXGBE_DCB +u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter); +u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); +#endif /* CONFIG_IXGBE_DCB */ +int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); +int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, + struct netdev_fcoe_hbainfo *info); +u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter); +#endif /* IXGBE_FCOE */ +#ifdef CONFIG_DEBUG_FS +void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter); +void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter); +void ixgbe_dbg_init(void); +void ixgbe_dbg_exit(void); +#else +static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {} +static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {} +static inline void ixgbe_dbg_init(void) {} +static inline void ixgbe_dbg_exit(void) {} +#endif /* CONFIG_DEBUG_FS */ +static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +void ixgbe_ptp_init(struct ixgbe_adapter *adapter); +void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter); +void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); +void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); +void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); +void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *); +void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb); +static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) { + ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb); + return; + } + + if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) + return; + + ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb); + + /* Update the 
last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. + */ + rx_ring->last_rx_timestamp = jiffies; +} + +int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); +int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); +void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); +void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); +void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter); +#ifdef CONFIG_PCI_IOV +void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); +#endif + +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, + struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring); +u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); +void ixgbe_store_reta(struct ixgbe_adapter *adapter); +s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); +#endif /* _IXGBE_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.c new file mode 100644 index 000000000000..8a32eb7d47b9 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.c @@ -0,0 +1,1220 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2016 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include + +#include "ixgbe.h" +#include "ixgbe_phy.h" + +#define IXGBE_82598_MAX_TX_QUEUES 32 +#define IXGBE_82598_MAX_RX_QUEUES 64 +#define IXGBE_82598_RAR_ENTRIES 16 +#define IXGBE_82598_MC_TBL_SIZE 128 +#define IXGBE_82598_VFT_TBL_SIZE 128 +#define IXGBE_82598_RX_PB_SIZE 512 + +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); + +/** + * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82598 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 250ms for capability version 1 config, + * or 16ms to 55ms for version 2. 
+ **/ +static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) +{ + u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); + u16 pcie_devctl2; + + if (ixgbe_removed(hw->hw_addr)) + return; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) + goto out; + + /* + * if capababilities version is type 1 we can write the + * timeout of 10ms to 250ms through the GCR register + */ + if (!(gcr & IXGBE_GCR_CAP_VER2)) { + gcr |= IXGBE_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* + * for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); + pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; + ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; + IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); +} + +static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + /* Call PHY identify routine to get the phy type */ + ixgbe_identify_phy_generic(hw); + + mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; + mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + return 0; +} + +/** + * ixgbe_init_phy_ops_82598 - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during get_invariants because the PHY/SFP type was + * not known. Perform the SFP init if necessary. 
+ * + **/ +static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + u16 list_offset, data_offset; + + /* Identify the PHY */ + phy->ops.identify(hw); + + /* Overwrite the link function pointers if copper PHY */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = &ixgbe_setup_copper_link_82598; + mac->ops.get_link_capabilities = + &ixgbe_get_copper_link_capabilities_generic; + } + + switch (hw->phy.type) { + case ixgbe_phy_tn: + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; + phy->ops.check_link = &ixgbe_check_phy_link_tnx; + break; + case ixgbe_phy_nl: + phy->ops.reset = &ixgbe_reset_phy_nl; + + /* Call SFP+ identify routine to get the SFP+ module type */ + ret_val = phy->ops.identify_sfp(hw); + if (ret_val) + return ret_val; + if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + /* Check to see if SFP+ module is supported */ + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, + &list_offset, + &data_offset); + if (ret_val) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + break; + default: + break; + } + + return 0; +} + +/** + * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function. + * Disables relaxed ordering for archs other than SPARC + * Then set pcie completion timeout + * + **/ +static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) +{ + s32 ret_val; + + ret_val = ixgbe_start_hw_generic(hw); + if (ret_val) + return ret_val; + + /* set the completion timeout for interface */ + ixgbe_set_pcie_completion_timeout(hw); + + return 0; +} + +/** + * ixgbe_get_link_capabilities_82598 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the link capabilities by reading the AUTOC register. 
+ **/ +static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + u32 autoc = 0; + + /* + * Determine link capabilities based on the stored value of AUTOC, + * which represents EEPROM defaults. If AUTOC value has not been + * stored, use the current register value. + */ + if (hw->mac.orig_link_settings_stored) + autoc = hw->mac.orig_autoc; + else + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_1G_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_KX4_AN: + case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + default: + return IXGBE_ERR_LINK_SETUP; + } + + return 0; +} + +/** + * ixgbe_get_media_type_82598 - Determines media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) +{ + /* Detect if there is a copper PHY attached. 
*/ + switch (hw->phy.type) { + case ixgbe_phy_cu_unknown: + case ixgbe_phy_tn: + return ixgbe_media_type_copper; + + default: + break; + } + + /* Media type for I82598 is based on device ID */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82598_BX: + /* Default device ID is mezzanine card KX/KX4 */ + return ixgbe_media_type_backplane; + + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + case IXGBE_DEV_ID_82598EB_XF_LR: + case IXGBE_DEV_ID_82598EB_SFP_LOM: + return ixgbe_media_type_fiber; + + case IXGBE_DEV_ID_82598EB_CX4: + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: + return ixgbe_media_type_cx4; + + case IXGBE_DEV_ID_82598AT: + case IXGBE_DEV_ID_82598AT2: + return ixgbe_media_type_copper; + + default: + return ixgbe_media_type_unknown; + } +} + +/** + * ixgbe_fc_enable_82598 - Enable flow control + * @hw: pointer to hardware structure + * + * Enable flow control according to the current settings. + **/ +static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) +{ + u32 fctrl_reg; + u32 rmcs_reg; + u32 reg; + u32 fcrtl, fcrth; + u32 link_speed = 0; + int i; + bool link_up; + + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) + return IXGBE_ERR_INVALID_LINK_SETTINGS; + + /* Low water mark of zero causes XOFF floods */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { + hw_dbg(hw, "Invalid water mark configuration\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + } + } + + /* + * On 82598 having Rx FC on causes resets while doing 1G + * so if it's on turn it off once we know link_speed. For + * more details see 82598 Specification update. 
+ */ + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { + switch (hw->fc.requested_mode) { + case ixgbe_fc_full: + hw->fc.requested_mode = ixgbe_fc_tx_pause; + break; + case ixgbe_fc_rx_pause: + hw->fc.requested_mode = ixgbe_fc_none; + break; + default: + /* no change */ + break; + } + } + + /* Negotiate the fc mode to use */ + hw->mac.ops.fc_autoneg(hw); + + /* Disable any previous flow control settings */ + fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); + + rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); + + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case ixgbe_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + fctrl_reg |= IXGBE_FCTRL_RFCE; + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. 
*/ + fctrl_reg |= IXGBE_FCTRL_RFCE; + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + return IXGBE_ERR_CONFIG; + } + + /* Set 802.3x based flow control settings. */ + fctrl_reg |= IXGBE_FCTRL_DPF; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); + IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; + fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth); + } else { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); + } + + } + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + + return 0; +} + +/** + * ixgbe_start_mac_link_82598 - Configures MAC link settings + * @hw: pointer to hardware structure + * + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. 
+ **/ +static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete) +{ + u32 autoc_reg; + u32 links_reg; + u32 i; + s32 status = 0; + + /* Restart link */ + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + /* Only poll for autoneg to complete if specified to do so */ + if (autoneg_wait_to_complete) { + if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_AN || + (autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { + links_reg = 0; /* Just in case Autoneg time = 0 */ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msleep(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; + hw_dbg(hw, "Autonegotiation did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ + msleep(50); + + return status; +} + +/** + * ixgbe_validate_link_ready - Function looks for phy link + * @hw: pointer to hardware structure + * + * Function indicates success when phy link is available. If phy is not ready + * within 5 seconds of MAC indicating link, the function returns error. 
+ **/ +static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) +{ + u32 timeout; + u16 an_reg; + + if (hw->device_id != IXGBE_DEV_ID_82598AT2) + return 0; + + for (timeout = 0; + timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { + hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg); + + if ((an_reg & MDIO_AN_STAT1_COMPLETE) && + (an_reg & MDIO_STAT1_LSTATUS)) + break; + + msleep(100); + } + + if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { + hw_dbg(hw, "Link was indicated but link is down\n"); + return IXGBE_ERR_LINK_SETUP; + } + + return 0; +} + +/** + * ixgbe_check_mac_link_82598 - Get link/speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true is link is up, false otherwise + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *link_up, + bool link_up_wait_to_complete) +{ + u32 links_reg; + u32 i; + u16 link_reg, adapt_comp_reg; + + /* + * SERDES PHY requires us to read link status from register 0xC79F. + * Bit 0 set indicates link is up/ready; clear indicates link down. + * 0xC00C is read to check that the XAUI lanes are active. Bit 0 + * clear indicates active; set indicates inactive. 
+ */ + if (hw->phy.type == ixgbe_phy_nl) { + hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); + hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD, + &adapt_comp_reg); + if (link_up_wait_to_complete) { + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + if ((link_reg & 1) && + ((adapt_comp_reg & 1) == 0)) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + hw->phy.ops.read_reg(hw, 0xC79F, + MDIO_MMD_PMAPMD, + &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, + MDIO_MMD_PMAPMD, + &adapt_comp_reg); + } + } else { + if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) + *link_up = true; + else + *link_up = false; + } + + if (!*link_up) + return 0; + } + + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (link_up_wait_to_complete) { + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + if (links_reg & IXGBE_LINKS_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + } + } else { + if (links_reg & IXGBE_LINKS_UP) + *link_up = true; + else + *link_up = false; + } + + if (links_reg & IXGBE_LINKS_SPEED) + *speed = IXGBE_LINK_SPEED_10GB_FULL; + else + *speed = IXGBE_LINK_SPEED_1GB_FULL; + + if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up && + (ixgbe_validate_link_ready(hw) != 0)) + *link_up = false; + + return 0; +} + +/** + * ixgbe_setup_mac_link_82598 - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. 
+ **/ +static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + bool autoneg = false; + ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; + u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc = curr_autoc; + u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; + + /* Check to see if speed passed in is supported. */ + ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg); + speed &= link_capabilities; + + if (speed == IXGBE_LINK_SPEED_UNKNOWN) + return IXGBE_ERR_LINK_SETUP; + + /* Set KX4/KX support according to speed requested */ + else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || + link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { + autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + autoc |= IXGBE_AUTOC_KX4_SUPP; + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + autoc |= IXGBE_AUTOC_KX_SUPP; + if (autoc != curr_autoc) + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + } + + /* Setup and restart the link based on the new values in + * ixgbe_hw This will write the AUTOC register based on the new + * stored values + */ + return ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); +} + + +/** + * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Sets the link speed in the AUTOC register in the MAC and restarts link. 
+ **/ +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status; + + /* Setup the PHY according to input speed */ + status = hw->phy.ops.setup_link_speed(hw, speed, + autoneg_wait_to_complete); + /* Set up MAC */ + ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); + + return status; +} + +/** + * ixgbe_reset_hw_82598 - Performs hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks and + * clears all interrupts, performing a PHY reset, and performing a link (MAC) + * reset. + **/ +static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) +{ + s32 status; + s32 phy_status = 0; + u32 ctrl; + u32 gheccr; + u32 i; + u32 autoc; + u8 analog_val; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status) + return status; + + /* + * Power up the Atlas Tx lanes if they are currently powered down. + * Atlas Tx lanes are powered down for MAC loopback tests, but + * they are not automatically restored on reset. 
+ */ + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); + if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { + /* Enable Tx Atlas so packets can be transmitted again */ + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, + analog_val); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, + analog_val); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, + analog_val); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, + analog_val); + } + + /* Reset PHY */ + if (hw->phy.reset_disable == false) { + /* PHY ops must be identified and initialized prior to reset */ + + /* Init PHY and function pointers, perform SFP setup */ + phy_status = hw->phy.ops.init(hw); + if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) + return phy_status; + if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) + goto mac_reset_top; + + hw->phy.ops.reset(hw); + } + +mac_reset_top: + /* + * Issue global reset to the MAC. This needs to be a SW reset. 
+ * If link reset is used, it might reset the MAC when mng is using it + */ + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST; + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 1200); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST)) + break; + udelay(1); + } + if (ctrl & IXGBE_CTRL_RST) { + status = IXGBE_ERR_RESET_FAILED; + hw_dbg(hw, "Reset polling failed to complete.\n"); + } + + msleep(50); + + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); + gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6)); + IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); + + /* + * Store the original AUTOC value if it has not been + * stored off yet. Otherwise restore the stored original + * AUTOC value since the reset operation sets back to deaults. 
+ */ + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + if (hw->mac.orig_link_settings_stored == false) { + hw->mac.orig_autoc = autoc; + hw->mac.orig_link_settings_stored = true; + } else if (autoc != hw->mac.orig_autoc) { + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table + */ + hw->mac.ops.init_rx_addrs(hw); + + if (phy_status) + status = phy_status; + + return status; +} + +/** + * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq set index + **/ +static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + rar_high &= ~IXGBE_RAH_VIND_MASK; + rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); + return 0; +} + +/** + * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq clear index (not used in 82598, but elsewhere) + **/ +static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + if (rar_high & 
IXGBE_RAH_VIND_MASK) { + rar_high &= ~IXGBE_RAH_VIND_MASK; + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); + } + + return 0; +} + +/** + * ixgbe_set_vfta_82598 - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFTA + * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * @vlvf_bypass: boolean flag - unused + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) +{ + u32 regindex; + u32 bitindex; + u32 bits; + u32 vftabyte; + + if (vlan > 4095) + return IXGBE_ERR_PARAM; + + /* Determine 32-bit word position in array */ + regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ + + /* Determine the location of the (VMD) queue index */ + vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ + bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ + + /* Set the nibble for VMD queue index */ + bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); + bits &= (~(0x0F << bitindex)); + bits |= (vind << bitindex); + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); + + /* Determine the location of the bit for this VLAN id */ + bitindex = vlan & 0x1F; /* lower five bits */ + + bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); + if (vlan_on) + /* Turn on this VLAN id */ + bits |= BIT(bitindex); + else + /* Turn off this VLAN id */ + bits &= ~BIT(bitindex); + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); + + return 0; +} + +/** + * ixgbe_clear_vfta_82598 - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) +{ + u32 offset; + u32 vlanbyte; + + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + 
+ for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), + 0); + + return 0; +} + +/** + * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs read operation to Atlas analog register specified. + **/ +static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + u32 atlas_ctl; + + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, + IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); + IXGBE_WRITE_FLUSH(hw); + udelay(10); + atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + *val = (u8)atlas_ctl; + + return 0; +} + +/** + * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register + * @hw: pointer to hardware structure + * @reg: atlas register to write + * @val: value to write + * + * Performs write operation to Atlas analog register specified. + **/ +static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + u32 atlas_ctl; + + atlas_ctl = (reg << 8) | val; + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); + IXGBE_WRITE_FLUSH(hw); + udelay(10); + + return 0; +} + +/** + * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @dev_addr: address to read from + * @byte_offset: byte offset to read from dev_addr + * @eeprom_data: value read + * + * Performs 8 byte read operation to SFP module's data over I2C interface. 
+ **/ +static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, + u8 byte_offset, u8 *eeprom_data) +{ + s32 status = 0; + u16 sfp_addr = 0; + u16 sfp_data = 0; + u16 sfp_stat = 0; + u16 gssr; + u32 i; + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + gssr = IXGBE_GSSR_PHY1_SM; + else + gssr = IXGBE_GSSR_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) + return IXGBE_ERR_SWFW_SYNC; + + if (hw->phy.type == ixgbe_phy_nl) { + /* + * phy SDA/SCL registers are at addresses 0xC30A to + * 0xC30D. These registers are used to talk to the SFP+ + * module's EEPROM through the SDA/SCL (I2C) interface. + */ + sfp_addr = (dev_addr << 8) + byte_offset; + sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); + hw->phy.ops.write_reg_mdi(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, + MDIO_MMD_PMAPMD, + sfp_addr); + + /* Poll status */ + for (i = 0; i < 100; i++) { + hw->phy.ops.read_reg_mdi(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, + MDIO_MMD_PMAPMD, + &sfp_stat); + sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; + if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) + break; + usleep_range(10000, 20000); + } + + if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { + hw_dbg(hw, "EEPROM read did not pass.\n"); + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + /* Read data */ + hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, + MDIO_MMD_PMAPMD, &sfp_data); + + *eeprom_data = (u8)(sfp_data >> 8); + } else { + status = IXGBE_ERR_PHY; + } + +out: + hw->mac.ops.release_swfw_sync(hw, gssr); + return status; +} + +/** + * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. 
+ **/ +static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) +{ + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR, + byte_offset, eeprom_data); +} + +/** + * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @eeprom_data: value read + * + * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C + **/ +static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) +{ + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2, + byte_offset, sff8472_data); +} + +/** + * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple + * port devices. + * @hw: pointer to the HW structure + * + * Calls common function and corrects issue with some single port devices + * that enable LAN1 but not LAN0. + **/ +static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_bus_info *bus = &hw->bus; + u16 pci_gen = 0; + u16 pci_ctrl2 = 0; + + ixgbe_set_lan_id_multi_port_pcie(hw); + + /* check if LAN0 is disabled */ + hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen); + if ((pci_gen != 0) && (pci_gen != 0xFFFF)) { + + hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2); + + /* if LAN0 is completely disabled force function to 0 */ + if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) && + !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) && + !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) { + + bus->func = 0; + } + } +} + +/** + * ixgbe_set_rxpba_82598 - Initialize RX packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, + u32 headroom, int strategy) +{ + u32 rxpktsize = IXGBE_RXPBSIZE_64KB; + u8 i = 0; + + if 
(!num_pb) + return; + + /* Setup Rx packet buffer sizes */ + switch (strategy) { + case PBA_STRATEGY_WEIGHTED: + /* Setup the first four at 80KB */ + rxpktsize = IXGBE_RXPBSIZE_80KB; + for (; i < 4; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + /* Setup the last four at 48KB...don't re-init i */ + rxpktsize = IXGBE_RXPBSIZE_48KB; + /* Fall Through */ + case PBA_STRATEGY_EQUAL: + default: + /* Divide the remaining Rx packet buffer evenly among the TCs */ + for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + break; + } + + /* Setup Tx packet buffer sizes */ + for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); +} + +static const struct ixgbe_mac_operations mac_ops_82598 = { + .init_hw = &ixgbe_init_hw_generic, + .reset_hw = &ixgbe_reset_hw_82598, + .start_hw = &ixgbe_start_hw_82598, + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, + .get_media_type = &ixgbe_get_media_type_82598, + .enable_rx_dma = &ixgbe_enable_rx_dma_generic, + .get_mac_addr = &ixgbe_get_mac_addr_generic, + .stop_adapter = &ixgbe_stop_adapter_generic, + .get_bus_info = &ixgbe_get_bus_info_generic, + .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598, + .read_analog_reg8 = &ixgbe_read_analog_reg8_82598, + .write_analog_reg8 = &ixgbe_write_analog_reg8_82598, + .setup_link = &ixgbe_setup_mac_link_82598, + .set_rxpba = &ixgbe_set_rxpba_82598, + .check_link = &ixgbe_check_mac_link_82598, + .get_link_capabilities = &ixgbe_get_link_capabilities_82598, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, + .init_led_link_act = ixgbe_init_led_link_act_generic, + .blink_led_start = &ixgbe_blink_led_start_generic, + .blink_led_stop = &ixgbe_blink_led_stop_generic, + .set_rar = &ixgbe_set_rar_generic, + .clear_rar = &ixgbe_clear_rar_generic, + .set_vmdq = &ixgbe_set_vmdq_82598, + .clear_vmdq = &ixgbe_clear_vmdq_82598, + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, + 
.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, + .enable_mc = &ixgbe_enable_mc_generic, + .disable_mc = &ixgbe_disable_mc_generic, + .clear_vfta = &ixgbe_clear_vfta_82598, + .set_vfta = &ixgbe_set_vfta_82598, + .fc_enable = &ixgbe_fc_enable_82598, + .setup_fc = ixgbe_setup_fc_generic, + .fc_autoneg = ixgbe_fc_autoneg, + .set_fw_drv_ver = NULL, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, + .release_swfw_sync = &ixgbe_release_swfw_sync, + .init_swfw_sync = NULL, + .get_thermal_sensor_data = NULL, + .init_thermal_sensor_thresh = NULL, + .prot_autoc_read = &prot_autoc_read_generic, + .prot_autoc_write = &prot_autoc_write_generic, + .enable_rx = &ixgbe_enable_rx_generic, + .disable_rx = &ixgbe_disable_rx_generic, +}; + +static const struct ixgbe_eeprom_operations eeprom_ops_82598 = { + .init_params = &ixgbe_init_eeprom_params_generic, + .read = &ixgbe_read_eerd_generic, + .write = &ixgbe_write_eeprom_generic, + .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic, + .read_buffer = &ixgbe_read_eerd_buffer_generic, + .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, + .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, + .update_checksum = &ixgbe_update_eeprom_checksum_generic, +}; + +static const struct ixgbe_phy_operations phy_ops_82598 = { + .identify = &ixgbe_identify_phy_generic, + .identify_sfp = &ixgbe_identify_module_generic, + .init = &ixgbe_init_phy_ops_82598, + .reset = &ixgbe_reset_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, + .read_reg_mdi = &ixgbe_read_phy_reg_mdi, + .write_reg_mdi = &ixgbe_write_phy_reg_mdi, + .setup_link = &ixgbe_setup_phy_link_generic, + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598, + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, + .check_overtemp = &ixgbe_tn_check_overtemp, +}; + +const struct ixgbe_info ixgbe_82598_info = { + .mac = ixgbe_mac_82598EB, + .get_invariants = 
&ixgbe_get_invariants_82598, + .mac_ops = &mac_ops_82598, + .eeprom_ops = &eeprom_ops_82598, + .phy_ops = &phy_ops_82598, + .mvals = ixgbe_mvals_8259X, +}; diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.c new file mode 100644 index 000000000000..d602637ccc40 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.c @@ -0,0 +1,2277 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2016 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include + +#include "ixgbe.h" +#include "ixgbe_phy.h" +#include "ixgbe_mbx.h" + +#define IXGBE_82599_MAX_TX_QUEUES 128 +#define IXGBE_82599_MAX_RX_QUEUES 128 +#define IXGBE_82599_RAR_ENTRIES 128 +#define IXGBE_82599_MC_TBL_SIZE 128 +#define IXGBE_82599_VFT_TBL_SIZE 128 +#define IXGBE_82599_RX_PB_SIZE 512 + +static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +static void +ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed); +static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); +static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete); +static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); +static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); +static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); + +bool ixgbe_mng_enabled(struct ixgbe_hw *hw) +{ + u32 fwsm, manc, factps; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) + return false; + + manc = IXGBE_READ_REG(hw, IXGBE_MANC); + if (!(manc & IXGBE_MANC_RCV_TCO_EN)) + return false; + + 
factps = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); + if (factps & IXGBE_FACTPS_MNGCG) + return false; + + return true; +} + +static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + /* enable the laser control functions for SFP+ fiber + * and MNG not enabled + */ + if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && + !ixgbe_mng_enabled(hw)) { + mac->ops.disable_tx_laser = + &ixgbe_disable_tx_laser_multispeed_fiber; + mac->ops.enable_tx_laser = + &ixgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; + } else { + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; + mac->ops.flap_tx_laser = NULL; + } + + if (hw->phy.multispeed_fiber) { + /* Set up dual speed SFP+ support */ + mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599; + mac->ops.set_rate_select_speed = + ixgbe_set_hard_rate_select_speed; + } else { + if ((mac->ops.get_media_type(hw) == + ixgbe_media_type_backplane) && + (hw->phy.smart_speed == ixgbe_smart_speed_auto || + hw->phy.smart_speed == ixgbe_smart_speed_on) && + !ixgbe_verify_lesm_fw_enabled_82599(hw)) + mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; + else + mac->ops.setup_link = &ixgbe_setup_mac_link_82599; + } +} + +static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) +{ + s32 ret_val; + u16 list_offset, data_offset, data_value; + + if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { + ixgbe_init_mac_link_ops_82599(hw); + + hw->phy.ops.reset = NULL; + + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, + &data_offset); + if (ret_val) + return ret_val; + + /* PHY config will finish before releasing the semaphore */ + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val) + return IXGBE_ERR_SWFW_SYNC; + + if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) + goto setup_sfp_err; + while 
(data_value != 0xffff) { + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); + IXGBE_WRITE_FLUSH(hw); + if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) + goto setup_sfp_err; + } + + /* Release the semaphore */ + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + /* + * Delay obtaining semaphore again to allow FW access, + * semaphore_delay is in ms usleep_range needs us. + */ + usleep_range(hw->eeprom.semaphore_delay * 1000, + hw->eeprom.semaphore_delay * 2000); + + /* Restart DSP and set SFI mode */ + ret_val = hw->mac.ops.prot_autoc_write(hw, + hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL, + false); + + if (ret_val) { + hw_dbg(hw, " sfp module setup not complete\n"); + return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; + } + } + + return 0; + +setup_sfp_err: + /* Release the semaphore */ + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + /* Delay obtaining semaphore again to allow FW access, + * semaphore_delay is in ms usleep_range needs us. + */ + usleep_range(hw->eeprom.semaphore_delay * 1000, + hw->eeprom.semaphore_delay * 2000); + hw_err(hw, "eeprom read at offset %d failed\n", data_offset); + return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; +} + +/** + * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read + * @hw: pointer to hardware structure + * @locked: Return the if we locked for this read. + * @reg_val: Value we read from AUTOC + * + * For this part (82599) we need to wrap read-modify-writes with a possible + * FW/SW lock. It is assumed this lock will be freed with the next + * prot_autoc_write_82599(). Note, that locked can only be true in cases + * where this function doesn't return an error. + **/ +static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, + u32 *reg_val) +{ + s32 ret_val; + + *locked = false; + /* If LESM is on then we need to hold the SW/FW semaphore. 
*/ + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val) + return IXGBE_ERR_SWFW_SYNC; + + *locked = true; + } + + *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); + return 0; +} + +/** + * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write + * @hw: pointer to hardware structure + * @reg_val: value to write to AUTOC + * @locked: bool to indicate whether the SW/FW lock was already taken by + * previous proc_autoc_read_82599. + * + * This part (82599) may need to hold a the SW/FW lock around all writes to + * AUTOC. Likewise after a write we need to do a pipeline reset. + **/ +static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) +{ + s32 ret_val = 0; + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + goto out; + + /* We only need to get the lock if: + * - We didn't do it already (in the read part of a read-modify-write) + * - LESM is enabled. + */ + if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val) + return IXGBE_ERR_SWFW_SYNC; + + locked = true; + } + + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + ret_val = ixgbe_reset_pipeline_82599(hw); + +out: + /* Free the SW/FW semaphore as we either grabbed it here or + * already had it when this function was called. 
+ */ + if (locked) + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + + return ret_val; +} + +static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + ixgbe_init_mac_link_ops_82599(hw); + + mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; + mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + return 0; +} + +/** + * ixgbe_init_phy_ops_82599 - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during get_invariants because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + * + **/ +static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + u32 esdp; + + if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { + /* Store flag indicating I2C bus access control unit. 
*/ + hw->phy.qsfp_shared_i2c_bus = true; + + /* Initialize access to QSFP+ I2C bus */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0_DIR; + esdp &= ~IXGBE_ESDP_SDP1_DIR; + esdp &= ~IXGBE_ESDP_SDP0; + esdp &= ~IXGBE_ESDP_SDP0_NATIVE; + esdp &= ~IXGBE_ESDP_SDP1_NATIVE; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599; + phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599; + } + + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + + /* Setup function pointers based on detected SFP module and speeds */ + ixgbe_init_mac_link_ops_82599(hw); + + /* If copper media, overwrite with copper function pointers */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = &ixgbe_setup_copper_link_82599; + mac->ops.get_link_capabilities = + &ixgbe_get_copper_link_capabilities_generic; + } + + /* Set necessary function pointers based on phy type */ + switch (hw->phy.type) { + case ixgbe_phy_tn: + phy->ops.check_link = &ixgbe_check_phy_link_tnx; + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; + break; + default: + break; + } + + return ret_val; +} + +/** + * ixgbe_get_link_capabilities_82599 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. 
+ **/ +static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + u32 autoc = 0; + + /* Determine 1G link capabilities off of SFP+ type */ + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + return 0; + } + + /* + * Determine link capabilities based on the stored value of AUTOC, + * which represents EEPROM defaults. If AUTOC value has not been + * stored, use the current register value. + */ + if (hw->mac.orig_link_settings_stored) + autoc = hw->mac.orig_autoc; + else + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_1G_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_10G_SERIAL: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_KX4_KX_KR: + case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (autoc & IXGBE_AUTOC_KR_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (autoc & IXGBE_AUTOC_KR_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= 
IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_SGMII_1G_100M: + *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; + *autoneg = false; + break; + + default: + return IXGBE_ERR_LINK_SETUP; + } + + if (hw->phy.multispeed_fiber) { + *speed |= IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + + /* QSFP must not enable auto-negotiation */ + if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp) + *autoneg = false; + else + *autoneg = true; + } + + return 0; +} + +/** + * ixgbe_get_media_type_82599 - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) +{ + /* Detect if there is a copper PHY attached. */ + switch (hw->phy.type) { + case ixgbe_phy_cu_unknown: + case ixgbe_phy_tn: + return ixgbe_media_type_copper; + + default: + break; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: + case IXGBE_DEV_ID_82599_XAUI_LOM: + /* Default device ID is mezzanine card KX/KX4 */ + return ixgbe_media_type_backplane; + + case IXGBE_DEV_ID_82599_SFP: + case IXGBE_DEV_ID_82599_SFP_FCOE: + case IXGBE_DEV_ID_82599_SFP_EM: + case IXGBE_DEV_ID_82599_SFP_SF2: + case IXGBE_DEV_ID_82599_SFP_SF_QP: + case IXGBE_DEV_ID_82599EN_SFP: + return ixgbe_media_type_fiber; + + case IXGBE_DEV_ID_82599_CX4: + return ixgbe_media_type_cx4; + + case IXGBE_DEV_ID_82599_T3_LOM: + return ixgbe_media_type_copper; + + case IXGBE_DEV_ID_82599_LS: + return ixgbe_media_type_fiber_lco; + + case IXGBE_DEV_ID_82599_QSFP_SF_QP: + return ixgbe_media_type_fiber_qsfp; + + default: + return ixgbe_media_type_unknown; + } +} + +/** + * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3 + * @hw: pointer to hardware structure + * + * Disables link, should be called 
during D3 power down sequence.
 *
 **/
static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
{
	u32 autoc2_reg;
	u16 ee_ctrl_2 = 0;

	hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);

	/* Only force the link down for D3 when no manageability firmware
	 * owns the link, WoL is off, and the EEPROM opted in via the CCD bit.
	 */
	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
	}
}

/**
 * ixgbe_start_mac_link_82599 - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Configures link settings based on values in the ixgbe_hw struct.
 * Restarts the link.  Performs autonegotiation if needed.
 **/
static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = 0;
	bool got_lock = false;

	/* LESM firmware also drives the MAC CSRs; take the SW/FW semaphore
	 * so the pipeline reset below cannot race with it.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status)
			return status;

		got_lock = true;
	}

	/* Restart link */
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msleep(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				hw_dbg(hw, "Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msleep(50);

	return status;
}

/**
 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively shutting down the Tx
 * laser on the PHY, effectively halting physical link.
 **/
static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	/* Disable tx laser; allow 100us to go dark per spec */
	esdp_reg |= IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	udelay(100);
}

/**
 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively turning on the Tx
 * laser on the PHY, effectively starting physical link.
 **/
static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Enable tx laser; allow 100ms to light up */
	esdp_reg &= ~IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	msleep(100);
}

/**
 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
 * @hw: pointer to hardware structure
 *
 * When the driver changes the link speeds that it can support,
 * it sets autotry_restart to true to indicate that we need to
 * initiate a new autotry session with the link partner.  To do
 * so, we set the speed then disable and re-enable the tx laser, to
 * alert the link partner that it also needs to restart autotry on its
 * end.  This is consistent with true clause 37 autoneg, which also
 * involves a loss of signal.
 **/
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	if (hw->mac.autotry_restart) {
		ixgbe_disable_tx_laser_multispeed_fiber(hw);
		ixgbe_enable_tx_laser_multispeed_fiber(hw);
		hw->mac.autotry_restart = false;
	}
}

/**
 * ixgbe_set_hard_rate_select_speed - Set module link speed
 * @hw: pointer to hardware structure
 * @speed: link speed to set
 *
 * Set module link speed via RS0/RS1 rate select pins.
 */
static void
ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		break;
	default:
		hw_dbg(hw, "Invalid fixed module speed\n");
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Implements the Intel SmartSpeed algorithm.
 **/
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
					   ixgbe_link_speed speed,
					   bool autoneg_wait_to_complete)
{
	s32 status = 0;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = false;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = false;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != 0)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			mdelay(100);

			/* If we have link, just jump out */
			status = hw->mac.ops.check_link(hw, &link_speed,
							&link_up, false);
			if (status != 0)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = true;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != 0)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g.  This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		mdelay(100);

		/* If we have link, just jump out */
		status = hw->mac.ops.check_link(hw, &link_speed,
						&link_up, false);
		if (status != 0)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = false;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n");
	return status;
}

/**
 * ixgbe_setup_mac_link_82599 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg_wait_to_complete)
{
	bool autoneg = false;
	s32 status;
	u32 pma_pmd_1g, link_mode, links_reg, i;
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	/* holds the value of AUTOC register at this current point in time */
	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	/* holds the cached value of AUTOC register */
	u32 orig_autoc = 0;
	/* temporary variable used for comparison purposes */
	u32 autoc = current_autoc;

	/* Check to see if speed passed in is supported. */
	status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
						   &autoneg);
	if (status)
		return status;

	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		return IXGBE_ERR_LINK_SETUP;

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			/* SmartSpeed suppresses KR to allow link at a
			 * lower negotiated rate on marginal media.
			 */
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == false))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != current_autoc) {
		/* Restart link */
		status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
		if (status)
			return status;

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /*Just in case Autoneg time=0*/
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					    IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msleep(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
					    IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					hw_dbg(hw, "Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noises during initial link setup */
		msleep(50);
	}

	return status;
}

/**
 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: true if waiting is needed to complete
 *
 * Restarts link on PHY and MAC based on settings passed in.
 **/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);

	return status;
}

/**
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 * reset.
 **/
static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	u32 ctrl, i, autoc, autoc2;
	u32 curr_lms;
	bool link_up = false;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status)
		return status;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		return status;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		hw->phy.sfp_setup_needed = false;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		return status;

	/* Reset PHY */
	if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

	/* remember AUTOC from before we reset */
	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(1000, 1200);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
		udelay(1);
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}

	msleep(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);

	/* Enable link if disabled in NVM */
	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		IXGBE_WRITE_FLUSH(hw);
	}

	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = true;
	} else {

		/* If MNG FW is running on a multi-speed device that
		 * doesn't autoneg without driver support we need to
		 * leave LMS in the state it was before we MAC reset.
		 * Likewise if we support WoL we don't want to change
		 * the LMS state either.
		 */
		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
		    hw->wol_enabled)
			hw->mac.orig_autoc =
				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
				curr_lms;

		if (autoc != hw->mac.orig_autoc) {
			status = hw->mac.ops.prot_autoc_write(hw,
							hw->mac.orig_autoc,
							false);
			if (status)
				return status;
		}

		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (is_valid_ether_addr(hw->mac.san_addr)) {
		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for this RAR */
		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
				       IXGBE_CLEAR_VMDQ_ALL);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

	return status;
}

/**
 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
 * @hw: pointer to hardware structure
 * @fdircmd: current value of FDIRCMD register
 */
static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
{
	int i;

	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			return 0;
		udelay(10);
	}

	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
}

/**
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	u32 fdircmd;
	s32 err;

	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		hw_dbg(hw, "Flow Director previous command did not complete, aborting table re-initialization.\n");
		return err;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * 82599 adapters flow director init flow cannot be restarted,
	 * Workaround 82599 silicon errata by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value.
	 * - write 1 to bit 8 of FDIRCMD register &
	 * - write 0 to bit 8 of FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		usleep_range(1000, 2000);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return 0;
}

/**
 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register
 **/
static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	int i;

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *      10G: PBALLOC = 11b, timing is 60us
	 *       1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 *     Multiply these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		usleep_range(1000, 2000);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		hw_dbg(hw, "Flow Director poll time exceeded!\n");
}

/**
 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *           contains just the value of the Rx packet buffer allocation
 **/
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	/*
	 * Continue setup of fdirctrl register bits:
	 *  Move the flexible bytes to use the ethertype - shift 6 words
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 filters are left
	 */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return 0;
}

/**
 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *           contains just the value of the Rx packet buffer allocation
 **/
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	/*
	 * Continue setup of fdirctrl register bits:
	 *  Turn perfect match filtering on
	 *  Initialize the drop queue
	 *  Move the flexible bytes to use the ethertype - shift 6 words
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return 0;
}

/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)

/**
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @input: input bitstream to compute the hash on
 * @common: compressed common input dword
 *
 * This function is almost identical to the function above but contains
 * several optimizations such as unwinding all of the loops, letting the
 * compiler work out all of the conditional ifs since the keys are static
 * defines, and computing two keys at once since the hashed dword stream
 * will be the same for both keys.
 **/
static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
					    union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = ntohl(input.dword);

	/* generate common hash dword */
	hi_hash_dword = ntohl(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}

/**
 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 *
@hw: pointer to hardware structure
 * @input: unique input dword
 * @common: compressed common input dword
 * @queue: queue index to direct traffic to
 *
 * Note that the tunnel bit in input must not be set when the hardware
 * tunneling support does not exist.
 **/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_hash_dword input,
					  union ixgbe_atr_hash_dword common,
					  u8 queue)
{
	u64 fdirhashcmd;
	u8 flow_type;
	bool tunnel;
	u32 fdircmd;

	/*
	 * Get the flow_type in order to program FDIRCMD properly
	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
	 */
	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
	flow_type = input.formatted.flow_type &
		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
	switch (flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TCPV6:
	case IXGBE_ATR_FLOW_TYPE_UDPV6:
	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
		break;
	default:
		hw_dbg(hw, " Error on flow type input\n");
		return IXGBE_ERR_CONFIG;
	}

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	if (tunnel)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;

	/*
	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
	 */
	fdirhashcmd = (u64)fdircmd << 32;
	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);

	hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);

	return 0;
}

#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)

/**
 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
 * @input: input bitstream to compute the hash on
 * @input_mask: mask for the input bitstream
 *
 * This function serves two main purposes.  First it applies the input_mask
 * to the atr_input resulting in a cleaned up atr_input data stream.
 * Secondly it computes the hash and stores it in the bkt_hash field at
 * the end of the input byte stream.  This way it will be available for
 * future use without needing to recompute the hash.
 **/
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *input_mask)
{

	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0, hi_dword = 0;
	int i;

	/* Apply masks to input data */
	for (i = 0; i <= 10; i++)
		input->dword_stream[i] &= input_mask->dword_stream[i];

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = ntohl(input->dword_stream[0]);

	/* generate common hash dword */
	for (i = 1; i <= 10; i++)
		hi_dword ^= input->dword_stream[i];
	hi_hash_dword = ntohl(hi_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key */
	for (i = 1; i <= 15; i++)
		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);

	/*
	 * Limit hash to 13 bits since max bucket count is 8K.
	 * Store result at the end of the input stream.
	 */
	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
}

/**
 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
 * @input_mask: mask to be bit swapped
 *
 * The source and destination port masks for flow director are bit swapped
 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
 * generate a correctly swapped value we need to bit swap the mask and that
 * is what is accomplished by this function.
 **/
static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
{
	u32 mask = ntohs(input_mask->formatted.dst_port);

	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
	mask |= ntohs(input_mask->formatted.src_port);
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}

/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))

#define IXGBE_STORE_AS_BE16(_value) \
	ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8))

s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field.  Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		hw_dbg(hw, " bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		/* fall through */
	case 0x7F:
		break;
	default:
		hw_dbg(hw, " Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			hw_dbg(hw, " Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		hw_dbg(hw, " Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		hw_dbg(hw, " Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes */
		fdirm |= IXGBE_FDIRM_FLEX;
		/* fall through */
	case 0xFFFF:
		break;
	default:
		hw_dbg(hw, " Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

	/* write both the same so that UDP and TCP use the same mask */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);

	/* also use it for SCTP */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
		break;
	default:
		break;
	}

	/* store source and destination IP masks (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
			     ~input_mask->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
			     ~input_mask->formatted.dst_ip[0]);

	return 0;
}

s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;
	s32 err;

	/* currently IPv6 is not supported, must be programmed with 0 */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
			     input->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
			     input->formatted.src_ip[1]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
			     input->formatted.src_ip[2]);

	/* record the source address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);

	/* record the first 32 bits of the destination address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);

	/* record source and destination port (little-endian) */
	fdirport = ntohs(input->formatted.dst_port);
	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
	fdirport |= ntohs(input->formatted.src_port);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);

	/* record vlan (little-endian) and flex_bytes (big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= ntohs(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		hw_dbg(hw, "Flow Director command did not complete!\n");
		return err;
	}

	return 0;
}

s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd;
	s32 err;

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		hw_dbg(hw, "Flow Director command did not complete!\n");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return 0;
}

/**
 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Omer analog register specified.
+ **/ +static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + u32 core_ctl; + + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | + (reg << 8)); + IXGBE_WRITE_FLUSH(hw); + udelay(10); + core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); + *val = (u8)core_ctl; + + return 0; +} + +/** + * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register + * @hw: pointer to hardware structure + * @reg: atlas register to write + * @val: value to write + * + * Performs write operation to Omer analog register specified. + **/ +static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + u32 core_ctl; + + core_ctl = (reg << 8) | val; + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); + IXGBE_WRITE_FLUSH(hw); + udelay(10); + + return 0; +} + +/** + * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. + **/ +static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) +{ + s32 ret_val = 0; + + ret_val = ixgbe_start_hw_generic(hw); + if (ret_val) + return ret_val; + + ret_val = ixgbe_start_hw_gen2(hw); + if (ret_val) + return ret_val; + + /* We need to run link autotry after the driver loads */ + hw->mac.autotry_restart = true; + + return ixgbe_verify_fw_version_82599(hw); +} + +/** + * ixgbe_identify_phy_82599 - Get physical layer module + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. + * If PHY already detected, maintains current PHY type in hw struct, + * otherwise executes the PHY detection routine. + **/ +static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) +{ + s32 status; + + /* Detect PHY if not unknown - returns success if already detected. 
*/ + status = ixgbe_identify_phy_generic(hw); + if (status) { + /* 82599 10GBASE-T requires an external PHY */ + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) + return status; + status = ixgbe_identify_module_generic(hw); + } + + /* Set PHY type none if no PHY detected */ + if (hw->phy.type == ixgbe_phy_unknown) { + hw->phy.type = ixgbe_phy_none; + status = 0; + } + + /* Return error if SFP module has been detected but is not supported */ + if (hw->phy.type == ixgbe_phy_sfp_unsupported) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + return status; +} + +/** + * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit for 82599 + **/ +static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) +{ + /* + * Workaround for 82599 silicon errata when enabling the Rx datapath. + * If traffic is incoming before we enable the Rx unit, it could hang + * the Rx DMA unit. Therefore, make sure the security engine is + * completely disabled prior to enabling the Rx unit. + */ + hw->mac.ops.disable_rx_buff(hw); + + if (regval & IXGBE_RXCTRL_RXEN) + hw->mac.ops.enable_rx(hw); + else + hw->mac.ops.disable_rx(hw); + + hw->mac.ops.enable_rx_buff(hw); + + return 0; +} + +/** + * ixgbe_verify_fw_version_82599 - verify fw version for 82599 + * @hw: pointer to hardware structure + * + * Verifies that installed the firmware version is 0.6 or higher + * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. + * + * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or + * if the FW version is not supported. 
+ **/ +static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM_VERSION; + u16 fw_offset, fw_ptp_cfg_offset; + u16 offset; + u16 fw_version = 0; + + /* firmware check is only necessary for SFI devices */ + if (hw->phy.media_type != ixgbe_media_type_fiber) + return 0; + + /* get the offset to the Firmware Module block */ + offset = IXGBE_FW_PTR; + if (hw->eeprom.ops.read(hw, offset, &fw_offset)) + goto fw_version_err; + + if (fw_offset == 0 || fw_offset == 0xFFFF) + return IXGBE_ERR_EEPROM_VERSION; + + /* get the offset to the Pass Through Patch Configuration block */ + offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR; + if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset)) + goto fw_version_err; + + if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF) + return IXGBE_ERR_EEPROM_VERSION; + + /* get the firmware version */ + offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4; + if (hw->eeprom.ops.read(hw, offset, &fw_version)) + goto fw_version_err; + + if (fw_version > 0x5) + status = 0; + + return status; + +fw_version_err: + hw_err(hw, "eeprom read at offset %d failed\n", offset); + return IXGBE_ERR_EEPROM_VERSION; +} + +/** + * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. + * @hw: pointer to hardware structure + * + * Returns true if the LESM FW module is present and enabled. Otherwise + * returns false. Smart Speed must be disabled if LESM FW module is enabled. 
+ **/ +static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) +{ + u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; + s32 status; + + /* get the offset to the Firmware Module block */ + status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); + + if (status || fw_offset == 0 || fw_offset == 0xFFFF) + return false; + + /* get the offset to the LESM Parameters block */ + status = hw->eeprom.ops.read(hw, (fw_offset + + IXGBE_FW_LESM_PARAMETERS_PTR), + &fw_lesm_param_offset); + + if (status || + fw_lesm_param_offset == 0 || fw_lesm_param_offset == 0xFFFF) + return false; + + /* get the lesm state word */ + status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + + IXGBE_FW_LESM_STATE_1), + &fw_lesm_state); + + if (!status && (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) + return true; + + return false; +} + +/** + * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of word in EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Retrieves 16 bit word(s) read from EEPROM + **/ +static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + + /* If EEPROM is detected and can be addressed using 14 bits, + * use EERD otherwise use bit bang + */ + if (eeprom->type == ixgbe_eeprom_spi && + offset + (words - 1) <= IXGBE_EERD_MAX_ADDR) + return ixgbe_read_eerd_buffer_generic(hw, offset, words, data); + + return ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, words, + data); +} + +/** + * ixgbe_read_eeprom_82599 - Read EEPROM word using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM + **/ +static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, + u16 offset, u16 *data) +{ + 
struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + + /* + * If EEPROM is detected and can be addressed using 14 bits, + * use EERD otherwise use bit bang + */ + if (eeprom->type == ixgbe_eeprom_spi && offset <= IXGBE_EERD_MAX_ADDR) + return ixgbe_read_eerd_generic(hw, offset, data); + + return ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); +} + +/** + * ixgbe_reset_pipeline_82599 - perform pipeline reset + * + * @hw: pointer to hardware structure + * + * Reset pipeline by asserting Restart_AN together with LMS change to ensure + * full pipeline reset. Note - We must hold the SW/FW semaphore before writing + * to AUTOC, so this function assumes the semaphore is held. + **/ +static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) +{ + s32 ret_val; + u32 anlp1_reg = 0; + u32 i, autoc_reg, autoc2_reg; + + /* Enable link if disabled in NVM */ + autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) { + autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); + IXGBE_WRITE_FLUSH(hw); + } + + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + + /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, + autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); + + /* Wait for AN to leave state 0 */ + for (i = 0; i < 10; i++) { + usleep_range(4000, 8000); + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); + if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) + break; + } + + if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) { + hw_dbg(hw, "auto negotiation not completed\n"); + ret_val = IXGBE_ERR_RESET_FAILED; + goto reset_pipeline_out; + } + + ret_val = 0; + +reset_pipeline_out: + /* Write AUTOC register with original LMS field and Restart_AN */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + IXGBE_WRITE_FLUSH(hw); + + return ret_val; +} + +/** + * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C + * @hw: pointer to hardware 
structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + u32 esdp; + s32 status; + s32 timeout = 200; + + if (hw->phy.qsfp_shared_i2c_bus == true) { + /* Acquire I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + while (timeout) { + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + break; + + usleep_range(5000, 10000); + timeout--; + } + + if (!timeout) { + hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); + status = IXGBE_ERR_I2C; + goto release_i2c_access; + } + } + + status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data); + +release_i2c_access: + if (hw->phy.qsfp_shared_i2c_bus == true) { + /* Release I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp &= ~IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + } + + return status; +} + +/** + * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + u32 esdp; + s32 status; + s32 timeout = 200; + + if (hw->phy.qsfp_shared_i2c_bus == true) { + /* Acquire I2C bus ownership. 
*/ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + while (timeout) { + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + break; + + usleep_range(5000, 10000); + timeout--; + } + + if (!timeout) { + hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); + status = IXGBE_ERR_I2C; + goto release_i2c_access; + } + } + + status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data); + +release_i2c_access: + if (hw->phy.qsfp_shared_i2c_bus == true) { + /* Release I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp &= ~IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + } + + return status; +} + +static const struct ixgbe_mac_operations mac_ops_82599 = { + .init_hw = &ixgbe_init_hw_generic, + .reset_hw = &ixgbe_reset_hw_82599, + .start_hw = &ixgbe_start_hw_82599, + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, + .get_media_type = &ixgbe_get_media_type_82599, + .enable_rx_dma = &ixgbe_enable_rx_dma_82599, + .disable_rx_buff = &ixgbe_disable_rx_buff_generic, + .enable_rx_buff = &ixgbe_enable_rx_buff_generic, + .get_mac_addr = &ixgbe_get_mac_addr_generic, + .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, + .get_device_caps = &ixgbe_get_device_caps_generic, + .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, + .stop_adapter = &ixgbe_stop_adapter_generic, + .get_bus_info = &ixgbe_get_bus_info_generic, + .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, + .read_analog_reg8 = &ixgbe_read_analog_reg8_82599, + .write_analog_reg8 = &ixgbe_write_analog_reg8_82599, + .stop_link_on_d3 = &ixgbe_stop_mac_link_on_d3_82599, + .setup_link = &ixgbe_setup_mac_link_82599, + .set_rxpba = &ixgbe_set_rxpba_generic, + .check_link = &ixgbe_check_mac_link_generic, + .get_link_capabilities = &ixgbe_get_link_capabilities_82599, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, + 
.init_led_link_act = ixgbe_init_led_link_act_generic, + .blink_led_start = &ixgbe_blink_led_start_generic, + .blink_led_stop = &ixgbe_blink_led_stop_generic, + .set_rar = &ixgbe_set_rar_generic, + .clear_rar = &ixgbe_clear_rar_generic, + .set_vmdq = &ixgbe_set_vmdq_generic, + .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, + .clear_vmdq = &ixgbe_clear_vmdq_generic, + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, + .enable_mc = &ixgbe_enable_mc_generic, + .disable_mc = &ixgbe_disable_mc_generic, + .clear_vfta = &ixgbe_clear_vfta_generic, + .set_vfta = &ixgbe_set_vfta_generic, + .fc_enable = &ixgbe_fc_enable_generic, + .setup_fc = ixgbe_setup_fc_generic, + .fc_autoneg = ixgbe_fc_autoneg, + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, + .init_uta_tables = &ixgbe_init_uta_tables_generic, + .setup_sfp = &ixgbe_setup_sfp_modules_82599, + .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, + .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, + .release_swfw_sync = &ixgbe_release_swfw_sync, + .init_swfw_sync = NULL, + .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic, + .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, + .prot_autoc_read = &prot_autoc_read_82599, + .prot_autoc_write = &prot_autoc_write_82599, + .enable_rx = &ixgbe_enable_rx_generic, + .disable_rx = &ixgbe_disable_rx_generic, +}; + +static const struct ixgbe_eeprom_operations eeprom_ops_82599 = { + .init_params = &ixgbe_init_eeprom_params_generic, + .read = &ixgbe_read_eeprom_82599, + .read_buffer = &ixgbe_read_eeprom_buffer_82599, + .write = &ixgbe_write_eeprom_generic, + .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic, + .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, + .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, + .update_checksum = &ixgbe_update_eeprom_checksum_generic, +}; + +static const struct 
ixgbe_phy_operations phy_ops_82599 = { + .identify = &ixgbe_identify_phy_82599, + .identify_sfp = &ixgbe_identify_module_generic, + .init = &ixgbe_init_phy_ops_82599, + .reset = &ixgbe_reset_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, + .setup_link = &ixgbe_setup_phy_link_generic, + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, + .read_i2c_byte = &ixgbe_read_i2c_byte_generic, + .write_i2c_byte = &ixgbe_write_i2c_byte_generic, + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, + .check_overtemp = &ixgbe_tn_check_overtemp, +}; + +const struct ixgbe_info ixgbe_82599_info = { + .mac = ixgbe_mac_82599EB, + .get_invariants = &ixgbe_get_invariants_82599, + .mac_ops = &mac_ops_82599, + .eeprom_ops = &eeprom_ops_82599, + .phy_ops = &phy_ops_82599, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_8259X, +}; diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.c new file mode 100644 index 000000000000..fd055cc93cc6 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.c @@ -0,0 +1,4432 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2016 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include +#include + +#include "ixgbe.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); +static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); +static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + u16 count); +static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +static void ixgbe_release_eeprom(struct ixgbe_hw *hw); + +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); +static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); +static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, + u16 offset); +static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); + +/* Base table for registers values that change by MAC */ +const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(8259X) +}; + +/** + 
* ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow + * control + * @hw: pointer to hardware structure + * + * There are several phys that do not support autoneg flow control. This + * function check the device id to see if the associated phy supports + * autoneg flow control. + **/ +bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) +{ + bool supported = false; + ixgbe_link_speed speed; + bool link_up; + + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + /* flow control autoneg black list */ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP_N: + supported = false; + break; + default: + hw->mac.ops.check_link(hw, &speed, &link_up, false); + /* if link is down, assume supported */ + if (link_up) + supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? + true : false; + else + supported = true; + } + + break; + case ixgbe_media_type_backplane: + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) + supported = false; + else + supported = true; + break; + case ixgbe_media_type_copper: + /* only some copper devices support flow control autoneg */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_T3_LOM: + case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: + case IXGBE_DEV_ID_X550T: + case IXGBE_DEV_ID_X550T1: + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + supported = true; + break; + default: + break; + } + default: + break; + } + + if (!supported) + hw_dbg(hw, "Device %x does not support flow control autoneg\n", + hw->device_id); + + return supported; +} + +/** + * ixgbe_setup_fc_generic - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) +{ + s32 ret_val = 0; + u32 reg = 0, reg_bp = 0; + u16 reg_cu = 0; + bool locked = false; + + /* + * Validate the requested mode. 
Strict IEEE mode does not allow + * ixgbe_fc_rx_pause because it will cause us to fail at UNH. + */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + /* + * 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* + * Set up the 1G and 10G flow control advertisement registers so the + * HW will be able to do fc autoneg once the cable is plugged in. If + * we link at 10G, the 1G advertisement is harmless and vice versa. + */ + switch (hw->phy.media_type) { + case ixgbe_media_type_backplane: + /* some MAC's need RMW protection on AUTOC */ + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp); + if (ret_val) + return ret_val; + + /* fall through - only backplane uses autoc */ + case ixgbe_media_type_fiber: + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + + break; + case ixgbe_media_type_copper: + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, ®_cu); + break; + default: + break; + } + + /* + * The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + /* Flow control completely disabled by software override. 
*/ + reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE); + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + reg |= IXGBE_PCS1GANA_ASM_PAUSE; + reg &= ~IXGBE_PCS1GANA_SYM_PAUSE; + if (hw->phy.media_type == ixgbe_media_type_backplane) { + reg_bp |= IXGBE_AUTOC_ASM_PAUSE; + reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE; + } else if (hw->phy.media_type == ixgbe_media_type_copper) { + reg_cu |= IXGBE_TAF_ASM_PAUSE; + reg_cu &= ~IXGBE_TAF_SYM_PAUSE; + } + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE; + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp |= IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE; + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE; + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + return IXGBE_ERR_CONFIG; + } + + if (hw->mac.type != ixgbe_mac_X540) { + /* + * Enable auto-negotiation between the MAC & PHY; + * the MAC will advertise clause 37 flow control. 
+ */ + IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); + + /* Disable AN timeout */ + if (hw->fc.strict_ieee) + reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; + + IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); + hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); + } + + /* + * AUTOC restart handles negotiation of 1G and 10G on backplane + * and copper. There is no need to set the PCS1GCTL register. + * + */ + if (hw->phy.media_type == ixgbe_media_type_backplane) { + /* Need the SW/FW semaphore around AUTOC writes if 82599 and + * LESM is on, likewise reset_pipeline requries the lock as + * it also writes AUTOC. + */ + ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked); + if (ret_val) + return ret_val; + + } else if ((hw->phy.media_type == ixgbe_media_type_copper) && + ixgbe_device_supports_autoneg_fc(hw)) { + hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, reg_cu); + } + + hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); + return ret_val; +} + +/** + * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, clears + * all on chip counters, initializes receive address registers, multicast + * table, VLAN filter table, calls routine to set up link and flow control + * settings, and leaves transmit and receive units disabled and uninitialized + **/ +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) +{ + s32 ret_val; + u32 ctrl_ext; + u16 device_caps; +#if 1 //by hilbert + s32 rc; + u16 regVal=0; +#endif + + /* Set the media type */ + hw->phy.media_type = hw->mac.ops.get_media_type(hw); + + /* Identify the PHY */ + hw->phy.ops.identify(hw); + + /* Clear the VLAN filter table */ + hw->mac.ops.clear_vfta(hw); + + /* Clear statistics registers */ + hw->mac.ops.clear_hw_cntrs(hw); + + /* Set No Snoop Disable */ + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; + 
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Setup flow control if method for doing so */
+	if (hw->mac.ops.setup_fc) {
+		ret_val = hw->mac.ops.setup_fc(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Cache bit indicating need for crosstalk fix */
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
+		hw->mac.ops.get_device_caps(hw, &device_caps);
+		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
+			hw->need_crosstalk_fix = false;
+		else
+			hw->need_crosstalk_fix = true;
+		break;
+	default:
+		hw->need_crosstalk_fix = false;
+		break;
+	}
+
+	/* Clear adapter stopped flag */
+	hw->adapter_stopped = false;
+
+#if 1 /* To modify speed LED polarity and configure led on only for speed 1G in M88E1512
+      * for Porsche2 platform. By hilbert
+      * From 88E1512 datasheet:
+      * Page register: 0x16
+      * LED function control register: 0x10 in page 3
+      * LED polarity control register: 0x11 in page 3
+      */
+
+	if (hw->mac.type == ixgbe_mac_x550em_a &&
+	    (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
+		/* For M88E1512, to select page 3 in register 0x16 */
+		regVal = 0x03;
+		rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal);
+		if (rc) {
+			hw_err(hw, "page register write failed, rc:%x\n", rc);
+		}
+#if 0 //for debug
+		/* For M88E1512, read from register 0x16 */
+		regVal = 0x00;
+		rc = hw->phy.ops.read_reg(hw, 0x16, MDIO_MMD_PMAPMD, &regVal);
+		if (rc) {
+			hw_err(hw, "phy register read failed, rc:%x\n", rc);
+		}
+		hw_err(hw, "####read phy register 0x16 again, value:%x\n", regVal);
+#endif
+		/* For M88E1512, read from page 3, register 0x11 */
+		regVal = 0x00;
+		rc = hw->phy.ops.read_reg(hw, 0x11, MDIO_MMD_PMAPMD, &regVal);
+		if (rc) {
+			hw_err(hw, "led polarity register read failed, rc:%x\n", rc);
+		}
+
+		/* For M88E1512, write to page 3 register 0x11 with polarity bit set */
+		regVal |= 0x01;
+		rc = hw->phy.ops.write_reg(hw, 0x11, MDIO_MMD_PMAPMD, regVal);
+		if (rc) {
+			hw_err(hw,
+			       "led polarity register write failed, rc:%x\n", rc);
+		}
+
+		/* For M88E1512, read from page 3, register 16 */
+		regVal = 0x00;
+		rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, &regVal);
+		if (rc) {
+			hw_err(hw, "led function control register read failed, rc:%x\n", rc);
+		}
+
+		/* For M88E1512, write to page 3 register 16 with only 1000M led on */
+		regVal = (regVal & 0xFFF0) | 0x0007;
+		rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal);
+		if (rc) {
+			hw_err(hw, "led function control register write failed, rc:%x\n", rc);
+		}
+
+		/* For M88E1512, write page 22 back to default 0 */
+		regVal = 0x00;
+		rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal);
+		if (rc) {
+			hw_err(hw, "page register write failed, rc:%x\n", rc);
+		}
+	}
+#endif
+	return 0;
+}
+
+/**
+ * ixgbe_start_hw_gen2 - Init sequence for common device family
+ * @hw: pointer to hw structure
+ *
+ * Performs the init sequence common to the second generation
+ * of 10 GbE devices.
+ * Devices in the second generation:
+ * 82599
+ * X540
+ **/
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
+{
+	u32 i;
+
+	/* Clear the rate limiters */
+	for (i = 0; i < hw->mac.max_tx_queues; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
+		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
+	}
+	IXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+/**
+ * ixgbe_init_hw_generic - Generic hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware, filling the bus info
+ * structure and media type, clears all on chip counters, initializes receive
+ * address registers, multicast table, VLAN filter table, calls routine to set
+ * up link and flow control settings, and leaves transmit and receive units
+ * disabled and uninitialized
+ **/
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
+{
+	s32 status;
+
+	/* Reset the hardware */
+	status = hw->mac.ops.reset_hw(hw);
+
+	if (status == 0) {
+		/* Start the HW */
+		status = hw->mac.ops.start_hw(hw);
+	}
+
+	/*
Initialize the LED link active for LED blink support */ + if (hw->mac.ops.init_led_link_act) + hw->mac.ops.init_led_link_act(hw); + + return status; +} + +/** + * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. + **/ +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) +{ + u16 i = 0; + + IXGBE_READ_REG(hw, IXGBE_CRCERRS); + IXGBE_READ_REG(hw, IXGBE_ILLERRC); + IXGBE_READ_REG(hw, IXGBE_ERRBC); + IXGBE_READ_REG(hw, IXGBE_MSPDC); + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_MPC(i)); + + IXGBE_READ_REG(hw, IXGBE_MLFC); + IXGBE_READ_REG(hw, IXGBE_MRFC); + IXGBE_READ_REG(hw, IXGBE_RLEC); + IXGBE_READ_REG(hw, IXGBE_LXONTXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } else { + IXGBE_READ_REG(hw, IXGBE_LXONRXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + } + + for (i = 0; i < 8; i++) { + IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + } else { + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + } + } + if (hw->mac.type >= ixgbe_mac_82599EB) + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); + IXGBE_READ_REG(hw, IXGBE_PRC64); + IXGBE_READ_REG(hw, IXGBE_PRC127); + IXGBE_READ_REG(hw, IXGBE_PRC255); + IXGBE_READ_REG(hw, IXGBE_PRC511); + IXGBE_READ_REG(hw, IXGBE_PRC1023); + IXGBE_READ_REG(hw, IXGBE_PRC1522); + IXGBE_READ_REG(hw, IXGBE_GPRC); + IXGBE_READ_REG(hw, IXGBE_BPRC); + IXGBE_READ_REG(hw, IXGBE_MPRC); + IXGBE_READ_REG(hw, IXGBE_GPTC); + IXGBE_READ_REG(hw, IXGBE_GORCL); + IXGBE_READ_REG(hw, IXGBE_GORCH); + IXGBE_READ_REG(hw, IXGBE_GOTCL); + IXGBE_READ_REG(hw, 
IXGBE_GOTCH); + if (hw->mac.type == ixgbe_mac_82598EB) + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + IXGBE_READ_REG(hw, IXGBE_RUC); + IXGBE_READ_REG(hw, IXGBE_RFC); + IXGBE_READ_REG(hw, IXGBE_ROC); + IXGBE_READ_REG(hw, IXGBE_RJC); + IXGBE_READ_REG(hw, IXGBE_MNGPRC); + IXGBE_READ_REG(hw, IXGBE_MNGPDC); + IXGBE_READ_REG(hw, IXGBE_MNGPTC); + IXGBE_READ_REG(hw, IXGBE_TORL); + IXGBE_READ_REG(hw, IXGBE_TORH); + IXGBE_READ_REG(hw, IXGBE_TPR); + IXGBE_READ_REG(hw, IXGBE_TPT); + IXGBE_READ_REG(hw, IXGBE_PTC64); + IXGBE_READ_REG(hw, IXGBE_PTC127); + IXGBE_READ_REG(hw, IXGBE_PTC255); + IXGBE_READ_REG(hw, IXGBE_PTC511); + IXGBE_READ_REG(hw, IXGBE_PTC1023); + IXGBE_READ_REG(hw, IXGBE_PTC1522); + IXGBE_READ_REG(hw, IXGBE_MPTC); + IXGBE_READ_REG(hw, IXGBE_BPTC); + for (i = 0; i < 16; i++) { + IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); + IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + } else { + IXGBE_READ_REG(hw, IXGBE_QBRC(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC(i)); + } + } + + if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { + if (hw->phy.id == 0) + hw->phy.ops.identify(hw); + hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i); + hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i); + hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i); + hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i); + } + + return 0; +} + +/** + * ixgbe_read_pba_string_generic - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + * Reads the part number string from the EEPROM. 
+ **/ +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 data; + u16 pba_ptr; + u16 offset; + u16 length; + + if (pba_num == NULL) { + hw_dbg(hw, "PBA string buffer was null\n"); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + /* + * if data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (data != IXGBE_PBANUM_PTR_GUARD) { + hw_dbg(hw, "NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (pba_num_size < 11) { + hw_dbg(hw, "PBA string buffer too small\n"); + return IXGBE_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (data >> 12) & 0xF; + pba_num[1] = (data >> 8) & 0xF; + pba_num[2] = (data >> 4) & 0xF; + pba_num[3] = data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return 0; + } + + ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + hw_dbg(hw, "NVM PBA number section invalid length\n"); + return IXGBE_ERR_PBA_SECTION; + } + + /* check if pba_num buffer is 
big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + hw_dbg(hw, "PBA string buffer too small\n"); + return IXGBE_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(data >> 8); + pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return 0; +} + +/** + * ixgbe_get_mac_addr_generic - Generic get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from first Receive Address Register (RAR0) + * A reset of the adapter must be performed prior to calling this function + * in order for the MAC address to have been loaded from the EEPROM into RAR0 + **/ +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); + + for (i = 0; i < 4; i++) + mac_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < 2; i++) + mac_addr[i+4] = (u8)(rar_high >> (i*8)); + + return 0; +} + +enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status) +{ + switch (link_status & IXGBE_PCI_LINK_WIDTH) { + case IXGBE_PCI_LINK_WIDTH_1: + return ixgbe_bus_width_pcie_x1; + case IXGBE_PCI_LINK_WIDTH_2: + return ixgbe_bus_width_pcie_x2; + case IXGBE_PCI_LINK_WIDTH_4: + return ixgbe_bus_width_pcie_x4; + case IXGBE_PCI_LINK_WIDTH_8: + return ixgbe_bus_width_pcie_x8; + default: + return ixgbe_bus_width_unknown; + } +} + +enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status) +{ + switch (link_status & IXGBE_PCI_LINK_SPEED) { + case IXGBE_PCI_LINK_SPEED_2500: + return ixgbe_bus_speed_2500; + case IXGBE_PCI_LINK_SPEED_5000: + return ixgbe_bus_speed_5000; + case IXGBE_PCI_LINK_SPEED_8000: + return 
ixgbe_bus_speed_8000; + default: + return ixgbe_bus_speed_unknown; + } +} + +/** + * ixgbe_get_bus_info_generic - Generic set PCI bus info + * @hw: pointer to hardware structure + * + * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure + **/ +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) +{ + u16 link_status; + + hw->bus.type = ixgbe_bus_type_pci_express; + + /* Get the negotiated link width and speed from PCI config space */ + link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS); + + hw->bus.width = ixgbe_convert_bus_width(link_status); + hw->bus.speed = ixgbe_convert_bus_speed(link_status); + + hw->mac.ops.set_lan_id(hw); + + return 0; +} + +/** + * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + **/ +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) +{ + struct ixgbe_bus_info *bus = &hw->bus; + u16 ee_ctrl_4; + u32 reg; + + reg = IXGBE_READ_REG(hw, IXGBE_STATUS); + bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; + bus->lan_id = bus->func; + + /* check for a port swap */ + reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); + if (reg & IXGBE_FACTPS_LFS) + bus->func ^= 0x1; + + /* Get MAC instance from EEPROM for configuring CS4227 */ + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) { + hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4); + bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >> + IXGBE_EE_CTRL_4_INST_ID_SHIFT; + } +} + +/** + * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. 
+ **/ +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) +{ + u32 reg_val; + u16 i; + + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = true; + + /* Disable the receive unit */ + hw->mac.ops.disable_rx(hw); + + /* Clear interrupt mask to stop interrupts from being generated */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); + + /* Clear any pending interrupts, flush previous writes */ + IXGBE_READ_REG(hw, IXGBE_EICR); + + /* Disable the transmit unit. Each queue must be disabled. */ + for (i = 0; i < hw->mac.max_tx_queues; i++) + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH); + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < hw->mac.max_rx_queues; i++) { + reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + reg_val &= ~IXGBE_RXDCTL_ENABLE; + reg_val |= IXGBE_RXDCTL_SWFLSH; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); + } + + /* flush all queues disables */ + IXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 2000); + + /* + * Prevent the PCI-E bus from from hanging by disabling PCI-E master + * access and verify no pending requests + */ + return ixgbe_disable_pcie_master(hw); +} + +/** + * ixgbe_init_led_link_act_generic - Store the LED index link/activity. + * @hw: pointer to hardware structure + * + * Store the index for the link active LED. This will be used to support + * blinking the LED. + **/ +s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + u32 led_reg, led_mode; + u16 i; + + led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + /* Get LED link active from the LEDCTL register */ + for (i = 0; i < 4; i++) { + led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i); + + if ((led_mode & IXGBE_LED_MODE_MASK_BASE) == + IXGBE_LED_LINK_ACTIVE) { + mac->led_link_act = i; + return 0; + } + } + + /* If LEDCTL register does not have the LED link active set, then use + * known MAC defaults. 
+	 */
+	switch (hw->mac.type) {
+	case ixgbe_mac_x550em_a:
+		mac->led_link_act = 0;
+		break;
+	case ixgbe_mac_X550EM_x:
+		mac->led_link_act = 1;
+		break;
+	default:
+		mac->led_link_act = 2;
+	}
+
+	return 0;
+}
+
+/**
+ * ixgbe_led_on_generic - Turns on the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @index: led number to turn on
+ **/
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+{
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+	s32 rc;
+	u16 regVal;
+
+	/* following led behavior was modified by hilbert,
+	 * to force led on through C22 MDI command.
+	 */
+	if (hw->mac.type == ixgbe_mac_x550em_a) {
+		/* For M88E1512, to select page 3 in register 22 */
+		regVal = 0x03;
+		rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal);
+		if (rc) {
+			hw_err(hw, "page register write failed, rc:%x\n", rc);
+		}
+
+		/* For M88E1512, read from page 3, register 16 */
+		regVal = 0x00;
+		rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, &regVal);
+		if (rc) {
+			hw_err(hw, "led function control register read failed, rc:%x\n", rc);
+		}
+
+		/* For M88E1512, write to page 3 register 16 with force led on */
+		regVal = (regVal & 0xFF00) | 0x0099;
+		rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal);
+		if (rc) {
+			hw_err(hw, "led function control register write failed, rc:%x\n", rc);
+		}
+
+		/* For M88E1512, write page 22 back to default 0 */
+		regVal = 0x00;
+		rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal);
+		if (rc) {
+			hw_err(hw, "page register write failed, rc:%x\n", rc);
+		}
+	} else {
+		if (index > 3)
+			return IXGBE_ERR_PARAM;
+
+		/* To turn on the LED, set mode to ON. */
+		led_reg &= ~IXGBE_LED_MODE_MASK(index);
+		led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
+		IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+
+	return 0;
+}
+
+/**
+ * ixgbe_led_off_generic - Turns off the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @index: led number to turn off
+ **/
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+{
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+	s32 rc;
+	u16 regVal;
+
+	/* following led behavior was modified by hilbert,
+	 * to force led off through C22 MDI command.
+	 */
+	if (hw->mac.type == ixgbe_mac_x550em_a) {
+		/* For M88E1512, to select page 3 in register 22 */
+		regVal = 0x03;
+		rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal);
+		if (rc) {
+			hw_err(hw, "page register write failed, rc:%x\n", rc);
+		}
+
+		/* For M88E1512, read from page 3, register 16 */
+		regVal = 0x00;
+		rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, &regVal);
+		if (rc) {
+			hw_err(hw, "led function control register read failed, rc:%x\n", rc);
+		}
+
+		/* For M88E1512, write to page 3 register 16 with force led off */
+		regVal = (regVal & 0xFF00) | 0x0088;
+		rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal);
+		if (rc) {
+			hw_err(hw, "led function control register write failed, rc:%x\n", rc);
+		}
+
+		/* For M88E1512, write page 22 back to default 0 */
+		regVal = 0x00;
+		rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal);
+		if (rc) {
+			hw_err(hw, "page register write failed, rc:%x\n", rc);
+		}
+	} else {
+		if (index > 3)
+			return IXGBE_ERR_PARAM;
+
+		/* To turn off the LED, set mode to OFF. */
+		led_reg &= ~IXGBE_LED_MODE_MASK(index);
+		led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
+		IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+
+	return 0;
+}
+
+/**
+ * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/ +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->type = ixgbe_eeprom_none; + /* Set default semaphore delay to 10ms which is a well + * tested value */ + eeprom->semaphore_delay = 10; + /* Clear EEPROM page size, it will be initialized as needed */ + eeprom->word_page_size = 0; + + /* + * Check for EEPROM present first. + * If not present leave as none + */ + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + if (eec & IXGBE_EEC_PRES) { + eeprom->type = ixgbe_eeprom_spi; + + /* + * SPI EEPROM is assumed here. This code would need to + * change if a future EEPROM is not SPI. + */ + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = BIT(eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + } + + if (eec & IXGBE_EEC_ADDR_SIZE) + eeprom->address_bits = 16; + else + eeprom->address_bits = 8; + hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n", + eeprom->type, eeprom->word_size, eeprom->address_bits); + } + + return 0; +} + +/** + * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to write + * @words: number of words + * @data: 16 bit word(s) to write to EEPROM + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status; + u16 i, count; + + hw->eeprom.ops.init_params(hw); + + if (words == 0) + return IXGBE_ERR_INVALID_ARGUMENT; + + if (offset + words > hw->eeprom.word_size) + return IXGBE_ERR_EEPROM; + + /* + * The EEPROM page size cannot be queried from the chip. We do lazy + * initialization. It is worth to do that when we write large buffer. 
+ */ + if ((hw->eeprom.word_page_size == 0) && + (words > IXGBE_EEPROM_PAGE_SIZE_MAX)) + ixgbe_detect_eeprom_page_size_generic(hw, offset); + + /* + * We cannot hold synchronization semaphores for too long + * to avoid other entity starvation. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { + count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? + IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, + count, &data[i]); + + if (status != 0) + break; + } + + return status; +} + +/** + * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @words: number of word(s) + * @data: 16 bit word(s) to be written to the EEPROM + * + * If ixgbe_eeprom_update_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. 
+ **/ +static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status; + u16 word; + u16 page_size; + u16 i; + u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; + + /* Prepare the EEPROM for writing */ + status = ixgbe_acquire_eeprom(hw); + if (status) + return status; + + if (ixgbe_ready_eeprom(hw) != 0) { + ixgbe_release_eeprom(hw); + return IXGBE_ERR_EEPROM; + } + + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + ixgbe_shift_out_eeprom_bits(hw, + IXGBE_EEPROM_WREN_OPCODE_SPI, + IXGBE_EEPROM_OPCODE_BITS); + + ixgbe_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded + * in the opcode + */ + if ((hw->eeprom.address_bits == 8) && + ((offset + i) >= 128)) + write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + ixgbe_shift_out_eeprom_bits(hw, write_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), + hw->eeprom.address_bits); + + page_size = hw->eeprom.word_page_size; + + /* Send the data in burst via SPI */ + do { + word = data[i]; + word = (word >> 8) | (word << 8); + ixgbe_shift_out_eeprom_bits(hw, word, 16); + + if (page_size == 0) + break; + + /* do not wrap around page */ + if (((offset + i) & (page_size - 1)) == + (page_size - 1)) + break; + } while (++i < words); + + ixgbe_standby_eeprom(hw); + usleep_range(10000, 20000); + } + /* Done with writing - release the EEPROM */ + ixgbe_release_eeprom(hw); + + return 0; +} + +/** + * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word to be written to the EEPROM + * + * If ixgbe_eeprom_update_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. 
+ **/ +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) + return IXGBE_ERR_EEPROM; + + return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); +} + +/** + * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @words: number of word(s) + * @data: read 16 bit words(s) from EEPROM + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status; + u16 i, count; + + hw->eeprom.ops.init_params(hw); + + if (words == 0) + return IXGBE_ERR_INVALID_ARGUMENT; + + if (offset + words > hw->eeprom.word_size) + return IXGBE_ERR_EEPROM; + + /* + * We cannot hold synchronization semaphores for too long + * to avoid other entity starvation. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { + count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, + count, &data[i]); + + if (status) + return status; + } + + return 0; +} + +/** + * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @words: number of word(s) + * @data: read 16 bit word(s) from EEPROM + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status; + u16 word_in; + u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; + u16 i; + + /* Prepare the EEPROM for reading */ + status = ixgbe_acquire_eeprom(hw); + if (status) + return status; + + if (ixgbe_ready_eeprom(hw) != 0) { + ixgbe_release_eeprom(hw); + return IXGBE_ERR_EEPROM; + } + + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + /* Some SPI eeproms use the 8th address bit embedded + * in the opcode + */ + if ((hw->eeprom.address_bits == 8) && + ((offset + i) >= 128)) + read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + ixgbe_shift_out_eeprom_bits(hw, read_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), + hw->eeprom.address_bits); + + /* Read the data. 
*/ + word_in = ixgbe_shift_in_eeprom_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + + /* End this read operation */ + ixgbe_release_eeprom(hw); + + return 0; +} + +/** + * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit value from EEPROM + * + * Reads 16 bit value from EEPROM through bit-bang method + **/ +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 *data) +{ + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) + return IXGBE_ERR_EEPROM; + + return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); +} + +/** + * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of word(s) + * @data: 16 bit word(s) from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the EERD register. + **/ +s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + u32 eerd; + s32 status; + u32 i; + + hw->eeprom.ops.init_params(hw); + + if (words == 0) + return IXGBE_ERR_INVALID_ARGUMENT; + + if (offset >= hw->eeprom.word_size) + return IXGBE_ERR_EEPROM; + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | + IXGBE_EEPROM_RW_REG_START; + + IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); + + if (status == 0) { + data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> + IXGBE_EEPROM_RW_REG_DATA); + } else { + hw_dbg(hw, "Eeprom read timed out\n"); + return status; + } + } + + return 0; +} + +/** + * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be used as a scratch pad + * + * Discover EEPROM page size by writing marching data at given offset. 
+ * This function is called only when we are writing a new large buffer + * at given offset so the data would be overwritten anyway. + **/ +static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, + u16 offset) +{ + u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; + s32 status; + u16 i; + + for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) + data[i] = i; + + hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX; + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, + IXGBE_EEPROM_PAGE_SIZE_MAX, data); + hw->eeprom.word_page_size = 0; + if (status) + return status; + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); + if (status) + return status; + + /* + * When writing in burst more than the actual page size + * EEPROM address wraps around current page. + */ + hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; + + hw_dbg(hw, "Detected EEPROM page size = %d words.\n", + hw->eeprom.word_page_size); + return 0; +} + +/** + * ixgbe_read_eerd_generic - Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data); +} + +/** + * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the EEWR register. 
+ **/ +s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + u32 eewr; + s32 status; + u16 i; + + hw->eeprom.ops.init_params(hw); + + if (words == 0) + return IXGBE_ERR_INVALID_ARGUMENT; + + if (offset >= hw->eeprom.word_size) + return IXGBE_ERR_EEPROM; + + for (i = 0; i < words; i++) { + eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | + (data[i] << IXGBE_EEPROM_RW_REG_DATA) | + IXGBE_EEPROM_RW_REG_START; + + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); + if (status) { + hw_dbg(hw, "Eeprom write EEWR timed out\n"); + return status; + } + + IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); + + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); + if (status) { + hw_dbg(hw, "Eeprom write EEWR timed out\n"); + return status; + } + } + + return 0; +} + +/** + * ixgbe_write_eewr_generic - Write EEPROM word using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the EEWR register. + **/ +s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); +} + +/** + * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status + * @hw: pointer to hardware structure + * @ee_reg: EEPROM flag for polling + * + * Polls the status bit (bit 1) of the EERD or EEWR to determine when the + * read or write is done respectively. 
/**
 * ixgbe_poll_eerd_eewr_done - Poll EERD/EEWR completion status
 * @hw: pointer to hardware structure
 * @ee_reg: which register to poll — IXGBE_NVM_POLL_READ selects EERD,
 *          any other value selects EEWR
 *
 * Spins (5 us per attempt, up to IXGBE_EERD_EEWR_ATTEMPTS) until the
 * selected register reports IXGBE_EEPROM_RW_REG_DONE.
 * Returns 0 on completion, IXGBE_ERR_EEPROM on timeout.
 **/
static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
	u32 i;
	u32 reg;

	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
		if (ee_reg == IXGBE_NVM_POLL_READ)
			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
		else
			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);

		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
			return 0;
		}
		udelay(5);
	}
	return IXGBE_ERR_EEPROM;
}

/**
 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 * @hw: pointer to hardware structure
 *
 * Prepares EEPROM for access using bit-bang method. This function should
 * be called before issuing a command to the EEPROM.
 *
 * Takes the SW/FW EEPROM semaphore first, then requests the EEC grant
 * bit; on success CS/SK are lowered so the device is ready for SPI
 * clocking.  Returns 0, IXGBE_ERR_SWFW_SYNC, or IXGBE_ERR_EEPROM.
 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;
	u32 i;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
		return IXGBE_ERR_SWFW_SYNC;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	/* Request EEPROM Access */
	eec |= IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);

	for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
		if (eec & IXGBE_EEC_GNT)
			break;
		udelay(5);
	}

	/* Release if grant not acquired */
	if (!(eec & IXGBE_EEC_GNT)) {
		eec &= ~IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
		hw_dbg(hw, "Could not acquire EEPROM grant\n");

		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
		return IXGBE_ERR_EEPROM;
	}

	/* Setup EEPROM for Read/Write */
	/* Clear CS and SK */
	eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
	return 0;
}

/**
 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so EEPROM access can occur for bit-bang
 * method.  Two-stage protocol: first the SMBI bit (arbitrates between
 * device drivers), then the SWESMBI bit (arbitrates between SW and FW).
 * Returns 0 on success or IXGBE_ERR_EEPROM if either stage times out.
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			break;
		usleep_range(50, 100);
	}

	if (i == timeout) {
		hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
		/* this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usleep_range(50, 100);
		/* one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
		if (swsm & IXGBE_SWSM_SMBI) {
			hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
			return IXGBE_ERR_EEPROM;
		}
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	for (i = 0; i < timeout; i++) {
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));

		/* Set the SW EEPROM semaphore bit to request access */
		swsm |= IXGBE_SWSM_SWESMBI;
		IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);

		/* If we set the bit successfully then we got the
		 * semaphore.
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
		if (swsm & IXGBE_SWSM_SWESMBI)
			break;

		usleep_range(50, 100);
	}

	/* Release semaphores and return error if SW EEPROM semaphore
	 * was not granted because we don't have access to the EEPROM
	 */
	if (i >= timeout) {
		hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
		ixgbe_release_eeprom_semaphore(hw);
		return IXGBE_ERR_EEPROM;
	}

	return 0;
}
/**
 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * This function clears hardware semaphore bits (SWESMBI and SMBI),
 * releasing both the SW/FW and inter-driver EEPROM semaphores.
 **/
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));

	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
	IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ready_eeprom - Polls for EEPROM ready
 * @hw: pointer to hardware structure
 *
 * Returns 0 once the SPI status register's RDY bit clears, or
 * IXGBE_ERR_EEPROM if it stays busy past IXGBE_EEPROM_MAX_RETRY_SPI.
 **/
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
	u16 i;
	u8 spi_stat_reg;

	/*
	 * Read "Status Register" repeatedly until the LSB is cleared. The
	 * EEPROM will signal that the command has been completed by clearing
	 * bit 0 of the internal status register. If it's not cleared within
	 * 5 milliseconds, then error out.
	 */
	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);
		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
			break;

		udelay(5);
		ixgbe_standby_eeprom(hw);
	}

	/*
	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
	 * devices (and only 0-5mSec on 5V devices)
	 */
	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
		hw_dbg(hw, "SPI EEPROM Status error\n");
		return IXGBE_ERR_EEPROM;
	}

	return 0;
}

/**
 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
 * @hw: pointer to hardware structure
 *
 * Toggles chip-select high then low (with 1 us settle time after each
 * edge) to flush any in-flight SPI command.
 **/
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	/* Toggle CS to flush commands */
	eec |= IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
	eec &= ~IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}

/**
 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
 * @hw: pointer to hardware structure
 * @data: data to send to the EEPROM
 * @count: number of bits to shift out
 *
 * Bit-bangs @count bits of @data (MSB first) on the DI line, clocking
 * each bit with a raise/lower of SK.
 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time. Determine the starting bit based on count
	 */
	mask = BIT(count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM). A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
		IXGBE_WRITE_FLUSH(hw);

		udelay(1);

		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	}

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 * @hw: pointer to hardware structure
 * @count: number of bits to shift in
 *
 * Returns up to 16 bits read MSB-first from the DO line.
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit. During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}

/**
 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 **/
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	/*
	 * Raise the clock input to the EEPROM
	 * (setting the SK bit), then delay
	 */
	*eec = *eec | IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}

/**
 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	/*
	 * Lower the clock input to the EEPROM (clearing the SK bit), then
	 * delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}

/**
 * ixgbe_release_eeprom - Release EEPROM, release semaphores
 * @hw: pointer to hardware structure
 *
 * Deasserts the SPI lines, drops the EEC request bit, then releases the
 * SW/FW sync semaphore and waits so firmware can take its turn.
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);

	udelay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/*
	 * Delay before attempt to obtain semaphore again to allow FW
	 * access. semaphore_delay is in ms we need us for usleep_range
	 */
	usleep_range(hw->eeprom.semaphore_delay * 1000,
		     hw->eeprom.semaphore_delay * 2000);
}

/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Sums words 0x0-0x3F plus all pointed-to sections except the FW pointer,
 * then returns IXGBE_EEPROM_SUM minus that sum (>= 0), or a negative
 * IXGBE_ERR_EEPROM on read failure past the fixed region.
 **/
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word)) {
			hw_dbg(hw, "EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		if (hw->eeprom.ops.read(hw, i, &pointer)) {
			hw_dbg(hw, "EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		/* If the pointer seems invalid */
		if (pointer == 0xFFFF || pointer == 0)
			continue;

		if (hw->eeprom.ops.read(hw, pointer, &length)) {
			hw_dbg(hw, "EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		if (length == 0xFFFF || length == 0)
			continue;

		for (j = pointer + 1; j <= pointer + length; j++) {
			if (hw->eeprom.ops.read(hw, j, &word)) {
				hw_dbg(hw, "EEPROM read failed\n");
				return IXGBE_ERR_EEPROM;
			}
			checksum += word;
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return (s32)checksum;
}

/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum. If the
 * caller does not need checksum_val, the value can be NULL.
 * Returns 0, a read-error status, or IXGBE_ERR_EEPROM_CHECKSUM on
 * mismatch with the stored checksum word.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (read_checksum != checksum)
		status = IXGBE_ERR_EEPROM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum_val)
		*checksum_val = checksum;

	return status;
}

/**
 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 * @hw: pointer to hardware structure
 *
 * Recalculates the checksum and writes it to the EEPROM checksum word.
 * Returns 0 or a read/write error status.
 **/
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);

	return status;
}
/**
 * ixgbe_set_rar_generic - Set Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: VMDq "set" or "pool" index
 * @enable_addr: set flag that address is active
 *
 * Puts an ethernet address into a receive address register.
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) |
		   ((u32)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return 0;
}

/**
 * ixgbe_clear_rar_generic - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Clears an ethernet address from a receive address register.
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return 0;
}

/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (!is_valid_ether_addr(hw->mac.addr)) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
	} else {
		/* Setup the receive address. */
		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
	}

	/* clear VMDq pool/queue selection for RAR 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	hw->addr_ctrl.overflow_promisc = 0;

	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	hw_dbg(hw, " Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	/* init_uta_tables is optional on some MAC types */
	if (hw->mac.ops.init_uta_tables)
		hw->mac.ops.init_uta_tables(hw);

	return 0;
}
+ **/ +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + hw_dbg(hw, "MC filter type param set incorrectly\n"); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * ixgbe_set_mta - Set bit-vector in multicast table + * @hw: pointer to hardware structure + * @hash_value: Multicast address hash value + * + * Sets the bit-vector in the multicast table. + **/ +static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector; + u32 vector_bit; + u32 vector_reg; + + hw->addr_ctrl.mta_in_use++; + + vector = ixgbe_mta_vector(hw, mc_addr); + hw_dbg(hw, " bit-vector = 0x%03X\n", vector); + + /* + * The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. 
+ */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit); +} + +/** + * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @netdev: pointer to net device structure + * + * The given list replaces any existing list. Clears the MC addrs from receive + * address registers and the multicast table. Uses unused receive address + * registers for the first multicast addresses, and hashes the rest into the + * multicast table. + **/ +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, + struct net_device *netdev) +{ + struct netdev_hw_addr *ha; + u32 i; + + /* + * Set the new number of MC addresses that we are being requested to + * use. + */ + hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); + hw->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + hw_dbg(hw, " Clearing MTA\n"); + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* Update mta shadow */ + netdev_for_each_mc_addr(ha, netdev) { + hw_dbg(hw, " Adding the multicast addresses:\n"); + ixgbe_set_mta(hw, ha->addr); + } + + /* Enable mta */ + for (i = 0; i < hw->mac.mcft_size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, + hw->mac.mta_shadow[i]); + + if (hw->addr_ctrl.mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); + + hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); + return 0; +} + +/** + * ixgbe_enable_mc_generic - Enable multicast address in RAR + * @hw: pointer to hardware structure + * + * Enables multicast address in RAR and the use of the multicast hash table. 
/**
 * ixgbe_enable_mc_generic - Enable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Enables multicast address in RAR and the use of the multicast hash table.
 **/
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

	if (a->mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
				hw->mac.mc_filter_type);

	return 0;
}

/**
 * ixgbe_disable_mc_generic - Disable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Disables multicast address in RAR and the use of the multicast hash table.
 **/
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

	if (a->mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	return 0;
}

/**
 * ixgbe_fc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 * Returns 0, IXGBE_ERR_INVALID_LINK_SETTINGS for bad water marks or a
 * zero pause time, or IXGBE_ERR_CONFIG for an unknown fc mode.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	/* Validate the water mark configuration. */
	if (!hw->fc.pause_time)
		return IXGBE_ERR_INVALID_LINK_SETTINGS;

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				hw_dbg(hw, "Invalid water mark configuration\n");
				return IXGBE_ERR_INVALID_LINK_SETTINGS;
			}
		}
	}

	/* Negotiate the fc mode to use */
	hw->mac.ops.fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		return IXGBE_ERR_CONFIG;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Refresh timer is half the pause time */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

	return 0;
}

/**
 * ixgbe_negotiate_fc - Negotiate flow control
 * @hw: pointer to hardware structure
 * @adv_reg: flow control advertised settings
 * @lp_reg: link partner's flow control settings
 * @adv_sym: symmetric pause bit in advertisement
 * @adv_asm: asymmetric pause bit in advertisement
 * @lp_sym: symmetric pause bit in link partner advertisement
 * @lp_asm: asymmetric pause bit in link partner advertisement
 *
 * Find the intersection between advertised settings and link partner's
 * advertised settings.  Writes the resolved mode into hw->fc.current_mode.
 * Returns 0, or IXGBE_ERR_FC_NOT_NEGOTIATED if either register is zero.
 **/
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
	if ((!(adv_reg)) || (!(lp_reg)))
		return IXGBE_ERR_FC_NOT_NEGOTIATED;

	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
		/*
		 * Now we need to check if the user selected Rx ONLY
		 * of pause frames.  In this case, we had to advertise
		 * FULL flow control because we could not advertise RX
		 * ONLY. Hence, we must now check to see if we need to
		 * turn OFF the TRANSMISSION of PAUSE frames.
		 */
		if (hw->fc.requested_mode == ixgbe_fc_full) {
			hw->fc.current_mode = ixgbe_fc_full;
			hw_dbg(hw, "Flow Control = FULL.\n");
		} else {
			hw->fc.current_mode = ixgbe_fc_rx_pause;
			hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
		}
	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_tx_pause;
		hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_rx_pause;
		hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
	} else {
		hw->fc.current_mode = ixgbe_fc_none;
		hw_dbg(hw, "Flow Control = NONE.\n");
	}
	return 0;
}

/**
 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
 * @hw: pointer to hardware structure
 *
 * Enable flow control according on 1 gig fiber.
 **/
static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
	s32 ret_val;

	/*
	 * On multispeed fiber at 1g, bail out if
	 * - link is up but AN did not complete, or if
	 * - link is up and AN completed but timed out
	 */

	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
		return IXGBE_ERR_FC_NOT_NEGOTIATED;

	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);

	ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
				     pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
				     IXGBE_PCS1GANA_ASM_PAUSE,
				     IXGBE_PCS1GANA_SYM_PAUSE,
				     IXGBE_PCS1GANA_ASM_PAUSE);

	return ret_val;
}
/**
 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
 **/
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
	u32 links2, anlp1_reg, autoc_reg, links;
	s32 ret_val;

	/*
	 * On backplane, bail out if
	 * - backplane autoneg was not completed, or if
	 * - we are 82599 and link partner is not AN enabled
	 */
	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
		return IXGBE_ERR_FC_NOT_NEGOTIATED;

	if (hw->mac.type == ixgbe_mac_82599EB) {
		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
			return IXGBE_ERR_FC_NOT_NEGOTIATED;
	}
	/*
	 * Read the 10g AN autoc and LP ability registers and resolve
	 * local flow control settings accordingly
	 */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);

	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);

	return ret_val;
}

/**
 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
 * Reads local and link-partner ability via the PHY MDIO AN registers.
 **/
static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
	u16 technology_ability_reg = 0;
	u16 lp_technology_ability_reg = 0;

	hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
			     MDIO_MMD_AN,
			     &technology_ability_reg);
	hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
			     MDIO_MMD_AN,
			     &lp_technology_ability_reg);

	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
				  (u32)lp_technology_ability_reg,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
}

/**
 * ixgbe_fc_autoneg - Configure flow control
 * @hw: pointer to hardware structure
 *
 * Compares our advertised flow control capabilities to those advertised by
 * our link partner, and determines the proper flow control mode to use.
 * On any failure (autoneg disabled, link down, media-specific negotiation
 * error) it falls back to hw->fc.requested_mode.
 **/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
	ixgbe_link_speed speed;
	bool link_up;

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out.  Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 *
	 * Since we're being called from an LSC, link is already known to be up.
	 * So use link_up_wait_to_complete=false.
	 */
	if (hw->fc.disable_fc_autoneg)
		goto out;

	hw->mac.ops.check_link(hw, &speed, &link_up, false);
	if (!link_up)
		goto out;

	switch (hw->phy.media_type) {
	/* Autoneg flow control on fiber adapters */
	case ixgbe_media_type_fiber:
		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
			ret_val = ixgbe_fc_autoneg_fiber(hw);
		break;

	/* Autoneg flow control on backplane adapters */
	case ixgbe_media_type_backplane:
		ret_val = ixgbe_fc_autoneg_backplane(hw);
		break;

	/* Autoneg flow control on copper adapters */
	case ixgbe_media_type_copper:
		if (ixgbe_device_supports_autoneg_fc(hw))
			ret_val = ixgbe_fc_autoneg_copper(hw);
		break;

	default:
		break;
	}

out:
	if (ret_val == 0) {
		hw->fc.fc_was_autonegged = true;
	} else {
		hw->fc.fc_was_autonegged = false;
		hw->fc.current_mode = hw->fc.requested_mode;
	}
}
/**
 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
 * @hw: pointer to hardware structure
 *
 * System-wide timeout range is encoded in PCIe Device Control2 register.
 *
 * Add 10% to specified maximum and return the number of times to poll for
 * completion timeout, in units of 100 microsec.  Never return less than
 * 800 = 80 millisec.
 **/
static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
{
	s16 devctl2;
	u32 pollcnt;

	devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;

	switch (devctl2) {
	case IXGBE_PCIDEVCTRL2_65_130ms:
		pollcnt = 1300;		/* 130 millisec */
		break;
	case IXGBE_PCIDEVCTRL2_260_520ms:
		pollcnt = 5200;		/* 520 millisec */
		break;
	case IXGBE_PCIDEVCTRL2_1_2s:
		pollcnt = 20000;	/* 2 sec */
		break;
	case IXGBE_PCIDEVCTRL2_4_8s:
		pollcnt = 80000;	/* 8 sec */
		break;
	case IXGBE_PCIDEVCTRL2_17_34s:
		pollcnt = 34000;	/* 34 sec */
		break;
	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
	default:
		pollcnt = 800;		/* 80 millisec minimum */
		break;
	}

	/* add 10% to spec maximum */
	return (pollcnt * 11) / 10;
}

/**
 * ixgbe_disable_pcie_master - Disable PCI-express master access
 * @hw: pointer to hardware structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests.  IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 * bit hasn't caused the master requests to be disabled, else 0
 * is returned signifying master requests disabled.
 **/
static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	u32 i, poll;
	u16 value;

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Poll for bit to read as set */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS)
			break;
		usleep_range(100, 120);
	}
	if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) {
		hw_dbg(hw, "GIO disable did not set - requesting resets\n");
		goto gio_disable_fail;
	}

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
	    ixgbe_removed(hw->hw_addr))
		return 0;

	/* Poll for master request bit to clear */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		udelay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			return 0;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
	 * of this need.  The first reset prevents new master requests from
	 * being issued by our device.  We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
gio_disable_fail:
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	if (hw->mac.type >= ixgbe_mac_X550)
		return 0;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		udelay(100);
		value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
		if (ixgbe_removed(hw->hw_addr))
			return 0;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			return 0;
	}

	hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
	return IXGBE_ERR_MASTER_REQUESTS_PENDING;
}

/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash).
 * Returns 0 on success or IXGBE_ERR_SWFW_SYNC after ~1s of retries;
 * on final timeout the stale lock bits are forcibly released.
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr = 0;
	u32 swmask = mask;
	u32 fwmask = mask << 5;
	u32 timeout = 200;
	u32 i;

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return 0;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			usleep_range(5000, 10000);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	usleep_range(5000, 10000);
	return IXGBE_ERR_SWFW_SYNC;
}

/**
 * ixgbe_release_swfw_sync - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr;
	u32 swmask = mask;

	ixgbe_get_eeprom_semaphore(hw);

+ gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); + gssr &= ~swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + + ixgbe_release_eeprom_semaphore(hw); +} + +/** + * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read + * @hw: pointer to hardware structure + * @reg_val: Value we read from AUTOC + * @locked: bool to indicate whether the SW/FW lock should be taken. Never + * true in this the generic case. + * + * The default case requires no protection so just to the register read. + **/ +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) +{ + *locked = false; + *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); + return 0; +} + +/** + * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write + * @hw: pointer to hardware structure + * @reg_val: value to write to AUTOC + * @locked: bool to indicate whether the SW/FW lock was already taken by + * previous read. + **/ +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) +{ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); + return 0; +} + +/** + * ixgbe_disable_rx_buff_generic - Stops the receive data path + * @hw: pointer to hardware structure + * + * Stops the receive data path and waits for the HW to internally + * empty the Rx security block. + **/ +s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) +{ +#define IXGBE_MAX_SECRX_POLL 40 + int i; + int secrxreg; + + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + secrxreg |= IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); + for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); + if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) + break; + else + /* Use interrupt-safe sleep just in case */ + udelay(1000); + } + + /* For informational purposes only */ + if (i >= IXGBE_MAX_SECRX_POLL) + hw_dbg(hw, "Rx unit being enabled before security path fully disabled. 
Continuing with init.\n"); + + return 0; + +} + +/** + * ixgbe_enable_rx_buff - Enables the receive data path + * @hw: pointer to hardware structure + * + * Enables the receive data path + **/ +s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) +{ + u32 secrxreg; + + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit + **/ +s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) +{ + if (regval & IXGBE_RXCTRL_RXEN) + hw->mac.ops.enable_rx(hw); + else + hw->mac.ops.disable_rx(hw); + + return 0; +} + +/** + * ixgbe_blink_led_start_generic - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink + **/ +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) +{ + ixgbe_link_speed speed = 0; + bool link_up = false; + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + bool locked = false; + s32 ret_val; + + if (index > 3) + return IXGBE_ERR_PARAM; + + /* + * Link must be up to auto-blink the LEDs; + * Force it if link is down. 
+ */ + hw->mac.ops.check_link(hw, &speed, &link_up, false); + + if (!link_up) { + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); + if (ret_val) + return ret_val; + + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + autoc_reg |= IXGBE_AUTOC_FLU; + + ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); + if (ret_val) + return ret_val; + + IXGBE_WRITE_FLUSH(hw); + + usleep_range(10000, 20000); + } + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. + * @hw: pointer to hardware structure + * @index: led number to stop blinking + **/ +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 autoc_reg = 0; + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + bool locked = false; + s32 ret_val; + + if (index > 3) + return IXGBE_ERR_PARAM; + + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); + if (ret_val) + return ret_val; + + autoc_reg &= ~IXGBE_AUTOC_FLU; + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + + ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); + if (ret_val) + return ret_val; + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg &= ~IXGBE_LED_BLINK(index); + led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_offset: SAN MAC address offset + * + * This function will read the EEPROM location for the SAN MAC address + * pointer, and returns the value at that location. This is used in both + * get and set mac_addr routines. 
+ **/ +static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, + u16 *san_mac_offset) +{ + s32 ret_val; + + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. + */ + ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, + san_mac_offset); + if (ret_val) + hw_err(hw, "eeprom read at offset %d failed\n", + IXGBE_SAN_MAC_ADDR_PTR); + + return ret_val; +} + +/** + * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Reads the SAN MAC address from the EEPROM, if it's available. This is + * per-port, so set_lan_id() must be called before reading the addresses. + * set_lan_id() is called by identify_sfp(), but this cannot be relied + * upon for non-SFP connections, so we must call it here. + **/ +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + u16 san_mac_data, san_mac_offset; + u8 i; + s32 ret_val; + + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. If they're not, no point in calling set_lan_id() here. + */ + ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) + + goto san_mac_addr_clr; + + /* make sure we know which port we need to program */ + hw->mac.ops.set_lan_id(hw); + /* apply the port offset to the address offset */ + (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); + for (i = 0; i < 3; i++) { + ret_val = hw->eeprom.ops.read(hw, san_mac_offset, + &san_mac_data); + if (ret_val) { + hw_err(hw, "eeprom read at offset %d failed\n", + san_mac_offset); + goto san_mac_addr_clr; + } + san_mac_addr[i * 2] = (u8)(san_mac_data); + san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); + san_mac_offset++; + } + return 0; + +san_mac_addr_clr: + /* No addresses available in this EEPROM. 
It's not necessarily an + * error though, so just wipe the local address and return. + */ + for (i = 0; i < 6; i++) + san_mac_addr[i] = 0xFF; + return ret_val; +} + +/** + * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count + * @hw: pointer to hardware structure + * + * Read PCIe configuration space, and get the MSI-X vector count from + * the capabilities table. + **/ +u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) +{ + u16 msix_count; + u16 max_msix_count; + u16 pcie_offset; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS; + max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; + max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; + break; + default: + return 1; + } + + msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset); + if (ixgbe_removed(hw->hw_addr)) + msix_count = 0; + msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW */ + msix_count++; + + if (msix_count > max_msix_count) + msix_count = max_msix_count; + + return msix_count; +} + +/** + * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to disassociate + * @vmdq: VMDq pool index to remove from the rar + **/ +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 mpsar_lo, mpsar_hi; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); + mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + + if (ixgbe_removed(hw->hw_addr)) + return 0; + + if (!mpsar_lo && !mpsar_hi) + return 0; + + if (vmdq 
== IXGBE_CLEAR_VMDQ_ALL) { + if (mpsar_lo) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); + mpsar_lo = 0; + } + if (mpsar_hi) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); + mpsar_hi = 0; + } + } else if (vmdq < 32) { + mpsar_lo &= ~BIT(vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); + } else { + mpsar_hi &= ~BIT(vmdq - 32); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); + } + + /* was that the last pool using this rar? */ + if (mpsar_lo == 0 && mpsar_hi == 0 && + rar != 0 && rar != hw->mac.san_mac_rar_index) + hw->mac.ops.clear_rar(hw, rar); + + return 0; +} + +/** + * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq pool index + **/ +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 mpsar; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + if (vmdq < 32) { + mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); + mpsar |= BIT(vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); + } else { + mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + mpsar |= BIT(vmdq - 32); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); + } + return 0; +} + +/** + * This function should only be involved in the IOV mode. + * In IOV mode, Default pool is next pool after the number of + * VFs advertized and not 0. 
+ * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] + * + * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @vmdq: VMDq pool index + **/ +s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) +{ + u32 rar = hw->mac.san_mac_rar_index; + + if (vmdq < 32) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq)); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); + } else { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32)); + } + + return 0; +} + +/** + * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array + * @hw: pointer to hardware structure + **/ +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) +{ + int i; + + for (i = 0; i < 128; i++) + IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); + + return 0; +} + +/** + * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * + * return the VLVF index where this VLAN id should be placed + * + **/ +static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) +{ + s32 regindex, first_empty_slot; + u32 bits; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* if vlvf_bypass is set we don't want to use an empty slot, we + * will simply bypass the VLVF if there are no entries present in the + * VLVF that contain our VLAN + */ + first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0; + + /* add VLAN enable bit for comparison */ + vlan |= IXGBE_VLVF_VIEN; + + /* Search for the vlan id in the VLVF entries. Save off the first empty + * slot found along the way. + * + * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 
1 + */ + for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) { + bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); + if (bits == vlan) + return regindex; + if (!first_empty_slot && !bits) + first_empty_slot = regindex; + } + + /* If we are here then we didn't find the VLAN. Return first empty + * slot we found during our search, else error. + */ + if (!first_empty_slot) + hw_dbg(hw, "No space in VLVF.\n"); + + return first_empty_slot ? : IXGBE_ERR_NO_SPACE; +} + +/** + * ixgbe_set_vfta_generic - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vlvf_bypass: boolean flag indicating updating default pool is okay + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) +{ + u32 regidx, vfta_delta, vfta, bits; + s32 vlvf_index; + + if ((vlan > 4095) || (vind > 63)) + return IXGBE_ERR_PARAM; + + /* + * this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. + */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regidx = vlan / 32; + vfta_delta = BIT(vlan % 32); + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); + + /* vfta_delta represents the difference between the current value + * of vfta and the value we want in the register. Since the diff + * is an XOR mask we can just update vfta using an XOR. + */ + vfta_delta &= vlan_on ? 
~vfta : vfta; + vfta ^= vfta_delta; + + /* Part 2 + * If VT Mode is set + * Either vlan_on + * make sure the vlan is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) + goto vfta_update; + + vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); + if (vlvf_index < 0) { + if (vlvf_bypass) + goto vfta_update; + return vlvf_index; + } + + bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); + + /* set the pool bit */ + bits |= BIT(vind % 32); + if (vlan_on) + goto vlvf_update; + + /* clear the pool bit */ + bits ^= BIT(vind % 32); + + if (!bits && + !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { + /* Clear VFTA first, then disable VLVF. Otherwise + * we run the risk of stray packets leaking into + * the PF via the default pool + */ + if (vfta_delta) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); + + /* disable VLVF and clear remaining bit from pool */ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); + + return 0; + } + + /* If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. 
+ */ + vfta_delta = 0; + +vlvf_update: + /* record pool change and enable VLAN ID if not already enabled */ + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); + +vfta_update: + /* Update VFTA now that we are ready for traffic */ + if (vfta_delta) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); + + return 0; +} + +/** + * ixgbe_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + + for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { + IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0); + } + + return 0; +} + +/** + * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix + * @hw: pointer to hardware structure + * + * Contains the logic to identify if we need to verify link for the + * crosstalk fix + **/ +static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) +{ + /* Does FW say we need the fix */ + if (!hw->need_crosstalk_fix) + return false; + + /* Only consider SFP+ PHYs i.e. 
media type fiber */ + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + case ixgbe_media_type_fiber_qsfp: + break; + default: + return false; + } + + return true; +} + +/** + * ixgbe_check_mac_link_generic - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u32 links_reg, links_orig; + u32 i; + + /* If Crosstalk fix enabled do the sanity check of making sure + * the SFP+ cage is full. + */ + if (ixgbe_need_crosstalk_fix(hw)) { + u32 sfp_cage_full; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP2; + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP0; + break; + default: + /* sanity check - No SFP+ devices here */ + sfp_cage_full = false; + break; + } + + if (!sfp_cage_full) { + *link_up = false; + *speed = IXGBE_LINK_SPEED_UNKNOWN; + return 0; + } + } + + /* clear the old state */ + links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); + + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + + if (links_orig != links_reg) { + hw_dbg(hw, "LINKS changed from %08X to %08X\n", + links_orig, links_reg); + } + + if (link_up_wait_to_complete) { + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + if (links_reg & IXGBE_LINKS_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + } + } else { + if (links_reg & IXGBE_LINKS_UP) + *link_up = true; + else + *link_up = false; + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + if 
((hw->mac.type >= ixgbe_mac_X550) && + (links_reg & IXGBE_LINKS_SPEED_NON_STD)) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + if ((hw->mac.type >= ixgbe_mac_X550) && + (links_reg & IXGBE_LINKS_SPEED_NON_STD)) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + else + *speed = IXGBE_LINK_SPEED_100_FULL; + break; + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { + *speed = IXGBE_LINK_SPEED_10_FULL; + } + break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } + + return 0; +} + +/** + * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from + * the EEPROM + * @hw: pointer to hardware structure + * @wwnn_prefix: the alternative WWNN prefix + * @wwpn_prefix: the alternative WWPN prefix + * + * This function will read the EEPROM from the alternative SAN MAC address + * block to check the support for the alternative WWNN/WWPN prefix support. 
+ **/ +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix) +{ + u16 offset, caps; + u16 alt_san_mac_blk_offset; + + /* clear output first */ + *wwnn_prefix = 0xFFFF; + *wwpn_prefix = 0xFFFF; + + /* check if alternative SAN MAC is supported */ + offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR; + if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset)) + goto wwn_prefix_err; + + if ((alt_san_mac_blk_offset == 0) || + (alt_san_mac_blk_offset == 0xFFFF)) + return 0; + + /* check capability in alternative san mac address block */ + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; + if (hw->eeprom.ops.read(hw, offset, &caps)) + goto wwn_prefix_err; + if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) + return 0; + + /* get the corresponding prefix for WWNN/WWPN */ + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; + if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) + hw_err(hw, "eeprom read at offset %d failed\n", offset); + + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; + if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) + goto wwn_prefix_err; + + return 0; + +wwn_prefix_err: + hw_err(hw, "eeprom read at offset %d failed\n", offset); + return 0; +} + +/** + * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for MAC anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing + * + **/ +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) +{ + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8; + u32 pfvfspoof; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); + if (enable) + pfvfspoof |= BIT(vf_target_shift); + else + pfvfspoof &= ~BIT(vf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); +} + +/** + * 
ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for VLAN anti-spoofing + * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * + **/ +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) +{ + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; + u32 pfvfspoof; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); + if (enable) + pfvfspoof |= BIT(vf_target_shift); + else + pfvfspoof &= ~BIT(vf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); +} + +/** + * ixgbe_get_device_caps_generic - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word with the extra device capabilities + * + * This function will read the EEPROM location for the device capabilities, + * and return the word through device_caps. + **/ +s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) +{ + hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); + + return 0; +} + +/** + * ixgbe_set_rxpba_generic - Initialize RX packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, + int num_pb, + u32 headroom, + int strategy) +{ + u32 pbsize = hw->mac.rx_pb_size; + int i = 0; + u32 rxpktsize, txpktsize, txpbthresh; + + /* Reserve headroom */ + pbsize -= headroom; + + if (!num_pb) + num_pb = 1; + + /* Divide remaining packet buffer space amongst the number + * of packet buffers requested using supplied strategy. + */ + switch (strategy) { + case (PBA_STRATEGY_WEIGHTED): + /* pba_80_48 strategy weight first half of packet buffer with + * 5/8 of the packet buffer space. 
+ */ + rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8)); + pbsize -= rxpktsize * (num_pb / 2); + rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; + for (; i < (num_pb / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + /* fall through - configure remaining packet buffers */ + case (PBA_STRATEGY_EQUAL): + /* Divide the remaining Rx packet buffer evenly among the TCs */ + rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; + for (; i < num_pb; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + break; + default: + break; + } + + /* + * Setup Tx packet buffer and threshold equally for all TCs + * TXPBTHRESH register is set in K so divide by 1024 and subtract + * 10 since the largest packet we support is just over 9K. + */ + txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; + txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; + for (i = 0; i < num_pb; i++) { + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); + } + + /* Clear unused TCs, if any, to zero buffer size*/ + for (; i < IXGBE_MAX_PB; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); + } +} + +/** + * ixgbe_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. 
+ **/ +u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * ixgbe_hic_unlocked - Issue command to manageability block unlocked + * @hw: pointer to the HW structure + * @buffer: command to write and where the return status will be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * + * Communicates with the manageability block. On success return 0 + * else returns semaphore error when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + * + * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held + * by the caller. + **/ +s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, + u32 timeout) +{ + u32 hicr, i, fwsts; + u16 dword_len; + + if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + /* Set bit 9 of FWSTS clearing FW reset indication */ + fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); + IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); + + /* Check that the host interface is enabled. */ + hicr = IXGBE_READ_REG(hw, IXGBE_HICR); + if (!(hicr & IXGBE_HICR_EN)) { + hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if (length % sizeof(u32)) { + hw_dbg(hw, "Buffer length failure, not aligned to dword"); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + dword_len = length >> 2; + + /* The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < dword_len; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, + i, cpu_to_le32(buffer[i])); + + /* Setting this bit tells the ARC that a new command is pending. 
*/ + IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); + + for (i = 0; i < timeout; i++) { + hicr = IXGBE_READ_REG(hw, IXGBE_HICR); + if (!(hicr & IXGBE_HICR_C)) + break; + usleep_range(1000, 2000); + } + + /* Check command successful completion. */ + if ((timeout && i == timeout) || + !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + + return 0; +} + +/** + * ixgbe_host_interface_command - Issue command to manageability block + * @hw: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. + * + * Communicates with the manageability block. On success return 0 + * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. 
+ **/
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
+				 u32 length, u32 timeout,
+				 bool return_data)
+{
+	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+	union {
+		struct ixgbe_hic_hdr hdr;
+		u32 u32arr[1];
+	} *bp = buffer;
+	u16 buf_len, dword_len;
+	s32 status;
+	u32 bi;
+
+	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+		hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
+		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+	}
+	/* Take management host interface semaphore */
+	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+	if (status)
+		return status;
+
+	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
+	if (status)
+		goto rel_out;
+
+	if (!return_data)
+		goto rel_out;
+
+	/* Calculate length in DWORDs */
+	dword_len = hdr_size >> 2;
+
+	/* first pull in the header so we know the buffer length */
+	for (bi = 0; bi < dword_len; bi++) {
+		bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+		le32_to_cpus(&bp->u32arr[bi]);
+	}
+
+	/* If there is any thing in data position pull it in */
+	buf_len = bp->hdr.buf_len;
+	if (!buf_len)
+		goto rel_out;
+
+	if (length < round_up(buf_len, 4) + hdr_size) {
+		hw_dbg(hw, "Buffer not large enough for reply message.\n");
+		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+		goto rel_out;
+	}
+
+	/* Calculate length in DWORDs, add 3 for odd lengths */
+	dword_len = (buf_len + 3) >> 2;
+
+	/* Pull in the rest of the buffer (bi is where we left off).
+	 * NOTE(review): <= appears intentional — assuming the header is a
+	 * single DWORD at index 0, the payload occupies indices
+	 * 1..dword_len; confirm against struct ixgbe_hic_hdr.
+	 */
+	for (; bi <= dword_len; bi++) {
+		bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+		le32_to_cpus(&bp->u32arr[bi]);
+	}
+
+rel_out:
+	/* Always drop the management semaphore taken above */
+	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+
+	return status;
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @min: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
+ *
+ * Sends driver version number to firmware through the manageability
+ * block.  On success return 0
+ * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+				 u8 build, u8 sub, __always_unused u16 len,
+				 __always_unused const char *driver_ver)
+{
+	struct ixgbe_hic_drv_info fw_cmd;
+	int i;
+	s32 ret_val;
+
+	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
+	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+	fw_cmd.port_num = hw->bus.func;
+	fw_cmd.ver_maj = maj;
+	fw_cmd.ver_min = min;
+	fw_cmd.ver_build = build;
+	fw_cmd.ver_sub = sub;
+	/* checksum is computed over the whole command, so zero it first */
+	fw_cmd.hdr.checksum = 0;
+	fw_cmd.pad = 0;
+	fw_cmd.pad2 = 0;
+	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+
+	/* Retry the mailbox command; stop on the first delivered response */
+	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+		ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
+						       sizeof(fw_cmd),
+						       IXGBE_HI_COMMAND_TIMEOUT,
+						       true);
+		if (ret_val != 0)
+			continue;
+
+		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+		    FW_CEM_RESP_STATUS_SUCCESS)
+			ret_val = 0;
+		else
+			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+		break;
+	}
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
+ * @hw: pointer to the hardware structure
+ *
+ * The 82599 and x540 MACs can experience issues if TX work is still pending
+ * when a reset occurs. This function prevents this by flushing the PCIe
+ * buffers on the system.
+ **/
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
+{
+	u32 gcr_ext, hlreg0, i, poll;
+	u16 value;
+
+	/*
+	 * If double reset is not requested then all transactions should
+	 * already be clear and as such there is no work to do
+	 */
+	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
+		return;
+
+	/*
+	 * Set loopback enable to prevent any transmits from being sent
+	 * should the link come up. This assumes that the RXCTRL.RXEN bit
+	 * has already been cleared.
+	 */
+	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
+
+	/* wait for a last completion before clearing buffers */
+	IXGBE_WRITE_FLUSH(hw);
+	usleep_range(3000, 6000);
+
+	/* Before proceeding, make sure that the PCIe block does not have
+	 * transactions pending.
+	 */
+	poll = ixgbe_pcie_timeout_poll(hw);
+	for (i = 0; i < poll; i++) {
+		usleep_range(100, 200);
+		value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
+		/* bail out if the device was surprise-removed */
+		if (ixgbe_removed(hw->hw_addr))
+			break;
+		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+			break;
+	}
+
+	/* initiate cleaning flow for buffers in the PCIe transaction layer */
+	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
+			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
+
+	/* Flush all writes and allow 20usec for all transactions to clear */
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(20);
+
+	/* restore previous register values */
+	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+}
+
+/* EMC thermal-sensor register addresses, indexed by the ETS sensor index
+ * decoded from the NVM (see ixgbe_get_thermal_sensor_data_generic()).
+ */
+static const u8 ixgbe_emc_temp_data[4] = {
+	IXGBE_EMC_INTERNAL_DATA,
+	IXGBE_EMC_DIODE1_DATA,
+	IXGBE_EMC_DIODE2_DATA,
+	IXGBE_EMC_DIODE3_DATA
+};
+static const u8 ixgbe_emc_therm_limit[4] = {
+	IXGBE_EMC_INTERNAL_THERM_LIMIT,
+	IXGBE_EMC_DIODE1_THERM_LIMIT,
+	IXGBE_EMC_DIODE2_THERM_LIMIT,
+	IXGBE_EMC_DIODE3_THERM_LIMIT
+};
+
+/**
+ * ixgbe_get_ets_data - Extracts the ETS bit data
+ * @hw: pointer to hardware structure
+ * @ets_cfg: extracted ETS data
+ * 
@ets_offset: offset of ETS data
+ *
+ * Returns error code.
+ **/
+static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
+			      u16 *ets_offset)
+{
+	s32 status;
+
+	/* locate the ETS configuration word in the NVM */
+	status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
+	if (status)
+		return status;
+
+	/* 0x0000/0xFFFF mean the ETS section is absent or blank */
+	if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF))
+		return IXGBE_NOT_IMPLEMENTED;
+
+	status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
+	if (status)
+		return status;
+
+	/* only the EMC sensor type is supported here */
+	if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED)
+		return IXGBE_NOT_IMPLEMENTED;
+
+	return 0;
+}
+
+/**
+ * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Returns the thermal sensor data structure
+ **/
+s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
+{
+	s32 status;
+	u16 ets_offset;
+	u16 ets_cfg;
+	u16 ets_sensor;
+	u8  num_sensors;
+	u8  i;
+	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	/* Only support thermal sensors attached to physical port 0 */
+	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
+		return IXGBE_NOT_IMPLEMENTED;
+
+	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
+	if (status)
+		return status;
+
+	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+	if (num_sensors > IXGBE_MAX_SENSORS)
+		num_sensors = IXGBE_MAX_SENSORS;
+
+	for (i = 0; i < num_sensors; i++) {
+		u8  sensor_index;
+		u8  sensor_location;
+
+		/* one NVM word per sensor, immediately after the config word */
+		status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
+					     &ets_sensor);
+		if (status)
+			return status;
+
+		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+				IXGBE_ETS_DATA_INDEX_SHIFT);
+		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+				   IXGBE_ETS_DATA_LOC_SHIFT);
+
+		/* location 0 means the sensor slot is unused */
+		if (sensor_location != 0) {
+			status = hw->phy.ops.read_i2c_byte(hw,
+					ixgbe_emc_temp_data[sensor_index],
+					IXGBE_I2C_THERMAL_SENSOR_ADDR,
+					&data->sensor[i].temp);
+			if (status)
+				return status;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * 
ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Inits the thermal sensor thresholds according to the NVM map
+ * and save off the threshold and location values into mac.thermal_sensor_data
+ **/
+s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
+{
+	s32 status;
+	u16 ets_offset;
+	u16 ets_cfg;
+	u16 ets_sensor;
+	u8  low_thresh_delta;
+	u8  num_sensors;
+	u8  therm_limit;
+	u8  i;
+	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
+
+	/* Only support thermal sensors attached to physical port 0 */
+	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
+		return IXGBE_NOT_IMPLEMENTED;
+
+	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
+	if (status)
+		return status;
+
+	low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
+			     IXGBE_ETS_LTHRES_DELTA_SHIFT);
+	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+	if (num_sensors > IXGBE_MAX_SENSORS)
+		num_sensors = IXGBE_MAX_SENSORS;
+
+	for (i = 0; i < num_sensors; i++) {
+		u8  sensor_index;
+		u8  sensor_location;
+
+		/* a bad NVM word only skips this sensor, it is not fatal */
+		if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) {
+			hw_err(hw, "eeprom read at offset %d failed\n",
+			       ets_offset + 1 + i);
+			continue;
+		}
+		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+				IXGBE_ETS_DATA_INDEX_SHIFT);
+		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+				   IXGBE_ETS_DATA_LOC_SHIFT);
+		therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
+
+		/* program the high threshold into the EMC over I2C */
+		hw->phy.ops.write_i2c_byte(hw,
+			ixgbe_emc_therm_limit[sensor_index],
+			IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
+
+		/* location 0 means the sensor slot is unused */
+		if (sensor_location == 0)
+			continue;
+
+		data->sensor[i].location = sensor_location;
+		data->sensor[i].caution_thresh = therm_limit;
+		data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta;
+	}
+
+	return 0;
+}
+
+/**
+ * ixgbe_disable_rx_generic - Disable the Rx unit, saving loopback state
+ * @hw: pointer to hardware structure
+ *
+ * Clears RXCTRL.RXEN. On non-82598 parts the PFDTXGSWC VT loopback bit is
+ * cleared first and remembered in hw->mac.set_lben so that
+ * ixgbe_enable_rx_generic() can restore it.
+ **/
+void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
+{
+	u32 rxctrl;
+
+	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	if (rxctrl & IXGBE_RXCTRL_RXEN) {
+		if (hw->mac.type != ixgbe_mac_82598EB) {
+			u32 pfdtxgswc;
+
+			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+				hw->mac.set_lben = true;
+			} else {
+				hw->mac.set_lben = false;
+			}
+		}
+		rxctrl &= ~IXGBE_RXCTRL_RXEN;
+		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+	}
+}
+
+/**
+ * ixgbe_enable_rx_generic - Enable the Rx unit, restoring loopback state
+ * @hw: pointer to hardware structure
+ *
+ * Sets RXCTRL.RXEN and restores the PFDTXGSWC VT loopback bit if
+ * ixgbe_disable_rx_generic() saved it in hw->mac.set_lben.
+ **/
+void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
+{
+	u32 rxctrl;
+
+	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
+
+	if (hw->mac.type != ixgbe_mac_82598EB) {
+		if (hw->mac.set_lben) {
+			u32 pfdtxgswc;
+
+			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
+			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+			hw->mac.set_lben = false;
+		}
+	}
+}
+
+/**
+ * ixgbe_mng_present - returns true when management capability is present
+ * @hw: pointer to hardware structure
+ **/
+bool ixgbe_mng_present(struct ixgbe_hw *hw)
+{
+	u32 fwsm;
+
+	/* 82598 has no manageability firmware interface */
+	if (hw->mac.type < ixgbe_mac_82599EB)
+		return false;
+
+	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+	fwsm &= IXGBE_FWSM_MODE_MASK;
+	return fwsm == IXGBE_FWSM_FW_MODE_PT;
+}
+
+/**
+ * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the MAC and/or PHY register and restarts link.
+ */ +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; + s32 status = 0; + u32 speedcnt = 0; + u32 i = 0; + bool autoneg, link_up = false; + + /* Mask off requested but non-supported speeds */ + status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg); + if (status) + return status; + + speed &= link_speed; + + /* Try each speed one by one, highest priority first. We do this in + * software because 10Gb fiber doesn't support speed autonegotiation. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + speedcnt++; + highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + hw->mac.ops.set_rate_select_speed(hw, + IXGBE_LINK_SPEED_10GB_FULL); + break; + case ixgbe_media_type_fiber_qsfp: + /* QSFP module automatically detects MAC link speed */ + break; + default: + hw_dbg(hw, "Unexpected media type\n"); + break; + } + + /* Allow module to change analog characteristics (1G->10G) */ + msleep(40); + + status = hw->mac.ops.setup_mac_link(hw, + IXGBE_LINK_SPEED_10GB_FULL, + autoneg_wait_to_complete); + if (status) + return status; + + /* Flap the Tx laser if it has not already been done */ + if (hw->mac.ops.flap_tx_laser) + hw->mac.ops.flap_tx_laser(hw); + + /* Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted. 82599 uses the same timing for 10g SFI. 
+ */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msleep(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, + &link_up, false); + if (status) + return status; + + if (link_up) + goto out; + } + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + speedcnt++; + if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + hw->mac.ops.set_rate_select_speed(hw, + IXGBE_LINK_SPEED_1GB_FULL); + break; + case ixgbe_media_type_fiber_qsfp: + /* QSFP module automatically detects link speed */ + break; + default: + hw_dbg(hw, "Unexpected media type\n"); + break; + } + + /* Allow module to change analog characteristics (10G->1G) */ + msleep(40); + + status = hw->mac.ops.setup_mac_link(hw, + IXGBE_LINK_SPEED_1GB_FULL, + autoneg_wait_to_complete); + if (status) + return status; + + /* Flap the Tx laser if it has not already been done */ + if (hw->mac.ops.flap_tx_laser) + hw->mac.ops.flap_tx_laser(hw); + + /* Wait for the link partner to also set speed */ + msleep(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, &link_up, + false); + if (status) + return status; + + if (link_up) + goto out; + } + + /* We didn't get link. Configure back to the highest speed we tried, + * (if there was more than one). We call ourselves back with just the + * single highest speed that the user requested. 
+ */ + if (speedcnt > 1) + status = ixgbe_setup_mac_link_multispeed_fiber(hw, + highest_link_speed, + autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + return status; +} + +/** + * ixgbe_set_soft_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * Set module link speed via the soft rate select. + */ +void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed) +{ + s32 status; + u8 rs, eeprom_data; + + switch (speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + /* one bit mask same as setting on */ + rs = IXGBE_SFF_SOFT_RS_SELECT_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + rs = IXGBE_SFF_SOFT_RS_SELECT_1G; + break; + default: + hw_dbg(hw, "Invalid fixed module speed\n"); + return; + } + + /* Set RS0 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + hw_dbg(hw, "Failed to read Rx Rate Select RS0\n"); + return; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + hw_dbg(hw, "Failed to write Rx Rate Select RS0\n"); + return; + } + + /* Set RS1 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + hw_dbg(hw, "Failed to read Rx Rate Select RS1\n"); + return; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + hw_dbg(hw, "Failed 
to write Rx Rate Select RS1\n"); + return; + } +} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.h new file mode 100644 index 000000000000..e083732adf64 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.h @@ -0,0 +1,224 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2016 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_COMMON_H_ +#define _IXGBE_COMMON_H_ + +#include "ixgbe_type.h" +#include "ixgbe.h" + +u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw); +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); +enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status); +enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status); +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); + +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw); + +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 *data); +s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_calc_eeprom_checksum_generic(struct 
ixgbe_hw *hw); +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, + u16 *checksum_val); +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); + +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, + struct net_device *netdev); +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *); +bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg(struct ixgbe_hw *hw); + +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask); +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on, bool vlvf_bypass); +s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); +s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); + +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); + +s32 
ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); +s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); +s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 ver, u16 len, const char *str); +u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, + u32 timeout, bool return_data); +s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout); +s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, + u32 (*data)[FW_PHY_ACT_DATA_COUNT]); +void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); +bool ixgbe_mng_present(struct ixgbe_hw *hw); +bool ixgbe_mng_enabled(struct ixgbe_hw *hw); + +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, + u32 headroom, int strategy); + +extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT]; + +#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define IXGBE_EMC_INTERNAL_DATA 0x00 +#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 +#define IXGBE_EMC_DIODE1_DATA 0x01 +#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19 +#define IXGBE_EMC_DIODE2_DATA 0x23 +#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A +#define IXGBE_EMC_DIODE3_DATA 0x2A +#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30 + +s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); +void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); +void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed); + +#define IXGBE_FAILED_READ_REG 0xffffffffU +#define 
IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
+#define IXGBE_FAILED_READ_CFG_WORD 0xffffU
+
+u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg);
+void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value);
+
+/* true when the mapped BAR has been torn down (surprise removal) */
+static inline bool ixgbe_removed(void __iomem *addr)
+{
+	return unlikely(!addr);
+}
+
+/* MMIO write that is a no-op once the device has been removed */
+static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
+{
+	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+
+	if (ixgbe_removed(reg_addr))
+		return;
+	writel(value, reg_addr + reg);
+}
+#define IXGBE_WRITE_REG(a, reg, value) ixgbe_write_reg((a), (reg), (value))
+
+#ifndef writeq
+#define writeq writeq
+/* Fallback for arches without a native writeq: two 32-bit writes, low
+ * DWORD first. NOTE(review): not atomic with respect to the device —
+ * presumably acceptable for the registers this driver writes; confirm.
+ */
+static inline void writeq(u64 val, void __iomem *addr)
+{
+	writel((u32)val, addr);
+	writel((u32)(val >> 32), addr + 4);
+}
+#endif
+
+static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
+{
+	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+
+	if (ixgbe_removed(reg_addr))
+		return;
+	writeq(value, reg_addr + reg);
+}
+#define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value))
+
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
+#define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
+	ixgbe_write_reg((a), (reg) + ((offset) << 2), (value))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) \
+	ixgbe_read_reg((a), (reg) + ((offset) << 2))
+
+/* reading STATUS forces posted writes to complete */
+#define IXGBE_WRITE_FLUSH(a) ixgbe_read_reg((a), IXGBE_STATUS)
+
+#define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev)
+
+#define hw_dbg(hw, format, arg...) \
+	netdev_dbg(ixgbe_hw_to_netdev(hw), format, ## arg)
+#define hw_err(hw, format, arg...) \
+	netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg)
+#define e_dev_info(format, arg...) \
+	dev_info(&adapter->pdev->dev, format, ## arg)
+#define e_dev_warn(format, arg...) \
+	dev_warn(&adapter->pdev->dev, format, ## arg)
+#define e_dev_err(format, arg...) \
+	dev_err(&adapter->pdev->dev, format, ## arg)
+#define e_dev_notice(format, arg...) \
+	dev_notice(&adapter->pdev->dev, format, ## arg)
+#define e_info(msglvl, format, arg...) \
+	netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_err(msglvl, format, arg...) \
+	netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_warn(msglvl, format, arg...) \
+	netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_crit(msglvl, format, arg...) \
+	netif_crit(adapter, msglvl, adapter->netdev, format, ## arg)
+#endif /* _IXGBE_COMMON_H_ */
diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.c
new file mode 100644
index 000000000000..072ef3b5fc61
--- /dev/null
+++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.c
@@ -0,0 +1,410 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2016 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS
+  e1000-devel Mailing List
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include "ixgbe.h"
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82598.h"
+#include "ixgbe_dcb_82599.h"
+
+/**
+ * ixgbe_ieee_credits - Calculate traffic class credits for IEEE mode
+ * @bw: bandwidth percentage, indexed by traffic class
+ * @refill: output refill credits, indexed by traffic class
+ * @max: output max credits, indexed by traffic class
+ * @max_frame: maximum frame size in bytes
+ *
+ * This calculates the ieee traffic class credits from the configured
+ * bandwidth percentages. Credits are the smallest unit programmable into
+ * the underlying hardware. The IEEE 802.1Qaz specification does not use
+ * bandwidth groups so this is much simplified from the CEE case.
+ */
+static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
+			      __u16 *max, int max_frame)
+{
+	int min_percent = 100;
+	int min_credit, multiplier;
+	int i;
+
+	/* smallest credit count that still fits half of a max frame */
+	min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+			DCB_CREDIT_QUANTUM;
+
+	/* find the smallest non-zero bandwidth percentage */
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		if (bw[i] < min_percent && bw[i])
+			min_percent = bw[i];
+	}
+
+	multiplier = (min_credit / min_percent) + 1;
+
+	/* Find out the hw credits for each TC */
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL);
+
+		if (val < min_credit)
+			val = min_credit;
+		refill[i] = val;
+
+		max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit;
+	}
+	return 0;
+}
+
+/**
+ * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
+ * @hw: pointer to hardware structure
+ * @dcb_config: Struct containing DCB settings
+ * @max_frame: maximum frame size in bytes
+ * @direction: Configuring either Tx or Rx
+ *
+ * This function calculates the credits allocated to each traffic class.
+ * It should be called only after the rules are checked by
+ * ixgbe_dcb_check_config().
+ */ +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config, + int max_frame, u8 direction) +{ + struct tc_bw_alloc *p; + int min_credit; + int min_multiplier; + int min_percent = 100; + /* Initialization values default for Tx settings */ + u32 credit_refill = 0; + u32 credit_max = 0; + u16 link_percentage = 0; + u8 bw_percent = 0; + u8 i; + + if (!dcb_config) + return DCB_ERR_CONFIG; + + min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / + DCB_CREDIT_QUANTUM; + + /* Find smallest link percentage */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + link_percentage = p->bwg_percent; + + link_percentage = (link_percentage * bw_percent) / 100; + + if (link_percentage && link_percentage < min_percent) + min_percent = link_percentage; + } + + /* + * The ratio between traffic classes will control the bandwidth + * percentages seen on the wire. To calculate this ratio we use + * a multiplier. It is required that the refill credits must be + * larger than the max frame size so here we find the smallest + * multiplier that will allow all bandwidth percentages to be + * greater than the max frame size. 
+ */ + min_multiplier = (min_credit / min_percent) + 1; + + /* Find out the link percentage for each TC first */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + + link_percentage = p->bwg_percent; + /* Must be careful of integer division for very small nums */ + link_percentage = (link_percentage * bw_percent) / 100; + if (p->bwg_percent > 0 && link_percentage == 0) + link_percentage = 1; + + /* Save link_percentage for reference */ + p->link_percent = (u8)link_percentage; + + /* Calculate credit refill ratio using multiplier */ + credit_refill = min(link_percentage * min_multiplier, + MAX_CREDIT_REFILL); + + /* Refill at least minimum credit */ + if (credit_refill < min_credit) + credit_refill = min_credit; + + p->data_credits_refill = (u16)credit_refill; + + /* Calculate maximum credit for the TC */ + credit_max = (link_percentage * MAX_CREDIT) / 100; + + /* + * Adjustment based on rule checking, if the percentage + * of a TC is too small, the maximum credit may not be + * enough to send out a jumbo frame in data plane arbitration. + */ + if (credit_max < min_credit) + credit_max = min_credit; + + if (direction == DCB_TX_CONFIG) { + /* + * Adjustment based on rule checking, if the + * percentage of a TC is too small, the maximum + * credit may not be enough to send out a TSO + * packet in descriptor plane arbitration. 
+ */ + if ((hw->mac.type == ixgbe_mac_82598EB) && + credit_max && + (credit_max < MINIMUM_CREDIT_FOR_TSO)) + credit_max = MINIMUM_CREDIT_FOR_TSO; + + dcb_config->tc_config[i].desc_credits_max = + (u16)credit_max; + } + + p->data_credits_max = (u16)credit_max; + } + + return 0; +} + +void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) +{ + struct tc_configuration *tc_config = &cfg->tc_config[0]; + int tc; + + for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) { + if (tc_config[tc].dcb_pfc != pfc_disabled) + *pfc_en |= BIT(tc); + } +} + +void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction, + u16 *refill) +{ + struct tc_configuration *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + refill[tc] = tc_config[tc].path[direction].data_credits_refill; +} + +void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max) +{ + struct tc_configuration *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + max[tc] = tc_config[tc].desc_credits_max; +} + +void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction, + u8 *bwgid) +{ + struct tc_configuration *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + bwgid[tc] = tc_config[tc].path[direction].bwg_id; +} + +void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, + u8 *ptype) +{ + struct tc_configuration *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + ptype[tc] = tc_config[tc].path[direction].prio_type; +} + +u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) +{ + struct tc_configuration *tc_config = &cfg->tc_config[0]; + u8 prio_mask = BIT(up); + u8 tc = cfg->num_tcs.pg_tcs; + + /* If tc is 0 then DCB is likely not enabled or supported */ + if (!tc) + return 0; + + /* + * Test from maximum TC to 1 and report the first match we find. 
If + * we find no match we can assume that the TC is 0 since the TC must + * be set for all user priorities + */ + for (tc--; tc; tc--) { + if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) + break; + } + + return tc; +} + +void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map) +{ + u8 up; + + for (up = 0; up < MAX_USER_PRIORITY; up++) + map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up); +} + +/** + * ixgbe_dcb_hw_config - Config and enable DCB + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure dcb settings and enable dcb mode. + */ +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + u8 pfc_en; + u8 ptype[MAX_TRAFFIC_CLASS]; + u8 bwgid[MAX_TRAFFIC_CLASS]; + u8 prio_tc[MAX_TRAFFIC_CLASS]; + u16 refill[MAX_TRAFFIC_CLASS]; + u16 max[MAX_TRAFFIC_CLASS]; + + /* Unpack CEE standard containers */ + ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en); + ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max(dcb_config, max); + ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype); + ixgbe_dcb_unpack_map(dcb_config, DCB_TX_CONFIG, prio_tc); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + return ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max, + bwgid, ptype); + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, + bwgid, ptype, prio_tc); + default: + break; + } + return 0; +} + +/* Helper routines to abstract HW specifics from DCB netlink ops */ +s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) +{ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + return ixgbe_dcb_config_pfc_82598(hw, pfc_en); + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: 
+ case ixgbe_mac_x550em_a: + return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); + default: + break; + } + return -EINVAL; +} + +s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame) +{ + __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; + __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; + int i; + + /* naively give each TC a bwg to map onto CEE hardware */ + __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; + + /* Map TSA onto CEE prio type */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + prio_type[i] = 2; + break; + case IEEE_8021QAZ_TSA_ETS: + prio_type[i] = 0; + break; + default: + /* Hardware only supports priority strict or + * ETS transmission selection algorithms if + * we receive some other value from dcbnl + * throw an error + */ + return -EINVAL; + } + } + + ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame); + return ixgbe_dcb_hw_ets_config(hw, refill, max, + bwg_id, prio_type, ets->prio_tc); +} + +s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, + u16 *refill, u16 *max, u8 *bwg_id, + u8 *prio_type, u8 *prio_tc) +{ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, + prio_type); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, + bwg_id, prio_type); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, + bwg_id, prio_type, prio_tc); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, + prio_type, prio_tc); + break; + default: + break; + } + return 0; +} + +static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map) +{ + u32 reg, i; + + reg = IXGBE_READ_REG(hw, 
IXGBE_RTRUP2TC); + for (i = 0; i < MAX_USER_PRIORITY; i++) + map[i] = IXGBE_RTRUP2TC_UP_MASK & + (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); +} + +void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map) +{ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + ixgbe_dcb_read_rtrup2tc_82599(hw, map); + break; + default: + break; + } +} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.h new file mode 100644 index 000000000000..fc0a2dd52499 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.h @@ -0,0 +1,171 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _DCB_CONFIG_H_ +#define _DCB_CONFIG_H_ + +#include <linux/dcbnl.h> +#include "ixgbe_type.h" + +/* DCB data structures */ + +#define IXGBE_MAX_PACKET_BUFFERS 8 +#define MAX_USER_PRIORITY 8 +#define MAX_BW_GROUP 8 +#define BW_PERCENT 100 + +#define DCB_TX_CONFIG 0 +#define DCB_RX_CONFIG 1 + +/* DCB error Codes */ +#define DCB_SUCCESS 0 +#define DCB_ERR_CONFIG -1 +#define DCB_ERR_PARAM -2 + +/* Transmit and receive Errors */ +/* Error in bandwidth group allocation */ +#define DCB_ERR_BW_GROUP -3 +/* Error in traffic class bandwidth allocation */ +#define DCB_ERR_TC_BW -4 +/* Traffic class has both link strict and group strict enabled */ +#define DCB_ERR_LS_GS -5 +/* Link strict traffic class has non zero bandwidth */ +#define DCB_ERR_LS_BW_NONZERO -6 +/* Link strict bandwidth group has non zero bandwidth */ +#define DCB_ERR_LS_BWG_NONZERO -7 +/* Traffic class has zero bandwidth */ +#define DCB_ERR_TC_BW_ZERO -8 + +#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF + +struct dcb_pfc_tc_debug { + u8 tc; + u8 pause_status; + u64 pause_quanta; +}; + +enum strict_prio_type { + prio_none = 0, + prio_group, + prio_link +}; + +/* DCB capability definitions */ +#define IXGBE_DCB_PG_SUPPORT 0x00000001 +#define IXGBE_DCB_PFC_SUPPORT 0x00000002 +#define IXGBE_DCB_BCN_SUPPORT 0x00000004 +#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008 +#define IXGBE_DCB_GSP_SUPPORT 0x00000010 + +#define IXGBE_DCB_8_TC_SUPPORT 0x80 + +struct dcb_support { + /* DCB capabilities */ + u32 capabilities; + + /* Each bit represents a number of TCs configurable in the hw. + * If 8 traffic classes can be configured, the value is 0x80.
+ */ + u8 traffic_classes; + u8 pfc_traffic_classes; +}; + +/* Traffic class bandwidth allocation per direction */ +struct tc_bw_alloc { + u8 bwg_id; /* Bandwidth Group (BWG) ID */ + u8 bwg_percent; /* % of BWG's bandwidth */ + u8 link_percent; /* % of link bandwidth */ + u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ + u16 data_credits_refill; /* Credit refill amount in 64B granularity */ + u16 data_credits_max; /* Max credits for a configured packet buffer + * in 64B granularity.*/ + enum strict_prio_type prio_type; /* Link or Group Strict Priority */ +}; + +enum dcb_pfc_type { + pfc_disabled = 0, + pfc_enabled_full, + pfc_enabled_tx, + pfc_enabled_rx +}; + +/* Traffic class configuration */ +struct tc_configuration { + struct tc_bw_alloc path[2]; /* One each for Tx/Rx */ + enum dcb_pfc_type dcb_pfc; /* Class based flow control setting */ + + u16 desc_credits_max; /* For Tx Descriptor arbitration */ + u8 tc; /* Traffic class (TC) */ +}; + +struct dcb_num_tcs { + u8 pg_tcs; + u8 pfc_tcs; +}; + +struct ixgbe_dcb_config { + struct dcb_support support; + struct dcb_num_tcs num_tcs; + struct tc_configuration tc_config[MAX_TRAFFIC_CLASS]; + u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */ + bool pfc_mode_enable; + + u32 dcb_cfg_version; /* Not used...OS-specific? 
*/ + u32 link_speed; /* For bandwidth allocation validation purpose */ +}; + +/* DCB driver APIs */ +void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en); +void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *); +void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *); +void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *); +void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *); +void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *); +u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8); + +/* DCB credits calculation */ +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, + struct ixgbe_dcb_config *, int, u8); + +/* DCB hw initialization */ +s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max); +s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, + u8 *bwg_id, u8 *prio_type, u8 *tc_prio); +s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio); +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); + +void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map); + +/* DCB definitions for credit calculation */ +#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */ +#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */ +#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */ +#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */ +#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1024 / 64B */ + +#endif /* _DCB_CONFIG_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.c new file mode 100644 index 000000000000..b79e93a5b699 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.c @@
-0,0 +1,288 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82598.h" + +/** + * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Rx Data Arbiter and credits for each traffic class. 
+ */ +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *prio_type) +{ + u32 reg = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u8 i = 0; + + reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA; + IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + /* Enable Arbiter */ + reg &= ~IXGBE_RMCS_ARBDIS; + /* Enable Receive Recycle within the BWG */ + reg |= IXGBE_RMCS_RRM; + /* Enable Deficit Fixed Priority arbitration*/ + reg |= IXGBE_RMCS_DFP; + + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + credit_refill = refill[i]; + credit_max = max[i]; + + reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); + + if (prio_type[i] == prio_link) + reg |= IXGBE_RT2CR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); + } + + reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + reg |= IXGBE_RDRXCTL_RDMTS_1_2; + reg |= IXGBE_RDRXCTL_MPBEN; + reg |= IXGBE_RDRXCTL_MCEN; + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + /* Make sure there is enough descriptors before arbitration */ + reg &= ~IXGBE_RXCTRL_DMBYPS; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg); + + return 0; +} + +/** + * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. 
+ */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type) +{ + u32 reg, max_credits; + u8 i; + + reg = IXGBE_READ_REG(hw, IXGBE_DPMCS); + + /* Enable arbiter */ + reg &= ~IXGBE_DPMCS_ARBDIS; + reg |= IXGBE_DPMCS_TSOEF; + + /* Configure Max TSO packet size 34KB including payload and headers */ + reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); + + IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + max_credits = max[i]; + reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; + reg |= refill[i]; + reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT; + + if (prio_type[i] == prio_group) + reg |= IXGBE_TDTQ2TCCR_GSP; + + if (prio_type[i] == prio_link) + reg |= IXGBE_TDTQ2TCCR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); + } + + return 0; +} + +/** + * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Data Arbiter and credits for each traffic class. 
+ */ +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type) +{ + u32 reg; + u8 i; + + reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS); + /* Enable Data Plane Arbiter */ + reg &= ~IXGBE_PDPMCS_ARBDIS; + /* Enable DFP and Transmit Recycle Mode */ + reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM); + + IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + reg = refill[i]; + reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT; + + if (prio_type[i] == prio_group) + reg |= IXGBE_TDPT2TCCR_GSP; + + if (prio_type[i] == prio_link) + reg |= IXGBE_TDPT2TCCR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); + } + + /* Enable Tx packet buffer division */ + reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL); + reg |= IXGBE_DTXCTL_ENDBUBD; + IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg); + + return 0; +} + +/** + * ixgbe_dcb_config_pfc_82598 - Config priority flow control + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Priority Flow Control for each traffic class. 
+ */ +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) +{ + u32 fcrtl, reg; + u8 i; + + /* Enable Transmit Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + reg &= ~IXGBE_RMCS_TFCE_802_3X; + reg |= IXGBE_RMCS_TFCE_PRIORITY; + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); + + /* Enable Receive Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE); + + if (pfc_en) + reg |= IXGBE_FCTRL_RPFCE; + + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); + + /* Configure PFC Tx thresholds per TC */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + if (!(pfc_en & BIT(i))) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); + continue; + } + + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; + reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); + } + + /* Configure pause time */ + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + + + return 0; +} + +/** + * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. 
+ */ +static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) +{ + u32 reg = 0; + u8 i = 0; + u8 j = 0; + + /* Receive Queues stats setting - 8 queues per statistics reg */ + for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) { + reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i)); + reg |= ((0x1010101) * j); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1)); + reg |= ((0x1010101) * j); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg); + } + /* Transmit Queues stats setting - 4 queues per statistics reg */ + for (i = 0; i < 8; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i)); + reg |= ((0x1010101) * i); + IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg); + } + + return 0; +} + +/** + * ixgbe_dcb_hw_config_82598 - Config and enable DCB + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure dcb settings and enable dcb mode. + */ +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type) +{ + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_pfc_82598(hw, pfc_en); + ixgbe_dcb_config_tc_stats_82598(hw); + + return 0; +} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.h new file mode 100644 index 000000000000..3164f5453b8f --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.h @@ -0,0 +1,97 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _DCB_82598_CONFIG_H_ +#define _DCB_82598_CONFIG_H_ + +/* DCB register definitions */ + +#define IXGBE_DPMCS_MTSOS_SHIFT 16 +#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */ +#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */ +#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */ + +#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */ + +#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */ + +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet buffers enable */ +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores (RSS) enable */ + +#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12 +#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9 +#define IXGBE_TDTQ2TCCR_GSP 0x40000000 +#define IXGBE_TDTQ2TCCR_LSP 0x80000000 + +#define IXGBE_TDPT2TCCR_MCL_SHIFT 12 
+#define IXGBE_TDPT2TCCR_BWG_SHIFT 9 +#define IXGBE_TDPT2TCCR_GSP 0x40000000 +#define IXGBE_TDPT2TCCR_LSP 0x80000000 + +#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */ +#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */ +#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */ + +#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */ + +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ + +#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 + +/* DCB hardware-specific driver APIs */ + +/* DCB PFC functions */ +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en); + +/* DCB hw initialization */ +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *prio_type); + +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type); + +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type); + +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type); + +#endif /* _DCB_82598_CONFIG_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.c new file mode 100644 index 000000000000..1011d644978f --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.c @@ -0,0 +1,369 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + 
Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82599.h" + +/** + * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * + * Configure Rx Packet Arbiter and credits for each traffic class. 
+ */ +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc) +{ + u32 reg = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u8 i = 0; + + /* + * Disable the arbiter before changing parameters + * (always enable recycle mode; WSP) + */ + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + + /* Map all traffic classes to their UP */ + reg = 0; + for (i = 0; i < MAX_USER_PRIORITY; i++) + reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); + IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + credit_refill = refill[i]; + credit_max = max[i]; + reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); + + reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT; + + if (prio_type[i] == prio_link) + reg |= IXGBE_RTRPT4C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); + } + + /* + * Configure Rx packet plane (recycle mode; WSP) and + * enable arbiter + */ + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + + return 0; +} + +/** + * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. 
+ */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type) +{ + u32 reg, max_credits; + u8 i; + + /* Clear the per-Tx queue credits; we use per-TC instead */ + for (i = 0; i < 128; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); + IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0); + } + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + max_credits = max[i]; + reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; + reg |= refill[i]; + reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT; + + if (prio_type[i] == prio_group) + reg |= IXGBE_RTTDT2C_GSP; + + if (prio_type[i] == prio_link) + reg |= IXGBE_RTTDT2C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); + } + + /* + * Configure Tx descriptor plane (recycle mode; WSP) and + * enable arbiter + */ + reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + return 0; +} + +/** + * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * + * Configure Tx Packet Arbiter and credits for each traffic class. 
+ */ +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc) +{ + u32 reg; + u8 i; + + /* + * Disable the arbiter before changing parameters + * (always enable recycle mode; SP; arb delay) + */ + reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | + (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) | + IXGBE_RTTPCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); + + /* Map all traffic classes to their UP */ + reg = 0; + for (i = 0; i < MAX_USER_PRIORITY; i++) + reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); + IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + reg = refill[i]; + reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT; + + if (prio_type[i] == prio_group) + reg |= IXGBE_RTTPT2C_GSP; + + if (prio_type[i] == prio_link) + reg |= IXGBE_RTTPT2C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); + } + + /* + * Configure Tx packet plane (recycle mode; SP; arb delay) and + * enable arbiter + */ + reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | + (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); + + return 0; +} + +/** + * ixgbe_dcb_config_pfc_82599 - Configure priority flow control + * @hw: pointer to hardware structure + * @pfc_en: enabled pfc bitmask + * @prio_tc: priority to tc assignments indexed by priority + * + * Configure Priority Flow Control (PFC) for each traffic class. + */ +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) +{ + u32 i, j, fcrtl, reg; + u8 max_tc = 0; + + /* Enable Transmit Priority Flow Control */ + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY); + + /* Enable Receive Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + reg |= IXGBE_MFLCN_DPF; + + /* + * X540 & X550 supports per TC Rx priority flow control. 
+ * So clear all TCs and only enable those that should be + * enabled. + */ + reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); + + if (hw->mac.type >= ixgbe_mac_X540) + reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; + + if (pfc_en) + reg |= IXGBE_MFLCN_RPFCE; + + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); + + for (i = 0; i < MAX_USER_PRIORITY; i++) { + if (prio_tc[i] > max_tc) + max_tc = prio_tc[i]; + } + + + /* Configure PFC Tx thresholds per TC */ + for (i = 0; i <= max_tc; i++) { + int enabled = 0; + + for (j = 0; j < MAX_USER_PRIORITY; j++) { + if ((prio_tc[j] == i) && (pfc_en & BIT(j))) { + enabled = 1; + break; + } + } + + if (enabled) { + reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); + } else { + /* In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. + */ + reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + } + + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); + } + + for (; i < MAX_TRAFFIC_CLASS; i++) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0); + } + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + + return 0; +} + +/** + * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. 
+ */ +static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) +{ + u32 reg = 0; + u8 i = 0; + + /* + * Receive Queues stats setting + * 32 RQSMR registers, each configuring 4 queues. + * Set all 16 queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) { + reg = 0x01010101 * (i / 4); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + } + /* + * Transmit Queues stats setting + * 32 TQSM registers, each controlling 4 queues. + * Set all queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + * Tx queues are allocated non-uniformly to TCs: + * 32, 32, 16, 16, 8, 8, 8, 8. + */ + for (i = 0; i < 32; i++) { + if (i < 8) + reg = 0x00000000; + else if (i < 16) + reg = 0x01010101; + else if (i < 20) + reg = 0x02020202; + else if (i < 24) + reg = 0x03030303; + else if (i < 26) + reg = 0x04040404; + else if (i < 28) + reg = 0x05050505; + else if (i < 30) + reg = 0x06060606; + else + reg = 0x07070707; + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); + } + + return 0; +} + +/** + * ixgbe_dcb_hw_config_82599 - Configure and enable DCB + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * @pfc_en: enabled pfc bitmask + * + * Configure dcb settings and enable dcb mode. 
+ */ +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) +{ + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, + prio_type, prio_tc); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, + bwg_id, prio_type, prio_tc); + ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); + ixgbe_dcb_config_tc_stats_82599(hw); + + return 0; +} + diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.h new file mode 100644 index 000000000000..90c370230e20 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.h @@ -0,0 +1,125 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _DCB_82599_CONFIG_H_ +#define _DCB_82599_CONFIG_H_ + +/* DCB register definitions */ +#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin, + * 1 WSP - Weighted Strict Priority + */ +#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin, + * 1 WRR - Weighted Round Robin + */ +#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */ +#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */ +#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must + * clear! + */ +#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */ + +/* Receive UP2TC mapping */ +#define IXGBE_RTRUP2TC_UP_SHIFT 3 +#define IXGBE_RTRUP2TC_UP_MASK 7 +/* Transmit UP2TC mapping */ +#define IXGBE_RTTUP2TC_UP_SHIFT 3 + +#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */ +#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */ +#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */ + +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet + * buffers enable + */ +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores + * (RSS) enable + */ + +/* RTRPCS Bit Masks */ +#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define IXGBE_RTRPCS_RAC 0x00000004 +#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + +/* RTTDT2C Bit Masks */ +#define IXGBE_RTTDT2C_MCL_SHIFT 12 +#define IXGBE_RTTDT2C_BWG_SHIFT 9 +#define IXGBE_RTTDT2C_GSP 0x40000000 +#define IXGBE_RTTDT2C_LSP 0x80000000 + +#define IXGBE_RTTPT2C_MCL_SHIFT 12 +#define IXGBE_RTTPT2C_BWG_SHIFT 9 +#define IXGBE_RTTPT2C_GSP 0x40000000 +#define IXGBE_RTTPT2C_LSP 0x80000000 + +/* RTTPCS Bit 
Masks */ +#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin, + * 1 SP - Strict Priority + */ +#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ +#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */ +#define IXGBE_RTTPCS_ARBD_SHIFT 22 +#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */ + +/* SECTXMINIFG DCB */ +#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */ + + +/* DCB hardware-specific driver APIs */ + +/* DCB PFC functions */ +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc); + +/* DCB hw initialization */ +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc); + +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type); + +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc); + +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type, + u8 *prio_tc); + +#endif /* _DCB_82599_CONFIG_H */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_nl.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_nl.c new file mode 100644 index 000000000000..b8fc3cfec831 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_nl.c @@ -0,0 +1,809 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2014 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include +#include "ixgbe_dcb_82598.h" +#include "ixgbe_dcb_82599.h" +#include "ixgbe_sriov.h" + +/* Callbacks for DCB netlink in the kernel */ +#define BIT_DCB_MODE 0x01 +#define BIT_PFC 0x02 +#define BIT_PG_RX 0x04 +#define BIT_PG_TX 0x08 +#define BIT_APP_UPCHG 0x10 +#define BIT_LINKSPEED 0x80 + +/* Responses for the DCB_C_SET_ALL command */ +#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ +#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ +#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ + +static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) +{ + struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg; + struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg; + struct tc_configuration *src = NULL; + struct tc_configuration *dst = NULL; + int i, j; + int tx = DCB_TX_CONFIG; + int rx = DCB_RX_CONFIG; + int changes = 0; +#ifdef IXGBE_FCOE + struct dcb_app app = { + .selector = DCB_APP_IDTYPE_ETHTYPE, + .protocol = ETH_P_FCOE, + }; + u8 up = dcb_getapp(adapter->netdev, &app); + + if (up && !(up & BIT(adapter->fcoe.up))) + changes |= BIT_APP_UPCHG; +#endif + + for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) 
{ + src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0]; + dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0]; + + if (dst->path[tx].prio_type != src->path[tx].prio_type) { + dst->path[tx].prio_type = src->path[tx].prio_type; + changes |= BIT_PG_TX; + } + + if (dst->path[tx].bwg_id != src->path[tx].bwg_id) { + dst->path[tx].bwg_id = src->path[tx].bwg_id; + changes |= BIT_PG_TX; + } + + if (dst->path[tx].bwg_percent != src->path[tx].bwg_percent) { + dst->path[tx].bwg_percent = src->path[tx].bwg_percent; + changes |= BIT_PG_TX; + } + + if (dst->path[tx].up_to_tc_bitmap != + src->path[tx].up_to_tc_bitmap) { + dst->path[tx].up_to_tc_bitmap = + src->path[tx].up_to_tc_bitmap; + changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG); + } + + if (dst->path[rx].prio_type != src->path[rx].prio_type) { + dst->path[rx].prio_type = src->path[rx].prio_type; + changes |= BIT_PG_RX; + } + + if (dst->path[rx].bwg_id != src->path[rx].bwg_id) { + dst->path[rx].bwg_id = src->path[rx].bwg_id; + changes |= BIT_PG_RX; + } + + if (dst->path[rx].bwg_percent != src->path[rx].bwg_percent) { + dst->path[rx].bwg_percent = src->path[rx].bwg_percent; + changes |= BIT_PG_RX; + } + + if (dst->path[rx].up_to_tc_bitmap != + src->path[rx].up_to_tc_bitmap) { + dst->path[rx].up_to_tc_bitmap = + src->path[rx].up_to_tc_bitmap; + changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG); + } + } + + for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { + j = i - DCB_PG_ATTR_BW_ID_0; + if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) { + dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j]; + changes |= BIT_PG_TX; + } + if (dcfg->bw_percentage[rx][j] != scfg->bw_percentage[rx][j]) { + dcfg->bw_percentage[rx][j] = scfg->bw_percentage[rx][j]; + changes |= BIT_PG_RX; + } + } + + for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { + j = i - DCB_PFC_UP_ATTR_0; + if (dcfg->tc_config[j].dcb_pfc != scfg->tc_config[j].dcb_pfc) { + dcfg->tc_config[j].dcb_pfc = scfg->tc_config[j].dcb_pfc; + changes |= BIT_PFC; 
+ } + } + + if (dcfg->pfc_mode_enable != scfg->pfc_mode_enable) { + dcfg->pfc_mode_enable = scfg->pfc_mode_enable; + changes |= BIT_PFC; + } + + return changes; +} + +static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); +} + +static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* Fail command if not in CEE mode */ + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return 1; + + /* verify there is something to do, if not then exit */ + if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) + return 0; + + return !!ixgbe_setup_tc(netdev, + state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0); +} + +static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, + u8 *perm_addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i, j; + + memset(perm_addr, 0xff, MAX_ADDR_LEN); + + for (i = 0; i < netdev->addr_len; i++) + perm_addr[i] = adapter->hw.mac.perm_addr[i]; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + for (j = 0; j < netdev->addr_len; j++, i++) + perm_addr[i] = adapter->hw.mac.san_addr[j]; + break; + default: + break; + } +} + +static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, + u8 prio, u8 bwg_id, u8 bw_pct, + u8 up_map) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (prio != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent = + bw_pct; + if (up_map != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = + up_map; +} + +static void 
ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, + u8 bw_pct) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; +} + +static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, + u8 prio, u8 bwg_id, u8 bw_pct, + u8 up_map) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (prio != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent = + bw_pct; + if (up_map != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = + up_map; +} + +static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, + u8 bw_pct) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; +} + +static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, + u8 *prio, u8 *bwg_id, u8 *bw_pct, + u8 *up_map) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type; + *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; + *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; + *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; +} + +static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, + u8 *bw_pct) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id]; +} + +static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, + u8 *prio, u8 *bwg_id, u8 *bw_pct, + u8 *up_map) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type; + *bwg_id = 
adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; + *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; + *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; +} + +static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, + u8 *bw_pct) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; +} + +static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, + u8 setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; + if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != + adapter->dcb_cfg.tc_config[priority].dcb_pfc) + adapter->temp_dcb_cfg.pfc_mode_enable = true; +} + +static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, + u8 *setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; +} + +static void ixgbe_dcbnl_devreset(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (netif_running(dev)) + dev->netdev_ops->ndo_stop(dev); + + ixgbe_clear_interrupt_scheme(adapter); + ixgbe_init_interrupt_scheme(adapter); + + if (netif_running(dev)) + dev->netdev_ops->ndo_open(dev); + + clear_bit(__IXGBE_RESETTING, &adapter->state); +} + +static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; + struct ixgbe_hw *hw = &adapter->hw; + int ret = DCB_NO_HW_CHG; + int i; + + /* Fail command if not in CEE mode */ + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return DCB_NO_HW_CHG; + + adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter, + MAX_TRAFFIC_CLASS); + if (!adapter->dcb_set_bitmap) + return DCB_NO_HW_CHG; + + if (adapter->dcb_set_bitmap & 
(BIT_PG_TX|BIT_PG_RX)) { + u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; + u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; + /* Priority to TC mapping in CEE case default to 1:1 */ + u8 prio_tc[MAX_USER_PRIORITY]; + int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + +#ifdef IXGBE_FCOE + if (adapter->netdev->features & NETIF_F_FCOE_MTU) + max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif + + ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, + DCB_TX_CONFIG); + ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, + DCB_RX_CONFIG); + + ixgbe_dcb_unpack_refill(dcb_cfg, DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max(dcb_cfg, max); + ixgbe_dcb_unpack_bwgid(dcb_cfg, DCB_TX_CONFIG, bwg_id); + ixgbe_dcb_unpack_prio(dcb_cfg, DCB_TX_CONFIG, prio_type); + ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc); + + ixgbe_dcb_hw_ets_config(hw, refill, max, bwg_id, + prio_type, prio_tc); + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + netdev_set_prio_tc_map(netdev, i, prio_tc[i]); + + ret = DCB_HW_CHG_RST; + } + + if (adapter->dcb_set_bitmap & BIT_PFC) { + if (dcb_cfg->pfc_mode_enable) { + u8 pfc_en; + u8 prio_tc[MAX_USER_PRIORITY]; + + ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc); + ixgbe_dcb_unpack_pfc(dcb_cfg, &pfc_en); + ixgbe_dcb_hw_pfc_config(hw, pfc_en, prio_tc); + } else { + hw->mac.ops.fc_enable(hw); + } + + ixgbe_set_rx_drop_en(adapter); + + ret = DCB_HW_CHG; + } + +#ifdef IXGBE_FCOE + /* Reprogam FCoE hardware offloads when the traffic class + * FCoE is using changes. This happens if the APP info + * changes or the up2tc mapping is updated. 
+ */ + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { + struct dcb_app app = { + .selector = DCB_APP_IDTYPE_ETHTYPE, + .protocol = ETH_P_FCOE, + }; + u8 up = dcb_getapp(netdev, &app); + + adapter->fcoe.up = ffs(up) - 1; + ixgbe_dcbnl_devreset(netdev); + ret = DCB_HW_CHG_RST; + } +#endif + + adapter->dcb_set_bitmap = 0x00; + return ret; +} + +static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + switch (capid) { + case DCB_CAP_ATTR_PG: + *cap = true; + break; + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_GSP: + *cap = true; + break; + case DCB_CAP_ATTR_BCN: + *cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *cap = adapter->dcbx_cap; + break; + default: + *cap = false; + break; + } + + return 0; +} + +static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + *num = adapter->dcb_cfg.num_tcs.pg_tcs; + break; + case DCB_NUMTCS_ATTR_PFC: + *num = adapter->dcb_cfg.num_tcs.pfc_tcs; + break; + default: + return -EINVAL; + } + } else { + return -EINVAL; + } + + return 0; +} + +static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +{ + return -EINVAL; +} + +static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return adapter->dcb_cfg.pfc_mode_enable; +} + +static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.pfc_mode_enable = state; +} + +/** + * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority + * @netdev : the corresponding netdev 
+ * @idtype : identifies the id as ether type or TCP/UDP port number + * @id: id is either ether type or TCP/UDP port number + * + * Returns : on success, returns a non-zero 802.1p user priority bitmap + * otherwise returns -EINVAL as the invalid user priority bitmap to indicate an + * error. + */ +static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct dcb_app app = { + .selector = idtype, + .protocol = id, + }; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return -EINVAL; + + return dcb_getapp(netdev, &app); +} + +static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets; + + ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs; + + /* No IEEE PFC settings available */ + if (!my_ets) + return 0; + + ets->cbs = my_ets->cbs; + memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); + memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); + return 0; +} + +static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + int i, err; + __u8 max_tc = 0; + __u8 map_chg = 0; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (!adapter->ixgbe_ieee_ets) { + adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets), + GFP_KERNEL); + if (!adapter->ixgbe_ieee_ets) + return -ENOMEM; + + /* initialize UP2TC mappings to invalid value */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + adapter->ixgbe_ieee_ets->prio_tc[i] = + IEEE_8021QAZ_MAX_TCS; + /* if possible update UP2TC mappings from HW */ + ixgbe_dcb_read_rtrup2tc(&adapter->hw, + adapter->ixgbe_ieee_ets->prio_tc); + 
} + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->prio_tc[i] > max_tc) + max_tc = ets->prio_tc[i]; + if (ets->prio_tc[i] != adapter->ixgbe_ieee_ets->prio_tc[i]) + map_chg = 1; + } + + memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets)); + + if (max_tc) + max_tc++; + + if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs) + return -EINVAL; + + if (max_tc != netdev_get_num_tc(dev)) { + err = ixgbe_setup_tc(dev, max_tc); + if (err) + return err; + } else if (map_chg) { + ixgbe_dcbnl_devreset(dev); + } + + return ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); +} + +static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc; + int i; + + pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs; + + /* No IEEE PFC settings available */ + if (!my_pfc) + return 0; + + pfc->pfc_en = my_pfc->pfc_en; + pfc->mbc = my_pfc->mbc; + pfc->delay = my_pfc->delay; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + pfc->requests[i] = adapter->stats.pxoffrxc[i]; + pfc->indications[i] = adapter->stats.pxofftxc[i]; + } + + return 0; +} + +static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + u8 *prio_tc; + int err; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (!adapter->ixgbe_ieee_pfc) { + adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc), + GFP_KERNEL); + if (!adapter->ixgbe_ieee_pfc) + return -ENOMEM; + } + + prio_tc = adapter->ixgbe_ieee_ets->prio_tc; + memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc)); + + /* Enable link flow control parameters if PFC is disabled */ + if (pfc->pfc_en) + err = ixgbe_dcb_hw_pfc_config(hw, pfc->pfc_en, prio_tc); + else + err = hw->mac.ops.fc_enable(hw); + + ixgbe_set_rx_drop_en(adapter); + + return err; +} + +static int 
ixgbe_dcbnl_ieee_setapp(struct net_device *dev, + struct dcb_app *app) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int err; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + err = dcb_ieee_setapp(dev, app); + if (err) + return err; + +#ifdef IXGBE_FCOE + if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == ETH_P_FCOE) { + u8 app_mask = dcb_ieee_getapp_mask(dev, app); + + if (app_mask & BIT(adapter->fcoe.up)) + return 0; + + adapter->fcoe.up = app->priority; + ixgbe_dcbnl_devreset(dev); + } +#endif + + /* VF devices should use default UP when available */ + if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == 0) { + int vf; + + adapter->default_up = app->priority; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + + if (!vfinfo->pf_qos) + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + app->priority, vf); + } + } + + return 0; +} + +static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, + struct dcb_app *app) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int err; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + err = dcb_ieee_delapp(dev, app); + +#ifdef IXGBE_FCOE + if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == ETH_P_FCOE) { + u8 app_mask = dcb_ieee_getapp_mask(dev, app); + + if (app_mask & BIT(adapter->fcoe.up)) + return 0; + + adapter->fcoe.up = app_mask ? + ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC; + ixgbe_dcbnl_devreset(dev); + } +#endif + /* IF default priority is being removed clear VF default UP */ + if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == 0 && adapter->default_up == app->priority) { + int vf; + long unsigned int app_mask = dcb_ieee_getapp_mask(dev, app); + int qos = app_mask ? 
find_first_bit(&app_mask, 8) : 0; + + adapter->default_up = qos; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + + if (!vfinfo->pf_qos) + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + qos, vf); + } + } + + return err; +} + +static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + return adapter->dcbx_cap; +} + +static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ieee_ets ets = {0}; + struct ieee_pfc pfc = {0}; + int err = 0; + + /* no support for LLD_MANAGED modes or CEE+IEEE */ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || + !(mode & DCB_CAP_DCBX_HOST)) + return 1; + + if (mode == adapter->dcbx_cap) + return 0; + + adapter->dcbx_cap = mode; + + /* ETS and PFC defaults */ + ets.ets_cap = 8; + pfc.pfc_cap = 8; + + if (mode & DCB_CAP_DCBX_VER_IEEE) { + ixgbe_dcbnl_ieee_setets(dev, &ets); + ixgbe_dcbnl_ieee_setpfc(dev, &pfc); + } else if (mode & DCB_CAP_DCBX_VER_CEE) { + u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG; + + adapter->dcb_set_bitmap |= mask; + ixgbe_dcbnl_set_all(dev); + } else { + /* Drop into single TC mode strict priority as this + * indicates CEE and IEEE versions are disabled + */ + ixgbe_dcbnl_ieee_setets(dev, &ets); + ixgbe_dcbnl_ieee_setpfc(dev, &pfc); + err = ixgbe_setup_tc(dev, 0); + } + + return err ? 
1 : 0; +} + +const struct dcbnl_rtnl_ops dcbnl_ops = { + .ieee_getets = ixgbe_dcbnl_ieee_getets, + .ieee_setets = ixgbe_dcbnl_ieee_setets, + .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc, + .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc, + .ieee_setapp = ixgbe_dcbnl_ieee_setapp, + .ieee_delapp = ixgbe_dcbnl_ieee_delapp, + .getstate = ixgbe_dcbnl_get_state, + .setstate = ixgbe_dcbnl_set_state, + .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, + .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx, + .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx, + .setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx, + .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx, + .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx, + .getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx, + .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx, + .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx, + .setpfccfg = ixgbe_dcbnl_set_pfc_cfg, + .getpfccfg = ixgbe_dcbnl_get_pfc_cfg, + .setall = ixgbe_dcbnl_set_all, + .getcap = ixgbe_dcbnl_getcap, + .getnumtcs = ixgbe_dcbnl_getnumtcs, + .setnumtcs = ixgbe_dcbnl_setnumtcs, + .getpfcstate = ixgbe_dcbnl_getpfcstate, + .setpfcstate = ixgbe_dcbnl_setpfcstate, + .getapp = ixgbe_dcbnl_getapp, + .getdcbx = ixgbe_dcbnl_getdcbx, + .setdcbx = ixgbe_dcbnl_setdcbx, +}; diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_debugfs.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_debugfs.c new file mode 100644 index 000000000000..5e2c1e35e517 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_debugfs.c @@ -0,0 +1,276 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ +#include +#include + +#include "ixgbe.h" + +static struct dentry *ixgbe_dbg_root; + +static char ixgbe_dbg_reg_ops_buf[256] = ""; + +/** + * ixgbe_dbg_reg_ops_read - read for reg_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ixgbe_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: %s\n", + adapter->netdev->name, + ixgbe_dbg_reg_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +/** + * ixgbe_dbg_reg_ops_write - write into reg_ops datum + * @filp: the opened file + * @buffer: where 
to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ixgbe_adapter *adapter = filp->private_data; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(ixgbe_dbg_reg_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(ixgbe_dbg_reg_ops_buf, + sizeof(ixgbe_dbg_reg_ops_buf)-1, + ppos, + buffer, + count); + if (len < 0) + return len; + + ixgbe_dbg_reg_ops_buf[len] = '\0'; + + if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) { + u32 reg, value; + int cnt; + cnt = sscanf(&ixgbe_dbg_reg_ops_buf[5], "%x %x", ®, &value); + if (cnt == 2) { + IXGBE_WRITE_REG(&adapter->hw, reg, value); + value = IXGBE_READ_REG(&adapter->hw, reg); + e_dev_info("write: 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("write \n"); + } + } else if (strncmp(ixgbe_dbg_reg_ops_buf, "read", 4) == 0) { + u32 reg, value; + int cnt; + cnt = sscanf(&ixgbe_dbg_reg_ops_buf[4], "%x", ®); + if (cnt == 1) { + value = IXGBE_READ_REG(&adapter->hw, reg); + e_dev_info("read 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("read \n"); + } + } else { + e_dev_info("Unknown command %s\n", ixgbe_dbg_reg_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" read \n"); + e_dev_info(" write \n"); + } + return count; +} + +static const struct file_operations ixgbe_dbg_reg_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ixgbe_dbg_reg_ops_read, + .write = ixgbe_dbg_reg_ops_write, +}; + +static char ixgbe_dbg_netdev_ops_buf[256] = ""; + +/** + * ixgbe_dbg_netdev_ops_read - read for netdev_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, + char __user *buffer, 
+ size_t count, loff_t *ppos) +{ + struct ixgbe_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: %s\n", + adapter->netdev->name, + ixgbe_dbg_netdev_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +/** + * ixgbe_dbg_netdev_ops_write - write into netdev_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ixgbe_adapter *adapter = filp->private_data; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(ixgbe_dbg_netdev_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(ixgbe_dbg_netdev_ops_buf, + sizeof(ixgbe_dbg_netdev_ops_buf)-1, + ppos, + buffer, + count); + if (len < 0) + return len; + + ixgbe_dbg_netdev_ops_buf[len] = '\0'; + + if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); + e_dev_info("tx_timeout called\n"); + } else { + e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" tx_timeout\n"); + } + return count; +} + +static const struct file_operations ixgbe_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ixgbe_dbg_netdev_ops_read, + .write = ixgbe_dbg_netdev_ops_write, +}; + +/** + * ixgbe_dbg_adapter_init - setup the debugfs directory for the adapter + * @adapter: the adapter that is starting up + **/ +void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) +{ + const char *name = pci_name(adapter->pdev); + struct 
dentry *pfile; + adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root); + if (adapter->ixgbe_dbg_adapter) { + pfile = debugfs_create_file("reg_ops", 0600, + adapter->ixgbe_dbg_adapter, adapter, + &ixgbe_dbg_reg_ops_fops); + if (!pfile) + e_dev_err("debugfs reg_ops for %s failed\n", name); + pfile = debugfs_create_file("netdev_ops", 0600, + adapter->ixgbe_dbg_adapter, adapter, + &ixgbe_dbg_netdev_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", name); + } else { + e_dev_err("debugfs entry for %s failed\n", name); + } +} + +/** + * ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries + * @pf: the pf that is stopping + **/ +void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) +{ + debugfs_remove_recursive(adapter->ixgbe_dbg_adapter); + adapter->ixgbe_dbg_adapter = NULL; +} + +/** + * ixgbe_dbg_init - start up debugfs for the driver + **/ +void ixgbe_dbg_init(void) +{ + ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL); + if (ixgbe_dbg_root == NULL) + pr_err("init of debugfs failed\n"); +} + +/** + * ixgbe_dbg_exit - clean out the driver's debugfs entries + **/ +void ixgbe_dbg_exit(void) +{ + debugfs_remove_recursive(ixgbe_dbg_root); +} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ethtool.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ethtool.c new file mode 100644 index 000000000000..6b23b7406f27 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ethtool.c @@ -0,0 +1,3379 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2016 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for ixgbe */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ixgbe.h" +#include "ixgbe_phy.h" + + +#define IXGBE_ALL_RAR_ENTRIES 16 + +enum {NETDEV_STATS, IXGBE_STATS}; + +struct ixgbe_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#define IXGBE_STAT(m) IXGBE_STATS, \ + sizeof(((struct ixgbe_adapter *)0)->m), \ + offsetof(struct ixgbe_adapter, m) +#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ + sizeof(((struct rtnl_link_stats64 *)0)->m), \ + offsetof(struct rtnl_link_stats64, m) + +static const struct ixgbe_stats ixgbe_gstrings_stats[] = { + {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, + {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, + {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, + {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)}, + {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, + {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, + {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, + 
{"tx_bytes_nic", IXGBE_STAT(stats.gotc)}, + {"lsc_int", IXGBE_STAT(lsc_int)}, + {"tx_busy", IXGBE_STAT(tx_busy)}, + {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, + {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)}, + {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)}, + {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)}, + {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)}, + {"multicast", IXGBE_NETDEV_STAT(multicast)}, + {"broadcast", IXGBE_STAT(stats.bprc)}, + {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, + {"collisions", IXGBE_NETDEV_STAT(collisions)}, + {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)}, + {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)}, + {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)}, + {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, + {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, + {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, + {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, + {"fdir_overflow", IXGBE_STAT(fdir_overflow)}, + {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, + {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, + {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, + {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)}, + {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)}, + {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, + {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, + {"tx_restart_queue", IXGBE_STAT(restart_queue)}, + {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, + {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, + {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, + {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, + {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, + {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, + {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, + {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, + {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, + {"rx_no_dma_resources", 
IXGBE_STAT(hw_rx_no_dma_resources)}, + {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)}, + {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)}, + {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)}, + {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)}, +#ifdef IXGBE_FCOE + {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, + {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, + {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, + {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, + {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)}, + {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)}, + {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)}, + {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, +#endif /* IXGBE_FCOE */ +}; + +/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so + * we set the num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. + */ +#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues + +#define IXGBE_QUEUE_STATS_LEN ( \ + (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \ + (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) +#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) +#define IXGBE_PB_STATS_LEN ( \ + (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ + / sizeof(u64)) +#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ + IXGBE_PB_STATS_LEN + \ + IXGBE_QUEUE_STATS_LEN) + +static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN + +/* currently supported speeds for 10G */ +#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \ + SUPPORTED_10000baseKX4_Full | 
\ + SUPPORTED_10000baseKR_Full) + +#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane) + +static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw) +{ + if (!ixgbe_isbackplane(hw->phy.media_type)) + return SUPPORTED_10000baseT_Full; + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_X550EM_X_KX4: + return SUPPORTED_10000baseKX4_Full; + case IXGBE_DEV_ID_82598_BX: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_XFI: + return SUPPORTED_10000baseKR_Full; + default: + return SUPPORTED_10000baseKX4_Full | + SUPPORTED_10000baseKR_Full; + } +} + +static int ixgbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + ixgbe_link_speed supported_link; + bool autoneg = false; + + hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); + + /* set the supported link speeds */ + if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) + ecmd->supported |= ixgbe_get_supported_10gtypes(hw); + if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) + ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? + SUPPORTED_1000baseKX_Full : + SUPPORTED_1000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_100_FULL) + ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ? 
+ SUPPORTED_1000baseKX_Full : + SUPPORTED_100baseT_Full; + + /* default advertised speed if phy.autoneg_advertised isn't set */ + ecmd->advertising = ecmd->supported; + /* set the advertised speeds */ + if (hw->phy.autoneg_advertised) { + ecmd->advertising = 0; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + ecmd->advertising |= ecmd->supported & ADVRTSD_MSK_10G; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { + if (ecmd->supported & SUPPORTED_1000baseKX_Full) + ecmd->advertising |= ADVERTISED_1000baseKX_Full; + else + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } + } else { + if (hw->phy.multispeed_fiber && !autoneg) { + if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) + ecmd->advertising = ADVERTISED_10000baseT_Full; + } + } + + if (autoneg) { + ecmd->supported |= SUPPORTED_Autoneg; + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->autoneg = AUTONEG_ENABLE; + } else + ecmd->autoneg = AUTONEG_DISABLE; + + ecmd->transceiver = XCVR_EXTERNAL; + + /* Determine the remaining settings based on the PHY type. 
*/ + switch (adapter->hw.phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_aq: + case ixgbe_phy_x550em_ext_t: + case ixgbe_phy_fw: + case ixgbe_phy_cu_unknown: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + break; + case ixgbe_phy_qt: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + break; + case ixgbe_phy_nl: + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + case ixgbe_phy_qsfp_passive_unknown: + case ixgbe_phy_qsfp_active_unknown: + case ixgbe_phy_qsfp_intel: + case ixgbe_phy_qsfp_unknown: + /* SFP+ devices, further checking needed */ + switch (adapter->hw.phy.sfp_type) { + case ixgbe_sfp_type_da_cu: + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_DA; + break; + case ixgbe_sfp_type_sr: + case ixgbe_sfp_type_lr: + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + case ixgbe_sfp_type_1g_lx_core0: + case ixgbe_sfp_type_1g_lx_core1: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + break; + case ixgbe_sfp_type_not_present: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_NONE; + break; + case ixgbe_sfp_type_1g_cu_core0: + case ixgbe_sfp_type_1g_cu_core1: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + break; + case ixgbe_sfp_type_unknown: + default: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_OTHER; + break; + } + break; + case ixgbe_phy_xaui: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= 
ADVERTISED_FIBRE; + ecmd->port = PORT_NONE; + break; + case ixgbe_phy_unknown: + case ixgbe_phy_generic: + case ixgbe_phy_sfp_unsupported: + default: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_OTHER; + break; + } + + /* Indicate pause support */ + ecmd->supported |= SUPPORTED_Pause; + + switch (hw->fc.requested_mode) { + case ixgbe_fc_full: + ecmd->advertising |= ADVERTISED_Pause; + break; + case ixgbe_fc_rx_pause: + ecmd->advertising |= ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + break; + case ixgbe_fc_tx_pause: + ecmd->advertising |= ADVERTISED_Asym_Pause; + break; + default: + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + } + + if (netif_carrier_ok(netdev)) { + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_10000); + break; + case IXGBE_LINK_SPEED_2_5GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_2500); + break; + case IXGBE_LINK_SPEED_1GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_1000); + break; + case IXGBE_LINK_SPEED_100_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_100); + break; + default: + break; + } + ecmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + return 0; +} + +static int ixgbe_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 advertised, old; + s32 err = 0; + + if ((hw->phy.media_type == ixgbe_media_type_copper) || + (hw->phy.multispeed_fiber)) { + /* + * this function does not support duplex forcing, but can + * limit the advertising of the adapter to the specified speed + */ + if (ecmd->advertising & ~ecmd->supported) + return -EINVAL; + + /* only allow one speed at a time if no autoneg */ + if (!ecmd->autoneg && hw->phy.multispeed_fiber) { + if (ecmd->advertising == + (ADVERTISED_10000baseT_Full | + 
ADVERTISED_1000baseT_Full)) + return -EINVAL; + } + + old = hw->phy.autoneg_advertised; + advertised = 0; + if (ecmd->advertising & ADVERTISED_10000baseT_Full) + advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (ecmd->advertising & ADVERTISED_1000baseT_Full) + advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (ecmd->advertising & ADVERTISED_100baseT_Full) + advertised |= IXGBE_LINK_SPEED_100_FULL; + + if (ecmd->advertising & ADVERTISED_10baseT_Full) + advertised |= IXGBE_LINK_SPEED_10_FULL; + + if (old == advertised) + return err; + /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + hw->mac.autotry_restart = true; + err = hw->mac.ops.setup_link(hw, advertised, true); + if (err) { + e_info(probe, "setup link failed with code %d\n", err); + hw->mac.ops.setup_link(hw, old, true); + } + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); + } else { + /* in this case we currently only support 10Gb/FULL */ + u32 speed = ethtool_cmd_speed(ecmd); + if ((ecmd->autoneg == AUTONEG_ENABLE) || + (ecmd->advertising != ADVERTISED_10000baseT_Full) || + (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) + return -EINVAL; + } + + return err; +} + +static void ixgbe_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (ixgbe_device_supports_autoneg_fc(hw) && + !hw->fc.disable_fc_autoneg) + pause->autoneg = 1; + else + pause->autoneg = 0; + + if (hw->fc.current_mode == ixgbe_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == ixgbe_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int ixgbe_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw 
*hw = &adapter->hw; + struct ixgbe_fc_info fc = hw->fc; + + /* 82598 does no support link flow control with DCB enabled */ + if ((hw->mac.type == ixgbe_mac_82598EB) && + (adapter->flags & IXGBE_FLAG_DCB_ENABLED)) + return -EINVAL; + + /* some devices do not support autoneg of link flow control */ + if ((pause->autoneg == AUTONEG_ENABLE) && + !ixgbe_device_supports_autoneg_fc(hw)) + return -EINVAL; + + fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); + + if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) + fc.requested_mode = ixgbe_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + fc.requested_mode = ixgbe_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + fc.requested_mode = ixgbe_fc_tx_pause; + else + fc.requested_mode = ixgbe_fc_none; + + /* if the thing changed then we'll update and use new autoneg */ + if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) { + hw->fc = fc; + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + else + ixgbe_reset(adapter); + } + + return 0; +} + +static u32 ixgbe_get_msglevel(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; + + /* 2018/11/14 pega-julia modified start */ + /* Purpose : Add for light OOB LED static. 
*/ + + struct ixgbe_hw *hw = &adapter->hw; + u16 regVal; + s32 rc; + + /* For M88E1512, write 3 in (page 0,register 22)[Page Address Register] to goto page 3 */ + regVal = 0x03; + rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); + if (rc) + hw_err(hw, "page register write failed, rc:%x\n", rc); + + /* For M88E1512, read from (page 3, register 16)[LED Function Control Register] */ + regVal = 0x00; + rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); + /*hw_err(hw, "[Pega Debug] : current register value = 0x%x\n", regVal);*/ + if (rc) + hw_err(hw, "led function control register read failed, rc:%x\n", rc); + + if (data == 0) /* Turn off OOB LED. */ + { + /* For M88E1512, write to (page 3, register 16) with force led off */ + regVal = (regVal & 0xFF00) | 0x0088; + rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); + if (rc) + hw_err(hw, "led function control register write failed, rc:%x\n", rc); + } + else if (data == 1) /* Turn on OOB LED. */ + { + /* For M88E1512, write to (page 3, register 16) with force led on */ + regVal = (regVal & 0xFF00) | 0x0099; + rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); + if (rc) + hw_err(hw, "led function control register write failed, rc:%x\n", rc); + } + else /* Switch OOB LED back to normal. */ + { + /* For M88E1512, set led back to nornmal in (page 3, register 16). 
*/ + regVal = (regVal & 0xFF00) | 0x0017; + rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); + if (rc) + hw_err(hw, "led function control register write failed, rc:%x\n", rc); + } + + /* For M88E1512, write 0 in (page 0, register 22) to back to page 0 */ + regVal = 0x00; + rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); + if (rc) + hw_err(hw, "page register write failed, rc:%x\n", rc); + + /* 2018/11/14 pega-julia modified end */ +} + +static int ixgbe_get_regs_len(struct net_device *netdev) +{ +#define IXGBE_REGS_LEN 1139 + return IXGBE_REGS_LEN * sizeof(u32); +} + +#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ + +static void ixgbe_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); + + regs->version = hw->mac.type << 24 | hw->revision_id << 16 | + hw->device_id; + + /* General Registers */ + regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); + regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); + regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); + regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); + regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); + regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); + + /* NVM Register */ + regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); + regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw)); + regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); + regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); + regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); + regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); + regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); + regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); + regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw)); + + /* 
Interrupt */ + /* don't read EICR because it can clear interrupt causes, instead + * read EICS which is a shadow but doesn't clear EICR */ + regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); + regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); + regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); + regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); + regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); + regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); + regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); + regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); + regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); + regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); + regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); + regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); + + /* Flow Control */ + regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); + for (i = 0; i < 4; i++) + regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i)); + for (i = 0; i < 8; i++) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); + regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); + regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); + break; + default: + break; + } + } + regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); + regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); + + /* Receive DMA */ + for (i = 0; i < 64; i++) + regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); + for (i = 0; i < 64; i++) + regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); + for (i = 0; i < 64; i++) + regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); + for (i = 0; i < 64; i++) + regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); + for (i = 0; i < 64; i++) + regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); + for (i = 0; i < 64; i++) + regs_buff[373 
+ i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + for (i = 0; i < 16; i++) + regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); + for (i = 0; i < 16; i++) + regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + for (i = 0; i < 8; i++) + regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); + + /* Receive */ + regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); + for (i = 0; i < 16; i++) + regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); + regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); + regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); + regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); + regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); + regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + for (i = 0; i < 8; i++) + regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); + for (i = 0; i < 8; i++) + regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); + regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); + + /* Transmit */ + for (i = 0; i < 32; i++) + regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); + for (i = 0; i < 32; i++) + regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); + for (i = 0; i < 32; i++) + regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); + for (i = 0; i < 32; i++) + regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); + for (i = 0; i < 32; i++) + regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); + for (i = 0; i < 32; i++) + regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + for (i = 0; i < 32; i++) + regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); + for (i = 0; i < 32; i++) + regs_buff[761 + i] = IXGBE_READ_REG(hw, 
IXGBE_TDWBAH(i)); + regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); + for (i = 0; i < 16; i++) + regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); + for (i = 0; i < 8; i++) + regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); + regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); + + /* Wake Up */ + regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); + regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); + regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); + regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); + regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); + regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); + regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); + regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); + regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); + + /* DCB */ + regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */ + regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */ + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); + regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); + for (i = 0; i < 8; i++) + regs_buff[833 + i] = + IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); + for (i = 0; i < 8; i++) + regs_buff[841 + i] = + IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); + for (i = 0; i < 8; i++) + regs_buff[849 + i] = + IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); + for (i = 0; i < 8; i++) + regs_buff[857 + i] = + IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS); + for (i = 0; i < 8; i++) + regs_buff[833 + i] = + IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i)); + for (i = 0; i < 8; i++) + regs_buff[841 + i] = + IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i)); + for (i = 0; i < 8; i++) + regs_buff[849 + i] = + IXGBE_READ_REG(hw, 
IXGBE_RTTDT2C(i)); + for (i = 0; i < 8; i++) + regs_buff[857 + i] = + IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i)); + break; + default: + break; + } + + for (i = 0; i < 8; i++) + regs_buff[865 + i] = + IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */ + for (i = 0; i < 8; i++) + regs_buff[873 + i] = + IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */ + + /* Statistics */ + regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); + regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc); + regs_buff[883] = IXGBE_GET_STAT(adapter, errbc); + regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc); + for (i = 0; i < 8; i++) + regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]); + regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc); + regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc); + regs_buff[895] = IXGBE_GET_STAT(adapter, rlec); + regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc); + regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc); + regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc); + regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc); + for (i = 0; i < 8; i++) + regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]); + for (i = 0; i < 8; i++) + regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]); + for (i = 0; i < 8; i++) + regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]); + for (i = 0; i < 8; i++) + regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]); + regs_buff[932] = IXGBE_GET_STAT(adapter, prc64); + regs_buff[933] = IXGBE_GET_STAT(adapter, prc127); + regs_buff[934] = IXGBE_GET_STAT(adapter, prc255); + regs_buff[935] = IXGBE_GET_STAT(adapter, prc511); + regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023); + regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522); + regs_buff[938] = IXGBE_GET_STAT(adapter, gprc); + regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); + regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); + regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); + regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc); + regs_buff[943] = 
(u32)(IXGBE_GET_STAT(adapter, gorc) >> 32); + regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc); + regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32); + for (i = 0; i < 8; i++) + regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); + regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); + regs_buff[955] = IXGBE_GET_STAT(adapter, rfc); + regs_buff[956] = IXGBE_GET_STAT(adapter, roc); + regs_buff[957] = IXGBE_GET_STAT(adapter, rjc); + regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); + regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); + regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); + regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor); + regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32); + regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); + regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); + regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); + regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127); + regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255); + regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511); + regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023); + regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522); + regs_buff[971] = IXGBE_GET_STAT(adapter, mptc); + regs_buff[972] = IXGBE_GET_STAT(adapter, bptc); + regs_buff[973] = IXGBE_GET_STAT(adapter, xec); + for (i = 0; i < 16; i++) + regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]); + for (i = 0; i < 16; i++) + regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]); + for (i = 0; i < 16; i++) + regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]); + for (i = 0; i < 16; i++) + regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); + + /* MAC */ + regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); + regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); + regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); + regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0); + regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); + regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + regs_buff[1044] = 
IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); + regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); + regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); + regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); + regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); + regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); + regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); + regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); + regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); + regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); + regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); + regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); + regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); + regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); + regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); + regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); + regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); + regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); + regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); + regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); + regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); + regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); + regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); + regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); + regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); + regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + + /* Diagnostic */ + regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); + for (i = 0; i < 8; i++) + regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); + regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); + for (i = 0; i < 4; i++) + regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); + regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); + regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); + for (i = 0; i < 8; i++) + regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); + regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); + for (i = 0; i < 
4; i++) + regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); + regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); + regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); + for (i = 0; i < 4; i++) + regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i)); + regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); + for (i = 0; i < 4; i++) + regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i)); + for (i = 0; i < 8; i++) + regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); + regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); + regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); + regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); + regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); + regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); + regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); + regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); + regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); + regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); + + /* 82599 X540 specific registers */ + regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN); + + /* 82599 X540 specific DCB registers */ + regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); + regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC); + for (i = 0; i < 4; i++) + regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i)); + regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM); + /* same as RTTQCNRM */ + regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD); + /* same as RTTQCNRR */ + + /* X540 specific DCB registers */ + regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR); + regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG); +} + +static int ixgbe_get_eeprom_len(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + return adapter->hw.eeprom.word_size * 2; +} + +static int ixgbe_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw 
*hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word, eeprom_len; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_len = last_word - first_word + 1; + + eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, + eeprom_buff); + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < eeprom_len; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int ixgbe_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EINVAL; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = eeprom_buff; + + if (eeprom->offset & 1) { + /* + * need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]); + if (ret_val) + goto err; + + ptr++; + } + if ((eeprom->offset + eeprom->len) & 1) { + /* + * need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->eeprom.ops.read(hw, last_word, + &eeprom_buff[last_word - first_word]); + if (ret_val) + goto err; + } + + /* 
Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->eeprom.ops.write_buffer(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + + /* Update the checksum */ + if (ret_val == 0) + hw->eeprom.ops.update_checksum(hw); + +err: + kfree(eeprom_buff); + return ret_val; +} + +static void ixgbe_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 nvm_track_id; + + strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, ixgbe_driver_version, + sizeof(drvinfo->version)); + + nvm_track_id = (adapter->eeprom_verh << 16) | + adapter->eeprom_verl; + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", + nvm_track_id); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +} + +static void ixgbe_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; + struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; + + ring->rx_max_pending = IXGBE_MAX_RXD; + ring->tx_max_pending = IXGBE_MAX_TXD; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; +} + +static int ixgbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_ring *temp_ring; + int i, err = 0; + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_tx_count = clamp_t(u32, ring->tx_pending, + IXGBE_MIN_TXD, IXGBE_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = 
clamp_t(u32, ring->rx_pending, + IXGBE_MIN_RXD, IXGBE_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + temp_ring = vmalloc(i * sizeof(struct ixgbe_ring)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + ixgbe_down(adapter); + + /* + * Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. 
+ */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct ixgbe_ring)); + + temp_ring[i].count = new_tx_count; + err = ixgbe_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + ixgbe_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + ixgbe_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct ixgbe_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct ixgbe_ring)); + + temp_ring[i].count = new_rx_count; + err = ixgbe_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + ixgbe_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + ixgbe_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct ixgbe_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } + +err_setup: + ixgbe_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IXGBE_RESETTING, &adapter->state); + return err; +} + +static int ixgbe_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return IXGBE_TEST_LEN; + case ETH_SS_STATS: + return IXGBE_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void ixgbe_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats; + unsigned int start; + struct ixgbe_ring *ring; + int i, j; + char *p = NULL; + + ixgbe_update_stats(adapter); + net_stats = 
dev_get_stats(netdev, &temp); + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { + switch (ixgbe_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *) net_stats + + ixgbe_gstrings_stats[i].stat_offset; + break; + case IXGBE_STATS: + p = (char *) adapter + + ixgbe_gstrings_stats[i].stat_offset; + break; + default: + data[i] = 0; + continue; + } + + data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < netdev->num_tx_queues; j++) { + ring = adapter->tx_ring[j]; + if (!ring) { + data[i] = 0; + data[i+1] = 0; + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = 0; + data[i+1] = 0; + data[i+2] = 0; + i += 3; +#endif + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif + } + for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { + ring = adapter->rx_ring[j]; + if (!ring) { + data[i] = 0; + data[i+1] = 0; + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = 0; + data[i+1] = 0; + data[i+2] = 0; + i += 3; +#endif + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif + } + + for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxontxc[j]; + data[i++] = adapter->stats.pxofftxc[j]; + } + for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxonrxc[j]; + data[i++] = adapter->stats.pxoffrxc[j]; + } +} + +static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + 
char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + for (i = 0; i < IXGBE_TEST_LEN; i++) { + memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + case ETH_SS_STATS: + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, ixgbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < netdev->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + sprintf(p, "tx_queue_%u_bp_napi_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + sprintf(p, "rx_queue_%u_bp_poll_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "tx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "rx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) +{ + struct ixgbe_hw *hw = &adapter->hw; + bool link_up; + u32 link_speed = 0; + + if (ixgbe_removed(hw->hw_addr)) { + *data = 1; + return 1; + } + *data = 0; + + hw->mac.ops.check_link(hw, &link_speed, &link_up, true); + if (link_up) + return 
*data; + else + *data = 1; + return *data; +} + +/* ethtool register test data */ +struct ixgbe_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. + */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default 82599 register test */ +static const struct ixgbe_reg_test reg_test_82599[] = { + { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, + { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, + { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, + { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, + { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, + 
{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { .reg = 0 } +}; + +/* default 82598 register test */ +static const struct ixgbe_reg_test reg_test_82598[] = { + { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* Enable all four RX queues before testing. */ + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, + /* RDH is read-only for 82598, only test RDT. */ + { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, + { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF }, + { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, + { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 }, + { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, + { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { .reg = 0 } +}; + +static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + + if (ixgbe_removed(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = ixgbe_read_reg(&adapter->hw, reg); 
+ ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write); + val = ixgbe_read_reg(&adapter->hw, reg); + if (val != (test_pattern[pat] & write & mask)) { + e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, (test_pattern[pat] & write & mask)); + *data = reg; + ixgbe_write_reg(&adapter->hw, reg, before); + return true; + } + ixgbe_write_reg(&adapter->hw, reg, before); + } + return false; +} + +static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 val, before; + + if (ixgbe_removed(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + before = ixgbe_read_reg(&adapter->hw, reg); + ixgbe_write_reg(&adapter->hw, reg, write & mask); + val = ixgbe_read_reg(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + ixgbe_write_reg(&adapter->hw, reg, before); + return true; + } + ixgbe_write_reg(&adapter->hw, reg, before); + return false; +} + +static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) +{ + const struct ixgbe_reg_test *test; + u32 value, before, after; + u32 i, toggle; + + if (ixgbe_removed(adapter->hw.hw_addr)) { + e_err(drv, "Adapter removed - register test blocked\n"); + *data = 1; + return 1; + } + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + toggle = 0x7FFFF3FF; + test = reg_test_82598; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + toggle = 0x7FFFF30F; + test = reg_test_82599; + break; + default: + *data = 1; + return 1; + } + + /* + * Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writeable on newer MACs. 
+ */ + before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS); + value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle); + ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle); + after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle; + if (value != after) { + e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before); + + /* + * Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + bool b = false; + + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + ixgbe_write_reg(&adapter->hw, + test->reg + (i * 0x40), + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + (test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + if (b) + return 1; + } + test++; + } + + *data = 0; + return 0; +} + +static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) +{ + struct ixgbe_hw *hw = &adapter->hw; + if (hw->eeprom.ops.validate_checksum(hw, NULL)) + *data = 1; + else + *data = 0; + return *data; +} + +static irqreturn_t ixgbe_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); + + 
return IRQ_HANDLED; +} + +static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->msix_entries) { + /* NOTE: we don't test MSI-X interrupts here, yet */ + return 0; + } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + shared_int = false; + if (request_irq(irq, ixgbe_test_intr, 0, netdev->name, + netdev)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, + netdev->name, netdev)) { + shared_int = false; + } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", shared_int ? + "shared" : "unshared"); + + /* Disable all the interrupts */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); + IXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (; i < 10; i++) { + /* Interrupt to test */ + mask = BIT(i); + + if (!shared_int) { + /* + * Disable the interrupts to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, + ~mask & 0x00007FFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, + ~mask & 0x00007FFF); + IXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* + * Enable the interrupt to be reported in the cause + * register and then force the same interrupt and see + * if one gets posted. If an interrupt was not posted + * to the bus, the test failed. 
+ */ + adapter->test_icr = 0; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); + IXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* + * Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, + ~mask & 0x00007FFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, + ~mask & 0x00007FFF); + IXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); + IXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; + struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; + struct ixgbe_hw *hw = &adapter->hw; + u32 reg_ctl; + + /* shut down the DMA engines now so they can be reinitialized later */ + + /* first Rx */ + hw->mac.ops.disable_rx(hw); + ixgbe_disable_rx_queue(adapter, rx_ring); + + /* now Tx */ + reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); + reg_ctl &= ~IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg_ctl &= ~IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); + break; + default: + break; + } + + ixgbe_reset(adapter); + + 
ixgbe_free_tx_resources(&adapter->test_tx_ring); + ixgbe_free_rx_resources(&adapter->test_rx_ring); +} + +static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; + struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; + struct ixgbe_hw *hw = &adapter->hw; + u32 rctl, reg_data; + int ret_val; + int err; + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = IXGBE_DEFAULT_TXD; + tx_ring->queue_index = 0; + tx_ring->dev = &adapter->pdev->dev; + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; + + err = ixgbe_setup_tx_resources(tx_ring); + if (err) + return 1; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); + reg_data |= IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); + break; + default: + break; + } + + ixgbe_configure_tx_ring(adapter, tx_ring); + + /* Setup Rx Descriptor ring and Rx buffers */ + rx_ring->count = IXGBE_DEFAULT_RXD; + rx_ring->queue_index = 0; + rx_ring->dev = &adapter->pdev->dev; + rx_ring->netdev = adapter->netdev; + rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; + + err = ixgbe_setup_rx_resources(rx_ring); + if (err) { + ret_val = 4; + goto err_nomem; + } + + hw->mac.ops.disable_rx(hw); + + ixgbe_configure_rx_ring(adapter, rx_ring); + + rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); + rctl |= IXGBE_RXCTRL_DMBYPS; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); + + hw->mac.ops.enable_rx(hw); + + return 0; + +err_nomem: + ixgbe_free_desc_rings(adapter); + return ret_val; +} + +static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 reg_data; + + + /* Setup MAC loopback */ + reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0); + reg_data |= IXGBE_HLREG0_LPBK; + 
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data); + + reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL); + reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); + + /* X540 and X550 needs to set the MACC.FLU bit to force link up */ + switch (adapter->hw.mac.type) { + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); + reg_data |= IXGBE_MACC_FLU; + IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); + break; + default: + if (hw->mac.orig_autoc) { + reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data); + } else { + return 10; + } + } + IXGBE_WRITE_FLUSH(hw); + usleep_range(10000, 20000); + + /* Disable Atlas Tx lanes; re-enabled in reset path */ + if (hw->mac.type == ixgbe_mac_82598EB) { + u8 atlas; + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas); + atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas); + atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas); + atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas); + atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas); + } + + return 0; +} + +static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) +{ + u32 reg_data; + + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); + reg_data &= ~IXGBE_HLREG0_LPBK; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); +} + +static void ixgbe_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size >>= 1; + 
memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size + 10], 0xBE, 1); + memset(&skb->data[frame_size + 12], 0xAF, 1); +} + +static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer, + unsigned int frame_size) +{ + unsigned char *data; + bool match = true; + + frame_size >>= 1; + + data = kmap(rx_buffer->page) + rx_buffer->page_offset; + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + + kunmap(rx_buffer->page); + + return match; +} + +static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, + struct ixgbe_ring *tx_ring, + unsigned int size) +{ + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *rx_buffer; + struct ixgbe_tx_buffer *tx_buffer; + u16 rx_ntc, tx_ntc, count = 0; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); + + while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) { + /* check Rx buffer */ + rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; + + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer->dma, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* verify contents of skb */ + if (ixgbe_check_lbtest_frame(rx_buffer, size)) + count++; + + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer->dma, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* unmap buffer on Tx side */ + tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); + + /* increment Rx/Tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); + } + + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* re-map buffers to ring, store next to clean values */ + 
ixgbe_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; + struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; + int i, j, lc, good_cnt, ret_val = 0; + unsigned int size = 1024; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; + u32 flags_orig = adapter->flags; + + /* DCB can modify the frames on Tx */ + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; + + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; + + /* place data into test skb */ + ixgbe_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* + * Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + for (j = 0; j <= lc; j++) { + /* reset count of good packets */ + good_cnt = 0; + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < 64; i++) { + skb_get(skb); + tx_ret_val = ixgbe_xmit_frame_ring(skb, + adapter, + tx_ring); + if (tx_ret_val == NETDEV_TX_OK) + good_cnt++; + } + + if (good_cnt != 64) { + ret_val = 12; + break; + } + + /* allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size); + if (good_cnt != 64) { + ret_val = 13; + break; + } + } + + /* free the original skb */ + kfree_skb(skb); + adapter->flags = flags_orig; + + return ret_val; +} + +static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) +{ + *data = ixgbe_setup_desc_rings(adapter); + if (*data) + goto out; + *data = ixgbe_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = ixgbe_run_loopback_test(adapter); + 
ixgbe_loopback_cleanup(adapter); + +err_loopback: + ixgbe_free_desc_rings(adapter); +out: + return *data; +} + +static void ixgbe_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + if (ixgbe_removed(adapter->hw.hw_addr)) { + e_err(hw, "Adapter removed - test blocked\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + set_bit(__IXGBE_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + struct ixgbe_hw *hw = &adapter->hw; + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + int i; + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) { + netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__IXGBE_TESTING, + &adapter->state); + goto skip_ol_tests; + } + } + } + + /* Offline tests */ + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (ixgbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + ixgbe_close(netdev); + else + ixgbe_reset(adapter); + + e_info(hw, "register testing starting\n"); + if (ixgbe_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + ixgbe_reset(adapter); + e_info(hw, "eeprom testing starting\n"); + if (ixgbe_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + ixgbe_reset(adapter); + e_info(hw, "interrupt testing starting\n"); + if (ixgbe_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* If SRIOV or VMDq is enabled then skip MAC + * loopback diagnostic. 
*/ + if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | + IXGBE_FLAG_VMDQ_ENABLED)) { + e_info(hw, "Skip MAC loopback diagnostic in VT mode\n"); + data[3] = 0; + goto skip_loopback; + } + + ixgbe_reset(adapter); + e_info(hw, "loopback testing starting\n"); + if (ixgbe_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + +skip_loopback: + ixgbe_reset(adapter); + + /* clear testing bit and return adapter to previous state */ + clear_bit(__IXGBE_TESTING, &adapter->state); + if (if_running) + ixgbe_open(netdev); + else if (hw->mac.ops.disable_tx_laser) + hw->mac.ops.disable_tx_laser(hw); + } else { + e_info(hw, "online testing starting\n"); + + /* Online tests */ + if (ixgbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Offline tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__IXGBE_TESTING, &adapter->state); + } + +skip_ol_tests: + msleep_interruptible(4 * 1000); +} + +static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct ixgbe_hw *hw = &adapter->hw; + int retval = 0; + + /* WOL not supported for all devices */ + if (!ixgbe_wol_supported(adapter, hw->device_id, + hw->subsystem_device_id)) { + retval = 1; + wol->supported = 0; + } + + return retval; +} + +static void ixgbe_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + if (ixgbe_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + if (adapter->wol & IXGBE_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & IXGBE_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & IXGBE_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & IXGBE_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int ixgbe_set_wol(struct net_device *netdev, 
struct ethtool_wolinfo *wol) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (ixgbe_wol_exclusion(adapter, wol)) + return wol->wolopts ? -EOPNOTSUPP : 0; + + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= IXGBE_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= IXGBE_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= IXGBE_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= IXGBE_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int ixgbe_nway_reset(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + + return 0; +} + +static int ixgbe_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + /* Modified by hilbert for C22 MDI directly access */ + s32 rc; + u16 regVal; + /* Modified by hilbert done */ + + if (!hw->mac.ops.led_on || !hw->mac.ops.led_off) + return -EOPNOTSUPP; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + return 2; + + case ETHTOOL_ID_ON: + hw->mac.ops.led_on(hw, hw->mac.led_link_act); + break; + + case ETHTOOL_ID_OFF: + hw->mac.ops.led_off(hw, hw->mac.led_link_act); + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ + /* Modified by hilbert for C22 MDI directly access */ + if (hw->mac.type == ixgbe_mac_x550em_a) { + /* For M88E1512, to select page 3 in register 22 */ + regVal = 0x03; + rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); + if (rc) { + hw_err(hw, "page register write failed, rc:%x\n", rc); + } + + /* For M88E1512, read from page 3, register 16 */ + regVal = 0x00; + rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, &regVal); + if (rc) { + 
hw_err(hw, "led function control register read failed, rc:%x\n", rc); + } + + /* For M88E1512, write to page 3 register 16 with force led on */ + regVal = (regVal & 0xFF00) | 0x0017; + rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); + if (rc) { + hw_err(hw, "led function control register write failed, rc:%x\n", rc); + } + + /* For M88E1512, write page 22 back to default 0 */ + regVal = 0x00; + rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); + if (rc) { + hw_err(hw, "page register write failed, rc:%x\n", rc); + } + } else { + IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); + } + break; + } + + return 0; +} + +static int ixgbe_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* only valid if in constant ITR mode */ + if (adapter->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (adapter->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + + return 0; +} + +/* + * this function must be called before setting the new value of + * rx_itr_setting + */ +static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + /* nothing to do if LRO or RSC are not enabled */ + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) || + !(netdev->features & NETIF_F_LRO)) + return false; + + /* check the feature flag value and enable RSC if necessary */ + if (adapter->rx_itr_setting == 1 || + adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + 
e_info(probe, "rx-usecs value high enough to re-enable RSC\n"); + return true; + } + /* if interrupt rate is too high then disable RSC */ + } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + e_info(probe, "rx-usecs set too low, disabling RSC\n"); + return true; + } + return false; +} + +static int ixgbe_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_q_vector *q_vector; + int i; + u16 tx_itr_param, rx_itr_param, tx_itr_prev; + bool need_reset = false; + + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { + /* reject Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EINVAL; + tx_itr_prev = adapter->rx_itr_setting; + } else { + tx_itr_prev = adapter->tx_itr_setting; + } + + if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || + (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + rx_itr_param = IXGBE_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = IXGBE_12K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + /* mixed Rx/Tx */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + adapter->tx_itr_setting = adapter->rx_itr_setting; + + /* detect ITR changes that require update of TXDCTL.WTHRESH */ + if ((adapter->tx_itr_setting != 1) && + (adapter->tx_itr_setting < IXGBE_100K_ITR)) { + if ((tx_itr_prev == 1) || + (tx_itr_prev >= IXGBE_100K_ITR)) + need_reset = true; + } else { + if ((tx_itr_prev != 1) && + (tx_itr_prev < 
IXGBE_100K_ITR)) + need_reset = true; + } + + /* check the old value and enable RSC if necessary */ + need_reset |= ixgbe_update_rsc(adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + ixgbe_write_eitr(q_vector); + } + + /* + * do reset here at the end to make sure EITR==0 case is handled + * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings + * also locks in RSC enable/disable which requires reset + */ + if (need_reset) + ixgbe_do_reset(netdev); + + return 0; +} + +static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + union ixgbe_atr_input *mask = &adapter->fdir_mask; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct hlist_node *node2; + struct ixgbe_fdir_filter *rule = NULL; + + /* report total rule count */ + cmd->data = (1024 << adapter->fdir_pballoc) - 2; + + hlist_for_each_entry_safe(rule, node2, + &adapter->fdir_filter_list, fdir_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + /* fill out the flow spec entry */ + + /* set flow type field */ + switch (rule->filter.formatted.flow_type) { + case IXGBE_ATR_FLOW_TYPE_TCPV4: + fsp->flow_type = TCP_V4_FLOW; + break; + case IXGBE_ATR_FLOW_TYPE_UDPV4: + fsp->flow_type = UDP_V4_FLOW; + break; + case IXGBE_ATR_FLOW_TYPE_SCTPV4: + fsp->flow_type = SCTP_V4_FLOW; + break; + case IXGBE_ATR_FLOW_TYPE_IPV4: + fsp->flow_type = IP_USER_FLOW; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + fsp->h_u.usr_ip4_spec.proto = 0; + fsp->m_u.usr_ip4_spec.proto = 0; + break; + default: + return -EINVAL; + } + + fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; + fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; + 
fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; + fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; + fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id; + fsp->m_ext.vlan_tci = mask->formatted.vlan_id; + fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; + fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; + fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); + fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); + fsp->flow_type |= FLOW_EXT; + + /* record action */ + if (rule->action == IXGBE_FDIR_DROP_QUEUE) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = rule->action; + + return 0; +} + +static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct hlist_node *node2; + struct ixgbe_fdir_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = (1024 << adapter->fdir_pballoc) - 2; + + hlist_for_each_entry_safe(rule, node2, + &adapter->fdir_filter_list, fdir_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on ixgbe */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fallthrough */ + case UDP_V4_FLOW: + if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fallthrough */ + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case 
TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fallthrough */ + case UDP_V6_FLOW: + if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fallthrough */ + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->fdir_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs); + break; + case ETHTOOL_GRXFH: + ret = ixgbe_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ixgbe_fdir_filter *input, + u16 sw_idx) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct hlist_node *node2; + struct ixgbe_fdir_filter *rule, *parent; + int err = -EINVAL; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry_safe(rule, node2, + &adapter->fdir_filter_list, fdir_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { + if (!input || (rule->filter.formatted.bkt_hash != + input->filter.formatted.bkt_hash)) { + err = ixgbe_fdir_erase_perfect_filter_82599(hw, + &rule->filter, + sw_idx); + } + + hlist_del(&rule->fdir_node); + kfree(rule); + adapter->fdir_filter_count--; + } + + /* + * If no input this was a delete, err should be 0 
if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; + + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->fdir_node, &parent->fdir_node); + else + hlist_add_head(&input->fdir_node, + &adapter->fdir_filter_list); + + /* update counts */ + adapter->fdir_filter_count++; + + return 0; +} + +static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, + u8 *flow_type) +{ + switch (fsp->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case UDP_V4_FLOW: + *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case SCTP_V4_FLOW: + *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case IP_USER_FLOW: + switch (fsp->h_u.usr_ip4_spec.proto) { + case IPPROTO_TCP: + *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case IPPROTO_UDP: + *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case IPPROTO_SCTP: + *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case 0: + if (!fsp->m_u.usr_ip4_spec.proto) { + *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; + break; + } + default: + return 0; + } + break; + default: + return 0; + } + + return 1; +} + +static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_fdir_filter *input; + union ixgbe_atr_input mask; + u8 queue; + int err; + + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + return -EOPNOTSUPP; + + /* ring_cookie is a masked into a set of queues and ixgbe pools or + * we use the drop index. 
+ */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + queue = IXGBE_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + + if (!vf && (ring >= adapter->num_rx_queues)) + return -EINVAL; + else if (vf && + ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) + return -EINVAL; + + /* Map the ring onto the absolute queue index */ + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * + adapter->num_rx_queues_per_pool) + ring; + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { + e_err(drv, "Location out of range\n"); + return -EINVAL; + } + + input = kzalloc(sizeof(*input), GFP_ATOMIC); + if (!input) + return -ENOMEM; + + memset(&mask, 0, sizeof(union ixgbe_atr_input)); + + /* set SW index */ + input->sw_idx = fsp->location; + + /* record flow type */ + if (!ixgbe_flowspec_to_flow_type(fsp, + &input->filter.formatted.flow_type)) { + e_err(drv, "Unrecognized flow type\n"); + goto err_out; + } + + mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) + mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; + + /* Copy input into formatted structures */ + input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; + input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; + input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; + mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; + input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + + if (fsp->flow_type & FLOW_EXT) { + input->filter.formatted.vm_pool = + (unsigned char)ntohl(fsp->h_ext.data[1]); 
+ mask.formatted.vm_pool = + (unsigned char)ntohl(fsp->m_ext.data[1]); + input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci; + mask.formatted.vlan_id = fsp->m_ext.vlan_tci; + input->filter.formatted.flex_bytes = + fsp->h_ext.vlan_etype; + mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; + } + + /* determine if we need to drop or route the packet */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + input->action = IXGBE_FDIR_DROP_QUEUE; + else + input->action = fsp->ring_cookie; + + spin_lock(&adapter->fdir_perfect_lock); + + if (hlist_empty(&adapter->fdir_filter_list)) { + /* save mask and program input mask into HW */ + memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); + err = ixgbe_fdir_set_input_mask_82599(hw, &mask); + if (err) { + e_err(drv, "Error writing mask\n"); + goto err_out_w_lock; + } + } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { + e_err(drv, "Only one mask supported per port\n"); + goto err_out_w_lock; + } + + /* apply mask and compute/store hash */ + ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); + + /* program filters to filter memory */ + err = ixgbe_fdir_write_perfect_filter_82599(hw, + &input->filter, input->sw_idx, queue); + if (err) + goto err_out_w_lock; + + ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +err_out_w_lock: + spin_unlock(&adapter->fdir_perfect_lock); +err_out: + kfree(input); + return -EINVAL; +} + +static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->fdir_perfect_lock); + err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +} + +#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ + IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) +static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter 
*adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags2 = adapter->flags2; + + /* + * RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags2 != adapter->flags2) { + struct ixgbe_hw *hw = &adapter->hw; + u32 mrqc; + unsigned int pf_pool = adapter->num_vfs; + + if ((hw->mac.type >= ixgbe_mac_X550) && + (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool)); + else + mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); + + if ((flags2 & UDP_RSS_FLAGS) && + 
!(adapter->flags2 & UDP_RSS_FLAGS)) + e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags2 = flags2; + + /* Perform hash on these packet types */ + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 + | IXGBE_MRQC_RSS_FIELD_IPV4_TCP + | IXGBE_MRQC_RSS_FIELD_IPV6 + | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP | + IXGBE_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; + + if ((hw->mac.type >= ixgbe_mac_X550) && + (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc); + else + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + } + + return 0; +} + +static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_SRXFH: + ret = ixgbe_set_rss_hash_opt(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter) +{ + if (adapter->hw.mac.type < ixgbe_mac_X550) + return 16; + else + return 64; +} + +static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return sizeof(adapter->rss_key); +} + +static u32 ixgbe_rss_indir_size(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return ixgbe_rss_indir_tbl_entries(adapter); +} + +static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir) +{ + int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter); + u16 rss_m = adapter->ring_feature[RING_F_RSS].mask; + + if 
(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + rss_m = adapter->ring_feature[RING_F_RSS].indices - 1; + + for (i = 0; i < reta_size; i++) + indir[i] = adapter->rss_indir_tbl[i] & rss_m; +} + +static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (indir) + ixgbe_get_reta(adapter, indir); + + if (key) + memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev)); + + return 0; +} + +static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i; + u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); + + if (hfunc) + return -EINVAL; + + /* Fill out the redirection table */ + if (indir) { + int max_queues = min_t(int, adapter->num_rx_queues, + ixgbe_rss_indir_tbl_max(adapter)); + + /*Allow at least 2 queues w/ SR-IOV.*/ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && + (max_queues < 2)) + max_queues = 2; + + /* Verify user input. 
*/ + for (i = 0; i < reta_entries; i++) + if (indir[i] >= max_queues) + return -EINVAL; + + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + } + + /* Fill out the rss hash key */ + if (key) + memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev)); + + ixgbe_store_reta(adapter); + + return 0; +} + +static int ixgbe_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + /* we always support timestamping disabled */ + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); + /* fallthrough */ + case ixgbe_mac_X540: + case ixgbe_mac_82599EB: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + info->rx_filters |= + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + break; + default: + return ethtool_op_get_ts_info(dev, info); + } + return 0; +} + +static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) +{ + unsigned int max_combined; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { + /* We only support one q_vector without MSI-X */ + max_combined = 1; + } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + /* Limit value based on the queue mask */ + max_combined = adapter->ring_feature[RING_F_RSS].mask + 1; + } else if (tcs > 1) { + /* For DCB report channels per traffic class */ + if (adapter->hw.mac.type == 
ixgbe_mac_82598EB) { + /* 8 TC w/ 4 queues per TC */ + max_combined = 4; + } else if (tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + max_combined = 8; + } else { + /* 4 TC w/ 16 queues per TC */ + max_combined = 16; + } + } else if (adapter->atr_sample_rate) { + /* support up to 64 queues with ATR */ + max_combined = IXGBE_MAX_FDIR_INDICES; + } else { + /* support up to 16 queues with RSS */ + max_combined = ixgbe_max_rss_indices(adapter); + } + + return max_combined; +} + +static void ixgbe_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + /* report maximum channels */ + ch->max_combined = ixgbe_max_channels(adapter); + + /* report info for other vector */ + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + /* record RSS queues */ + ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; + + /* nothing else to report if RSS is disabled */ + if (ch->combined_count == 1) + return; + + /* we do not support ATR queueing if SR-IOV is enabled */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + return; + + /* same thing goes for being DCB enabled */ + if (netdev_get_num_tc(dev) > 1) + return; + + /* if ATR is disabled we can exit */ + if (!adapter->atr_sample_rate) + return; + + /* report flow director queues as maximum channels */ + ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices; +} + +static int ixgbe_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + unsigned int count = ch->combined_count; + u8 max_rss_indices = ixgbe_max_rss_indices(adapter); + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* verify the number of channels does not exceed 
hardware limits */ + if (count > ixgbe_max_channels(adapter)) + return -EINVAL; + + /* update feature limits from largest to smallest supported values */ + adapter->ring_feature[RING_F_FDIR].limit = count; + + /* cap RSS limit */ + if (count > max_rss_indices) + count = max_rss_indices; + adapter->ring_feature[RING_F_RSS].limit = count; + +#ifdef IXGBE_FCOE + /* cap FCoE limit at 8 */ + if (count > IXGBE_FCRETA_SIZE) + count = IXGBE_FCRETA_SIZE; + adapter->ring_feature[RING_F_FCOE].limit = count; + +#endif + /* use setup TC to update any traffic class queue mapping */ + return ixgbe_setup_tc(dev, netdev_get_num_tc(dev)); +} + +static int ixgbe_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + s32 status; + u8 sff8472_rev, addr_mode; + bool page_swap = false; + + /* Check whether we support SFF-8472 or not */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_COMP, + &sff8472_rev); + if (status) + return -EIO; + + /* addressing mode is not supported */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_SWAP, + &addr_mode); + if (status) + return -EIO; + + if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { + e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); + page_swap = true; + } + + if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { + /* We have a SFP, but it does not support SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. 
*/ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int ixgbe_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u8 databyte = 0xFF; + int i = 0; + + if (ee->len == 0) + return -EINVAL; + + for (i = ee->offset; i < ee->offset + ee->len; i++) { + /* I2C reads can take long time */ + if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + return -EBUSY; + + if (i < ETH_MODULE_SFF_8079_LEN) + status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); + else + status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); + + if (status) + return -EIO; + + data[i - ee->offset] = databyte; + } + + return 0; +} + +static const struct ethtool_ops ixgbe_ethtool_ops = { + .get_settings = ixgbe_get_settings, + .set_settings = ixgbe_set_settings, + .get_drvinfo = ixgbe_get_drvinfo, + .get_regs_len = ixgbe_get_regs_len, + .get_regs = ixgbe_get_regs, + .get_wol = ixgbe_get_wol, + .set_wol = ixgbe_set_wol, + .nway_reset = ixgbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = ixgbe_get_eeprom_len, + .get_eeprom = ixgbe_get_eeprom, + .set_eeprom = ixgbe_set_eeprom, + .get_ringparam = ixgbe_get_ringparam, + .set_ringparam = ixgbe_set_ringparam, + .get_pauseparam = ixgbe_get_pauseparam, + .set_pauseparam = ixgbe_set_pauseparam, + .get_msglevel = ixgbe_get_msglevel, + .set_msglevel = ixgbe_set_msglevel, + .self_test = ixgbe_diag_test, + .get_strings = ixgbe_get_strings, + .set_phys_id = ixgbe_set_phys_id, + .get_sset_count = ixgbe_get_sset_count, + .get_ethtool_stats = ixgbe_get_ethtool_stats, + .get_coalesce = ixgbe_get_coalesce, + .set_coalesce = ixgbe_set_coalesce, + .get_rxnfc = ixgbe_get_rxnfc, + .set_rxnfc = ixgbe_set_rxnfc, + .get_rxfh_indir_size = ixgbe_rss_indir_size, + .get_rxfh_key_size = ixgbe_get_rxfh_key_size, + .get_rxfh = 
ixgbe_get_rxfh, + .set_rxfh = ixgbe_set_rxfh, + .get_channels = ixgbe_get_channels, + .set_channels = ixgbe_set_channels, + .get_ts_info = ixgbe_get_ts_info, + .get_module_info = ixgbe_get_module_info, + .get_module_eeprom = ixgbe_get_module_eeprom, +}; + +void ixgbe_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ixgbe_ethtool_ops; +} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.c new file mode 100644 index 000000000000..2a653ec954f5 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.c @@ -0,0 +1,1080 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2014 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * ixgbe_fcoe_clear_ddp - clear the given ddp context + * @ddp: ptr to the ixgbe_fcoe_ddp + * + * Returns : none + * + */ +static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) +{ + ddp->len = 0; + ddp->err = 1; + ddp->udl = NULL; + ddp->udp = 0UL; + ddp->sgl = NULL; + ddp->sgc = 0; +} + +/** + * ixgbe_fcoe_ddp_put - free the ddp context for a given xid + * @netdev: the corresponding net_device + * @xid: the xid that corresponding ddp will be freed + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_done + * and it is expected to be called by ULD, i.e., FCP layer of libfc + * to release the corresponding ddp context when the I/O is done. + * + * Returns : data length already ddp-ed in bytes + */ +int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) +{ + int len; + struct ixgbe_fcoe *fcoe; + struct ixgbe_adapter *adapter; + struct ixgbe_fcoe_ddp *ddp; + struct ixgbe_hw *hw; + u32 fcbuff; + + if (!netdev) + return 0; + + if (xid >= netdev->fcoe_ddp_xid) + return 0; + + adapter = netdev_priv(netdev); + fcoe = &adapter->fcoe; + ddp = &fcoe->ddp[xid]; + if (!ddp->udl) + return 0; + + hw = &adapter->hw; + len = ddp->len; + /* if no error then skip ddp context invalidation */ + if (!ddp->err) + goto skip_ddpinv; + + if (hw->mac.type == ixgbe_mac_X550) { + /* X550 does not require DDP FCoE lock */ + + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), + (xid | IXGBE_FCFLTRW_WE)); + + /* program FCBUFF */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0); + + /* program FCDMARW */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), + (xid | IXGBE_FCDMARW_WE)); + + /* read FCBUFF to check context invalidated */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), + (xid | 
IXGBE_FCDMARW_RE)); + fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid)); + } else { + /* other hardware requires DDP FCoE lock */ + spin_lock_bh(&fcoe->lock); + IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, + (xid | IXGBE_FCFLTRW_WE)); + IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, + (xid | IXGBE_FCDMARW_WE)); + + /* guaranteed to be invalidated after 100us */ + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, + (xid | IXGBE_FCDMARW_RE)); + fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF); + spin_unlock_bh(&fcoe->lock); + } + + if (fcbuff & IXGBE_FCBUFF_VALID) + usleep_range(100, 150); + +skip_ddpinv: + if (ddp->sgl) + dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, + DMA_FROM_DEVICE); + if (ddp->pool) { + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); + ddp->pool = NULL; + } + + ixgbe_fcoe_clear_ddp(ddp); + + return len; +} + +/** + * ixgbe_fcoe_ddp_setup - called to set up ddp context + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * Returns : 1 for success and 0 for no ddp + */ +static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc, + int target_mode) +{ + struct ixgbe_adapter *adapter; + struct ixgbe_hw *hw; + struct ixgbe_fcoe *fcoe; + struct ixgbe_fcoe_ddp *ddp; + struct ixgbe_fcoe_ddp_pool *ddp_pool; + struct scatterlist *sg; + unsigned int i, j, dmacount; + unsigned int len; + static const unsigned int bufflen = IXGBE_FCBUFF_MIN; + unsigned int firstoff = 0; + unsigned int lastsize; + unsigned int thisoff = 0; + unsigned int thislen = 0; + u32 fcbuff, fcdmarw, fcfltrw, fcrxctl; + dma_addr_t addr = 0; + + if (!netdev || !sgl) + return 0; + + adapter = netdev_priv(netdev); + if (xid >= netdev->fcoe_ddp_xid) { + e_warn(drv, "xid=0x%x out-of-range\n", xid); + return 0; + } + + /* no DDP if we are already down or resetting */ + if 
(test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return 0; + + fcoe = &adapter->fcoe; + ddp = &fcoe->ddp[xid]; + if (ddp->sgl) { + e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", + xid, ddp->sgl, ddp->sgc); + return 0; + } + ixgbe_fcoe_clear_ddp(ddp); + + + if (!fcoe->ddp_pool) { + e_warn(drv, "No ddp_pool resources allocated\n"); + return 0; + } + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); + if (!ddp_pool->pool) { + e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); + goto out_noddp; + } + + /* setup dma from scsi command sgl */ + dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); + if (dmacount == 0) { + e_err(drv, "xid 0x%x DMA map error\n", xid); + goto out_noddp; + } + + /* alloc the udl from per cpu ddp pool */ + ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); + if (!ddp->udl) { + e_err(drv, "failed allocated ddp context\n"); + goto out_noddp_unmap; + } + ddp->pool = ddp_pool->pool; + ddp->sgl = sgl; + ddp->sgc = sgc; + + j = 0; + for_each_sg(sgl, sg, dmacount, i) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + while (len) { + /* max number of buffers allowed in one DDP context */ + if (j >= IXGBE_BUFFCNT_MAX) { + ddp_pool->noddp++; + goto out_noddp_free; + } + + /* get the offset of length of current buffer */ + thisoff = addr & ((dma_addr_t)bufflen - 1); + thislen = min((bufflen - thisoff), len); + /* + * all but the 1st buffer (j == 0) + * must be aligned on bufflen + */ + if ((j != 0) && (thisoff)) + goto out_noddp_free; + /* + * all but the last buffer + * ((i == (dmacount - 1)) && (thislen == len)) + * must end at bufflen + */ + if (((i != (dmacount - 1)) || (thislen != len)) + && ((thislen + thisoff) != bufflen)) + goto out_noddp_free; + + ddp->udl[j] = (u64)(addr - thisoff); + /* only the first buffer may have none-zero offset */ + if (j == 0) + firstoff = thisoff; + len -= thislen; + addr += thislen; + j++; + } + } + /* only the last buffer 
may have non-full bufflen */ + lastsize = thisoff + thislen; + + /* + * lastsize can not be buffer len. + * If it is then adding another buffer with lastsize = 1. + */ + if (lastsize == bufflen) { + if (j >= IXGBE_BUFFCNT_MAX) { + ddp_pool->noddp_ext_buff++; + goto out_noddp_free; + } + + ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); + j++; + lastsize = 1; + } + put_cpu(); + + fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); + fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); + fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); + /* Set WRCONTX bit to allow DDP for target */ + if (target_mode) + fcbuff |= (IXGBE_FCBUFF_WRCONTX); + fcbuff |= (IXGBE_FCBUFF_VALID); + + fcdmarw = xid; + fcdmarw |= IXGBE_FCDMARW_WE; + fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT); + + fcfltrw = xid; + fcfltrw |= IXGBE_FCFLTRW_WE; + + /* program DMA context */ + hw = &adapter->hw; + + /* turn on last frame indication for target mode as FCP_RSPtarget is + * supposed to send FCP_RSP when it is done. 
*/ + if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) { + set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode); + fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL); + fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH; + IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl); + } + + if (hw->mac.type == ixgbe_mac_X550) { + /* X550 does not require DDP lock */ + + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid), + ddp->udp & DMA_BIT_MASK(32)); + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32); + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff); + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw); + /* program filter context */ + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw); + } else { + /* DDP lock for indirect DDP context access */ + spin_lock_bh(&fcoe->lock); + + IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); + IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); + IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); + /* program filter context */ + IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); + IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); + + spin_unlock_bh(&fcoe->lock); + } + + return 1; + +out_noddp_free: + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); + ixgbe_fcoe_clear_ddp(ddp); + +out_noddp_unmap: + dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); +out_noddp: + put_cpu(); + return 0; +} + +/** + * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup + * and is expected to be called from ULD, e.g., FCP layer of libfc + * to set up ddp for the corresponding xid of the given sglist for + * 
the corresponding I/O. + * + * Returns : 1 for success and 0 for no ddp + */ +int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); +} + +/** + * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_target + * and is expected to be called from ULD, e.g., FCP layer of libfc + * to set up ddp for the corresponding xid of the given sglist for + * the corresponding I/O. The DDP in target mode is a write I/O request + * from the initiator. + * + * Returns : 1 for success and 0 for no ddp + */ +int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); +} + +/** + * ixgbe_fcoe_ddp - check ddp status and mark it done + * @adapter: ixgbe adapter + * @rx_desc: advanced rx descriptor + * @skb: the skb holding the received data + * + * This checks ddp status. + * + * Returns : < 0 indicates an error or not a FCiE ddp, 0 indicates + * not passing the skb to ULD, > 0 indicates is the length of data + * being ddped. 
+ */ +int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + int rc = -EINVAL; + struct ixgbe_fcoe *fcoe; + struct ixgbe_fcoe_ddp *ddp; + struct fc_frame_header *fh; + struct fcoe_crc_eof *crc; + __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR); + __le32 ddp_err; + int ddp_max; + u32 fctl; + u16 xid; + + if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC)) + skb->ip_summed = CHECKSUM_NONE; + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)) + fh = (struct fc_frame_header *)(skb->data + + sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr)); + else + fh = (struct fc_frame_header *)(skb->data + + sizeof(struct fcoe_hdr)); + + fctl = ntoh24(fh->fh_f_ctl); + if (fctl & FC_FC_EX_CTX) + xid = be16_to_cpu(fh->fh_ox_id); + else + xid = be16_to_cpu(fh->fh_rx_id); + + ddp_max = IXGBE_FCOE_DDP_MAX; + /* X550 has different DDP Max limit */ + if (adapter->hw.mac.type == ixgbe_mac_X550) + ddp_max = IXGBE_FCOE_DDP_MAX_X550; + if (xid >= ddp_max) + return -EINVAL; + + fcoe = &adapter->fcoe; + ddp = &fcoe->ddp[xid]; + if (!ddp->udl) + return -EINVAL; + + ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE | + IXGBE_RXDADV_ERR_FCERR); + if (ddp_err) + return -EINVAL; + + switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) { + /* return 0 to bypass going to ULD for DDPed data */ + case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP): + /* update length of DDPed data */ + ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + rc = 0; + break; + /* unmap the sg list when FCPRSP is received */ + case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): + dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, + ddp->sgc, DMA_FROM_DEVICE); + ddp->err = ddp_err; + ddp->sgl = NULL; + ddp->sgc = 0; + /* fall through */ + /* if DDP length is present pass it through to ULD */ + case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP): + /* update length of DDPed data */ + ddp->len = 
le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + if (ddp->len) + rc = ddp->len; + break; + /* no match will return as an error */ + case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH): + default: + break; + } + + /* In target mode, check the last data frame of the sequence. + * For DDP in target mode, data is already DDPed but the header + * indication of the last data frame ould allow is to tell if we + * got all the data and the ULP can send FCP_RSP back, as this is + * not a full fcoe frame, we fill the trailer here so it won't be + * dropped by the ULP stack. + */ + if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && + (fctl & FC_FC_END_SEQ)) { + skb_linearize(skb); + crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); + crc->fcoe_eof = FC_EOF_T; + } + + return rc; +} + +/** + * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO) + * @tx_ring: tx desc ring + * @first: first tx_buffer structure containing skb, tx_flags, and protocol + * @hdr_len: hdr_len to be returned + * + * This sets up large send offload for FCoE + * + * Returns : 0 indicates success, < 0 for error + */ +int ixgbe_fso(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + u8 *hdr_len) +{ + struct sk_buff *skb = first->skb; + struct fc_frame_header *fh; + u32 vlan_macip_lens; + u32 fcoe_sof_eof = 0; + u32 mss_l4len_idx; + u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE; + u8 sof, eof; + + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { + dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", + skb_shinfo(skb)->gso_type); + return -EINVAL; + } + + /* resets the header to point fcoe/fc */ + skb_set_network_header(skb, skb->mac_len); + skb_set_transport_header(skb, skb->mac_len + + sizeof(struct fcoe_hdr)); + + /* sets up SOF and ORIS */ + sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; + switch (sof) { + case FC_SOF_I2: + fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS; + break; + case FC_SOF_I3: + fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF | + IXGBE_ADVTXD_FCOEF_ORIS; + 
break; + case FC_SOF_N2: + break; + case FC_SOF_N3: + fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF; + break; + default: + dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof); + return -EINVAL; + } + + /* the first byte of the last dword is EOF */ + skb_copy_bits(skb, skb->len - 4, &eof, 1); + /* sets up EOF and ORIE */ + switch (eof) { + case FC_EOF_N: + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N; + break; + case FC_EOF_T: + /* lso needs ORIE */ + if (skb_is_gso(skb)) + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N | + IXGBE_ADVTXD_FCOEF_ORIE; + else + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T; + break; + case FC_EOF_NI: + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI; + break; + case FC_EOF_A: + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; + break; + default: + dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof); + return -EINVAL; + } + + /* sets up PARINC indicating data offset */ + fh = (struct fc_frame_header *)skb_transport_header(skb); + if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC; + + /* include trailer in headlen as it is replicated per frame */ + *hdr_len = sizeof(struct fcoe_crc_eof); + + /* hdr_len includes fc_hdr if FCoE LSO is enabled */ + if (skb_is_gso(skb)) { + *hdr_len += skb_transport_offset(skb) + + sizeof(struct fc_frame_header); + /* update gso_segs and bytecount */ + first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, + skb_shinfo(skb)->gso_size); + first->bytecount += (first->gso_segs - 1) * *hdr_len; + first->tx_flags |= IXGBE_TX_FLAGS_TSO; + /* Hardware expects L4T to be RSV for FCoE TSO */ + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV; + } + + /* set flag indicating FCOE to ixgbe_tx_map call */ + first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC; + + /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */ + mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + vlan_macip_lens = skb_transport_offset(skb) + + sizeof(struct fc_frame_header); + 
vlan_macip_lens |= (skb_transport_offset(skb) - 4) + << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + /* write context desc */ + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, + type_tucmd, mss_l4len_idx); + + return 0; +} + +static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu) +{ + struct ixgbe_fcoe_ddp_pool *ddp_pool; + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + dma_pool_destroy(ddp_pool->pool); + ddp_pool->pool = NULL; +} + +static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, + struct device *dev, + unsigned int cpu) +{ + struct ixgbe_fcoe_ddp_pool *ddp_pool; + struct dma_pool *pool; + char pool_name[32]; + + snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu); + + pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX, + IXGBE_FCPTR_ALIGN, PAGE_SIZE); + if (!pool) + return -ENOMEM; + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + ddp_pool->pool = pool; + ddp_pool->noddp = 0; + ddp_pool->noddp_ext_buff = 0; + + return 0; +} + +/** + * ixgbe_configure_fcoe - configures registers for fcoe at start + * @adapter: ptr to ixgbe adapter + * + * This sets up FCoE related registers + * + * Returns : none + */ +void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; + struct ixgbe_hw *hw = &adapter->hw; + int i, fcoe_q, fcoe_i, fcoe_q_h = 0; + int fcreta_size; + u32 etqf; + + /* Minimal functionality for FCoE requires at least CRC offloads */ + if (!(adapter->netdev->features & NETIF_F_FCOE_CRC)) + return; + + /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */ + etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + etqf |= IXGBE_ETQF_POOL_ENABLE; + etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; + } + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf); + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 
0); + + /* leave registers un-configured if FCoE is disabled */ + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return; + + /* Use one or more Rx queues for FCoE by redirection table */ + fcreta_size = IXGBE_FCRETA_SIZE; + if (adapter->hw.mac.type == ixgbe_mac_X550) + fcreta_size = IXGBE_FCRETA_SIZE_X550; + + for (i = 0; i < fcreta_size; i++) { + if (adapter->hw.mac.type == ixgbe_mac_X550) { + int fcoe_i_h = fcoe->offset + ((i + fcreta_size) % + fcoe->indices); + fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx; + fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) & + IXGBE_FCRETA_ENTRY_HIGH_MASK; + } + + fcoe_i = fcoe->offset + (i % fcoe->indices); + fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; + fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + fcoe_q |= fcoe_q_h; + IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); + } + IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); + + /* Enable L2 EtherType filter for FIP */ + etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + etqf |= IXGBE_ETQF_POOL_ENABLE; + etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; + } + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf); + + /* Send FIP frames to the first FCoE queue */ + fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), + IXGBE_ETQS_QUEUE_EN | + (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); + + /* Configure FCoE Rx control */ + IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, + IXGBE_FCRXCTRL_FCCRCBO | + (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); +} + +/** + * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources + * @adapter : ixgbe adapter + * + * Cleans up outstanding ddp context resources + * + * Returns : none + */ +void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) +{ + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + int cpu, i, ddp_max; + + /* do nothing if no DDP pools were allocated */ + if (!fcoe->ddp_pool) + return; + + ddp_max = IXGBE_FCOE_DDP_MAX; + /* 
X550 has different DDP Max limit */ + if (adapter->hw.mac.type == ixgbe_mac_X550) + ddp_max = IXGBE_FCOE_DDP_MAX_X550; + + for (i = 0; i < ddp_max; i++) + ixgbe_fcoe_ddp_put(adapter->netdev, i); + + for_each_possible_cpu(cpu) + ixgbe_fcoe_dma_pool_free(fcoe, cpu); + + dma_unmap_single(&adapter->pdev->dev, + fcoe->extra_ddp_buffer_dma, + IXGBE_FCBUFF_MIN, + DMA_FROM_DEVICE); + kfree(fcoe->extra_ddp_buffer); + + fcoe->extra_ddp_buffer = NULL; + fcoe->extra_ddp_buffer_dma = 0; +} + +/** + * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources + * @adapter: ixgbe adapter + * + * Sets up ddp context resouces + * + * Returns : 0 indicates success or -EINVAL on failure + */ +int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) +{ + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + struct device *dev = &adapter->pdev->dev; + void *buffer; + dma_addr_t dma; + unsigned int cpu; + + /* do nothing if no DDP pools were allocated */ + if (!fcoe->ddp_pool) + return 0; + + /* Extra buffer to be shared by all DDPs for HW work around */ + buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); + if (!buffer) + return -ENOMEM; + + dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma)) { + e_err(drv, "failed to map extra DDP buffer\n"); + kfree(buffer); + return -ENOMEM; + } + + fcoe->extra_ddp_buffer = buffer; + fcoe->extra_ddp_buffer_dma = dma; + + /* allocate pci pool for each cpu */ + for_each_possible_cpu(cpu) { + int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu); + if (!err) + continue; + + e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); + ixgbe_free_fcoe_ddp_resources(adapter); + return -ENOMEM; + } + + return 0; +} + +static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + + if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) + return -EINVAL; + + fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool); + + if (!fcoe->ddp_pool) { + e_err(drv, 
"failed to allocate percpu DDP resources\n"); + return -ENOMEM; + } + + adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + /* X550 has different DDP Max limit */ + if (adapter->hw.mac.type == ixgbe_mac_X550) + adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1; + + return 0; +} + +static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + + adapter->netdev->fcoe_ddp_xid = 0; + + if (!fcoe->ddp_pool) + return; + + free_percpu(fcoe->ddp_pool); + fcoe->ddp_pool = NULL; +} + +/** + * ixgbe_fcoe_enable - turn on FCoE offload feature + * @netdev: the corresponding netdev + * + * Turns on FCoE offload feature in 82599. + * + * Returns : 0 indicates success or -EINVAL on failure + */ +int ixgbe_fcoe_enable(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + + atomic_inc(&fcoe->refcnt); + + if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) + return -EINVAL; + + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + return -EINVAL; + + e_info(drv, "Enabling FCoE offload features.\n"); + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n"); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_stop(netdev); + + /* Allocate per CPU memory to track DDP pools */ + ixgbe_fcoe_ddp_enable(adapter); + + /* enable FCoE and notify stack */ + adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; + netdev->features |= NETIF_F_FCOE_MTU; + netdev_features_change(netdev); + + /* release existing queues and reallocate them */ + ixgbe_clear_interrupt_scheme(adapter); + ixgbe_init_interrupt_scheme(adapter); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_open(netdev); + + return 0; +} + +/** + * ixgbe_fcoe_disable - turn off FCoE offload feature + * @netdev: the corresponding netdev + * + * Turns off FCoE offload feature in 82599. 
+ * + * Returns : 0 indicates success or -EINVAL on failure + */ +int ixgbe_fcoe_disable(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (!atomic_dec_and_test(&adapter->fcoe.refcnt)) + return -EINVAL; + + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return -EINVAL; + + e_info(drv, "Disabling FCoE offload features.\n"); + if (netif_running(netdev)) + netdev->netdev_ops->ndo_stop(netdev); + + /* Free per CPU memory to track DDP pools */ + ixgbe_fcoe_ddp_disable(adapter); + + /* disable FCoE and notify stack */ + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + netdev->features &= ~NETIF_F_FCOE_MTU; + + netdev_features_change(netdev); + + /* release existing queues and reallocate them */ + ixgbe_clear_interrupt_scheme(adapter); + ixgbe_init_interrupt_scheme(adapter); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_open(netdev); + + return 0; +} + +/** + * ixgbe_fcoe_get_wwn - get world wide name for the node or the port + * @netdev : ixgbe adapter + * @wwn : the world wide name + * @type: the type of world wide name + * + * Returns the node or port world wide name if both the prefix and the san + * mac address are valid, then the wwn is formed based on the NAA-2 for + * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3). 
+ * + * Returns : 0 on success + */ +int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) +{ + u16 prefix = 0xffff; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_mac_info *mac = &adapter->hw.mac; + + switch (type) { + case NETDEV_FCOE_WWNN: + prefix = mac->wwnn_prefix; + break; + case NETDEV_FCOE_WWPN: + prefix = mac->wwpn_prefix; + break; + default: + break; + } + + if ((prefix != 0xffff) && + is_valid_ether_addr(mac->san_addr)) { + *wwn = ((u64) prefix << 48) | + ((u64) mac->san_addr[0] << 40) | + ((u64) mac->san_addr[1] << 32) | + ((u64) mac->san_addr[2] << 24) | + ((u64) mac->san_addr[3] << 16) | + ((u64) mac->san_addr[4] << 8) | + ((u64) mac->san_addr[5]); + return 0; + } + return -EINVAL; +} + +/** + * ixgbe_fcoe_get_hbainfo - get FCoE HBA information + * @netdev : ixgbe adapter + * @info : HBA information + * + * Returns ixgbe HBA information + * + * Returns : 0 on success + */ +int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, + struct netdev_fcoe_hbainfo *info) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int i, pos; + u8 buf[8]; + + if (!info) + return -EINVAL; + + /* Don't return information on unsupported devices */ + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return -EINVAL; + + /* Manufacturer */ + snprintf(info->manufacturer, sizeof(info->manufacturer), + "Intel Corporation"); + + /* Serial Number */ + + /* Get the PCI-e Device Serial Number Capability */ + pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN); + if (pos) { + pos += 4; + for (i = 0; i < 8; i++) + pci_read_config_byte(adapter->pdev, pos + i, &buf[i]); + + snprintf(info->serial_number, sizeof(info->serial_number), + "%02X%02X%02X%02X%02X%02X%02X%02X", + buf[7], buf[6], buf[5], buf[4], + buf[3], buf[2], buf[1], buf[0]); + } else + snprintf(info->serial_number, sizeof(info->serial_number), + "Unknown"); + + /* Hardware Version */ + snprintf(info->hardware_version, + 
sizeof(info->hardware_version), + "Rev %d", hw->revision_id); + /* Driver Name/Version */ + snprintf(info->driver_version, + sizeof(info->driver_version), + "%s v%s", + ixgbe_driver_name, + ixgbe_driver_version); + /* Firmware Version */ + snprintf(info->firmware_version, + sizeof(info->firmware_version), + "0x%08x", + (adapter->eeprom_verh << 16) | + adapter->eeprom_verl); + + /* Model */ + if (hw->mac.type == ixgbe_mac_82599EB) { + snprintf(info->model, + sizeof(info->model), + "Intel 82599"); + } else if (hw->mac.type == ixgbe_mac_X550) { + snprintf(info->model, + sizeof(info->model), + "Intel X550"); + } else { + snprintf(info->model, + sizeof(info->model), + "Intel X540"); + } + + /* Model Description */ + snprintf(info->model_description, + sizeof(info->model_description), + "%s", + ixgbe_default_device_descr); + + return 0; +} + +/** + * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to + * @adapter - pointer to the device adapter structure + * + * Return : TC that FCoE is mapped to + */ +u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter) +{ +#ifdef CONFIG_IXGBE_DCB + return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); +#else + return 0; +#endif +} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.h new file mode 100644 index 000000000000..38385876effb --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.h @@ -0,0 +1,88 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_FCOE_H +#define _IXGBE_FCOE_H + +#include +#include + +/* shift bits within STAT fo FCSTAT */ +#define IXGBE_RXDADV_FCSTAT_SHIFT 4 + +/* ddp user buffer */ +#define IXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */ +#define IXGBE_FCPTR_ALIGN 16 +#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t)) +#define IXGBE_FCBUFF_4KB 0x0 +#define IXGBE_FCBUFF_8KB 0x1 +#define IXGBE_FCBUFF_16KB 0x2 +#define IXGBE_FCBUFF_64KB 0x3 +#define IXGBE_FCBUFF_MAX 65536 /* 64KB max */ +#define IXGBE_FCBUFF_MIN 4096 /* 4KB min */ +#define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */ +#define IXGBE_FCOE_DDP_MAX_X550 2048 /* 11 bits xid */ + +/* Default traffic class to use for FCoE */ +#define IXGBE_FCOE_DEFTC 3 + +/* fcerr */ +#define IXGBE_FCERR_BADCRC 0x00100000 + +/* FCoE DDP for target mode */ +#define __IXGBE_FCOE_TARGET 1 + +struct ixgbe_fcoe_ddp { + int len; + u32 err; + unsigned int sgc; + struct scatterlist *sgl; + dma_addr_t udp; + u64 *udl; + struct dma_pool *pool; +}; + +/* per cpu variables */ 
+struct ixgbe_fcoe_ddp_pool { + struct dma_pool *pool; + u64 noddp; + u64 noddp_ext_buff; +}; + +struct ixgbe_fcoe { + struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool; + atomic_t refcnt; + spinlock_t lock; + struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX_X550]; + void *extra_ddp_buffer; + dma_addr_t extra_ddp_buffer_dma; + unsigned long mode; + u8 up; +}; + +#endif /* _IXGBE_FCOE_H */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_lib.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_lib.c new file mode 100644 index 000000000000..10d29678d65e --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_lib.c @@ -0,0 +1,1228 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2016 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "ixgbe_sriov.h" + +#ifdef CONFIG_IXGBE_DCB +/** + * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE are enabled along + * with VMDq. + * + **/ +static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) +{ +#ifdef IXGBE_FCOE + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; +#endif /* IXGBE_FCOE */ + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + int i; + u16 reg_idx; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } + + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + +#ifdef IXGBE_FCOE + /* nothing to do if FCoE is disabled */ + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return true; + + /* The work is already done if the FCoE ring is shared */ + if (fcoe->offset < tcs) + return true; + + /* The 
FCoE rings exist separately, we need to move their reg_idx */ + if (fcoe->indices) { + u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter); + + reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; + for (i = fcoe->offset; i < adapter->num_rx_queues; i++) { + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; + adapter->rx_ring[i]->reg_idx = reg_idx; + reg_idx++; + } + + reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; + for (i = fcoe->offset; i < adapter->num_tx_queues; i++) { + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; + adapter->tx_ring[i]->reg_idx = reg_idx; + reg_idx++; + } + } + +#endif /* IXGBE_FCOE */ + return true; +} + +/* ixgbe_get_first_reg_idx - Return first register index associated with ring */ +static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, + unsigned int *tx, unsigned int *rx) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u8 num_tcs = netdev_get_num_tc(dev); + + *tx = 0; + *rx = 0; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + /* TxQs/TC: 4 RxQs/TC: 8 */ + *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */ + *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */ + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + if (num_tcs > 4) { + /* + * TCs : TC0/1 TC2/3 TC4-7 + * TxQs/TC: 32 16 8 + * RxQs/TC: 16 16 16 + */ + *rx = tc << 4; + if (tc < 3) + *tx = tc << 5; /* 0, 32, 64 */ + else if (tc < 5) + *tx = (tc + 2) << 4; /* 80, 96 */ + else + *tx = (tc + 8) << 3; /* 104, 112, 120 */ + } else { + /* + * TCs : TC0 TC1 TC2/3 + * TxQs/TC: 64 32 16 + * RxQs/TC: 32 32 32 + */ + *rx = tc << 5; + if (tc < 2) + *tx = tc << 6; /* 0, 64 */ + else + *tx = (tc + 4) << 4; /* 96, 112 */ + } + default: + break; + } +} + +/** + * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB + * @adapter: board private structure to 
initialize + * + * Cache the descriptor ring offsets for DCB to the assigned rings. + * + **/ +static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + unsigned int tx_idx, rx_idx; + int tc, offset, rss_i, i; + u8 num_tcs = netdev_get_num_tc(dev); + + /* verify we have DCB queueing enabled before proceeding */ + if (num_tcs <= 1) + return false; + + rss_i = adapter->ring_feature[RING_F_RSS].indices; + + for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { + ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx); + for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { + adapter->tx_ring[offset + i]->reg_idx = tx_idx; + adapter->rx_ring[offset + i]->reg_idx = rx_idx; + adapter->tx_ring[offset + i]->dcb_tc = tc; + adapter->rx_ring[offset + i]->dcb_tc = tc; + } + } + + return true; +} + +#endif +/** + * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov + * @adapter: board private structure to initialize + * + * SR-IOV doesn't use any descriptor rings but changes the default if + * no other mapping is used. 
+ * + */ +static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) +{ +#ifdef IXGBE_FCOE + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; +#endif /* IXGBE_FCOE */ + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; + int i; + u16 reg_idx; + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { +#ifdef IXGBE_FCOE + /* Allow first FCoE queue to be mapped as RSS */ + if (fcoe->offset && (i > fcoe->offset)) + break; +#endif + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } + +#ifdef IXGBE_FCOE + /* FCoE uses a linear block of queues so just assigning 1:1 */ + for (; i < adapter->num_rx_queues; i++, reg_idx++) + adapter->rx_ring[i]->reg_idx = reg_idx; + +#endif + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { +#ifdef IXGBE_FCOE + /* Allow first FCoE queue to be mapped as RSS */ + if (fcoe->offset && (i > fcoe->offset)) + break; +#endif + /* If we are greater than indices move to next pool */ + if ((reg_idx & rss->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + +#ifdef IXGBE_FCOE + /* FCoE uses a linear block of queues so just assigning 1:1 */ + for (; i < adapter->num_tx_queues; i++, reg_idx++) + adapter->tx_ring[i]->reg_idx = reg_idx; + +#endif + + return true; +} + +/** + * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets 
for RSS to the assigned rings. + * + **/ +static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = i; + + return true; +} + +/** + * ixgbe_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + * + * Note, the order the various feature calls is important. It must start with + * the "most" features enabled at the same time, then trickle down to the + * least amount of features turned on at once. + **/ +static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) +{ + /* start with default case */ + adapter->rx_ring[0]->reg_idx = 0; + adapter->tx_ring[0]->reg_idx = 0; + +#ifdef CONFIG_IXGBE_DCB + if (ixgbe_cache_ring_dcb_sriov(adapter)) + return; + + if (ixgbe_cache_ring_dcb(adapter)) + return; + +#endif + if (ixgbe_cache_ring_sriov(adapter)) + return; + + ixgbe_cache_ring_rss(adapter); +} + +#define IXGBE_RSS_64Q_MASK 0x3F +#define IXGBE_RSS_16Q_MASK 0xF +#define IXGBE_RSS_8Q_MASK 0x7 +#define IXGBE_RSS_4Q_MASK 0x3 +#define IXGBE_RSS_2Q_MASK 0x1 +#define IXGBE_RSS_DISABLED_MASK 0x0 + +#ifdef CONFIG_IXGBE_DCB +/** + * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB + * @adapter: board private structure to initialize + * + * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * and VM pools where appropriate. Also assign queues based on DCB + * priorities and map accordingly.. 
+ * + **/ +static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) +{ + int i; + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; +#ifdef IXGBE_FCOE + u16 fcoe_i = 0; +#endif + u8 tcs = netdev_get_num_tc(adapter->netdev); + + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return false; + + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* 16 pools w/ 8 TC per pool */ + if (tcs > 4) { + vmdq_i = min_t(u16, vmdq_i, 16); + vmdq_m = IXGBE_82599_VMDQ_8Q_MASK; + /* 32 pools w/ 4 TC per pool */ + } else { + vmdq_i = min_t(u16, vmdq_i, 32); + vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; + } + +#ifdef IXGBE_FCOE + /* queues in the remaining pools are available for FCoE */ + fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; + +#endif + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* + * We do not support DCB, VMDq, and RSS all simultaneously + * so we will disable RSS since it is the lowest priority + */ + adapter->ring_feature[RING_F_RSS].indices = 1; + adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + + adapter->num_rx_pools = vmdq_i; + adapter->num_rx_queues_per_pool = tcs; + + adapter->num_tx_queues = vmdq_i * tcs; + adapter->num_rx_queues = vmdq_i * tcs; + +#ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + struct ixgbe_ring_feature *fcoe; + + fcoe = &adapter->ring_feature[RING_F_FCOE]; + + /* limit ourselves based on feature limits */ + fcoe_i = min_t(u16, fcoe_i, 
fcoe->limit); + + if (fcoe_i) { + /* alloc queues for FCoE separately */ + fcoe->indices = fcoe_i; + fcoe->offset = vmdq_i * tcs; + + /* add queues to adapter */ + adapter->num_tx_queues += fcoe_i; + adapter->num_rx_queues += fcoe_i; + } else if (tcs > 1) { + /* use queue belonging to FcoE TC */ + fcoe->indices = 1; + fcoe->offset = ixgbe_fcoe_get_tc(adapter); + } else { + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + + fcoe->indices = 0; + fcoe->offset = 0; + } + } + +#endif /* IXGBE_FCOE */ + /* configure TC to queue mapping */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(adapter->netdev, i, 1, i); + + return true; +} + +static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_ring_feature *f; + int rss_i, rss_m, i; + int tcs; + + /* Map queue offset and counts onto allocated tx queues */ + tcs = netdev_get_num_tc(dev); + + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* determine the upper limit for our current DCB mode */ + rss_i = dev->num_tx_queues / tcs; + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + /* 8 TC w/ 4 queues per TC */ + rss_i = min_t(u16, rss_i, 4); + rss_m = IXGBE_RSS_4Q_MASK; + } else if (tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + rss_i = min_t(u16, rss_i, 8); + rss_m = IXGBE_RSS_8Q_MASK; + } else { + /* 4 TC w/ 16 queues per TC */ + rss_i = min_t(u16, rss_i, 16); + rss_m = IXGBE_RSS_16Q_MASK; + } + + /* set RSS mask and indices */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = min_t(int, rss_i, f->limit); + f->indices = rss_i; + f->mask = rss_m; + + /* disable ATR as it is not supported when multiple TCs are enabled */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + +#ifdef IXGBE_FCOE + /* FCoE enabled queues require special configuration indexed + * by feature specific indices and offset. Here we map FCoE + * indices onto the DCB queue pairs allowing FCoE to own + * configuration later. 
+ */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + u8 tc = ixgbe_fcoe_get_tc(adapter); + + f = &adapter->ring_feature[RING_F_FCOE]; + f->indices = min_t(u16, rss_i, f->limit); + f->offset = rss_i * tc; + } + +#endif /* IXGBE_FCOE */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(dev, i, rss_i, rss_i * i); + + adapter->num_tx_queues = rss_i * tcs; + adapter->num_rx_queues = rss_i * tcs; + + return true; +} + +#endif +/** + * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices + * @adapter: board private structure to initialize + * + * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * and VM pools where appropriate. If RSS is available, then also try and + * enable RSS and map accordingly. + * + **/ +static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) +{ + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; + u16 rss_m = IXGBE_RSS_DISABLED_MASK; +#ifdef IXGBE_FCOE + u16 fcoe_i = 0; +#endif + bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); + + /* only proceed if SR-IOV is enabled */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return false; + + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); + + /* 64 pool mode with 2 queues per pool */ + if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) { + vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; + rss_m = IXGBE_RSS_2Q_MASK; + rss_i = min_t(u16, rss_i, 2); + /* 32 pool mode with up to 4 queues per pool */ + } else { + vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; + rss_m = IXGBE_RSS_4Q_MASK; + /* We can support 4, 2, or 1 queues */ + rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 
2 : 1; + } + +#ifdef IXGBE_FCOE + /* queues in the remaining pools are available for FCoE */ + fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); + +#endif + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* limit RSS based on user input and save for later use */ + adapter->ring_feature[RING_F_RSS].indices = rss_i; + adapter->ring_feature[RING_F_RSS].mask = rss_m; + + adapter->num_rx_pools = vmdq_i; + adapter->num_rx_queues_per_pool = rss_i; + + adapter->num_rx_queues = vmdq_i * rss_i; + adapter->num_tx_queues = vmdq_i * rss_i; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + +#ifdef IXGBE_FCOE + /* + * FCoE can use rings from adjacent buffers to allow RSS + * like behavior. To account for this we need to add the + * FCoE indices to the total ring count. 
+ */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + struct ixgbe_ring_feature *fcoe; + + fcoe = &adapter->ring_feature[RING_F_FCOE]; + + /* limit ourselves based on feature limits */ + fcoe_i = min_t(u16, fcoe_i, fcoe->limit); + + if (vmdq_i > 1 && fcoe_i) { + /* alloc queues for FCoE separately */ + fcoe->indices = fcoe_i; + fcoe->offset = vmdq_i * rss_i; + } else { + /* merge FCoE queues with RSS queues */ + fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus()); + + /* limit indices to rss_i if MSI-X is disabled */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + fcoe_i = rss_i; + + /* attempt to reserve some queues for just FCoE */ + fcoe->indices = min_t(u16, fcoe_i, fcoe->limit); + fcoe->offset = fcoe_i - fcoe->indices; + + fcoe_i -= rss_i; + } + + /* add queues to adapter */ + adapter->num_tx_queues += fcoe_i; + adapter->num_rx_queues += fcoe_i; + } + +#endif + return true; +} + +/** + * ixgbe_set_rss_queues - Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring_feature *f; + u16 rss_i; + + /* set mask for 16 queue limit of RSS */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = f->limit; + + f->indices = rss_i; + + if (hw->mac.type < ixgbe_mac_X550) + f->mask = IXGBE_RSS_16Q_MASK; + else + f->mask = IXGBE_RSS_64Q_MASK; + + /* disable ATR by default, it will be configured below */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + + /* + * Use Flow Director in addition to RSS to ensure the best + * distribution of flows across cores, even when an FDIR flow + * isn't matched. 
+ */ + if (rss_i > 1 && adapter->atr_sample_rate) { + f = &adapter->ring_feature[RING_F_FDIR]; + + rss_i = f->indices = f->limit; + + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + } + +#ifdef IXGBE_FCOE + /* + * FCoE can exist on the same rings as standard network traffic + * however it is preferred to avoid that if possible. In order + * to get the best performance we allocate as many FCoE queues + * as we can and we place them at the end of the ring array to + * avoid sharing queues with standard RSS on systems with 24 or + * more CPUs. + */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + struct net_device *dev = adapter->netdev; + u16 fcoe_i; + + f = &adapter->ring_feature[RING_F_FCOE]; + + /* merge FCoE queues with RSS queues */ + fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus()); + fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues); + + /* limit indices to rss_i if MSI-X is disabled */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + fcoe_i = rss_i; + + /* attempt to reserve some queues for just FCoE */ + f->indices = min_t(u16, fcoe_i, f->limit); + f->offset = fcoe_i - f->indices; + rss_i = max_t(u16, fcoe_i, rss_i); + } + +#endif /* IXGBE_FCOE */ + adapter->num_rx_queues = rss_i; + adapter->num_tx_queues = rss_i; + + return true; +} + +/** + * ixgbe_set_num_queues - Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. 
+ * + **/ +static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) +{ + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_rx_pools = adapter->num_rx_queues; + adapter->num_rx_queues_per_pool = 1; + +#ifdef CONFIG_IXGBE_DCB + if (ixgbe_set_dcb_sriov_queues(adapter)) + return; + + if (ixgbe_set_dcb_queues(adapter)) + return; + +#endif + if (ixgbe_set_sriov_queues(adapter)) + return; + + ixgbe_set_rss_queues(adapter); +} + +/** + * ixgbe_acquire_msix_vectors - acquire MSI-X vectors + * @adapter: board private structure + * + * Attempts to acquire a suitable range of MSI-X vector interrupts. Will + * return a negative error code if unable to acquire MSI-X vectors for any + * reason. + */ +static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i, vectors, vector_threshold; + + /* We start by asking for one vector per queue pair */ + vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); + + /* It is easy to be greedy for MSI-X vectors. However, it really + * doesn't do much good if we have a lot more vectors than CPUs. We'll + * be somewhat conservative and only ask for (roughly) the same number + * of vectors as there are CPUs. + */ + vectors = min_t(int, vectors, num_online_cpus()); + + /* Some vectors are necessary for non-queue interrupts */ + vectors += NON_Q_VECTORS; + + /* Hardware can only support a maximum of hw.mac->max_msix_vectors. + * With features such as RSS and VMDq, we can easily surpass the + * number of Rx and Tx descriptor queues supported by our device. + * Thus, we cap the maximum in the rare cases where the CPU count also + * exceeds our vector limit + */ + vectors = min_t(int, vectors, hw->mac.max_msix_vectors); + + /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0] + * handler, and (2) an Other (Link Status Change, etc.) handler. 
+ */ + vector_threshold = MIN_MSIX_COUNT; + + adapter->msix_entries = kcalloc(vectors, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < vectors; i++) + adapter->msix_entries[i].entry = i; + + vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vector_threshold, vectors); + + if (vectors < 0) { + /* A negative count of allocated vectors indicates an error in + * acquiring within the specified range of MSI-X vectors + */ + e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", + vectors); + + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + return vectors; + } + + /* we successfully allocated some number of vectors within our + * requested range. + */ + adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; + + /* Adjust for only the vectors we'll use, which is minimum + * of max_q_vectors, or the number of vectors we were allocated. + */ + vectors -= NON_Q_VECTORS; + adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors); + + return 0; +} + +static void ixgbe_add_ring(struct ixgbe_ring *ring, + struct ixgbe_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +/** + * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
+ **/ +static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, + int v_count, int v_idx, + int txr_count, int txr_idx, + int rxr_count, int rxr_idx) +{ + struct ixgbe_q_vector *q_vector; + struct ixgbe_ring *ring; + int node = NUMA_NO_NODE; + int cpu = -1; + int ring_count, size; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + ring_count = txr_count + rxr_count; + size = sizeof(struct ixgbe_q_vector) + + (sizeof(struct ixgbe_ring) * ring_count); + + /* customize cpu for Flow Director mapping */ + if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + if (rss_i > 1 && adapter->atr_sample_rate) { + if (cpu_online(v_idx)) { + cpu = v_idx; + node = cpu_to_node(cpu); + } + } + } + + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); + q_vector->numa_node = node; + +#ifdef CONFIG_IXGBE_DCA + /* initialize CPU for DCA */ + q_vector->cpu = -1; + +#endif + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + ixgbe_poll, 64); + +#ifdef CONFIG_NET_RX_BUSY_POLL + /* initialize busy poll */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE); + +#endif + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* intialize ITR */ + if (txr_count && !rxr_count) { + /* tx only vector */ + if (adapter->tx_itr_setting == 1) + q_vector->itr = IXGBE_12K_ITR; + else + q_vector->itr = adapter->tx_itr_setting; + } else { + /* rx or rx/tx vector */ + if (adapter->rx_itr_setting == 1) + q_vector->itr = IXGBE_20K_ITR; + else + 
q_vector->itr = adapter->rx_itr_setting; + } + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + ixgbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + if (adapter->num_rx_pools > 1) + ring->queue_index = + txr_idx % adapter->num_rx_queues_per_pool; + else + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + while (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + ixgbe_add_ring(ring, &q_vector->rx); + + /* + * 82599 errata, UDP frames with a 0 checksum + * can be marked as checksum errors. 
+ */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) + set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); + +#ifdef IXGBE_FCOE + if (adapter->netdev->features & NETIF_F_FCOE_MTU) { + struct ixgbe_ring_feature *f; + f = &adapter->ring_feature[RING_F_FCOE]; + if ((rxr_idx >= f->offset) && + (rxr_idx < f->offset + f->indices)) + set_bit(__IXGBE_RX_FCOE, &ring->state); + } + +#endif /* IXGBE_FCOE */ + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + if (adapter->num_rx_pools > 1) + ring->queue_index = + rxr_idx % adapter->num_rx_queues_per_pool; + else + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + return 0; +} + +/** + * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) +{ + struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct ixgbe_ring *ring; + + ixgbe_for_each_ring(ring, q_vector->tx) + adapter->tx_ring[ring->queue_index] = NULL; + + ixgbe_for_each_ring(ring, q_vector->rx) + adapter->rx_ring[ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; + napi_hash_del(&q_vector->napi); + netif_napi_del(&q_vector->napi); + + /* + * ixgbe_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + kfree_rcu(q_vector, rcu); +} + +/** + * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. 
If allocation fails we + * return -ENOMEM. + **/ +static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) +{ + int q_vectors = adapter->num_q_vectors; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; + + /* only one q_vector if MSI-X is disabled. */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + q_vectors = 1; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, + rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + ixgbe_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
+ **/ +static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + ixgbe_free_q_vector(adapter, v_idx); +} + +static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) +{ + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; + pci_disable_msi(adapter->pdev); + } +} + +/** + * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) +{ + int err; + + /* We will try to get MSI-X interrupts first */ + if (!ixgbe_acquire_msix_vectors(adapter)) + return; + + /* At this point, we do not have MSI-X capabilities. We need to + * reconfigure or disable various features which require MSI-X + * capability. + */ + + /* Disable DCB unless we only have a single traffic class */ + if (netdev_get_num_tc(adapter->netdev) > 1) { + e_dev_warn("Number of DCB TCs exceeds number of available queues. 
Disabling DCB support.\n"); + netdev_reset_tc(adapter->netdev); + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + adapter->hw.fc.requested_mode = adapter->last_lfc_mode; + + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; + adapter->temp_dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.pfc_mode_enable = false; + } + + adapter->dcb_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + + /* Disable SR-IOV support */ + e_dev_warn("Disabling SR-IOV support\n"); + ixgbe_disable_sriov(adapter); + + /* Disable RSS */ + e_dev_warn("Disabling RSS support\n"); + adapter->ring_feature[RING_F_RSS].limit = 1; + + /* recalculate number of queues now that many features have been + * changed or disabled. + */ + ixgbe_set_num_queues(adapter); + adapter->num_q_vectors = 1; + + err = pci_enable_msi(adapter->pdev); + if (err) + e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n", + err); + else + adapter->flags |= IXGBE_FLAG_MSI_ENABLED; +} + +/** + * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme + * @adapter: board private structure to initialize + * + * We determine which interrupt scheme to use based on... + * - Kernel support (MSI, MSI-X) + * - which can be user-defined (via MODULE_PARAM) + * - Hardware queue count (num_*_queues) + * - defined by miscellaneous hardware support/features (RSS, etc.) + **/ +int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + ixgbe_set_num_queues(adapter); + + /* Set interrupt mode */ + ixgbe_set_interrupt_capability(adapter); + + err = ixgbe_alloc_q_vectors(adapter); + if (err) { + e_dev_err("Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + + ixgbe_cache_ring_register(adapter); + + e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", + (adapter->num_rx_queues > 1) ? 
"Enabled" : "Disabled", + adapter->num_rx_queues, adapter->num_tx_queues); + + set_bit(__IXGBE_DOWN, &adapter->state); + + return 0; + +err_alloc_q_vectors: + ixgbe_reset_interrupt_capability(adapter); + return err; +} + +/** + * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) +{ + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + + ixgbe_free_q_vectors(adapter); + ixgbe_reset_interrupt_capability(adapter); +} + +void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) +{ + struct ixgbe_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = IXGBE_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_main.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_main.c new file mode 100644 index 000000000000..a5e3b62491e6 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_main.c @@ -0,0 +1,10254 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2016 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ixgbe.h" +#include "ixgbe_common.h" +#include "ixgbe_dcb_82599.h" +#include "ixgbe_sriov.h" +#include "ixgbe_model.h" + +char ixgbe_driver_name[] = "ixgbe"; +static const char ixgbe_driver_string[] = + "Intel(R) 10 Gigabit PCI Express Network Driver"; +#ifdef IXGBE_FCOE +char ixgbe_default_device_descr[] = + "Intel(R) 10 Gigabit Network Connection"; +#else +static char ixgbe_default_device_descr[] = + "Intel(R) 10 Gigabit Network Connection"; +#endif +#define DRV_VERSION "4.4.0-k" +const char ixgbe_driver_version[] = DRV_VERSION; +static const char ixgbe_copyright[] = + "Copyright (c) 1999-2016 Intel Corporation."; + +static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. 
If the problem persists, power off the system and replace the adapter"; + +static const struct ixgbe_info *ixgbe_info_tbl[] = { + [board_82598] = &ixgbe_82598_info, + [board_82599] = &ixgbe_82599_info, + [board_X540] = &ixgbe_X540_info, + [board_X550] = &ixgbe_X550_info, + [board_X550EM_x] = &ixgbe_X550EM_x_info, + [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info, + [board_x550em_a] = &ixgbe_x550em_a_info, + [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info, +}; + +/* ixgbe_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id ixgbe_pci_tbl[] = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 }, 
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw }, + /* required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); + +#ifdef CONFIG_IXGBE_DCA +static int 
ixgbe_notify_dca(struct notifier_block *, unsigned long event, + void *p); +static struct notifier_block dca_notifier = { + .notifier_call = ixgbe_notify_dca, + .next = NULL, + .priority = 0 +}; +#endif + +#ifdef CONFIG_PCI_IOV +static unsigned int max_vfs; +module_param(max_vfs, uint, 0); +MODULE_PARM_DESC(max_vfs, + "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)"); +#endif /* CONFIG_PCI_IOV */ + +static unsigned int allow_unsupported_sfp; +module_param(allow_unsupported_sfp, uint, 0); +MODULE_PARM_DESC(allow_unsupported_sfp, + "Allow unsupported and untested SFP+ modules on 82599-based adapters"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static struct workqueue_struct *ixgbe_wq; + +static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); +static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); + +static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, + u32 reg, u16 *value) +{ + struct pci_dev *parent_dev; + struct pci_bus *parent_bus; + + parent_bus = adapter->pdev->bus->parent; + if (!parent_bus) + return -1; + + parent_dev = parent_bus->self; + if (!parent_dev) + return -1; + + if (!pci_is_pcie(parent_dev)) + return -1; + + pcie_capability_read_word(parent_dev, reg, value); + if (*value == IXGBE_FAILED_READ_CFG_WORD && + ixgbe_check_cfg_remove(&adapter->hw, parent_dev)) + return -1; + return 0; +} + +static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u16 link_status = 0; + int err; + + hw->bus.type = ixgbe_bus_type_pci_express; + + /* Get the negotiated link 
width and speed from PCI config space of the + * parent, as this device is behind a switch + */ + err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status); + + /* assume caller will handle error case */ + if (err) + return err; + + hw->bus.width = ixgbe_convert_bus_width(link_status); + hw->bus.speed = ixgbe_convert_bus_speed(link_status); + + return 0; +} + +/** + * ixgbe_check_from_parent - Determine whether PCIe info should come from parent + * @hw: hw specific details + * + * This function is used by probe to determine whether a device's PCI-Express + * bandwidth details should be gathered from the parent bus instead of from the + * device. Used to ensure that various locations all have the correct device ID + * checks. + */ +static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) +{ + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_SFP_SF_QP: + case IXGBE_DEV_ID_82599_QSFP_SF_QP: + return true; + default: + return false; + } +} + +static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, + int expected_gts) +{ + struct ixgbe_hw *hw = &adapter->hw; + int max_gts = 0; + enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; + enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; + struct pci_dev *pdev; + + /* Some devices are not connected over PCIe and thus do not negotiate + * speed. These devices do not have valid bus info, and thus any report + * we generate may not be correct. 
+ */ + if (hw->bus.type == ixgbe_bus_type_internal) + return; + + /* determine whether to use the parent device */ + if (ixgbe_pcie_from_parent(&adapter->hw)) + pdev = adapter->pdev->bus->parent->self; + else + pdev = adapter->pdev; + + if (pcie_get_minimum_link(pdev, &speed, &width) || + speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) { + e_dev_warn("Unable to determine PCI Express bandwidth.\n"); + return; + } + + switch (speed) { + case PCIE_SPEED_2_5GT: + /* 8b/10b encoding reduces max throughput by 20% */ + max_gts = 2 * width; + break; + case PCIE_SPEED_5_0GT: + /* 8b/10b encoding reduces max throughput by 20% */ + max_gts = 4 * width; + break; + case PCIE_SPEED_8_0GT: + /* 128b/130b encoding reduces throughput by less than 2% */ + max_gts = 8 * width; + break; + default: + e_dev_warn("Unable to determine PCI Express bandwidth.\n"); + return; + } + + e_dev_info("PCI Express bandwidth of %dGT/s available\n", + max_gts); + e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n", + (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : + speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : + speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : + "Unknown"), + width, + (speed == PCIE_SPEED_2_5GT ? "20%" : + speed == PCIE_SPEED_5_0GT ? "20%" : + speed == PCIE_SPEED_8_0GT ? 
"<2%" : + "Unknown")); + + if (max_gts < expected_gts) { + e_dev_warn("This is not sufficient for optimal performance of this card.\n"); + e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n", + expected_gts); + e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n"); + } +} + +static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) +{ + if (!test_bit(__IXGBE_DOWN, &adapter->state) && + !test_bit(__IXGBE_REMOVING, &adapter->state) && + !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) + queue_work(ixgbe_wq, &adapter->service_task); +} + +static void ixgbe_remove_adapter(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + + if (!hw->hw_addr) + return; + hw->hw_addr = NULL; + e_dev_err("Adapter removed\n"); + if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) + ixgbe_service_event_schedule(adapter); +} + +static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) +{ + u32 value; + + /* The following check not only optimizes a bit by not + * performing a read on the status register when the + * register just read was a status register read that + * returned IXGBE_FAILED_READ_REG. It also blocks any + * potential recursion. + */ + if (reg == IXGBE_STATUS) { + ixgbe_remove_adapter(hw); + return; + } + value = ixgbe_read_reg(hw, IXGBE_STATUS); + if (value == IXGBE_FAILED_READ_REG) + ixgbe_remove_adapter(hw); +} + +/** + * ixgbe_read_reg - Read from device register + * @hw: hw specific details + * @reg: offset of register to read + * + * Returns : value read or IXGBE_FAILED_READ_REG if removed + * + * This function is used to read device registers. It checks for device + * removal by confirming any read that returns all ones by checking the + * status register value for all ones. This function avoids reading from + * the hardware if a removal was previously detected in which case it + * returns IXGBE_FAILED_READ_REG (all ones). 
+ */ +u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) +{ + u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u32 value; + + if (ixgbe_removed(reg_addr)) + return IXGBE_FAILED_READ_REG; + if (unlikely(hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) { + struct ixgbe_adapter *adapter; + int i; + + for (i = 0; i < 200; ++i) { + value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY); + if (likely(!value)) + goto writes_completed; + if (value == IXGBE_FAILED_READ_REG) { + ixgbe_remove_adapter(hw); + return IXGBE_FAILED_READ_REG; + } + udelay(5); + } + + adapter = hw->back; + e_warn(hw, "register writes incomplete %08x\n", value); + } + +writes_completed: + value = readl(reg_addr + reg); + if (unlikely(value == IXGBE_FAILED_READ_REG)) + ixgbe_check_remove(hw, reg); + return value; +} + +static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev) +{ + u16 value; + + pci_read_config_word(pdev, PCI_VENDOR_ID, &value); + if (value == IXGBE_FAILED_READ_CFG_WORD) { + ixgbe_remove_adapter(hw); + return true; + } + return false; +} + +u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg) +{ + struct ixgbe_adapter *adapter = hw->back; + u16 value; + + if (ixgbe_removed(hw->hw_addr)) + return IXGBE_FAILED_READ_CFG_WORD; + pci_read_config_word(adapter->pdev, reg, &value); + if (value == IXGBE_FAILED_READ_CFG_WORD && + ixgbe_check_cfg_remove(hw, adapter->pdev)) + return IXGBE_FAILED_READ_CFG_WORD; + return value; +} + +#ifdef CONFIG_PCI_IOV +static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg) +{ + struct ixgbe_adapter *adapter = hw->back; + u32 value; + + if (ixgbe_removed(hw->hw_addr)) + return IXGBE_FAILED_READ_CFG_DWORD; + pci_read_config_dword(adapter->pdev, reg, &value); + if (value == IXGBE_FAILED_READ_CFG_DWORD && + ixgbe_check_cfg_remove(hw, adapter->pdev)) + return IXGBE_FAILED_READ_CFG_DWORD; + return value; +} +#endif /* CONFIG_PCI_IOV */ + +void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value) +{ + struct 
ixgbe_adapter *adapter = hw->back; + + if (ixgbe_removed(hw->hw_addr)) + return; + pci_write_config_word(adapter->pdev, reg, value); +} + +static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) +{ + BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); +} + +struct ixgbe_reg_info { + u32 ofs; + char *name; +}; + +static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { + + /* General Registers */ + {IXGBE_CTRL, "CTRL"}, + {IXGBE_STATUS, "STATUS"}, + {IXGBE_CTRL_EXT, "CTRL_EXT"}, + + /* Interrupt Registers */ + {IXGBE_EICR, "EICR"}, + + /* RX Registers */ + {IXGBE_SRRCTL(0), "SRRCTL"}, + {IXGBE_DCA_RXCTRL(0), "DRXCTL"}, + {IXGBE_RDLEN(0), "RDLEN"}, + {IXGBE_RDH(0), "RDH"}, + {IXGBE_RDT(0), "RDT"}, + {IXGBE_RXDCTL(0), "RXDCTL"}, + {IXGBE_RDBAL(0), "RDBAL"}, + {IXGBE_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {IXGBE_TDBAL(0), "TDBAL"}, + {IXGBE_TDBAH(0), "TDBAH"}, + {IXGBE_TDLEN(0), "TDLEN"}, + {IXGBE_TDH(0), "TDH"}, + {IXGBE_TDT(0), "TDT"}, + {IXGBE_TXDCTL(0), "TXDCTL"}, + + /* List Terminator */ + { .name = NULL } +}; + + +/* + * ixgbe_regdump - register printout routine + */ +static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) +{ + int i = 0, j = 0; + char rname[16]; + u32 regs[64]; + + switch (reginfo->ofs) { + case IXGBE_SRRCTL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); + break; + case IXGBE_DCA_RXCTRL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + break; + case IXGBE_RDLEN(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); + break; + case IXGBE_RDH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); + break; + case IXGBE_RDT(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); + break; + case 
IXGBE_RXDCTL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + break; + case IXGBE_RDBAL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); + break; + case IXGBE_RDBAH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); + break; + case IXGBE_TDBAL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); + break; + case IXGBE_TDBAH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); + break; + case IXGBE_TDLEN(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); + break; + case IXGBE_TDH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); + break; + case IXGBE_TDT(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); + break; + case IXGBE_TXDCTL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + break; + default: + pr_info("%-15s %08x\n", reginfo->name, + IXGBE_READ_REG(hw, reginfo->ofs)); + return; + } + + for (i = 0; i < 8; i++) { + snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7); + pr_err("%-15s", rname); + for (j = 0; j < 8; j++) + pr_cont(" %08x", regs[i*8+j]); + pr_cont("\n"); + } + +} + +/* + * ixgbe_dump - Print registers, tx-rings and rx-rings + */ +static void ixgbe_dump(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_reg_info *reginfo; + int n = 0; + struct ixgbe_ring *tx_ring; + struct ixgbe_tx_buffer *tx_buffer; + union ixgbe_adv_tx_desc *tx_desc; + struct my_u0 { u64 a; u64 b; } *u0; + struct ixgbe_ring *rx_ring; + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *rx_buffer_info; + u32 staterr; + int i = 0; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + pr_info("Device Name state " + "trans_start last_rx\n"); + 
pr_info("%-15s %016lX %016lX %016lX\n", + netdev->name, + netdev->state, + dev_trans_start(netdev), + netdev->last_rx); + } + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; + reginfo->name; reginfo++) { + ixgbe_regdump(hw, reginfo); + } + + /* Print TX Ring Summary */ + if (!netdev || !netif_running(netdev)) + return; + + dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + pr_info(" %s %s %s %s\n", + "Queue [NTU] [NTC] [bi(ntc)->dma ]", + "leng", "ntw", "timestamp"); + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * 82598 Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 + * + * 82598 Advanced Transmit Descriptor (Write-Back Format) + * +--------------------------------------------------------------+ + * 0 | RSV [63:0] | + * +--------------------------------------------------------------+ + * 8 | RSV | STA | NXTSEQ | + * +--------------------------------------------------------------+ + * 63 36 35 32 31 0 + * + * 82599+ Advanced Transmit Descriptor + * 
+--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0 + * + * 82599+ Advanced Transmit Descriptor (Write-Back Format) + * +--------------------------------------------------------------+ + * 0 | RSV [63:0] | + * +--------------------------------------------------------------+ + * 8 | RSV | STA | RSV | + * +--------------------------------------------------------------+ + * 63 36 35 32 31 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("%s%s %s %s %s %s\n", + "T [desc] [address 63:0 ] ", + "[PlPOIdStDDt Ln] [bi->dma ] ", + "leng", "ntw", "timestamp", "bi->skb"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + tx_desc = IXGBE_TX_DESC(tx_ring, i); + tx_buffer = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (dma_unmap_len(tx_buffer, len) > 0) { + pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p", + i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp, + tx_buffer->skb); + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + pr_cont(" NTC/U\n"); + else if (i == tx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == tx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + if (netif_msg_pktdata(adapter) && + tx_buffer->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + tx_buffer->skb->data, + dma_unmap_len(tx_buffer, len), + true); + } + } 
+ } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("%5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + return; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Receive Descriptor Formats + * + * 82598 Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * 82598 Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 16 15 4 3 0 + * +------------------------------------------------------+ + * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS | + * | Packet | IP | | | | Type | Type | + * | Checksum | Ident | | | | | | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + * + * 82599+ Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * 82599+ Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS | + * |/ RTT / PCoE_PARAM | | | CNT | Type | Type | + * |/ Flow Dir Flt ID | | | | | | + * 
+------------------------------------------------------+ + * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("%s%s%s", + "R [desc] [ PktBuf A0] ", + "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", + "<-- Adv Rx Read format\n"); + pr_info("%s%s%s", + "RWB[desc] [PcsmIpSHl PtRs] ", + "[vl er S cks ln] ---------------- [bi->skb ] ", + "<-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IXGBE_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + if (staterr & IXGBE_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("RWB[0x%03X] %016llX " + "%016llX ---------------- %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + rx_buffer_info->skb); + } else { + pr_info("R [0x%03X] %016llX " + "%016llX %016llX %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)rx_buffer_info->dma, + rx_buffer_info->skb); + + if (netif_msg_pktdata(adapter) && + rx_buffer_info->dma) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + page_address(rx_buffer_info->page) + + rx_buffer_info->page_offset, + ixgbe_rx_bufsz(rx_ring), true); + } + } + + if (i == rx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == rx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + } + } +} + +static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) +{ + u32 ctrl_ext; + + /* Let firmware take over control of h/w */ + ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, + ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); +} + +static void 
ixgbe_get_hw_control(struct ixgbe_adapter *adapter) +{ + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, + ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); +} + +/** + * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + */ +static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, + u8 queue, u8 msix_vector) +{ + u32 ivar, index; + struct ixgbe_hw *hw = &adapter->hw; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + if (direction == -1) + direction = 0; + index = (((direction * 64) + queue) >> 2) & 0x1F; + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); + ivar &= ~(0xFF << (8 * (queue & 0x3))); + ivar |= (msix_vector << (8 * (queue & 0x3))); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + if (direction == -1) { + /* other causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + index = ((queue & 1) * 8); + ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar); + break; + } else { + /* tx or rx causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar); + break; + } + default: + break; + } +} + +static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + + switch 
(adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + mask = (IXGBE_EIMS_RTX_QUEUE & qmask); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + mask = (qmask & 0xFFFFFFFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); + mask = (qmask >> 32); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); + break; + default: + break; + } +} + +void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring, + struct ixgbe_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_hw_stats *hwstats = &adapter->stats; + int i; + u32 data; + + if ((hw->fc.current_mode != ixgbe_fc_full) && + (hw->fc.current_mode != ixgbe_fc_rx_pause)) + return; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + break; + default: + data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } + hwstats->lxoffrxc += data; + + /* refill credits (no tx hang) if we received xoff */ + if (!data) + return; + + for (i = 0; i < adapter->num_tx_queues; i++) + clear_bit(__IXGBE_HANG_CHECK_ARMED, + &adapter->tx_ring[i]->state); +} + +static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_hw_stats *hwstats = &adapter->stats; + u32 
xoff[8] = {0}; + u8 tc; + int i; + bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; + + if (adapter->ixgbe_ieee_pfc) + pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); + + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) { + ixgbe_update_xoff_rx_lfc(adapter); + return; + } + + /* update stats for each tc, only valid with PFC enabled */ + for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { + u32 pxoffrxc; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + break; + default: + pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + } + hwstats->pxoffrxc[i] += pxoffrxc; + /* Get the TC for given UP */ + tc = netdev_get_prio_tc_map(adapter->netdev, i); + xoff[tc] += pxoffrxc; + } + + /* disarm tx queues that have received xoff frames */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + + tc = tx_ring->dcb_tc; + if (xoff[tc]) + clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); + } +} + +static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) +{ + return ring->stats.packets; +} + +static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) +{ + struct ixgbe_adapter *adapter; + struct ixgbe_hw *hw; + u32 head, tail; + + if (ring->l2_accel_priv) + adapter = ring->l2_accel_priv->real_adapter; + else + adapter = netdev_priv(ring->netdev); + + hw = &adapter->hw; + head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); + tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; +} + +static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) +{ + u32 tx_done = ixgbe_get_tx_completed(tx_ring); + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; + u32 tx_pending = ixgbe_get_tx_pending(tx_ring); + + clear_check_for_tx_hang(tx_ring); + + /* + * Check for a hung queue, but be thorough. 
This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * pfc clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. + */ + if (tx_done_old == tx_done && tx_pending) + /* make sure it is true for two checks in a row */ + return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, + &tx_ring->state); + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); + + return false; +} + +/** + * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout + * @adapter: driver private struct + **/ +static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) +{ + + /* Do the reset outside of interrupt context */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); + e_warn(drv, "initiating reset due to tx timeout\n"); + ixgbe_service_event_schedule(adapter); + } +} + +/** + * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate + **/ +static int ixgbe_tx_maxrate(struct net_device *netdev, + int queue_index, u32 maxrate) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 bcnrc_val = ixgbe_link_mbps(adapter); + + if (!maxrate) + return 0; + + /* Calculate the rate factor values to set */ + bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; + bcnrc_val /= maxrate; + + /* clear everything but the rate factor */ + bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | + IXGBE_RTTBCNRC_RF_DEC_MASK; + + /* enable the rate scheduler */ + bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; + + IXGBE_WRITE_REG(hw, 
IXGBE_RTTDQSEL, queue_index); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + + return 0; +} + +/** + * ixgbe_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean + * @napi_budget: Used to determine if we are in netpoll + **/ +static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *tx_ring, int napi_budget) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_tx_buffer *tx_buffer; + union ixgbe_adv_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IXGBE_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + napi_consume_skb(tx_buffer->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = 
IXGBE_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IXGBE_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { + /* schedule immediate reset if we believe we hung */ + struct ixgbe_hw *hw = &adapter->hw; + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + tx_ring->queue_index, + IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), + IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), + tx_ring->next_to_use, i, + tx_ring->tx_buffer_info[i].time_stamp, jiffies); + + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + e_info(probe, + "tx hang %d detected on queue %d, resetting adapter\n", + adapter->tx_timeout_count + 1, tx_ring->queue_index); + + /* schedule immediate reset if we believe we hung */ + ixgbe_tx_timeout_reset(adapter); + + /* the adapter is about to reset, no point in enabling stuff */ + return true; + } + + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, 
total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) + && !test_bit(__IXGBE_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + + return !!budget; +} + +#ifdef CONFIG_IXGBE_DCA +static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, + int cpu) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 txctrl = 0; + u16 reg_offset; + + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + txctrl = dca3_get_tag(tx_ring->dev, cpu); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx); + txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599; + break; + default: + /* for unknown hardware do not write register */ + return; + } + + /* + * We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. 
+ */ + txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN | + IXGBE_DCA_TXCTRL_DATA_RRO_EN | + IXGBE_DCA_TXCTRL_DESC_DCA_EN; + + IXGBE_WRITE_REG(hw, reg_offset, txctrl); +} + +static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, + int cpu) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rxctrl = 0; + u8 reg_idx = rx_ring->reg_idx; + + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + rxctrl = dca3_get_tag(rx_ring->dev, cpu); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599; + break; + default: + break; + } + + /* + * We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ + rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN | + IXGBE_DCA_RXCTRL_DATA_DCA_EN | + IXGBE_DCA_RXCTRL_DESC_DCA_EN; + + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); +} + +static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *ring; + int cpu = get_cpu(); + + if (q_vector->cpu == cpu) + goto out_no_update; + + ixgbe_for_each_ring(ring, q_vector->tx) + ixgbe_update_tx_dca(adapter, ring, cpu); + + ixgbe_for_each_ring(ring, q_vector->rx) + ixgbe_update_rx_dca(adapter, ring, cpu); + + q_vector->cpu = cpu; +out_no_update: + put_cpu(); +} + +static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) +{ + int i; + + /* always use CB2 mode, difference is masked in the CB driver */ + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, + IXGBE_DCA_CTRL_DCA_MODE_CB2); + else + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, + IXGBE_DCA_CTRL_DCA_DISABLE); + + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->cpu = -1; + ixgbe_update_dca(adapter->q_vector[i]); + } +} + +static int __ixgbe_notify_dca(struct device *dev, void *data) +{ + struct 
ixgbe_adapter *adapter = dev_get_drvdata(dev); + unsigned long event = *(unsigned long *)data; + + if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) + return 0; + + switch (event) { + case DCA_PROVIDER_ADD: + /* if we're already enabled, don't do it again */ + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + break; + if (dca_add_requester(dev) == 0) { + adapter->flags |= IXGBE_FLAG_DCA_ENABLED; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, + IXGBE_DCA_CTRL_DCA_MODE_CB2); + break; + } + /* Fall Through since DCA is disabled. */ + case DCA_PROVIDER_REMOVE: + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { + dca_remove_requester(dev); + adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, + IXGBE_DCA_CTRL_DCA_DISABLE); + } + break; + } + + return 0; +} + +#endif /* CONFIG_IXGBE_DCA */ + +#define IXGBE_RSS_L4_TYPES_MASK \ + ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) + +static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + IXGBE_RXDADV_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} + +#ifdef IXGBE_FCOE +/** + * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type + * @ring: structure containing ring specific data + * @rx_desc: advanced rx descriptor + * + * Returns : true if it is FCoE pkt + */ +static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring, + union ixgbe_adv_rx_desc *rx_desc) +{ + __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + + return test_bit(__IXGBE_RX_FCOE, &ring->state) && + ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) == + (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE << + IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); +} + +#endif /* IXGBE_FCOE */ +/** + * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed + * @skb: skb currently being received and modified + **/ +static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + bool encap_pkt = false; + + skb_checksum_none_assert(skb); + + /* Rx csum disabled */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* check for VXLAN and Geneve packets */ + if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) { + encap_pkt = true; + skb->encapsulation = 1; + } + + /* if IP and error */ + if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && + ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { + ring->rx_stats.csum_err++; + return; + } + + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS)) + return; + + if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { + /* + * 82599 errata, UDP frames with a 0 checksum can be marked as + * checksum errors. 
+ */ + if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) && + test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state)) + return; + + ring->rx_stats.csum_err++; + return; + } + + /* It must be a TCP or UDP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (encap_pkt) { + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS)) + return; + + if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) { + skb->ip_summed = CHECKSUM_NONE; + return; + } + /* If we checked the outer header let the stack know */ + skb->csum_level = 1; + } +} + +static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, + ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, ixgbe_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + + return true; +} + +/** + * ixgbe_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) +{ + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = IXGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + 
if (!ixgbe_alloc_mapped_page(rx_ring, bi)) + break; + + /* + * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IXGBE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, + struct sk_buff *skb) +{ + u16 hdr_len = skb_headlen(skb); + + /* set gso_size to avoid messing up TCP MSS */ + skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), + IXGBE_CB(skb)->append_cnt); + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; +} + +static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, + struct sk_buff *skb) +{ + /* if append_cnt is 0 then frame is not RSC */ + if (!IXGBE_CB(skb)->append_cnt) + return; + + rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; + rx_ring->rx_stats.rsc_flush++; + + ixgbe_set_rsc_gso_size(rx_ring, skb); + + /* gso_size is computed using append_cnt so always clear it last */ + IXGBE_CB(skb)->append_cnt = 0; +} + +/** + * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet 
information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + u32 flags = rx_ring->q_vector->adapter->flags; + + ixgbe_update_rsc_stats(rx_ring, skb); + + ixgbe_rx_hash(rx_ring, rx_desc, skb); + + ixgbe_rx_checksum(rx_ring, rx_desc, skb); + + if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED)) + ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { + u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, dev); +} + +static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) +{ + skb_mark_napi_id(skb, &q_vector->napi); + if (ixgbe_qv_busy_polling(q_vector)) + netif_receive_skb(skb); + else + napi_gro_receive(&q_vector->napi, skb); +} + +/** + * ixgbe_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IXGBE_RX_DESC(rx_ring, ntc)); + + /* update RSC append count if present */ + if (ring_is_rsc_enabled(rx_ring)) { + __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & + cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK); + + if (unlikely(rsc_enabled)) { + u32 rsc_cnt = le32_to_cpu(rsc_enabled); + + rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT; + IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; + + /* update ntc based on RSC value */ + ntc = le32_to_cpu(rx_desc->wb.upper.status_error); + ntc &= IXGBE_RXDADV_NEXTP_MASK; + ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; + } + } + + /* if we are the last buffer then there is nothing else to do */ + if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + return false; + + /* place skb in next buffer to be received */ + rx_ring->rx_buffer_info[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +/** + * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted + * + * This function is an ixgbe specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, + struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
+ */ + pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being updated + * + * This function provides a basic DMA sync up for the first fragment of an + * skb. The reason for doing this is that the first fragment cannot be + * unmapped until we have reached the end of packet descriptor for a buffer + * chain. + */ +static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, + struct sk_buff *skb) +{ + /* if the page was released unmap it, else just sync our portion */ + if (unlikely(IXGBE_CB(skb)->page_released)) { + dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + IXGBE_CB(skb)->page_released = false; + } else { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + IXGBE_CB(skb)->dma, + frag->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + } + IXGBE_CB(skb)->dma = 0; +} + +/** + * ixgbe_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. 
+ * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *netdev = rx_ring->netdev; + + /* verify that the packet does not have any known errors */ + if (unlikely(ixgbe_test_staterr(rx_desc, + IXGBE_RXDADV_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { + dev_kfree_skb_any(skb); + return true; + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + ixgbe_pull_tail(rx_ring, skb); + +#ifdef IXGBE_FCOE + /* do not attempt to pad FCoE Frames as this will disrupt DDP */ + if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) + return false; + +#endif + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *old_buff) +{ + struct ixgbe_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; + + /* transfer page from old buffer to new buffer */ + *new_buff = *old_buff; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, + new_buff->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); +} + +static inline bool ixgbe_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +/** + * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. 
+ **/ +static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbe_rx_bufsz(rx_ring); +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - + ixgbe_rx_bufsz(rx_ring); +#endif + + if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!ixgbe_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + __free_pages(page, ixgbe_rx_pg_order(rx_ring)); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(ixgbe_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; +#endif + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. 
+ */ + page_ref_inc(page); + + return true; +} + +static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc) +{ + struct ixgbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + IXGBE_RX_HDR_SIZE); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return NULL; + } + + /* + * we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header + * after the writeback. 
Only unmap it when EOP is + * reached + */ + if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + goto dma_sync; + + IXGBE_CB(skb)->dma = rx_buffer->dma; + } else { + if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) + ixgbe_dma_sync_frag(rx_ring, skb); + +dma_sync: + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + rx_buffer->skb = NULL; + } + + /* pull page into skb */ + if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ixgbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { + /* the page has been released from the ring */ + IXGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} + +/** + * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the syste. 
+ * + * Returns amount of work completed + **/ +static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *rx_ring, + const int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; +#ifdef IXGBE_FCOE + struct ixgbe_adapter *adapter = q_vector->adapter; + int ddp_bytes; + unsigned int mss = 0; +#endif /* IXGBE_FCOE */ + u16 cleaned_count = ixgbe_desc_unused(rx_ring); + + while (likely(total_rx_packets < budget)) { + union ixgbe_adv_rx_desc *rx_desc; + struct sk_buff *skb; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { + ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); + + if (!rx_desc->wb.upper.status_error) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + /* retrieve a buffer from the ring */ + skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc); + + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; + + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) + continue; + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + ixgbe_process_skb_fields(rx_ring, rx_desc, skb); + +#ifdef IXGBE_FCOE + /* if ddp, not passing to ULD unless for FCP_RSP or error */ + if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { + ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); + /* include DDPed FCoE data */ + if (ddp_bytes > 0) { + if (!mss) { + mss = rx_ring->netdev->mtu - + sizeof(struct fcoe_hdr) - + sizeof(struct fc_frame_header) - + sizeof(struct fcoe_crc_eof); + if (mss > 512) + mss 
&= ~511; + } + total_rx_bytes += ddp_bytes; + total_rx_packets += DIV_ROUND_UP(ddp_bytes, + mss); + } + if (!ddp_bytes) { + dev_kfree_skb_any(skb); + continue; + } + } + +#endif /* IXGBE_FCOE */ + ixgbe_rx_skb(q_vector, skb); + + /* update budget accounting */ + total_rx_packets++; + } + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +/* must be called with local_bh_disable()d */ +static int ixgbe_low_latency_recv(struct napi_struct *napi) +{ + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *ring; + int found = 0; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return LL_FLUSH_FAILED; + + if (!ixgbe_qv_lock_poll(q_vector)) + return LL_FLUSH_BUSY; + + ixgbe_for_each_ring(ring, q_vector->rx) { + found = ixgbe_clean_rx_irq(q_vector, ring, 4); +#ifdef BP_EXTENDED_STATS + if (found) + ring->stats.cleaned += found; + else + ring->stats.misses++; +#endif + if (found) + break; + } + + ixgbe_qv_unlock_poll(q_vector); + + return found; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +/** + * ixgbe_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * ixgbe_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) +{ + struct ixgbe_q_vector *q_vector; + int v_idx; + u32 mask; + + /* Populate MSIX to EITR Select */ + if (adapter->num_vfs > 32) { + u32 eitrsel = BIT(adapter->num_vfs - 32) - 1; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); + } + + /* + * Populate the IVAR table and set the ITR values to the + * corresponding register. 
+ */ + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + struct ixgbe_ring *ring; + q_vector = adapter->q_vector[v_idx]; + + ixgbe_for_each_ring(ring, q_vector->rx) + ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); + + ixgbe_for_each_ring(ring, q_vector->tx) + ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); + + ixgbe_write_eitr(q_vector); + } + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, + v_idx); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + ixgbe_set_ivar(adapter, -1, 1, v_idx); + break; + default: + break; + } + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); + + /* set up to autoclear timer, and the vectors */ + mask = IXGBE_EIMS_ENABLE_MASK; + mask &= ~(IXGBE_EIMS_OTHER | + IXGBE_EIMS_MAILBOX | + IXGBE_EIMS_LSC); + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * ixgbe_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see ixgbe_param.c) + **/ +static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring_container *ring_container) +{ + int bytes = ring_container->total_bytes; + int packets = ring_container->total_packets; + u32 timepassed_us; + u64 bytes_perint; + u8 itr_setting = ring_container->itr; + + if (packets == 0) + return; + + /* simple throttlerate management + * 0-10MB/s lowest (100000 ints/s) + * 10-20MB/s low (20000 ints/s) + * 20-1249MB/s bulk (12000 ints/s) + */ + /* what was last interrupt timeslice? */ + timepassed_us = q_vector->itr >> 2; + if (timepassed_us == 0) + return; + + bytes_perint = bytes / timepassed_us; /* bytes/usec */ + + switch (itr_setting) { + case lowest_latency: + if (bytes_perint > 10) + itr_setting = low_latency; + break; + case low_latency: + if (bytes_perint > 20) + itr_setting = bulk_latency; + else if (bytes_perint <= 10) + itr_setting = lowest_latency; + break; + case bulk_latency: + if (bytes_perint <= 20) + itr_setting = low_latency; + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itr_setting; +} + +/** + * ixgbe_write_eitr - write EITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + * + * This function is made to be called by ethtool and by the driver + * when it needs to update EITR registers at runtime. Hardware + * specific quirks/differences are taken care of here. 
+ */ +void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + /* must write high and low 16 bits to reset counter */ + itr_reg |= (itr_reg << 16); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + /* + * set the WDIS bit to not clear the timer bits and cause an + * immediate assertion of the interrupt + */ + itr_reg |= IXGBE_EITR_CNT_WDIS; + break; + default: + break; + } + IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); +} + +static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) +{ + u32 new_itr = q_vector->itr; + u8 current_itr; + + ixgbe_update_itr(q_vector, &q_vector->tx); + ixgbe_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IXGBE_100K_ITR; + break; + case low_latency: + new_itr = IXGBE_20K_ITR; + break; + case bulk_latency: + new_itr = IXGBE_12K_ITR; + break; + default: + break; + } + + if (new_itr != q_vector->itr) { + /* do an exponential smoothing */ + new_itr = (10 * new_itr * q_vector->itr) / + ((9 * new_itr) + q_vector->itr); + + /* save the algorithm value here */ + q_vector->itr = new_itr; + + ixgbe_write_eitr(q_vector); + } +} + +/** + * ixgbe_check_overtemp_subtask - check for over temperature + * @adapter: pointer to adapter + **/ +static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr = adapter->interrupt_event; + s32 rc; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; + + if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && + !(adapter->flags2 & 
IXGBE_FLAG2_TEMP_SENSOR_EVENT)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; + + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_T3_LOM: + /* + * Since the warning interrupt is for both ports + * we don't have to check if: + * - This interrupt wasn't for our port. + * - We may have missed the interrupt so always have to + * check if we got a LSC + */ + if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) && + !(eicr & IXGBE_EICR_LSC)) + return; + + if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { + u32 speed; + bool link_up = false; + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + + if (link_up) + return; + } + + /* Check if this is not due to overtemp */ + if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) + return; + + break; + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + rc = hw->phy.ops.check_overtemp(hw); + if (rc != IXGBE_ERR_OVERTEMP) + return; + break; + default: + if (adapter->hw.mac.type >= ixgbe_mac_X540) + return; + if (!(eicr & IXGBE_EICR_GPI_SDP0(hw))) + return; + break; + } + e_crit(drv, "%s\n", ixgbe_overheat_msg); + + adapter->interrupt_event = 0; +} + +static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) +{ + struct ixgbe_hw *hw = &adapter->hw; + + if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && + (eicr & IXGBE_EICR_GPI_SDP1(hw))) { + e_crit(probe, "Fan has stopped, replace the adapter\n"); + /* write to clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); + } +} + +static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) +{ + struct ixgbe_hw *hw = &adapter->hw; + + if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) + return; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + /* + * Need to check link state so complete overtemp check + * on service task + */ + if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) || + (eicr & IXGBE_EICR_LSC)) && + (!test_bit(__IXGBE_DOWN, &adapter->state))) { + 
adapter->interrupt_event = eicr; + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; + ixgbe_service_event_schedule(adapter); + return; + } + return; + case ixgbe_mac_x550em_a: + if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) { + adapter->interrupt_event = eicr; + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; + ixgbe_service_event_schedule(adapter); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, + IXGBE_EICR_GPI_SDP0_X550EM_a); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR, + IXGBE_EICR_GPI_SDP0_X550EM_a); + } + return; + case ixgbe_mac_X550: + case ixgbe_mac_X540: + if (!(eicr & IXGBE_EICR_TS)) + return; + break; + default: + return; + } + + e_crit(drv, "%s\n", ixgbe_overheat_msg); +} + +static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) +{ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + if (hw->phy.type == ixgbe_phy_nl) + return true; + return false; + case ixgbe_mac_82599EB: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + case ixgbe_media_type_fiber_qsfp: + return true; + default: + return false; + } + default: + return false; + } +} + +static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw); + + if (!ixgbe_is_sfp(hw)) + return; + + /* Later MAC's use different SDP */ + if (hw->mac.type >= ixgbe_mac_X540) + eicr_mask = IXGBE_EICR_GPI_SDP0_X540; + + if (eicr & eicr_mask) { + /* Clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; + adapter->sfp_poll_time = 0; + ixgbe_service_event_schedule(adapter); + } + } + + if (adapter->hw.mac.type == ixgbe_mac_82599EB && + (eicr & IXGBE_EICR_GPI_SDP1(hw))) { + /* Clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->flags |= 
IXGBE_FLAG_NEED_LINK_CONFIG; + ixgbe_service_event_schedule(adapter); + } + } +} + +static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + adapter->lsc_int++; + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); + IXGBE_WRITE_FLUSH(hw); + ixgbe_service_event_schedule(adapter); + } +} + +static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + struct ixgbe_hw *hw = &adapter->hw; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + mask = (IXGBE_EIMS_RTX_QUEUE & qmask); + IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + mask = (qmask & 0xFFFFFFFF); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); + mask = (qmask >> 32); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + break; + default: + break; + } + /* skip the flush */ +} + +static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + struct ixgbe_hw *hw = &adapter->hw; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + mask = (IXGBE_EIMS_RTX_QUEUE & qmask); + IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + mask = (qmask & 0xFFFFFFFF); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); + mask = (qmask >> 32); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); + break; + default: + break; + } + /* skip the flush */ +} + +/** + * ixgbe_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, + bool flush) +{ + struct ixgbe_hw 
*hw = &adapter->hw; + u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); + + /* don't reenable LSC while waiting for link */ + if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) + mask &= ~IXGBE_EIMS_LSC; + + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + mask |= IXGBE_EIMS_GPI_SDP0(hw); + break; + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + mask |= IXGBE_EIMS_TS; + break; + default: + break; + } + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) + mask |= IXGBE_EIMS_GPI_SDP1(hw); + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + mask |= IXGBE_EIMS_GPI_SDP1(hw); + mask |= IXGBE_EIMS_GPI_SDP2(hw); + /* fall through */ + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || + adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP || + adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) + mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw); + if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) + mask |= IXGBE_EICR_GPI_SDP0_X540; + mask |= IXGBE_EIMS_ECC; + mask |= IXGBE_EIMS_MAILBOX; + break; + default: + break; + } + + if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && + !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) + mask |= IXGBE_EIMS_FLOW_DIR; + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); + if (queues) + ixgbe_irq_enable_queues(adapter, ~0); + if (flush) + IXGBE_WRITE_FLUSH(&adapter->hw); +} + +static irqreturn_t ixgbe_msix_other(int irq, void *data) +{ + struct ixgbe_adapter *adapter = data; + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr; + + /* + * Workaround for Silicon errata. Use clear-by-write instead + * of clear-by-read. Reading with EICS will return the + * interrupt causes without clearing, which later be done + * with the write to EICR. 
+ */ + eicr = IXGBE_READ_REG(hw, IXGBE_EICS); + + /* The lower 16bits of the EICR register are for the queue interrupts + * which should be masked here in order to not accidentally clear them if + * the bits are high when ixgbe_msix_other is called. There is a race + * condition otherwise which results in possible performance loss + * especially if the ixgbe_msix_other interrupt is triggering + * consistently (as it would when PPS is turned on for the X540 device) + */ + eicr &= 0xFFFF0000; + + IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); + + if (eicr & IXGBE_EICR_LSC) + ixgbe_check_lsc(adapter); + + if (eicr & IXGBE_EICR_MAILBOX) + ixgbe_msg_task(adapter); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + if (hw->phy.type == ixgbe_phy_x550em_ext_t && + (eicr & IXGBE_EICR_GPI_SDP0_X540)) { + adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; + ixgbe_service_event_schedule(adapter); + IXGBE_WRITE_REG(hw, IXGBE_EICR, + IXGBE_EICR_GPI_SDP0_X540); + } + if (eicr & IXGBE_EICR_ECC) { + e_info(link, "Received ECC Err, initiating reset\n"); + set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); + ixgbe_service_event_schedule(adapter); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); + } + /* Handle Flow Director Full threshold interrupt */ + if (eicr & IXGBE_EICR_FLOW_DIR) { + int reinit_count = 0; + int i; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *ring = adapter->tx_ring[i]; + if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, + &ring->state)) + reinit_count++; + } + if (reinit_count) { + /* no more flow director interrupts until after init */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); + adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; + ixgbe_service_event_schedule(adapter); + } + } + ixgbe_check_sfp_event(adapter, eicr); + ixgbe_check_overtemp_event(adapter, eicr); + break; + default: + break; + } + + ixgbe_check_fan_failure(adapter, 
eicr); + + if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) + ixgbe_ptp_check_pps_event(adapter); + + /* re-enable the original interrupt state, no lsc, no queues */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) +{ + struct ixgbe_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * ixgbe_poll - NAPI Rx polling callback + * @napi: structure for representing this polling device + * @budget: how many packets driver is allowed to clean + * + * This function is used for legacy and MSI, NAPI mode + **/ +int ixgbe_poll(struct napi_struct *napi, int budget) +{ + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *ring; + int per_ring_budget, work_done = 0; + bool clean_complete = true; + +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); +#endif + + ixgbe_for_each_ring(ring, q_vector->tx) { + if (!ixgbe_clean_tx_irq(q_vector, ring, budget)) + clean_complete = false; + } + + /* Exit if we are called by netpoll or busy polling is active */ + if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) + return budget; + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget/q_vector->rx.count, 1); + else + per_ring_budget = budget; + + ixgbe_for_each_ring(ring, q_vector->rx) { + int cleaned = ixgbe_clean_rx_irq(q_vector, ring, + per_ring_budget); + + work_done += cleaned; + if (cleaned >= per_ring_budget) + clean_complete = false; + } + + ixgbe_qv_unlock_napi(q_vector); + /* If all work not completed, return 
budget and keep polling */ + if (!clean_complete) + return budget; + + /* all work done, exit the polling mode */ + napi_complete_done(napi, work_done); + if (adapter->rx_itr_setting & 1) + ixgbe_set_itr(q_vector); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx)); + + return min(work_done, budget - 1); +} + +/** + * ixgbe_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * ixgbe_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. + **/ +static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int vector, err; + int ri = 0, ti = 0; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "TxRx", ri++); + ti++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "rx", ri++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "tx", ti++); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + e_err(probe, "request_irq failed for MSIX interrupt " + "Error: %d\n", err); + goto free_queue_irqs; + } + /* If Flow Director is enabled, set interrupt affinity */ + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + /* assign the mask for this irq */ + irq_set_affinity_hint(entry->vector, + &q_vector->affinity_mask); + } + } + + err = request_irq(adapter->msix_entries[vector].vector, + ixgbe_msix_other, 0, netdev->name, adapter); + if (err) { + e_err(probe, "request_irq for 
msix_other failed: %d\n", err); + goto free_queue_irqs; + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + irq_set_affinity_hint(adapter->msix_entries[vector].vector, + NULL); + free_irq(adapter->msix_entries[vector].vector, + adapter->q_vector[vector]); + } + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return err; +} + +/** + * ixgbe_intr - legacy mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t ixgbe_intr(int irq, void *data) +{ + struct ixgbe_adapter *adapter = data; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; + u32 eicr; + + /* + * Workaround for silicon errata #26 on 82598. Mask the interrupt + * before the read of EICR. + */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); + + /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read + * therefore no explicit interrupt disable is necessary */ + eicr = IXGBE_READ_REG(hw, IXGBE_EICR); + if (!eicr) { + /* + * shared interrupt alert! + * make sure interrupts are enabled because the read will + * have disabled interrupts due to EIAM + * finish the workaround of silicon errata on 82598. Unmask + * the interrupt that we masked before the EICR read. 
+ */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable(adapter, true, true); + return IRQ_NONE; /* Not our interrupt */ + } + + if (eicr & IXGBE_EICR_LSC) + ixgbe_check_lsc(adapter); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + ixgbe_check_sfp_event(adapter, eicr); + /* Fall through */ + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + if (eicr & IXGBE_EICR_ECC) { + e_info(link, "Received ECC Err, initiating reset\n"); + set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); + ixgbe_service_event_schedule(adapter); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); + } + ixgbe_check_overtemp_event(adapter, eicr); + break; + default: + break; + } + + ixgbe_check_fan_failure(adapter, eicr); + if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) + ixgbe_ptp_check_pps_event(adapter); + + /* would disable interrupts here but EIAM disabled it */ + napi_schedule_irqoff(&q_vector->napi); + + /* + * re-enable link(maybe) and non-queue interrupts, no flush. + * ixgbe_poll will re-enable the queue interrupts + */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +/** + * ixgbe_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
+ **/ +static int ixgbe_request_irq(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + err = ixgbe_request_msix_irqs(adapter); + else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) + err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, + netdev->name, adapter); + else + err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + e_err(probe, "request_irq failed, Error %d\n", err); + + return err; +} + +static void ixgbe_free_irq(struct ixgbe_adapter *adapter) +{ + int vector; + + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { + free_irq(adapter->pdev->irq, adapter); + return; + } + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; + + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + + free_irq(entry->vector, q_vector); + } + + free_irq(adapter->msix_entries[vector].vector, adapter); +} + +/** + * ixgbe_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) +{ + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); + break; + default: + break; + } + IXGBE_WRITE_FLUSH(&adapter->hw); + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + int vector; + + for (vector = 0; 
vector < adapter->num_q_vectors; vector++) + synchronize_irq(adapter->msix_entries[vector].vector); + + synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts + * + **/ +static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) +{ + struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; + + ixgbe_write_eitr(q_vector); + + ixgbe_set_ivar(adapter, 0, 0, 0); + ixgbe_set_ivar(adapter, 1, 0, 0); + + e_info(hw, "Legacy interrupt IVAR setup done\n"); +} + +/** + * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. + **/ +void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 tdba = ring->dma; + int wait_loop = 10; + u32 txdctl = IXGBE_TXDCTL_ENABLE; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0); + IXGBE_WRITE_FLUSH(hw); + + IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), + (tdba & DMA_BIT_MASK(32))); + IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), + ring->count * sizeof(union ixgbe_adv_tx_desc)); + IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); + ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx); + + /* + * set WTHRESH to encourage burst writeback, it should not be set + * higher than 1 when: + * - ITR is 0 as it could cause false TX hangs + * - ITR is set to > 100k int/sec and BQL is enabled + * + * In order to avoid issues WTHRESH + PTHRESH should always be equal + * to or less than the number of on chip descriptors, which is + * currently 40. 
+ */ + if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) + txdctl |= 1u << 16; /* WTHRESH = 1 */ + else + txdctl |= 8u << 16; /* WTHRESH = 8 */ + + /* + * Setting PTHRESH to 32 both improves performance + * and avoids a TX hang with DFP enabled + */ + txdctl |= (1u << 8) | /* HTHRESH = 1 */ + 32; /* PTHRESH = 32 */ + + /* reinitialize flowdirector state */ + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + ring->atr_sample_rate = adapter->atr_sample_rate; + ring->atr_count = 0; + set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); + } else { + ring->atr_sample_rate = 0; + } + + /* initialize XPS */ + if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) { + struct ixgbe_q_vector *q_vector = ring->q_vector; + + if (q_vector) + netif_set_xps_queue(ring->netdev, + &q_vector->affinity_mask, + ring->queue_index); + } + + clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); + + /* enable queue */ + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); + + /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + /* poll to verify queue is enabled */ + do { + usleep_range(1000, 2000); + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!wait_loop) + hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx); +} + +static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rttdcs, mtqc; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + /* disable the arbiter while setting MTQC */ + rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + rttdcs |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + + /* set transmit pool layout */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + mtqc = IXGBE_MTQC_VT_ENA; + if (tcs > 4) + mtqc |= IXGBE_MTQC_RT_ENA | 
IXGBE_MTQC_8TC_8TQ; + else if (tcs > 1) + mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + else if (adapter->ring_feature[RING_F_VMDQ].mask == + IXGBE_82599_VMDQ_4Q_MASK) + mtqc |= IXGBE_MTQC_32VF; + else + mtqc |= IXGBE_MTQC_64VF; + } else { + if (tcs > 4) + mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + else if (tcs > 1) + mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + else + mtqc = IXGBE_MTQC_64Q_1PB; + } + + IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); + + /* Enable Security TX Buffer IFG for multiple pb */ + if (tcs) { + u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + sectx |= IXGBE_SECTX_DCB; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx); + } + + /* re-enable the arbiter */ + rttdcs &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); +} + +/** + * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 dmatxctl; + u32 i; + + ixgbe_setup_mtqc(adapter); + + if (hw->mac.type != ixgbe_mac_82598EB) { + /* DMATXCTL.EN must be before Tx queues are enabled */ + dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + dmatxctl |= IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); + } + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u8 reg_idx = ring->reg_idx; + u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); + + srrctl |= IXGBE_SRRCTL_DROP_EN; + + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); +} + +static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u8 reg_idx = ring->reg_idx; 
+ u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); + + srrctl &= ~IXGBE_SRRCTL_DROP_EN; + + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); +} + +#ifdef CONFIG_IXGBE_DCB +void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) +#else +static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) +#endif +{ + int i; + bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; + + if (adapter->ixgbe_ieee_pfc) + pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); + + /* + * We should set the drop enable bit if: + * SR-IOV is enabled + * or + * Number of Rx queues > 1 and flow control is disabled + * + * This allows us to avoid head of line blocking for security + * and performance reasons. + */ + if (adapter->num_vfs || (adapter->num_rx_queues > 1 && + !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) { + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); + } +} + +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 + +static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 srrctl; + u8 reg_idx = rx_ring->reg_idx; + + if (hw->mac.type == ixgbe_mac_82598EB) { + u16 mask = adapter->ring_feature[RING_F_RSS].mask; + + /* + * if VMDq is not active we must program one srrctl register + * per RSS queue since we have enabled RDRXCTL.MVMEN + */ + reg_idx &= mask; + } + + /* configure header buffer length, needed for RSC */ + srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; + + /* configure the packet buffer length */ + srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + + /* configure descriptor type */ + srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); +} + +/** + * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries + * @adapter: 
device handle + * + * - 82598/82599/X540: 128 + * - X550(non-SRIOV mode): 512 + * - X550(SRIOV mode): 64 + */ +u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) +{ + if (adapter->hw.mac.type < ixgbe_mac_X550) + return 128; + else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + return 64; + else + return 512; +} + +/** + * ixgbe_store_reta - Write the RETA table to HW + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. + */ +void ixgbe_store_reta(struct ixgbe_adapter *adapter) +{ + u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); + struct ixgbe_hw *hw = &adapter->hw; + u32 reta = 0; + u32 indices_multi; + u8 *indir_tbl = adapter->rss_indir_tbl; + + /* Fill out the redirection table as follows: + * - 82598: 8 bit wide entries containing pair of 4 bit RSS + * indices. + * - 82599/X540: 8 bit wide entries containing 4 bit RSS index + * - X550: 8 bit wide entries containing 6 bit RSS index + */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + indices_multi = 0x11; + else + indices_multi = 0x1; + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + if (i < 128) + IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); + else + IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), + reta); + reta = 0; + } + } +} + +/** + * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode) + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. 
+ */ +static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) +{ + u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); + struct ixgbe_hw *hw = &adapter->hw; + u32 vfreta = 0; + unsigned int pf_pool = adapter->num_vfs; + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), + vfreta); + vfreta = 0; + } + } +} + +static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 i, j; + u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + + /* Program table for at least 4 queues w/ SR-IOV so that VFs can + * make full use of any rings they may have. We will use the + * PSRTYPE register to control how many rings we use within the PF. + */ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4)) + rss_i = 4; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); + + /* Fill out redirection table */ + memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + ixgbe_store_reta(adapter); +} + +static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + unsigned int pf_pool = adapter->num_vfs; + int i, j; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), + adapter->rss_key[i]); + + /* Fill out the redirection table */ + for (i = 0, j = 0; i < 64; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + ixgbe_store_vfreta(adapter); +} + +static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) +{ + 
struct ixgbe_hw *hw = &adapter->hw; + u32 mrqc = 0, rss_field = 0, vfmrqc = 0; + u32 rxcsum; + + /* Disable indicating checksum in descriptor, enables RSS hash */ + rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + rxcsum |= IXGBE_RXCSUM_PCSD; + IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + if (adapter->ring_feature[RING_F_RSS].mask) + mrqc = IXGBE_MRQC_RSSEN; + } else { + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + if (tcs > 4) + mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */ + else if (tcs > 1) + mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */ + else if (adapter->ring_feature[RING_F_VMDQ].mask == + IXGBE_82599_VMDQ_4Q_MASK) + mrqc = IXGBE_MRQC_VMDQRSS32EN; + else + mrqc = IXGBE_MRQC_VMDQRSS64EN; + } else { + if (tcs > 4) + mrqc = IXGBE_MRQC_RTRSS8TCEN; + else if (tcs > 1) + mrqc = IXGBE_MRQC_RTRSS4TCEN; + else + mrqc = IXGBE_MRQC_RSSEN; + } + } + + /* Perform hash on these packet types */ + rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 | + IXGBE_MRQC_RSS_FIELD_IPV4_TCP | + IXGBE_MRQC_RSS_FIELD_IPV6 | + IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + + if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; + + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); + if ((hw->mac.type >= ixgbe_mac_X550) && + (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { + unsigned int pf_pool = adapter->num_vfs; + + /* Enable VF RSS mode */ + mrqc |= IXGBE_MRQC_MULTIPLE_RSS; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + + /* Setup RSS through the VF registers */ + ixgbe_setup_vfreta(adapter); + vfmrqc = IXGBE_MRQC_RSSEN; + vfmrqc |= rss_field; + IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc); + } else { + ixgbe_setup_reta(adapter); + mrqc |= rss_field; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + } +} + +/** + * ixgbe_configure_rscctl - enable RSC for the 
indicated ring + * @adapter: address of board private structure + * @index: index of ring to set + **/ +static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rscctrl; + u8 reg_idx = ring->reg_idx; + + if (!ring_is_rsc_enabled(ring)) + return; + + rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); + rscctrl |= IXGBE_RSCCTL_RSCEN; + /* + * we must limit the number of descriptors so that the + * total size of max desc * buf_len is not greater + * than 65536 + */ + rscctrl |= IXGBE_RSCCTL_MAXDESC_16; + IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); +} + +#define IXGBE_MAX_RX_DESC_POLL 10 +static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (ixgbe_removed(hw->hw_addr)) + return; + /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + do { + usleep_range(1000, 2000); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " + "the polling period\n", reg_idx); + } +} + +void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (ixgbe_removed(hw->hw_addr)) + return; + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + + /* write value back with RXDCTL.ENABLE bit cleared */ + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + /* the 
hardware may take up to 100us to really disable the rx queue */ + do { + udelay(10); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " + "the polling period\n", reg_idx); + } +} + +void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 rdba = ring->dma; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + ixgbe_disable_rx_queue(adapter, ring); + + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), + ring->count * sizeof(union ixgbe_adv_rx_desc)); + /* Force flushing of IXGBE_RDLEN to prevent MDD */ + IXGBE_WRITE_FLUSH(hw); + + IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); + ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); + + ixgbe_configure_srrctl(adapter, ring); + ixgbe_configure_rscctl(adapter, ring); + + if (hw->mac.type == ixgbe_mac_82598EB) { + /* + * enable cache line friendly hardware writes: + * PTHRESH=32 descriptors (half the internal cache), + * this also removes ugly rx_no_buffer_count increment + * HTHRESH=4 descriptors (to minimize latency on fetch) + * WTHRESH=8 burst writeback up to two cache lines + */ + rxdctl &= ~0x3FFFFF; + rxdctl |= 0x080420; + } + + /* enable receive descriptor ring */ + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + + ixgbe_rx_desc_queue_enable(adapter, ring); + ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); +} + +static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int rss_i = adapter->ring_feature[RING_F_RSS].indices; + u16 
pool; + + /* PSRTYPE must be initialized in non 82598 adapters */ + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | + IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | + IXGBE_PSRTYPE_L2HDR | + IXGBE_PSRTYPE_IPV6HDR; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + if (rss_i > 3) + psrtype |= 2u << 29; + else if (rss_i > 1) + psrtype |= 1u << 29; + + for_each_set_bit(pool, &adapter->fwd_bitmask, 32) + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); +} + +static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 reg_offset, vf_shift; + u32 gcr_ext, vmdctl; + int i; + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return; + + vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; + vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; + vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT; + vmdctl |= IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); + + vf_shift = VMDQ_P(0) % 32; + reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0; + + /* Enable only the PF's pool for Tx/Rx */ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift)); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift)); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); + if (adapter->bridge_mode == BRIDGE_MODE_VEB) + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + + /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ + hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; + + /* + * Set up VF register offsets for selected VT Mode, + * i.e. 
32 or 64 VFs for SR-IOV + */ + switch (adapter->ring_feature[RING_F_VMDQ].mask) { + case IXGBE_82599_VMDQ_8Q_MASK: + gcr_ext = IXGBE_GCR_EXT_VT_MODE_16; + break; + case IXGBE_82599_VMDQ_4Q_MASK: + gcr_ext = IXGBE_GCR_EXT_VT_MODE_32; + break; + default: + gcr_ext = IXGBE_GCR_EXT_VT_MODE_64; + break; + } + + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); + + for (i = 0; i < adapter->num_vfs; i++) { + /* configure spoof checking */ + ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, + adapter->vfinfo[i].spoofchk_enabled); + + /* Enable/Disable RSS query feature */ + ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, + adapter->vfinfo[i].rss_query_enabled); + } +} + +static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + struct ixgbe_ring *rx_ring; + int i; + u32 mhadd, hlreg0; + +#ifdef IXGBE_FCOE + /* adjust max frame to be able to do baby jumbo for FCoE */ + if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && + (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) + max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; + +#endif /* IXGBE_FCOE */ + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); + + mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); + if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { + mhadd &= ~IXGBE_MHADD_MFS_MASK; + mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); + } + + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ + hlreg0 |= IXGBE_HLREG0_JUMBOEN; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + if 
(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + set_ring_rsc_enabled(rx_ring); + else + clear_ring_rsc_enabled(rx_ring); + } +} + +static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + /* + * For VMDq support of different descriptor types or + * buffer sizes through the use of multiple SRRCTL + * registers, RDRXCTL.MVMEN must be set to 1 + * + * also, the manual doesn't mention it clearly but DCA hints + * will only use queue 0's tags unless this bit is set. Side + * effects of setting this bit are only that SRRCTL must be + * fully programmed [0..15] + */ + rdrxctl |= IXGBE_RDRXCTL_MVMEN; + break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + if (adapter->num_vfs) + rdrxctl |= IXGBE_RDRXCTL_PSP; + /* fall through for older HW */ + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + /* Disable RSC for ACK packets */ + IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, + (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); + rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; + /* hardware requires some bits to be set by default */ + rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); + rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; + break; + default: + /* We should do nothing since we don't know this hardware */ + return; + } + + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); +} + +/** + * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ **/ +static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + u32 rxctrl, rfctl; + + /* disable receives while setting up the descriptors */ + hw->mac.ops.disable_rx(hw); + + ixgbe_setup_psrtype(adapter); + ixgbe_setup_rdrxctl(adapter); + + /* RSC Setup */ + rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); + rfctl &= ~IXGBE_RFCTL_RSC_DIS; + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) + rfctl |= IXGBE_RFCTL_RSC_DIS; + + /* disable NFS filtering */ + rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS); + IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); + + /* Program registers for the distribution of queues */ + ixgbe_setup_mrqc(adapter); + + /* set_rx_buffer_len must be called before ring initialization */ + ixgbe_set_rx_buffer_len(adapter); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + /* disable drop enable for 82598 parts */ + if (hw->mac.type == ixgbe_mac_82598EB) + rxctrl |= IXGBE_RXCTRL_DMBYPS; + + /* enable all receives */ + rxctrl |= IXGBE_RXCTRL_RXEN; + hw->mac.ops.enable_rx_dma(hw, rxctrl); +} + +static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + /* add VID to filter table */ + if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid); + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) +{ + u32 vlvf; + int idx; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries */ + for (idx = IXGBE_VLVF_ENTRIES; --idx;) { + vlvf = IXGBE_READ_REG(hw, 
IXGBE_VLVF(idx)); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + return idx; +} + +void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 bits, word; + int idx; + + idx = ixgbe_find_vlvf_entry(hw, vid); + if (!idx) + return; + + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + word = idx * 2 + (VMDQ_P(0) / 32); + bits = ~BIT(VMDQ_P(0) % 32); + bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); + + /* Disable the filter so this falls into the default pool. */ + if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) { + if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0); + } +} + +static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + /* remove VID from filter table */ + if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true); + + clear_bit(vid, adapter->active_vlans); + + return 0; +} + +/** + * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping + * @adapter: driver data + */ +static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl; + int i, j; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = adapter->rx_ring[i]; + + if (ring->l2_accel_priv) + continue; + j = ring->reg_idx; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); + vlnctrl &= ~IXGBE_RXDCTL_VME; + 
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); + } + break; + default: + break; + } +} + +/** + * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping + * @adapter: driver data + */ +static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl; + int i, j; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl |= IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = adapter->rx_ring[i]; + + if (ring->l2_accel_priv) + continue; + j = ring->reg_idx; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); + vlnctrl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); + } + break; + default: + break; + } +} + +static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl, i; + + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { + /* For VMDq and SR-IOV we must leave VLAN filtering enabled */ + vlnctrl |= IXGBE_VLNCTRL_VFE; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + } else { + vlnctrl &= ~IXGBE_VLNCTRL_VFE; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + return; + } + + /* Nothing to do for 82598 */ + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + /* We are already in VLAN promisc, nothing to do */ + if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; + + /* Add PF to all active pools */ + for (i = IXGBE_VLVF_ENTRIES; --i;) { + u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); + u32 vlvfb = IXGBE_READ_REG(hw, reg_offset); + + vlvfb |= BIT(VMDQ_P(0) % 32); + IXGBE_WRITE_REG(hw, reg_offset, 
vlvfb); + } + + /* Set all bits in the VLAN filter table array */ + for (i = hw->mac.vft_size; i--;) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U); +} + +#define VFTA_BLOCK_SIZE 8 +static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; + u32 vid_start = vfta_offset * 32; + u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); + u32 i, vid, word, bits; + + for (i = IXGBE_VLVF_ENTRIES; --i;) { + u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); + + /* pull VLAN ID from VLVF */ + vid = vlvf & VLAN_VID_MASK; + + /* only concern outselves with a certain range */ + if (vid < vid_start || vid >= vid_end) + continue; + + if (vlvf) { + /* record VLAN ID in VFTA */ + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); + + /* if PF is part of this then continue */ + if (test_bit(vid, adapter->active_vlans)) + continue; + } + + /* remove PF from the pool */ + word = i * 2 + VMDQ_P(0) / 32; + bits = ~BIT(VMDQ_P(0) % 32); + bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits); + } + + /* extract values from active_vlans and write back to VFTA */ + for (i = VFTA_BLOCK_SIZE; i--;) { + vid = (vfta_offset + i) * 32; + word = vid / BITS_PER_LONG; + bits = vid % BITS_PER_LONG; + + vfta[i] |= adapter->active_vlans[word] >> bits; + + IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]); + } +} + +static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl, i; + + /* Set VLAN filtering to enabled */ + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl |= IXGBE_VLNCTRL_VFE; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) || + hw->mac.type == ixgbe_mac_82598EB) + return; + + /* We are not in VLAN promisc, nothing to do */ + if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + return; + + /* Set flag so we don't redo unnecessary work */ + 
adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; + + for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE) + ixgbe_scrub_vfta(adapter, i); +} + +static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) +{ + u16 vid = 1; + + ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); + + for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) + ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +/** + * ixgbe_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int ixgbe_write_mc_addr_list(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (!netif_running(netdev)) + return 0; + + if (hw->mac.ops.update_mc_addr_list) + hw->mac.ops.update_mc_addr_list(hw, netdev); + else + return -ENOMEM; + +#ifdef CONFIG_PCI_IOV + ixgbe_restore_vf_multicasts(adapter); +#endif + + return netdev_mc_count(netdev); +} + +#ifdef CONFIG_PCI_IOV +void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter) +{ + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; + struct ixgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; + + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) + hw->mac.ops.set_rar(hw, i, + mac_table->addr, + mac_table->pool, + IXGBE_RAH_AV); + else + hw->mac.ops.clear_rar(hw, i); + } +} + +#endif +static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) +{ + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; + struct ixgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED)) + continue; + + mac_table->state &= 
~IXGBE_MAC_STATE_MODIFIED; + + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) + hw->mac.ops.set_rar(hw, i, + mac_table->addr, + mac_table->pool, + IXGBE_RAH_AV); + else + hw->mac.ops.clear_rar(hw, i); + } +} + +static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) +{ + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; + struct ixgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + mac_table->state |= IXGBE_MAC_STATE_MODIFIED; + mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; + } + + ixgbe_sync_mac_table(adapter); +} + +static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool) +{ + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; + struct ixgbe_hw *hw = &adapter->hw; + int i, count = 0; + + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + /* do not count default RAR as available */ + if (mac_table->state & IXGBE_MAC_STATE_DEFAULT) + continue; + + /* only count unused and addresses that belong to us */ + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) { + if (mac_table->pool != pool) + continue; + } + + count++; + } + + return count; +} + +/* this function destroys the first RAR entry */ +static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter) +{ + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; + struct ixgbe_hw *hw = &adapter->hw; + + memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN); + mac_table->pool = VMDQ_P(0); + + mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE; + + hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool, + IXGBE_RAH_AV); +} + +int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, + const u8 *addr, u16 pool) +{ + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; + struct ixgbe_hw *hw = &adapter->hw; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) + 
continue; + + ether_addr_copy(mac_table->addr, addr); + mac_table->pool = pool; + + mac_table->state |= IXGBE_MAC_STATE_MODIFIED | + IXGBE_MAC_STATE_IN_USE; + + ixgbe_sync_mac_table(adapter); + + return i; + } + + return -ENOMEM; +} + +int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, + const u8 *addr, u16 pool) +{ + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; + struct ixgbe_hw *hw = &adapter->hw; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* search table for addr, if found clear IN_USE flag and sync */ + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + /* we can only delete an entry if it is in use */ + if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE)) + continue; + /* we only care about entries that belong to the given pool */ + if (mac_table->pool != pool) + continue; + /* we only care about a specific MAC address */ + if (!ether_addr_equal(addr, mac_table->addr)) + continue; + + mac_table->state |= IXGBE_MAC_STATE_MODIFIED; + mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; + + ixgbe_sync_mac_table(adapter); + + return 0; + } + + return -ENOMEM; +} +/** + * ixgbe_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. 
+ * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn)) + return -ENOMEM; + + if (!netdev_uc_empty(netdev)) { + struct netdev_hw_addr *ha; + netdev_for_each_uc_addr(ha, netdev) { + ixgbe_del_mac_filter(adapter, ha->addr, vfn); + ixgbe_add_mac_filter(adapter, ha->addr, vfn); + count++; + } + } + return count; +} + +static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int ret; + + ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0)); + + return min_t(int, ret, 0); +} + +static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0)); + + return 0; +} + +/** + * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the unicast/multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast and + * promiscuous mode. 
+ **/ +void ixgbe_set_rx_mode(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; + netdev_features_t features = netdev->features; + int count; + + /* Check for Promiscuous and All Multicast modes */ + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + + /* set all bits that we expect to always be set */ + fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ + fctrl |= IXGBE_FCTRL_BAM; + fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ + fctrl |= IXGBE_FCTRL_PMCF; + + /* clear the bits we are changing the status of */ + fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + if (netdev->flags & IFF_PROMISC) { + hw->addr_ctrl.user_set_promisc = true; + fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + vmolr |= IXGBE_VMOLR_MPE; + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + } else { + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= IXGBE_FCTRL_MPE; + vmolr |= IXGBE_VMOLR_MPE; + } + hw->addr_ctrl.user_set_promisc = false; + } + + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) { + fctrl |= IXGBE_FCTRL_UPE; + vmolr |= IXGBE_VMOLR_ROPE; + } + + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = ixgbe_write_mc_addr_list(netdev); + if (count < 0) { + fctrl |= IXGBE_FCTRL_MPE; + vmolr |= IXGBE_VMOLR_MPE; + } else if (count) { + vmolr |= IXGBE_VMOLR_ROMPE; + } + + if (hw->mac.type != ixgbe_mac_82598EB) { + vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) & + ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_ROPE); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr); + } + + /* This is useful for sniffing bad packets. 
*/ + if (features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode */ + fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */ + IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */ + IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */ + + fctrl &= ~(IXGBE_FCTRL_DPF); + /* NOTE: VLAN filtering is disabled by setting PROMISC */ + } + + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + ixgbe_vlan_strip_enable(adapter); + else + ixgbe_vlan_strip_disable(adapter); + + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + ixgbe_vlan_promisc_disable(adapter); + else + ixgbe_vlan_promisc_enable(adapter); +} + +static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) +{ + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + ixgbe_qv_init_lock(adapter->q_vector[q_idx]); + napi_enable(&adapter->q_vector[q_idx]->napi); + } +} + +static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) +{ + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + napi_disable(&adapter->q_vector[q_idx]->napi); + while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) { + pr_info("QV %d locked\n", q_idx); + usleep_range(1000, 20000); + } + } +} + +static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vxlanctrl; + + if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE | + IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))) + return; + + vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask; + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl); + + if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK) + adapter->vxlan_port = 0; + + if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK) + adapter->geneve_port = 0; +} + +#ifdef CONFIG_IXGBE_DCB +/** + * ixgbe_configure_dcb - Configure DCB hardware + * @adapter: ixgbe adapter struct + * + * This is called by the driver on open to configure the DCB hardware. 
+ * This is also called by the gennetlink interface when reconfiguring + * the DCB state. + */ +static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { + if (hw->mac.type == ixgbe_mac_82598EB) + netif_set_gso_max_size(adapter->netdev, 65536); + return; + } + + if (hw->mac.type == ixgbe_mac_82598EB) + netif_set_gso_max_size(adapter->netdev, 32768); + +#ifdef IXGBE_FCOE + if (adapter->netdev->features & NETIF_F_FCOE_MTU) + max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif + + /* reconfigure the hardware */ + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) { + ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, + DCB_TX_CONFIG); + ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, + DCB_RX_CONFIG); + ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); + } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) { + ixgbe_dcb_hw_ets(&adapter->hw, + adapter->ixgbe_ieee_ets, + max_frame); + ixgbe_dcb_hw_pfc_config(&adapter->hw, + adapter->ixgbe_ieee_pfc->pfc_en, + adapter->ixgbe_ieee_ets->prio_tc); + } + + /* Enable RSS Hash per TC */ + if (hw->mac.type != ixgbe_mac_82598EB) { + u32 msb = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1; + + while (rss_i) { + msb++; + rss_i >>= 1; + } + + /* write msb to all 8 TCs in one write */ + IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111); + } +} +#endif + +/* Additional bittime to account for IXGBE framing */ +#define IXGBE_ETH_FRAMING 20 + +/** + * ixgbe_hpbthresh - calculate high water mark for flow control + * + * @adapter: board private structure to calculate for + * @pb: packet buffer to calculate + */ +static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *dev = adapter->netdev; + int link, tc, kb, marker; + u32 dv_id, rx_pba; + + /* 
Calculate max LAN frame size */
+	tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
+
+#ifdef IXGBE_FCOE
+	/* FCoE traffic class uses FCOE jumbo frames */
+	if ((dev->features & NETIF_F_FCOE_MTU) &&
+	    (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+	    (pb == ixgbe_fcoe_get_tc(adapter)))
+		tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+#endif
+
+	/* Calculate delay value for device */
+	switch (hw->mac.type) {
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
+		dv_id = IXGBE_DV_X540(link, tc);
+		break;
+	default:
+		dv_id = IXGBE_DV(link, tc);
+		break;
+	}
+
+	/* Loopback switch introduces additional latency */
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		dv_id += IXGBE_B2BT(tc);
+
+	/* Delay value is calculated in bit times; convert to KB */
+	kb = IXGBE_BT2KB(dv_id);
+	rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
+
+	marker = rx_pba - kb;
+
+	/* It is possible that the packet buffer is not large enough
+	 * to provide required headroom. In this case throw an error
+	 * to user and do the best we can.
+	 *
+	 * Trailing spaces inside the adjacent string literals keep the
+	 * concatenated warning readable ("enough headroom", not
+	 * "enoughheadroom").
+	 */
+	if (marker < 0) {
+		e_warn(drv, "Packet Buffer(%i) can not provide enough "
+			"headroom to support flow control. "
+ "Decrease MTU or number of traffic classes\n", pb); + marker = tc + 1; + } + + return marker; +} + +/** + * ixgbe_lpbthresh - calculate low water mark for for flow control + * + * @adapter: board private structure to calculate for + * @pb: packet buffer to calculate + */ +static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *dev = adapter->netdev; + int tc; + u32 dv_id; + + /* Calculate max LAN frame size */ + tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + +#ifdef IXGBE_FCOE + /* FCoE traffic class uses FCOE jumbo frames */ + if ((dev->features & NETIF_F_FCOE_MTU) && + (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && + (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) + tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; +#endif + + /* Calculate delay value for device */ + switch (hw->mac.type) { + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + dv_id = IXGBE_LOW_DV_X540(tc); + break; + default: + dv_id = IXGBE_LOW_DV(tc); + break; + } + + /* Delay value is calculated in bit times convert to KB */ + return IXGBE_BT2KB(dv_id); +} + +/* + * ixgbe_pbthresh_setup - calculate and setup high low water marks + */ +static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int num_tc = netdev_get_num_tc(adapter->netdev); + int i; + + if (!num_tc) + num_tc = 1; + + for (i = 0; i < num_tc; i++) { + hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); + hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i); + + /* Low water marks must not be larger than high water marks */ + if (hw->fc.low_water[i] > hw->fc.high_water[i]) + hw->fc.low_water[i] = 0; + } + + for (; i < MAX_TRAFFIC_CLASS; i++) + hw->fc.high_water[i] = 0; +} + +static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int hdrm; + u8 tc = netdev_get_num_tc(adapter->netdev); + + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE 
|| + adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) + hdrm = 32 << adapter->fdir_pballoc; + else + hdrm = 0; + + hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); + ixgbe_pbthresh_setup(adapter); +} + +static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct hlist_node *node2; + struct ixgbe_fdir_filter *filter; + + spin_lock(&adapter->fdir_perfect_lock); + + if (!hlist_empty(&adapter->fdir_filter_list)) + ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); + + hlist_for_each_entry_safe(filter, node2, + &adapter->fdir_filter_list, fdir_node) { + ixgbe_fdir_write_perfect_filter_82599(hw, + &filter->filter, + filter->sw_idx, + (filter->action == IXGBE_FDIR_DROP_QUEUE) ? + IXGBE_FDIR_DROP_QUEUE : + adapter->rx_ring[filter->action]->reg_idx); + } + + spin_unlock(&adapter->fdir_perfect_lock); +} + +static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, + struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vmolr; + + /* No unicast promiscuous support for VMDQ devices. 
*/ + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); + vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE); + + /* clear the affected bit */ + vmolr &= ~IXGBE_VMOLR_MPE; + + if (dev->flags & IFF_ALLMULTI) { + vmolr |= IXGBE_VMOLR_MPE; + } else { + vmolr |= IXGBE_VMOLR_ROMPE; + hw->mac.ops.update_mc_addr_list(hw, dev); + } + ixgbe_write_uc_addr_list(adapter->netdev, pool); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); +} + +static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) +{ + struct ixgbe_adapter *adapter = vadapter->real_adapter; + int rss_i = adapter->num_rx_queues_per_pool; + struct ixgbe_hw *hw = &adapter->hw; + u16 pool = vadapter->pool; + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | + IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | + IXGBE_PSRTYPE_L2HDR | + IXGBE_PSRTYPE_IPV6HDR; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + if (rss_i > 3) + psrtype |= 2u << 29; + else if (rss_i > 1) + psrtype |= 1u << 29; + + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); +} + +/** + * ixgbe_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; + if (IXGBE_CB(skb)->page_released) + dma_unmap_page(dev, + IXGBE_CB(skb)->dma, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + dev_kfree_skb(skb); + rx_buffer->skb = NULL; + } + + if (!rx_buffer->page) + continue; + + dma_unmap_page(dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring)); + + rx_buffer->page = NULL; + } + + size = sizeof(struct 
ixgbe_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter, + struct ixgbe_ring *rx_ring) +{ + struct ixgbe_adapter *adapter = vadapter->real_adapter; + int index = rx_ring->queue_index + vadapter->rx_base_queue; + + /* shutdown specific queue receive and wait for dma to settle */ + ixgbe_disable_rx_queue(adapter, rx_ring); + usleep_range(10000, 20000); + ixgbe_irq_disable_queues(adapter, BIT_ULL(index)); + ixgbe_clean_rx_ring(rx_ring); + rx_ring->l2_accel_priv = NULL; +} + +static int ixgbe_fwd_ring_down(struct net_device *vdev, + struct ixgbe_fwd_adapter *accel) +{ + struct ixgbe_adapter *adapter = accel->real_adapter; + unsigned int rxbase = accel->rx_base_queue; + unsigned int txbase = accel->tx_base_queue; + int i; + + netif_tx_stop_all_queues(vdev); + + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { + ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); + adapter->rx_ring[rxbase + i]->netdev = adapter->netdev; + } + + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { + adapter->tx_ring[txbase + i]->l2_accel_priv = NULL; + adapter->tx_ring[txbase + i]->netdev = adapter->netdev; + } + + + return 0; +} + +static int ixgbe_fwd_ring_up(struct net_device *vdev, + struct ixgbe_fwd_adapter *accel) +{ + struct ixgbe_adapter *adapter = accel->real_adapter; + unsigned int rxbase, txbase, queues; + int i, baseq, err = 0; + + if (!test_bit(accel->pool, &adapter->fwd_bitmask)) + return 0; + + baseq = accel->pool * adapter->num_rx_queues_per_pool; + netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", + accel->pool, adapter->num_rx_pools, + baseq, baseq + adapter->num_rx_queues_per_pool, + adapter->fwd_bitmask); + + accel->netdev = vdev; + accel->rx_base_queue = rxbase = baseq; + 
accel->tx_base_queue = txbase = baseq; + + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) + ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); + + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { + adapter->rx_ring[rxbase + i]->netdev = vdev; + adapter->rx_ring[rxbase + i]->l2_accel_priv = accel; + ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]); + } + + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { + adapter->tx_ring[txbase + i]->netdev = vdev; + adapter->tx_ring[txbase + i]->l2_accel_priv = accel; + } + + queues = min_t(unsigned int, + adapter->num_rx_queues_per_pool, vdev->num_tx_queues); + err = netif_set_real_num_tx_queues(vdev, queues); + if (err) + goto fwd_queue_err; + + err = netif_set_real_num_rx_queues(vdev, queues); + if (err) + goto fwd_queue_err; + + if (is_valid_ether_addr(vdev->dev_addr)) + ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool); + + ixgbe_fwd_psrtype(accel); + ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter); + return err; +fwd_queue_err: + ixgbe_fwd_ring_down(vdev, accel); + return err; +} + +static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) +{ + struct net_device *upper; + struct list_head *iter; + int err; + + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *dfwd = netdev_priv(upper); + struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; + + if (dfwd->fwd_priv) { + err = ixgbe_fwd_ring_up(upper, vadapter); + if (err) + continue; + } + } + } +} + +static void ixgbe_configure(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + ixgbe_configure_pb(adapter); +#ifdef CONFIG_IXGBE_DCB + ixgbe_configure_dcb(adapter); +#endif + /* + * We must restore virtualization before VLANs or else + * the VLVF registers will not be populated + */ + ixgbe_configure_virtualization(adapter); + ixgbe_set_rx_mode(adapter->netdev); + ixgbe_restore_vlan(adapter); + + switch (hw->mac.type) { 
+ case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + hw->mac.ops.disable_rx_buff(hw); + break; + default: + break; + } + + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + ixgbe_init_fdir_signature_82599(&adapter->hw, + adapter->fdir_pballoc); + } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { + ixgbe_init_fdir_perfect_82599(&adapter->hw, + adapter->fdir_pballoc); + ixgbe_fdir_filter_restore(adapter); + } + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + hw->mac.ops.enable_rx_buff(hw); + break; + default: + break; + } +#ifdef CONFIG_IXGBE_DCA + /* configure DCA */ + if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) + ixgbe_setup_dca(adapter); +#endif /* CONFIG_IXGBE_DCA */ + +#ifdef IXGBE_FCOE + /* configure FCoE L2 filters, redirection table, and Rx control */ + ixgbe_configure_fcoe(adapter); + +#endif /* IXGBE_FCOE */ + ixgbe_configure_tx(adapter); + ixgbe_configure_rx(adapter); + ixgbe_configure_dfwd(adapter); +} + +/** + * ixgbe_sfp_link_config - set up SFP+ link + * @adapter: pointer to private adapter struct + **/ +static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) +{ + /* + * We are assuming the worst case scenario here, and that + * is that an SFP was inserted/removed after the reset + * but before SFP detection was enabled. 
As such the best + * solution is to just start searching as soon as we start + */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; + + adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; + adapter->sfp_poll_time = 0; +} + +/** + * ixgbe_non_sfp_link_config - set up non-SFP+ link + * @hw: pointer to private hardware struct + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) +{ + u32 speed; + bool autoneg, link_up = false; + int ret = IXGBE_ERR_LINK_SETUP; + + if (hw->mac.ops.check_link) + ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); + + if (ret) + return ret; + + speed = hw->phy.autoneg_advertised; + if ((!speed) && (hw->mac.ops.get_link_capabilities)) + ret = hw->mac.ops.get_link_capabilities(hw, &speed, + &autoneg); + if (ret) + return ret; + + if (hw->mac.ops.setup_link) + ret = hw->mac.ops.setup_link(hw, speed, link_up); + + return ret; +} + +static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 gpie = 0; + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | + IXGBE_GPIE_OCD; + gpie |= IXGBE_GPIE_EIAME; + /* + * use EIAM to auto-mask when MSI-X interrupt is asserted + * this saves a register write for every interrupt + */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + default: + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); + break; + } + } else { + /* legacy interrupts, use EIAM to auto-mask when reading EICR, + * specifically only auto mask tx and rx interrupts */ + IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); + } + + /* XXX: to interrupt immediately for EICS writes, 
enable this */ + /* gpie |= IXGBE_GPIE_EIMEN; */ + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + gpie &= ~IXGBE_GPIE_VTMODE_MASK; + + switch (adapter->ring_feature[RING_F_VMDQ].mask) { + case IXGBE_82599_VMDQ_8Q_MASK: + gpie |= IXGBE_GPIE_VTMODE_16; + break; + case IXGBE_82599_VMDQ_4Q_MASK: + gpie |= IXGBE_GPIE_VTMODE_32; + break; + default: + gpie |= IXGBE_GPIE_VTMODE_64; + break; + } + } + + /* Enable Thermal over heat sensor interrupt */ + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + gpie |= IXGBE_SDP0_GPIEN_8259X; + break; + default: + break; + } + } + + /* Enable fan failure interrupt */ + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) + gpie |= IXGBE_SDP1_GPIEN(hw); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X; + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + gpie |= IXGBE_SDP0_GPIEN_X540; + break; + default: + break; + } + + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); +} + +static void ixgbe_up_complete(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err; + u32 ctrl_ext; + + ixgbe_get_hw_control(adapter); + ixgbe_setup_gpie(adapter); + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + ixgbe_configure_msix(adapter); + else + ixgbe_configure_msi_and_legacy(adapter); + + /* enable the optics for 82599 SFP+ fiber */ + if (hw->mac.ops.enable_tx_laser) + hw->mac.ops.enable_tx_laser(hw); + + if (hw->phy.ops.set_phy_power) + hw->phy.ops.set_phy_power(hw, true); + + smp_mb__before_atomic(); + clear_bit(__IXGBE_DOWN, &adapter->state); + ixgbe_napi_enable_all(adapter); + + if (ixgbe_is_sfp(hw)) { + ixgbe_sfp_link_config(adapter); + } else { + err = ixgbe_non_sfp_link_config(hw); + if (err) + e_err(probe, "link_config FAILED %d\n", err); + } + + /* clear any pending interrupts, may auto mask */ + IXGBE_READ_REG(hw, IXGBE_EICR); + ixgbe_irq_enable(adapter, true, true); + + /* + * If 
this adapter has a fan, check to see if we had a failure + * before we enabled the interrupt. + */ + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + e_crit(drv, "Fan has stopped, replace the adapter\n"); + } + + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problem */ + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + mod_timer(&adapter->service_timer, jiffies); + + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); +} + +void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) +{ + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ + netif_trans_update(adapter->netdev); + + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + if (adapter->hw.phy.type == ixgbe_phy_fw) + ixgbe_watchdog_link_is_down(adapter); + ixgbe_down(adapter); + /* + * If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. 
+ */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + msleep(2000); + ixgbe_up(adapter); + clear_bit(__IXGBE_RESETTING, &adapter->state); +} + +void ixgbe_up(struct ixgbe_adapter *adapter) +{ + /* hardware has been reset, we need to reload some things */ + ixgbe_configure(adapter); + + ixgbe_up_complete(adapter); +} + +void ixgbe_reset(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int err; + + if (ixgbe_removed(hw->hw_addr)) + return; + /* lock SFP init bit to prevent race conditions with the watchdog */ + while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + /* clear all SFP and link config related flags while holding SFP_INIT */ + adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP | + IXGBE_FLAG2_SFP_NEEDS_RESET); + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; + + err = hw->mac.ops.init_hw(hw); + switch (err) { + case 0: + case IXGBE_ERR_SFP_NOT_PRESENT: + case IXGBE_ERR_SFP_NOT_SUPPORTED: + break; + case IXGBE_ERR_MASTER_REQUESTS_PENDING: + e_dev_err("master disable timed out\n"); + break; + case IXGBE_ERR_EEPROM_VERSION: + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM. " + "Please be aware there may be issues associated with " + "your hardware. 
If you are experiencing problems " + "please contact your Intel or hardware " + "representative who provided you with this " + "hardware.\n"); + break; + default: + e_dev_err("Hardware Error: %d\n", err); + } + + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); + + /* flush entries out of MAC table */ + ixgbe_flush_sw_mac_table(adapter); + __dev_uc_unsync(netdev, NULL); + + /* do not flush user set addresses */ + ixgbe_mac_set_default_filter(adapter); + + /* update SAN MAC vmdq pool selection */ + if (hw->mac.san_mac_rar_index) + hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); + + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) + ixgbe_ptp_reset(adapter); + + if (hw->phy.ops.set_phy_power) { + if (!netif_running(adapter->netdev) && !adapter->wol) + hw->phy.ops.set_phy_power(hw, false); + else + hw->phy.ops.set_phy_power(hw, true); + } +} + +/** + * ixgbe_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) +{ + struct ixgbe_tx_buffer *tx_buffer_info; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + } + + netdev_tx_reset_queue(txring_txq(tx_ring)); + + size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * ixgbe_clean_all_tx_rings - Free Tx Buffers 
for all queues + * @adapter: board private structure + **/ +static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbe_clean_tx_ring(adapter->tx_ring[i]); +} + +static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) +{ + struct hlist_node *node2; + struct ixgbe_fdir_filter *filter; + + spin_lock(&adapter->fdir_perfect_lock); + + hlist_for_each_entry_safe(filter, node2, + &adapter->fdir_filter_list, fdir_node) { + hlist_del(&filter->fdir_node); + kfree(filter); + } + adapter->fdir_filter_count = 0; + + spin_unlock(&adapter->fdir_perfect_lock); +} + +void ixgbe_down(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *upper; + struct list_head *iter; + int i; + + /* signal that we are down to the interrupt handler */ + if (test_and_set_bit(__IXGBE_DOWN, &adapter->state)) + return; /* do nothing if already down */ + + /* disable receives */ + hw->mac.ops.disable_rx(hw); + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + /* this call also flushes the previous write */ + ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); + + usleep_range(10000, 20000); + + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + /* disable any upper devices */ + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *vlan = netdev_priv(upper); + + if (vlan->fwd_priv) { + netif_tx_stop_all_queues(upper); + netif_carrier_off(upper); + netif_tx_disable(upper); + } + } + } + + ixgbe_irq_disable(adapter); + + ixgbe_napi_disable_all(adapter); + + clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state); + adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; + + 
del_timer_sync(&adapter->service_timer); + + if (adapter->num_vfs) { + /* Clear EITR Select mapping */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); + + /* Mark all the VFs as inactive */ + for (i = 0 ; i < adapter->num_vfs; i++) + adapter->vfinfo[i].clear_to_send = false; + + /* ping all the active vfs to let them know we are going down */ + ixgbe_ping_all_vfs(adapter); + + /* Disable all VFTE/VFRE TX/RX */ + ixgbe_disable_tx_rx(adapter); + } + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + u8 reg_idx = adapter->tx_ring[i]->reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); + } + + /* Disable the Tx DMA engine on 82599 and later MAC */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, + (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & + ~IXGBE_DMATXCTL_TE)); + break; + default: + break; + } + + if (!pci_channel_offline(adapter->pdev)) + ixgbe_reset(adapter); + + /* power down the optics for 82599 SFP+ fiber */ + if (hw->mac.ops.disable_tx_laser) + hw->mac.ops.disable_tx_laser(hw); + + ixgbe_clean_all_tx_rings(adapter); + ixgbe_clean_all_rx_rings(adapter); +} + +/** + * ixgbe_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void ixgbe_tx_timeout(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + ixgbe_tx_timeout_reset(adapter); +} + +#ifdef CONFIG_IXGBE_DCB +static void ixgbe_init_dcb(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct tc_configuration *tc; + int j; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + case ixgbe_mac_82599EB: + adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; + break; + case 
ixgbe_mac_X540: + case ixgbe_mac_X550: + adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + default: + adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS; + break; + } + + /* Configure DCB traffic classes */ + for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { + tc = &adapter->dcb_cfg.tc_config[j]; + tc->path[DCB_TX_CONFIG].bwg_id = 0; + tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); + tc->path[DCB_RX_CONFIG].bwg_id = 0; + tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); + tc->dcb_pfc = pfc_disabled; + } + + /* Initialize default user to priority mapping, UPx->TC0 */ + tc = &adapter->dcb_cfg.tc_config[0]; + tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; + tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; + + adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; + adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; + adapter->dcb_cfg.pfc_mode_enable = false; + adapter->dcb_set_bitmap = 0x00; + if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) + adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; + memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, + sizeof(adapter->temp_dcb_cfg)); +} +#endif + +/** + * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) + * @adapter: board private structure to initialize + * + * ixgbe_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
+ **/ +static int ixgbe_sw_init(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned int rss, fdir; + u32 fwsm; + int i; + + /* PCI config space info */ + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Set common capability flags and settings */ + rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); + adapter->ring_feature[RING_F_RSS].limit = rss; + adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; + adapter->max_q_vectors = MAX_Q_VECTORS_82599; + adapter->atr_sample_rate = 20; + fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); + adapter->ring_feature[RING_F_FDIR].limit = fdir; + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; +#ifdef CONFIG_IXGBE_DCA + adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; +#endif +#ifdef CONFIG_IXGBE_DCB + adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; +#endif +#ifdef IXGBE_FCOE + adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; +#ifdef CONFIG_IXGBE_DCB + /* Default traffic class to use for FCoE */ + adapter->fcoe.up = IXGBE_FCOE_DEFTC; +#endif /* CONFIG_IXGBE_DCB */ +#endif /* IXGBE_FCOE */ + + /* initialize static ixgbe jump table entries */ + adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]), + GFP_KERNEL); + if (!adapter->jump_tables[0]) + return -ENOMEM; + adapter->jump_tables[0]->mat = ixgbe_ipv4_fields; + + for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) + adapter->jump_tables[i] = NULL; + + adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * + hw->mac.num_rar_entries, + GFP_ATOMIC); + if (!adapter->mac_table) + return -ENOMEM; + + /* Set MAC specific capability flags and exceptions */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; 
+ + if (hw->device_id == IXGBE_DEV_ID_82598AT) + adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; + + adapter->max_q_vectors = MAX_Q_VECTORS_82598; + adapter->ring_feature[RING_F_FDIR].limit = 0; + adapter->atr_sample_rate = 0; + adapter->fdir_pballoc = 0; +#ifdef IXGBE_FCOE + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; +#ifdef CONFIG_IXGBE_DCB + adapter->fcoe.up = 0; +#endif /* IXGBE_DCB */ +#endif /* IXGBE_FCOE */ + break; + case ixgbe_mac_82599EB: + if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + break; + case ixgbe_mac_X540: + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + if (fwsm & IXGBE_FWSM_TS_ENABLED) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + break; + case ixgbe_mac_x550em_a: + adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + break; + default: + break; + } + /* fall through */ + case ixgbe_mac_X550EM_x: +#ifdef CONFIG_IXGBE_DCB + adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; +#endif +#ifdef IXGBE_FCOE + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; +#ifdef CONFIG_IXGBE_DCB + adapter->fcoe.up = 0; +#endif /* IXGBE_DCB */ +#endif /* IXGBE_FCOE */ + /* Fall Through */ + case ixgbe_mac_X550: + if (hw->mac.type == ixgbe_mac_X550) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; +#ifdef CONFIG_IXGBE_DCA + adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; +#endif + adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; + break; + default: + break; + } + +#ifdef IXGBE_FCOE + /* FCoE support exists, always init the FCoE lock */ + spin_lock_init(&adapter->fcoe.lock); + +#endif + /* n-tuple support exists, always init our spinlock */ + spin_lock_init(&adapter->fdir_perfect_lock); + +#ifdef CONFIG_IXGBE_DCB + ixgbe_init_dcb(adapter); +#endif + + /* default flow control settings */ + hw->fc.requested_mode = 
ixgbe_fc_full; + hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ + ixgbe_pbthresh_setup(adapter); + hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; + hw->fc.send_xon = true; + hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw); + +#ifdef CONFIG_PCI_IOV + if (max_vfs > 0) + e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n"); + + /* assign number of SR-IOV VFs */ + if (hw->mac.type != ixgbe_mac_82598EB) { + if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) { + adapter->num_vfs = 0; + e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n"); + } else { + adapter->num_vfs = max_vfs; + } + } +#endif /* CONFIG_PCI_IOV */ + + /* enable itr by default in dynamic mode */ + adapter->rx_itr_setting = 1; + adapter->tx_itr_setting = 1; + + /* set default ring sizes */ + adapter->tx_ring_count = IXGBE_DEFAULT_TXD; + adapter->rx_ring_count = IXGBE_DEFAULT_RXD; + + /* set default work limits */ + adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; + + /* initialize eeprom parameters */ + if (ixgbe_init_eeprom_params_generic(hw)) { + e_dev_err("EEPROM initialization failed\n"); + return -EIO; + } + + /* PF holds first pool slot */ + set_bit(0, &adapter->fwd_bitmask); + set_bit(__IXGBE_DOWN, &adapter->state); + + return 0; +} + +/** + * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int ring_node = -1; + int size; + + size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; + + if (tx_ring->q_vector) + ring_node = tx_ring->q_vector->numa_node; + + tx_ring->tx_buffer_info = vzalloc_node(size, ring_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = vzalloc(size); + if 
(!tx_ring->tx_buffer_info) + goto err; + + u64_stats_init(&tx_ring->syncp); + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + set_dev_node(dev, ring_node); + tx_ring->desc = dma_alloc_coherent(dev, + tx_ring->size, + &tx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!tx_ring->desc) + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * ixgbe_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
+ * + * Return 0 on success, negative on failure + **/ +static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); + if (!err) + continue; + + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + ixgbe_free_tx_resources(adapter->tx_ring[i]); + return err; +} + +/** + * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int ring_node = -1; + int size; + + size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; + + if (rx_ring->q_vector) + ring_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = vzalloc_node(size, ring_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + u64_stats_init(&rx_ring->syncp); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + set_dev_node(dev, ring_node); + rx_ring->desc = dma_alloc_coherent(dev, + rx_ring->size, + &rx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!rx_ring->desc) + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * ixgbe_setup_all_rx_resources - allocate all queues Rx resources 
+ * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); + if (!err) + continue; + + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + +#ifdef IXGBE_FCOE + err = ixgbe_setup_fcoe_ddp_resources(adapter); + if (!err) +#endif + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + ixgbe_free_rx_resources(adapter->rx_ring[i]); + return err; +} + +/** + * ixgbe_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) +{ + ixgbe_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]->desc) + ixgbe_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * ixgbe_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) +{ + ixgbe_clean_rx_ring(rx_ring); + + 
vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) +{ + int i; + +#ifdef IXGBE_FCOE + ixgbe_free_fcoe_ddp_resources(adapter); + +#endif + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]->desc) + ixgbe_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * ixgbe_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) + return -EINVAL; + + /* + * For 82599EB we cannot allow legacy VFs to enable their receive + * paths when MTU greater than 1500 is configured. So display a + * warning that legacy VFs will be disabled. 
+ */ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && + (adapter->hw.mac.type == ixgbe_mac_82599EB) && + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) + e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + + return 0; +} + +/** + * ixgbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +int ixgbe_open(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int err, queues; + + /* disallow open during test */ + if (test_bit(__IXGBE_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = ixgbe_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = ixgbe_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + ixgbe_configure(adapter); + err = ixgbe_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. 
*/ + if (adapter->num_rx_pools > 1) + queues = adapter->num_rx_queues_per_pool; + else + queues = adapter->num_tx_queues; + + err = netif_set_real_num_tx_queues(netdev, queues); + if (err) + goto err_set_queues; + + if (adapter->num_rx_pools > 1 && + adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES) + queues = IXGBE_MAX_L2A_QUEUES; + else + queues = adapter->num_rx_queues; + err = netif_set_real_num_rx_queues(netdev, queues); + if (err) + goto err_set_queues; + + ixgbe_ptp_init(adapter); + + ixgbe_up_complete(adapter); + + ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); + udp_tunnel_get_rx_info(netdev); + + return 0; + +err_set_queues: + ixgbe_free_irq(adapter); +err_req_irq: + ixgbe_free_all_rx_resources(adapter); + if (hw->phy.ops.set_phy_power && !adapter->wol) + hw->phy.ops.set_phy_power(&adapter->hw, false); +err_setup_rx: + ixgbe_free_all_tx_resources(adapter); +err_setup_tx: + ixgbe_reset(adapter); + + return err; +} + +static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) +{ + ixgbe_ptp_suspend(adapter); + + if (adapter->hw.phy.ops.enter_lplu) { + adapter->hw.phy.reset_disable = true; + ixgbe_down(adapter); + adapter->hw.phy.ops.enter_lplu(&adapter->hw); + adapter->hw.phy.reset_disable = false; + } else { + ixgbe_down(adapter); + } + + ixgbe_free_irq(adapter); + + ixgbe_free_all_tx_resources(adapter); + ixgbe_free_all_rx_resources(adapter); +} + +/** + * ixgbe_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ **/ +int ixgbe_close(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + ixgbe_ptp_stop(adapter); + + if (netif_device_present(netdev)) + ixgbe_close_suspend(adapter); + + ixgbe_fdir_filter_exit(adapter); + + ixgbe_release_hw_control(adapter); + + return 0; +} + +#ifdef CONFIG_PM +static int ixgbe_resume(struct pci_dev *pdev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + u32 err; + + adapter->hw.hw_addr = adapter->io_addr; + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* + * pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. + */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + e_dev_err("Cannot enable PCI device from suspend\n"); + return err; + } + smp_mb__before_atomic(); + clear_bit(__IXGBE_DISABLED, &adapter->state); + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + ixgbe_reset(adapter); + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + + rtnl_lock(); + err = ixgbe_init_interrupt_scheme(adapter); + if (!err && netif_running(netdev)) + err = ixgbe_open(netdev); + + + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + + return err; +} +#endif /* CONFIG_PM */ + +static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u32 ctrl, fctrl; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) + ixgbe_close_suspend(adapter); + + ixgbe_clear_interrupt_scheme(adapter); + rtnl_unlock(); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; + +#endif + if (hw->mac.ops.stop_link_on_d3) + hw->mac.ops.stop_link_on_d3(hw); + + if (wufc) { + ixgbe_set_rx_mode(netdev); + + /* enable the optics 
for 82599 SFP+ fiber as we can WoL */ + if (hw->mac.ops.enable_tx_laser) + hw->mac.ops.enable_tx_laser(hw); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & IXGBE_WUFC_MC) { + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_MPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + ctrl |= IXGBE_CTRL_GIO_DIS; + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + + IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc); + } else { + IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); + IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); + } + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + pci_wake_from_d3(pdev, false); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + pci_wake_from_d3(pdev, !!wufc); + break; + default: + break; + } + + *enable_wake = !!wufc; + if (hw->phy.ops.set_phy_power && !*enable_wake) + hw->phy.ops.set_phy_power(hw, false); + + ixgbe_release_hw_control(adapter); + + if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + retval = __ixgbe_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} +#endif /* CONFIG_PM */ + +static void ixgbe_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __ixgbe_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * ixgbe_update_stats - Update the board statistics counters. 
+ * @adapter: board private structure + **/ +void ixgbe_update_stats(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_hw_stats *hwstats = &adapter->stats; + u64 total_mpc = 0; + u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; + u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; + + if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return; + + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { + u64 rsc_count = 0; + u64 rsc_flush = 0; + for (i = 0; i < adapter->num_rx_queues; i++) { + rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; + rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; + } + adapter->rsc_total_count = rsc_count; + adapter->rsc_total_flush = rsc_flush; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; + } + adapter->non_eop_descs = non_eop_descs; + adapter->alloc_rx_page_failed = alloc_rx_page_failed; + adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; + adapter->hw_csum_rx_error = hw_csum_rx_error; + netdev->stats.rx_bytes = bytes; + netdev->stats.rx_packets = packets; + + bytes = 0; + packets = 0; + /* gather some stats to the adapter struct that are per queue */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + bytes += tx_ring->stats.bytes; + packets += tx_ring->stats.packets; + } + 
adapter->restart_queue = restart_queue; + adapter->tx_busy = tx_busy; + netdev->stats.tx_bytes = bytes; + netdev->stats.tx_packets = packets; + + hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); + + /* 8 register reads */ + for (i = 0; i < 8; i++) { + /* for packet buffers not used, the register should read 0 */ + mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + missed_rx += mpc; + hwstats->mpc[i] += mpc; + total_mpc += hwstats->mpc[i]; + hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); + hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); + hwstats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + hwstats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + break; + default: + break; + } + } + + /*16 register reads */ + for (i = 0; i < 16; i++) { + hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + if ((hw->mac.type == ixgbe_mac_82599EB) || + (hw->mac.type == ixgbe_mac_X540) || + (hw->mac.type == ixgbe_mac_X550) || + (hw->mac.type == ixgbe_mac_X550EM_x) || + (hw->mac.type == ixgbe_mac_x550em_a)) { + hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ + hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */ + } + } + + hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); + /* work around hardware counting issue */ + hwstats->gprc -= missed_rx; + + ixgbe_update_xoff_received(adapter); + + /* 82598 hardware only has a 32 bit counter in the high register */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + 
hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); + hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); + hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); + hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); + break; + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + /* OS2BMC stats are X540 and later */ + hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); + hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); + hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); + hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); + case ixgbe_mac_82599EB: + for (i = 0; i < 16; i++) + adapter->hw_rx_no_dma_resources += + IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); + IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ + hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); + IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ + hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); + IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ + hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); +#ifdef IXGBE_FCOE + hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); + hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); + hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); + hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); + hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); + hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); + /* Add up per cpu counters for total ddp aloc fail */ + if (adapter->fcoe.ddp_pool) { + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + struct ixgbe_fcoe_ddp_pool *ddp_pool; + unsigned int cpu; + u64 noddp = 0, noddp_ext_buff = 0; + for_each_possible_cpu(cpu) { + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + noddp += ddp_pool->noddp; + noddp_ext_buff += ddp_pool->noddp_ext_buff; + } + hwstats->fcoe_noddp = noddp; + 
hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; + } +#endif /* IXGBE_FCOE */ + break; + default: + break; + } + bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); + hwstats->bprc += bprc; + hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); + if (hw->mac.type == ixgbe_mac_82598EB) + hwstats->mprc -= bprc; + hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); + hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); + hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); + hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); + hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); + hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); + hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); + hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); + lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); + hwstats->lxontxc += lxon; + lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + hwstats->lxofftxc += lxoff; + hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); + hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); + /* + * 82598 errata - tx of flow control packets is included in tx counters + */ + xon_off_tot = lxon + lxoff; + hwstats->gptc -= xon_off_tot; + hwstats->mptc -= xon_off_tot; + hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); + hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); + hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); + hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); + hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); + hwstats->ptc64 -= xon_off_tot; + hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); + hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); + hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); + hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); + hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); + hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = hwstats->mprc; + + /* Rx Errors */ + netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; + 
netdev->stats.rx_dropped = 0; + netdev->stats.rx_length_errors = hwstats->rlec; + netdev->stats.rx_crc_errors = hwstats->crcerrs; + netdev->stats.rx_missed_errors = total_mpc; +} + +/** + * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table + * @adapter: pointer to the device adapter structure + **/ +static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; + + /* if interface is down do nothing */ + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; + + /* do nothing if we are not using signature filters */ + if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) + return; + + adapter->fdir_overflow++; + + if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_bit(__IXGBE_TX_FDIR_INIT_DONE, + &(adapter->tx_ring[i]->state)); + /* re-enable flow director interrupts */ + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); + } else { + e_err(probe, "failed to finish FDIR re-initialization, " + "ignored adding FDIR ATR filters\n"); + } +} + +/** + * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter: pointer to the device adapter structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occurred. 
+ */ +static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 eics = 0; + int i; + + /* If we're down, removing or resetting, just bail */ + if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_REMOVING, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return; + + /* Force detection of hung controller */ + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_check_for_tx_hang(adapter->tx_ring[i]); + } + + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { + /* + * for legacy and MSI interrupts don't set any bits + * that are enabled for EIAM, because this operation + * would set *both* EIMS and EICS for any bit in EIAM + */ + IXGBE_WRITE_REG(hw, IXGBE_EICS, + (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); + } else { + /* get one bit for every active tx/rx interrupt vector */ + for (i = 0; i < adapter->num_q_vectors; i++) { + struct ixgbe_q_vector *qv = adapter->q_vector[i]; + if (qv->rx.ring || qv->tx.ring) + eics |= BIT_ULL(i); + } + } + + /* Cause software interrupt to ensure rings are cleaned */ + ixgbe_irq_rearm_queues(adapter, eics); +} + +/** + * ixgbe_watchdog_update_link - update the link status + * @adapter: pointer to the device adapter structure + * Updates adapter->link_speed and adapter->link_up from the hardware. + **/ +static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; + + if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) + return; + + if (hw->mac.ops.check_link) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + } else { + /* always assume link is up, if no check link function */ + link_speed = IXGBE_LINK_SPEED_10GB_FULL; + link_up = true; + } + + if (adapter->ixgbe_ieee_pfc) + pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); + + if 
(link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) { + hw->mac.ops.fc_enable(hw); + ixgbe_set_rx_drop_en(adapter); + } + + if (link_up || + time_after(jiffies, (adapter->link_check_timeout + + IXGBE_TRY_LINK_TIMEOUT))) { + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); + IXGBE_WRITE_FLUSH(hw); + } + + adapter->link_up = link_up; + adapter->link_speed = link_speed; +} + +static void ixgbe_update_default_up(struct ixgbe_adapter *adapter) +{ +#ifdef CONFIG_IXGBE_DCB + struct net_device *netdev = adapter->netdev; + struct dcb_app app = { + .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE, + .protocol = 0, + }; + u8 up = 0; + + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) + up = dcb_ieee_getapp_mask(netdev, &app); + + adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0; +#endif +} + +/** + * ixgbe_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter: pointer to the device adapter structure + **/ +static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *upper; + struct list_head *iter; + u32 link_speed = adapter->link_speed; + const char *speed_str; + bool flow_rx, flow_tx; + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: { + u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); + flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); + flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); + } + break; + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + case ixgbe_mac_82599EB: { + u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); + u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); + flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); + flow_tx = !!(fccfg & 
IXGBE_FCCFG_TFCE_802_3X); + } + break; + default: + flow_tx = false; + flow_rx = false; + break; + } + + adapter->last_rx_ptp_check = jiffies; + + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) + ixgbe_ptp_start_cyclecounter(adapter); + + switch (link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + speed_str = "10 Gbps"; + break; + case IXGBE_LINK_SPEED_2_5GB_FULL: + speed_str = "2.5 Gbps"; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + speed_str = "1 Gbps"; + break; + case IXGBE_LINK_SPEED_100_FULL: + speed_str = "100 Mbps"; + break; + case IXGBE_LINK_SPEED_10_FULL: + speed_str = "10 Mbps"; + break; + default: + speed_str = "unknown speed"; + break; + } + e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str, + ((flow_rx && flow_tx) ? "RX/TX" : + (flow_rx ? "RX" : + (flow_tx ? "TX" : "None")))); + + netif_carrier_on(netdev); + ixgbe_check_vf_rate_limit(adapter); + + /* enable transmits */ + netif_tx_wake_all_queues(adapter->netdev); + + /* enable any upper devices */ + rtnl_lock(); + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *vlan = netdev_priv(upper); + + if (vlan->fwd_priv) + netif_tx_wake_all_queues(upper); + } + } + rtnl_unlock(); + + /* update the default user priority for VFs */ + ixgbe_update_default_up(adapter); + + /* ping all the active vfs to let them know link has changed */ + ixgbe_ping_all_vfs(adapter); +} + +/** + * ixgbe_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter: pointer to the adapter structure + **/ +static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + + adapter->link_up = false; + adapter->link_speed = 0; + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + /* poll for SFP+ cable when link is down */ + if (ixgbe_is_sfp(hw) && hw->mac.type == 
ixgbe_mac_82598EB) + adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; + + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) + ixgbe_ptp_start_cyclecounter(adapter); + + e_info(drv, "NIC Link is Down\n"); + netif_carrier_off(netdev); + + /* ping all the active vfs to let them know link has changed */ + ixgbe_ping_all_vfs(adapter); +} + +static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->next_to_use != tx_ring->next_to_clean) + return true; + } + + return false; +} + +static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + int i, j; + + if (!adapter->num_vfs) + return false; + + /* resetting the PF is only needed for MAC before X550 */ + if (hw->mac.type >= ixgbe_mac_X550) + return false; + + for (i = 0; i < adapter->num_vfs; i++) { + for (j = 0; j < q_per_pool; j++) { + u32 h, t; + + h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j)); + t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j)); + + if (h != t) + return true; + } + } + + return false; +} + +/** + * ixgbe_watchdog_flush_tx - flush queues on link down + * @adapter: pointer to the device adapter structure + **/ +static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) +{ + if (!netif_carrier_ok(adapter->netdev)) { + if (ixgbe_ring_tx_pending(adapter) || + ixgbe_vf_tx_pending(adapter)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
+ */ + e_warn(drv, "initiating reset to clear Tx work after link loss\n"); + set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); + } + } +} + +#ifdef CONFIG_PCI_IOV +static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter, + struct pci_dev *vfdev) +{ + if (!pci_wait_for_pending_transaction(vfdev)) + e_dev_warn("Issuing VFLR with pending transactions\n"); + + e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); + pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); + + msleep(100); +} + +static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned int vf; + u32 gpc; + + if (!(netif_carrier_ok(adapter->netdev))) + return; + + gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); + if (gpc) /* If incrementing then no need for the check below */ + return; + /* Check to see if a bad DMA write target from an errant or + * malicious VF has caused a PCIe error. If so then we can + * issue a VFLR to the offending VF(s) and then resume without + * requesting a full slot reset. + */ + + if (!pdev) + return; + + /* check status reg for all VFs owned by this PF */ + for (vf = 0; vf < adapter->num_vfs; ++vf) { + struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; + u16 status_reg; + + if (!vfdev) + continue; + pci_read_config_word(vfdev, PCI_STATUS, &status_reg); + if (status_reg != IXGBE_FAILED_READ_CFG_WORD && + status_reg & PCI_STATUS_REC_MASTER_ABORT) + ixgbe_issue_vf_flr(adapter, vfdev); + } +} + +static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) +{ + u32 ssvpc; + + /* Do not perform spoof check for 82598 or if not in IOV mode */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB || + adapter->num_vfs == 0) + return; + + ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); + + /* + * ssvpc register is cleared on read, if zero then no + * spoofed packets in the last interval. 
+ */ + if (!ssvpc) + return; + + e_warn(drv, "%u Spoofed packets detected\n", ssvpc); +} +#else +static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter) +{ +} + +static void +ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter) +{ +} +#endif /* CONFIG_PCI_IOV */ + + +/** + * ixgbe_watchdog_subtask - check and bring link up + * @adapter: pointer to the device adapter structure + **/ +static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) +{ + /* if interface is down, removing or resetting, do nothing */ + if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_REMOVING, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return; + + ixgbe_watchdog_update_link(adapter); + + if (adapter->link_up) + ixgbe_watchdog_link_is_up(adapter); + else + ixgbe_watchdog_link_is_down(adapter); + + ixgbe_check_for_bad_vf(adapter); + ixgbe_spoof_check(adapter); + ixgbe_update_stats(adapter); + + ixgbe_watchdog_flush_tx(adapter); +} + +/** + * ixgbe_sfp_detection_subtask - poll for SFP+ cable + * @adapter: the ixgbe adapter structure + **/ +static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + s32 err; + + /* not searching for SFP so there is nothing to do here */ + if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && + !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) + return; + + if (adapter->sfp_poll_time && + time_after(adapter->sfp_poll_time, jiffies)) + return; /* If not yet time to poll for SFP */ + + /* someone else is in init, wait until next service event */ + if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + return; + + adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1; + + err = hw->phy.ops.identify_sfp(hw); + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto sfp_out; + + if (err == IXGBE_ERR_SFP_NOT_PRESENT) { + /* If no cable is present, then we need to reset + * the next time we find a good cable. 
*/ + adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; + } + + /* exit on error */ + if (err) + goto sfp_out; + + /* exit if reset not needed */ + if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) + goto sfp_out; + + adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET; + + /* + * A module may be identified correctly, but the EEPROM may not have + * support for that module. setup_sfp() will fail in that case, so + * we should not allow that module to load. + */ + if (hw->mac.type == ixgbe_mac_82598EB) + err = hw->phy.ops.reset(hw); + else + err = hw->mac.ops.setup_sfp(hw); + + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto sfp_out; + + adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; + e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); + +sfp_out: + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); + + if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && + (adapter->netdev->reg_state == NETREG_REGISTERED)) { + e_dev_err("failed to initialize because an unsupported " + "SFP+ module type was detected.\n"); + e_dev_err("Reload the driver after installing a " + "supported module.\n"); + unregister_netdev(adapter->netdev); + } +} + +/** + * ixgbe_sfp_link_config_subtask - set up link SFP after module install + * @adapter: the ixgbe adapter structure + **/ +static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 speed; + bool autoneg = false; + + if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) + return; + + /* someone else is in init, wait until next service event */ + if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + return; + + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; + + speed = hw->phy.autoneg_advertised; + if ((!speed) && (hw->mac.ops.get_link_capabilities)) { + hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); + + /* setup the highest link when no autoneg */ + if (!autoneg) { + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + speed = IXGBE_LINK_SPEED_10GB_FULL; + } + } + + if 
(hw->mac.ops.setup_link) + hw->mac.ops.setup_link(hw, speed, true); + + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); +} + +/** + * ixgbe_service_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void ixgbe_service_timer(unsigned long data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + unsigned long next_event_offset; + + /* poll faster when waiting for link */ + if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) + next_event_offset = HZ / 10; + else + next_event_offset = HZ * 2; + + /* Reset the timer */ + mod_timer(&adapter->service_timer, next_event_offset + jiffies); + + ixgbe_service_event_schedule(adapter); +} + +static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 status; + + if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT; + + if (!hw->phy.ops.handle_lasi) + return; + + status = hw->phy.ops.handle_lasi(&adapter->hw); + if (status != IXGBE_ERR_OVERTEMP) + return; + + e_crit(drv, "%s\n", ixgbe_overheat_msg); +} + +static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) +{ + if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state)) + return; + + /* If we're already down, removing or resetting, just bail */ + if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_REMOVING, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return; + + ixgbe_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); + adapter->tx_timeout_count++; + + rtnl_lock(); + ixgbe_reinit_locked(adapter); + rtnl_unlock(); +} + +/** + * ixgbe_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void ixgbe_service_task(struct work_struct *work) +{ + struct ixgbe_adapter *adapter = container_of(work, + 
struct ixgbe_adapter, + service_task); + if (ixgbe_removed(adapter->hw.hw_addr)) { + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + rtnl_lock(); + ixgbe_down(adapter); + rtnl_unlock(); + } + ixgbe_service_event_complete(adapter); + return; + } + if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) { + rtnl_lock(); + adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; + udp_tunnel_get_rx_info(adapter->netdev); + rtnl_unlock(); + } + ixgbe_reset_subtask(adapter); + ixgbe_phy_interrupt_subtask(adapter); + ixgbe_sfp_detection_subtask(adapter); + ixgbe_sfp_link_config_subtask(adapter); + ixgbe_check_overtemp_subtask(adapter); + ixgbe_watchdog_subtask(adapter); + ixgbe_fdir_reinit_subtask(adapter); + ixgbe_check_hang_subtask(adapter); + + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { + ixgbe_ptp_overflow_check(adapter); + ixgbe_ptp_rx_hang(adapter); + } + + ixgbe_service_event_complete(adapter); +} + +static int ixgbe_tso(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); + type_tucmd |= 
IXGBE_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first->tx_flags |= IXGBE_TX_FLAGS_TSO | + IXGBE_TX_FLAGS_CSUM | + IXGBE_TX_FLAGS_IPV4; + } else { + ip.v6->payload_len = 0; + first->tx_flags |= IXGBE_TX_FLAGS_TSO | + IXGBE_TX_FLAGS_CSUM; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* mss_l4len_id: use 0 as index for TSO */ + mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, + mss_l4len_idx); + + return 1; +} + +static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb) +{ + unsigned int offset = 0; + + ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); + + return offset == skb_checksum_start_offset(skb); +} + +static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | + IXGBE_TX_FLAGS_CC))) + return; + goto no_csum; + } + + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; + /* fall through */ + case offsetof(struct udphdr, check): + break; + case offsetof(struct 
sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (((first->protocol == htons(ETH_P_IP)) && + (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || + ((first->protocol == htons(ETH_P_IPV6)) && + ixgbe_ipv6_csum_is_sctp(skb))) { + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; + break; + } + /* fall through */ + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= IXGBE_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: + /* vlan_macip_lens: MACLEN, VLAN tag */ + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0); +} + +#define IXGBE_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? \ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_DEXT | + IXGBE_ADVTXD_DCMD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN, + IXGBE_ADVTXD_DCMD_VLE); + + /* set segmentation enable bits for TSO/FSO */ + cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO, + IXGBE_ADVTXD_DCMD_TSE); + + /* set timestamp bit if present */ + cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP, + IXGBE_ADVTXD_MAC_TSTAMP); + + /* insert frame checksum */ + cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS); + + return cmd_type; +} + +static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; + + /* enable L4 checksum for TSO and TX checksum offload */ + olinfo_status |= 
IXGBE_SET_FLAG(tx_flags, + IXGBE_TX_FLAGS_CSUM, + IXGBE_ADVTXD_POPTS_TXSM); + + /* enable IPv4 checksum for TSO */ + olinfo_status |= IXGBE_SET_FLAG(tx_flags, + IXGBE_TX_FLAGS_IPV4, + IXGBE_ADVTXD_POPTS_IXSM); + + /* + * Check Context must be set if Tx switch is enabled, which it + * always is for case where virtual functions are running + */ + olinfo_status |= IXGBE_SET_FLAG(tx_flags, + IXGBE_TX_FLAGS_CC, + IXGBE_ADVTXD_CC); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(ixgbe_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) +{ + if (likely(ixgbe_desc_unused(tx_ring) >= size)) + return 0; + + return __ixgbe_maybe_stop_tx(tx_ring, size); +} + +#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ + IXGBE_TXD_CMD_RS) + +static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct ixgbe_tx_buffer *tx_buffer; + union ixgbe_adv_tx_desc *tx_desc; + struct skb_frag_struct *frag; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags); + u16 i = tx_ring->next_to_use; + + tx_desc = IXGBE_TX_DESC(tx_ring, i); + + ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + +#ifdef IXGBE_FCOE + 
if (tx_flags & IXGBE_TX_FLAGS_FCOE) { + if (data_len < sizeof(struct fcoe_crc_eof)) { + size -= sizeof(struct fcoe_crc_eof) - data_len; + data_len = 0; + } else { + data_len -= sizeof(struct fcoe_crc_eof); + } + } + +#endif + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IXGBE_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IXGBE_MAX_DATA_PER_TXD; + size -= IXGBE_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IXGBE_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + +#ifdef IXGBE_FCOE + size = min_t(unsigned int, data_len, skb_frag_size(frag)); +#else + size = skb_frag_size(frag); +#endif + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IXGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). 
+ * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + writel(i, tx_ring->tail); + + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); + } + + return; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); + if (tx_buffer == first) + break; + if (i == 0) + i = tx_ring->count; + i--; + } + + tx_ring->next_to_use = i; +} + +static void ixgbe_atr(struct ixgbe_ring *ring, + struct ixgbe_tx_buffer *first) +{ + struct ixgbe_q_vector *q_vector = ring->q_vector; + union ixgbe_atr_hash_dword input = { .dword = 0 }; + union ixgbe_atr_hash_dword common = { .dword = 0 }; + union { + unsigned char *network; + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + struct tcphdr *th; + unsigned int hlen; + struct sk_buff *skb; + __be16 vlan_id; + int l4_proto; + + /* if ring doesn't have an interrupt vector, cannot perform ATR */ + if (!q_vector) + return; + + /* do nothing if sampling is disabled */ + if (!ring->atr_sample_rate) + return; + + ring->atr_count++; + + /* currently only IPv4/IPv6 with TCP is supported */ + if ((first->protocol != htons(ETH_P_IP)) && + (first->protocol != htons(ETH_P_IPV6))) + return; + + /* snag network header to get L4 type and address */ + skb = first->skb; + hdr.network = skb_network_header(skb); + if (skb->encapsulation && + first->protocol == htons(ETH_P_IP) && + hdr.ipv4->protocol != IPPROTO_UDP) { + 
struct ixgbe_adapter *adapter = q_vector->adapter; + + /* verify the port is recognized as VXLAN */ + if (adapter->vxlan_port && + udp_hdr(skb)->dest == adapter->vxlan_port) + hdr.network = skb_inner_network_header(skb); + + if (adapter->geneve_port && + udp_hdr(skb)->dest == adapter->geneve_port) + hdr.network = skb_inner_network_header(skb); + } + + /* Currently only IPv4/IPv6 with TCP is supported */ + switch (hdr.ipv4->version) { + case IPVERSION: + /* access ihl as u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + l4_proto = hdr.ipv4->protocol; + break; + case 6: + hlen = hdr.network - skb->data; + l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL); + hlen -= hdr.network - skb->data; + break; + default: + return; + } + + if (l4_proto != IPPROTO_TCP) + return; + + th = (struct tcphdr *)(hdr.network + hlen); + + /* skip this packet since the socket is closing */ + if (th->fin) + return; + + /* sample on all syn packets or once every atr sample count */ + if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) + return; + + /* reset sample count */ + ring->atr_count = 0; + + vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); + + /* + * src and dst are inverted, think how the receiver sees them + * + * The input is broken into two sections, a non-compressed section + * containing vm_pool, vlan_id, and flow_type. The rest of the data + * is XORed together and stored in the compressed dword. 
+ */ + input.formatted.vlan_id = vlan_id; + + /* + * since src port and flex bytes occupy the same word XOR them together + * and write the value to source port portion of compressed dword + */ + if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN)) + common.port.src ^= th->dest ^ htons(ETH_P_8021Q); + else + common.port.src ^= th->dest ^ first->protocol; + common.port.dst ^= th->source; + + switch (hdr.ipv4->version) { + case IPVERSION: + input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; + break; + case 6: + input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; + common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ + hdr.ipv6->saddr.s6_addr32[1] ^ + hdr.ipv6->saddr.s6_addr32[2] ^ + hdr.ipv6->saddr.s6_addr32[3] ^ + hdr.ipv6->daddr.s6_addr32[0] ^ + hdr.ipv6->daddr.s6_addr32[1] ^ + hdr.ipv6->daddr.s6_addr32[2] ^ + hdr.ipv6->daddr.s6_addr32[3]; + break; + default: + break; + } + + if (hdr.network != skb_network_header(skb)) + input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK; + + /* This assumes the Rx queue and Tx queue are bound to the same CPU */ + ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, + input, common, ring->queue_index); +} + +static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; +#ifdef IXGBE_FCOE + struct ixgbe_adapter *adapter; + struct ixgbe_ring_feature *f; + int txq; +#endif + + if (fwd_adapter) + return skb->queue_mapping + fwd_adapter->tx_base_queue; + +#ifdef IXGBE_FCOE + + /* + * only execute the code below if protocol is FCoE + * or FIP and we have FCoE enabled on the adapter + */ + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_FCOE): + case htons(ETH_P_FIP): + adapter = netdev_priv(dev); + + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + break; + default: + return fallback(dev, skb); + } + + f = 
&adapter->ring_feature[RING_F_FCOE]; + + txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : + smp_processor_id(); + + while (txq >= f->indices) + txq -= f->indices; + + return txq + f->offset; +#else + return fallback(dev, skb); +#endif +} + +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, + struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) +{ + struct ixgbe_tx_buffer *first; + int tso; + u32 tx_flags = 0; + unsigned short f; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = skb->protocol; + u8 hdr_len = 0; + + /* + * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + + if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + /* if we have a HW VLAN tag being added default to the HW one */ + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; + /* else if it is a SW VLAN check the next protocol and store the tag */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + tx_flags |= ntohs(vhdr->h_vlan_TCI) << + IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; + } + protocol = vlan_get_protocol(skb); + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + adapter->ptp_clock && + !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, + &adapter->state)) { + 
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IXGBE_TX_FLAGS_TSTAMP; + + /* schedule check for Tx timestamp */ + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + schedule_work(&adapter->ptp_tx_work); + } + + skb_tx_timestamp(skb); + +#ifdef CONFIG_PCI_IOV + /* + * Use the l2switch_enable flag - would be false if the DMA + * Tx switch had been disabled. + */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + tx_flags |= IXGBE_TX_FLAGS_CC; + +#endif + /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */ + if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && + ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) || + (skb->priority != TC_PRIO_CONTROL))) { + tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; + tx_flags |= (skb->priority & 0x7) << + IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; + if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { + struct vlan_ethhdr *vhdr; + + if (skb_cow_head(skb, 0)) + goto out_drop; + vhdr = (struct vlan_ethhdr *)skb->data; + vhdr->h_vlan_TCI = htons(tx_flags >> + IXGBE_TX_FLAGS_VLAN_SHIFT); + } else { + tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; + } + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + +#ifdef IXGBE_FCOE + /* setup tx offload for FCoE */ + if ((protocol == htons(ETH_P_FCOE)) && + (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { + tso = ixgbe_fso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + + goto xmit_fcoe; + } + +#endif /* IXGBE_FCOE */ + tso = ixgbe_tso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + ixgbe_tx_csum(tx_ring, first); + + /* add the ATR filter if ATR is on */ + if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) + ixgbe_atr(tx_ring, first); + +#ifdef IXGBE_FCOE +xmit_fcoe: +#endif /* IXGBE_FCOE */ + ixgbe_tx_map(tx_ring, first, hdr_len); + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + return NETDEV_TX_OK; +} + 
+static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev, + struct ixgbe_ring *ring) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_ring *tx_ring; + + /* + * The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping]; + + return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); +} + +static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + return __ixgbe_xmit_frame(skb, netdev, NULL); +} + +/** + * ixgbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_set_mac(struct net_device *netdev, void *p) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + ixgbe_mac_set_default_filter(adapter); + + return 0; +} + +static int +ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u16 value; + int rc; + + if (prtad != hw->phy.mdio.prtad) + return -EINVAL; + rc = hw->phy.ops.read_reg(hw, addr, devad, &value); + if (!rc) + rc = value; + return rc; +} + +static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, + u16 addr, u16 value) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (prtad != hw->phy.mdio.prtad) + return -EINVAL; + return hw->phy.ops.write_reg(hw, addr, devad, value); +} + +static int 
ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + switch (cmd) { + case SIOCSHWTSTAMP: + return ixgbe_ptp_set_ts_config(adapter, req); + case SIOCGHWTSTAMP: + return ixgbe_ptp_get_ts_config(adapter, req); + case SIOCGMIIPHY: + if (!adapter->hw.phy.ops.read_reg) + return -EOPNOTSUPP; + /* fall through */ + default: + return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); + } +} + +/** + * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding + * netdev->dev_addrs + * @netdev: network interface device structure + * + * Returns non-zero on failure + **/ +static int ixgbe_add_sanmac_netdev(struct net_device *dev) +{ + int err = 0; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + + if (is_valid_ether_addr(hw->mac.san_addr)) { + rtnl_lock(); + err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + + /* update SAN MAC vmdq pool selection */ + hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); + } + return err; +} + +/** + * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding + * netdev->dev_addrs + * @netdev: network interface device structure + * + * Returns non-zero on failure + **/ +static int ixgbe_del_sanmac_netdev(struct net_device *dev) +{ + int err = 0; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_mac_info *mac = &adapter->hw.mac; + + if (is_valid_ether_addr(mac->san_addr)) { + rtnl_lock(); + err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + } + return err; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
+ */ +static void ixgbe_netpoll(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; + + /* loop through and schedule all active queues */ + for (i = 0; i < adapter->num_q_vectors; i++) + ixgbe_msix_clean_rings(0, adapter->q_vector[i]); +} + +#endif +static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + rcu_read_unlock(); + /* following stats updated by ixgbe_watchdog_task() */ + stats->multicast = netdev->stats.multicast; + stats->rx_errors = netdev->stats.rx_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; + return stats; +} + +#ifdef CONFIG_IXGBE_DCB +/** + * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. 
+ * @adapter: pointer to ixgbe_adapter + * @tc: number of traffic classes currently enabled + * + * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm + * 802.1Q priority maps to a packet buffer that exists. + */ +static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 reg, rsave; + int i; + + /* 82598 have a static priority to TC mapping that can not + * be changed so no validation is needed. + */ + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); + rsave = reg; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT); + + /* If up2tc is out of bounds default to zero */ + if (up2tc > tc) + reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT); + } + + if (reg != rsave) + IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); + + return; +} + +/** + * ixgbe_set_prio_tc_map - Configure netdev prio tc map + * @adapter: Pointer to adapter struct + * + * Populate the netdev user priority to tc map + */ +static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; + struct ieee_ets *ets = adapter->ixgbe_ieee_ets; + u8 prio; + + for (prio = 0; prio < MAX_USER_PRIORITY; prio++) { + u8 tc = 0; + + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) + tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio); + else if (ets) + tc = ets->prio_tc[prio]; + + netdev_set_prio_tc_map(dev, prio, tc); + } +} + +#endif /* CONFIG_IXGBE_DCB */ +/** + * ixgbe_setup_tc - configure net_device for multiple traffic classes + * + * @netdev: net device to configure + * @tc: number of traffic classes to enable + */ +int ixgbe_setup_tc(struct net_device *dev, u8 tc) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + bool pools; + + /* Hardware supports up to 8 traffic classes */ + if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) + return 
-EINVAL; + + if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS) + return -EINVAL; + + pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); + if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS) + return -EBUSY; + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. + */ + if (netif_running(dev)) + ixgbe_close(dev); + else + ixgbe_reset(adapter); + + ixgbe_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_IXGBE_DCB + if (tc) { + netdev_set_num_tc(dev, tc); + ixgbe_set_prio_tc_map(adapter); + + adapter->flags |= IXGBE_FLAG_DCB_ENABLED; + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + adapter->last_lfc_mode = adapter->hw.fc.requested_mode; + adapter->hw.fc.requested_mode = ixgbe_fc_none; + } + } else { + netdev_reset_tc(dev); + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + adapter->hw.fc.requested_mode = adapter->last_lfc_mode; + + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; + + adapter->temp_dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.pfc_mode_enable = false; + } + + ixgbe_validate_rtr(adapter, tc); + +#endif /* CONFIG_IXGBE_DCB */ + ixgbe_init_interrupt_scheme(adapter); + + if (netif_running(dev)) + return ixgbe_open(dev); + + return 0; +} + +static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, + struct tc_cls_u32_offload *cls) +{ + u32 hdl = cls->knode.handle; + u32 uhtid = TC_U32_USERHTID(cls->knode.handle); + u32 loc = cls->knode.handle & 0xfffff; + int err = 0, i, j; + struct ixgbe_jump_table *jump = NULL; + + if (loc > IXGBE_MAX_HW_ENTRIES) + return -EINVAL; + + if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE)) + return -EINVAL; + + /* Clear this filter in the link data it is associated with */ + if (uhtid != 0x800) { + jump = adapter->jump_tables[uhtid]; + if (!jump) + return -EINVAL; + if (!test_bit(loc - 1, jump->child_loc_map)) + return -EINVAL; + clear_bit(loc - 
1, jump->child_loc_map); + } + + /* Check if the filter being deleted is a link */ + for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { + jump = adapter->jump_tables[i]; + if (jump && jump->link_hdl == hdl) { + /* Delete filters in the hardware in the child hash + * table associated with this link + */ + for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) { + if (!test_bit(j, jump->child_loc_map)) + continue; + spin_lock(&adapter->fdir_perfect_lock); + err = ixgbe_update_ethtool_fdir_entry(adapter, + NULL, + j + 1); + spin_unlock(&adapter->fdir_perfect_lock); + clear_bit(j, jump->child_loc_map); + } + /* Remove resources for this link */ + kfree(jump->input); + kfree(jump->mask); + kfree(jump); + adapter->jump_tables[i] = NULL; + return err; + } + } + + spin_lock(&adapter->fdir_perfect_lock); + err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc); + spin_unlock(&adapter->fdir_perfect_lock); + return err; +} + +static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, + __be16 protocol, + struct tc_cls_u32_offload *cls) +{ + u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); + + if (uhtid >= IXGBE_MAX_LINK_HANDLE) + return -EINVAL; + + /* This ixgbe devices do not support hash tables at the moment + * so abort when given hash tables. 
+ */ + if (cls->hnode.divisor > 0) + return -EINVAL; + + set_bit(uhtid - 1, &adapter->tables); + return 0; +} + +static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, + struct tc_cls_u32_offload *cls) +{ + u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); + + if (uhtid >= IXGBE_MAX_LINK_HANDLE) + return -EINVAL; + + clear_bit(uhtid - 1, &adapter->tables); + return 0; +} + +#ifdef CONFIG_NET_CLS_ACT +static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, + u8 *queue, u64 *action) +{ + unsigned int num_vfs = adapter->num_vfs, vf; + struct net_device *upper; + struct list_head *iter; + + /* redirect to a SRIOV VF */ + for (vf = 0; vf < num_vfs; ++vf) { + upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev); + if (upper->ifindex == ifindex) { + if (adapter->num_rx_pools > 1) + *queue = vf * 2; + else + *queue = vf * adapter->num_rx_queues_per_pool; + + *action = vf + 1; + *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + return 0; + } + } + + /* redirect to a offloaded macvlan netdev */ + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *dfwd = netdev_priv(upper); + struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; + + if (vadapter && vadapter->netdev->ifindex == ifindex) { + *queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; + *action = *queue; + return 0; + } + } + } + + return -EINVAL; +} + +static int parse_tc_actions(struct ixgbe_adapter *adapter, + struct tcf_exts *exts, u64 *action, u8 *queue) +{ + const struct tc_action *a; + LIST_HEAD(actions); + int err; + + if (tc_no_actions(exts)) + return -EINVAL; + + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { + + /* Drop action */ + if (is_tcf_gact_shot(a)) { + *action = IXGBE_FDIR_DROP_QUEUE; + *queue = IXGBE_FDIR_DROP_QUEUE; + return 0; + } + + /* Redirect to a VF or a offloaded macvlan */ + if (is_tcf_mirred_redirect(a)) { + int ifindex = tcf_mirred_ifindex(a); + + 
err = handle_redirect_action(adapter, ifindex, queue, + action); + if (err == 0) + return err; + } + } + + return -EINVAL; +} +#else +static int parse_tc_actions(struct ixgbe_adapter *adapter, + struct tcf_exts *exts, u64 *action, u8 *queue) +{ + return -EINVAL; +} +#endif /* CONFIG_NET_CLS_ACT */ + +static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + struct tc_cls_u32_offload *cls, + struct ixgbe_mat_field *field_ptr, + struct ixgbe_nexthdr *nexthdr) +{ + int i, j, off; + __be32 val, m; + bool found_entry = false, found_jump_field = false; + + for (i = 0; i < cls->knode.sel->nkeys; i++) { + off = cls->knode.sel->keys[i].off; + val = cls->knode.sel->keys[i].val; + m = cls->knode.sel->keys[i].mask; + + for (j = 0; field_ptr[j].val; j++) { + if (field_ptr[j].off == off) { + field_ptr[j].val(input, mask, val, m); + input->filter.formatted.flow_type |= + field_ptr[j].type; + found_entry = true; + break; + } + } + if (nexthdr) { + if (nexthdr->off == cls->knode.sel->keys[i].off && + nexthdr->val == cls->knode.sel->keys[i].val && + nexthdr->mask == cls->knode.sel->keys[i].mask) + found_jump_field = true; + else + continue; + } + } + + if (nexthdr && !found_jump_field) + return -EINVAL; + + if (!found_entry) + return 0; + + mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) + mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; + + return 0; +} + +static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, + __be16 protocol, + struct tc_cls_u32_offload *cls) +{ + u32 loc = cls->knode.handle & 0xfffff; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_mat_field *field_ptr; + struct ixgbe_fdir_filter *input = NULL; + union ixgbe_atr_input *mask = NULL; + struct ixgbe_jump_table *jump = NULL; + int i, err = -EINVAL; + u8 queue; + u32 uhtid, link_uhtid; + + uhtid = TC_U32_USERHTID(cls->knode.handle); + link_uhtid = 
TC_U32_USERHTID(cls->knode.link_handle); + + /* At the moment cls_u32 jumps to network layer and skips past + * L2 headers. The canonical method to match L2 frames is to use + * negative values. However this is error prone at best but really + * just broken because there is no way to "know" what sort of hdr + * is in front of the network layer. Fix cls_u32 to support L2 + * headers when needed. + */ + if (protocol != htons(ETH_P_IP)) + return err; + + if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) { + e_err(drv, "Location out of range\n"); + return err; + } + + /* cls u32 is a graph starting at root node 0x800. The driver tracks + * links and also the fields used to advance the parser across each + * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map + * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h + * To add support for new nodes update ixgbe_model.h parse structures + * this function _should_ be generic try not to hardcode values here. + */ + if (uhtid == 0x800) { + field_ptr = (adapter->jump_tables[0])->mat; + } else { + if (uhtid >= IXGBE_MAX_LINK_HANDLE) + return err; + if (!adapter->jump_tables[uhtid]) + return err; + field_ptr = (adapter->jump_tables[uhtid])->mat; + } + + if (!field_ptr) + return err; + + /* At this point we know the field_ptr is valid and need to either + * build cls_u32 link or attach filter. Because adding a link to + * a handle that does not exist is invalid and the same for adding + * rules to handles that don't exist. + */ + + if (link_uhtid) { + struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; + + if (link_uhtid >= IXGBE_MAX_LINK_HANDLE) + return err; + + if (!test_bit(link_uhtid - 1, &adapter->tables)) + return err; + + /* Multiple filters as links to the same hash table are not + * supported. To add a new filter with the same next header + * but different match/jump conditions, create a new hash table + * and link to it. 
+ */ + if (adapter->jump_tables[link_uhtid] && + (adapter->jump_tables[link_uhtid])->link_hdl) { + e_err(drv, "Link filter exists for link: %x\n", + link_uhtid); + return err; + } + + for (i = 0; nexthdr[i].jump; i++) { + if (nexthdr[i].o != cls->knode.sel->offoff || + nexthdr[i].s != cls->knode.sel->offshift || + nexthdr[i].m != cls->knode.sel->offmask) + return err; + + jump = kzalloc(sizeof(*jump), GFP_KERNEL); + if (!jump) + return -ENOMEM; + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) { + err = -ENOMEM; + goto free_jump; + } + mask = kzalloc(sizeof(*mask), GFP_KERNEL); + if (!mask) { + err = -ENOMEM; + goto free_input; + } + jump->input = input; + jump->mask = mask; + jump->link_hdl = cls->knode.handle; + + err = ixgbe_clsu32_build_input(input, mask, cls, + field_ptr, &nexthdr[i]); + if (!err) { + jump->mat = nexthdr[i].jump; + adapter->jump_tables[link_uhtid] = jump; + break; + } + } + return 0; + } + + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + mask = kzalloc(sizeof(*mask), GFP_KERNEL); + if (!mask) { + err = -ENOMEM; + goto free_input; + } + + if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) { + if ((adapter->jump_tables[uhtid])->input) + memcpy(input, (adapter->jump_tables[uhtid])->input, + sizeof(*input)); + if ((adapter->jump_tables[uhtid])->mask) + memcpy(mask, (adapter->jump_tables[uhtid])->mask, + sizeof(*mask)); + + /* Lookup in all child hash tables if this location is already + * filled with a filter + */ + for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { + struct ixgbe_jump_table *link = adapter->jump_tables[i]; + + if (link && (test_bit(loc - 1, link->child_loc_map))) { + e_err(drv, "Filter exists in location: %x\n", + loc); + err = -EINVAL; + goto err_out; + } + } + } + err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL); + if (err) + goto err_out; + + err = parse_tc_actions(adapter, cls->knode.exts, &input->action, + &queue); + if (err < 0) + goto err_out; + + input->sw_idx = 
loc; + + spin_lock(&adapter->fdir_perfect_lock); + + if (hlist_empty(&adapter->fdir_filter_list)) { + memcpy(&adapter->fdir_mask, mask, sizeof(*mask)); + err = ixgbe_fdir_set_input_mask_82599(hw, mask); + if (err) + goto err_out_w_lock; + } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) { + err = -EINVAL; + goto err_out_w_lock; + } + + ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask); + err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, + input->sw_idx, queue); + if (!err) + ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + spin_unlock(&adapter->fdir_perfect_lock); + + if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) + set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map); + + kfree(mask); + return err; +err_out_w_lock: + spin_unlock(&adapter->fdir_perfect_lock); +err_out: + kfree(mask); +free_input: + kfree(input); +free_jump: + kfree(jump); + return err; +} + +static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && + tc->type == TC_SETUP_CLSU32) { + switch (tc->cls_u32->command) { + case TC_CLSU32_NEW_KNODE: + case TC_CLSU32_REPLACE_KNODE: + return ixgbe_configure_clsu32(adapter, + proto, tc->cls_u32); + case TC_CLSU32_DELETE_KNODE: + return ixgbe_delete_clsu32(adapter, tc->cls_u32); + case TC_CLSU32_NEW_HNODE: + case TC_CLSU32_REPLACE_HNODE: + return ixgbe_configure_clsu32_add_hnode(adapter, proto, + tc->cls_u32); + case TC_CLSU32_DELETE_HNODE: + return ixgbe_configure_clsu32_del_hnode(adapter, + tc->cls_u32); + default: + return -EINVAL; + } + } + + if (tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + + return ixgbe_setup_tc(dev, tc->tc); +} + +#ifdef CONFIG_PCI_IOV +void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev)); + 
rtnl_unlock(); +} + +#endif +void ixgbe_do_reset(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + else + ixgbe_reset(adapter); +} + +static netdev_features_t ixgbe_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + /* Turn off LRO if not RSC capable */ + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) + features &= ~NETIF_F_LRO; + + return features; +} + +static int ixgbe_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = netdev->features ^ features; + bool need_reset = false; + + /* Make sure RSC matches LRO, reset if change */ + if (!(features & NETIF_F_LRO)) { + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + need_reset = true; + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && + !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { + if (adapter->rx_itr_setting == 1 || + adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + need_reset = true; + } else if ((changed ^ features) & NETIF_F_LRO) { + e_info(probe, "rx-usecs set too low, " + "disabling RSC\n"); + } + } + + /* + * Check if Flow Director n-tuple support or hw_tc support was + * enabled or disabled. If the state changed, we need to reset. 
+ */ + if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) { + /* turn off ATR, enable perfect filters and reset */ + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + need_reset = true; + + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + } else { + /* turn off perfect filters, enable ATR and reset */ + if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) + need_reset = true; + + adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + + /* We cannot enable ATR if SR-IOV is enabled */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED || + /* We cannot enable ATR if we have 2 or more tcs */ + (netdev_get_num_tc(netdev) > 1) || + /* We cannot enable ATR if RSS is disabled */ + (adapter->ring_feature[RING_F_RSS].limit <= 1) || + /* A sample rate of 0 indicates ATR disabled */ + (!adapter->atr_sample_rate)) + ; /* do nothing not supported */ + else /* otherwise supported and set the flag */ + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + } + + if (changed & NETIF_F_RXALL) + need_reset = true; + + netdev->features = features; + + if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { + if (features & NETIF_F_RXCSUM) { + adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; + } else { + u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; + + ixgbe_clear_udp_tunnel_port(adapter, port_mask); + } + } + + if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) { + if (features & NETIF_F_RXCSUM) { + adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; + } else { + u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; + + ixgbe_clear_udp_tunnel_port(adapter, port_mask); + } + } + + if (need_reset) + ixgbe_do_reset(netdev); + else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER)) + ixgbe_set_rx_mode(netdev); + + return 0; +} + +/** + * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports + * @dev: The port's netdev + * @ti: Tunnel endpoint information + 
**/ +static void ixgbe_add_udp_tunnel_port(struct net_device *dev, + struct udp_tunnel_info *ti) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + __be16 port = ti->port; + u32 port_shift = 0; + u32 reg; + + if (ti->sa_family != AF_INET) + return; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port == port) + return; + + if (adapter->vxlan_port) { + netdev_info(dev, + "VXLAN port %d set, not adding port %d\n", + ntohs(adapter->vxlan_port), + ntohs(port)); + return; + } + + adapter->vxlan_port = port; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) + return; + + if (adapter->geneve_port == port) + return; + + if (adapter->geneve_port) { + netdev_info(dev, + "GENEVE port %d set, not adding port %d\n", + ntohs(adapter->geneve_port), + ntohs(port)); + return; + } + + port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT; + adapter->geneve_port = port; + break; + default: + return; + } + + reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift; + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg); +} + +/** + * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports + * @dev: The port's netdev + * @ti: Tunnel endpoint information + **/ +static void ixgbe_del_udp_tunnel_port(struct net_device *dev, + struct udp_tunnel_info *ti) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + u32 port_mask; + + if (ti->type != UDP_TUNNEL_TYPE_VXLAN && + ti->type != UDP_TUNNEL_TYPE_GENEVE) + return; + + if (ti->sa_family != AF_INET) + return; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port != ti->port) { + netdev_info(dev, "VXLAN port %d not found\n", + ntohs(ti->port)); + return; + } + + port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; + break; + case 
UDP_TUNNEL_TYPE_GENEVE: + if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) + return; + + if (adapter->geneve_port != ti->port) { + netdev_info(dev, "GENEVE port %d not found\n", + ntohs(ti->port)); + return; + } + + port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; + break; + default: + return; + } + + ixgbe_clear_udp_tunnel_port(adapter, port_mask); + adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; +} + +static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + u16 flags) +{ + /* guarantee we can provide a unique filter for the unicast address */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + struct ixgbe_adapter *adapter = netdev_priv(dev); + u16 pool = VMDQ_P(0); + + if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool)) + return -ENOMEM; + } + + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +} + +/** + * ixgbe_configure_bridge_mode - set various bridge modes + * @adapter - the private structure + * @mode - requested bridge mode + * + * Configure some settings require for various bridge modes. + **/ +static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter, + __u16 mode) +{ + struct ixgbe_hw *hw = &adapter->hw; + unsigned int p, num_pools; + u32 vmdctl; + + switch (mode) { + case BRIDGE_MODE_VEPA: + /* disable Tx loopback, rely on switch hairpin mode */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0); + + /* must enable Rx switching replication to allow multicast + * packet reception on all VFs, and to enable source address + * pruning. + */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + vmdctl |= IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); + + /* enable Rx source address pruning. Note, this requires + * replication to be enabled or else it does nothing. 
+ */ + num_pools = adapter->num_vfs + adapter->num_rx_pools; + for (p = 0; p < num_pools; p++) { + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, + true, + p); + } + break; + case BRIDGE_MODE_VEB: + /* enable Tx loopback for internal VF/PF communication */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, + IXGBE_PFDTXGSWC_VT_LBEN); + + /* disable Rx switching replication unless we have SR-IOV + * virtual functions + */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + if (!adapter->num_vfs) + vmdctl &= ~IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); + + /* disable Rx source address pruning, since we don't expect to + * be receiving external loopback of our transmitted frames. + */ + num_pools = adapter->num_vfs + adapter->num_rx_pools; + for (p = 0; p < num_pools; p++) { + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, + false, + p); + } + break; + default: + return -EINVAL; + } + + adapter->bridge_mode = mode; + + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); + + return 0; +} + +static int ixgbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, u16 flags) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct nlattr *attr, *br_spec; + int rem; + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; + + nla_for_each_nested(attr, br_spec, rem) { + int status; + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + if (nla_len(attr) < sizeof(mode)) + return -EINVAL; + + mode = nla_get_u16(attr); + status = ixgbe_configure_bridge_mode(adapter, mode); + if (status) + return status; + + break; + } + + return 0; +} + +static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 filter_mask, int nlflags) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return 0; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, + adapter->bridge_mode, 0, 0, nlflags, + filter_mask, NULL); +} + +static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) +{ + struct ixgbe_fwd_adapter *fwd_adapter = NULL; + struct ixgbe_adapter *adapter = netdev_priv(pdev); + int used_pools = adapter->num_vfs + adapter->num_rx_pools; + unsigned int limit; + int pool, err; + + /* Hardware has a limited number of available pools. Each VF, and the + * PF require a pool. Check to ensure we don't attempt to use more + * then the available number of pools. 
+ */ + if (used_pools >= IXGBE_MAX_VF_FUNCTIONS) + return ERR_PTR(-EINVAL); + +#ifdef CONFIG_RPS + if (vdev->num_rx_queues != vdev->num_tx_queues) { + netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n", + vdev->name); + return ERR_PTR(-EINVAL); + } +#endif + /* Check for hardware restriction on number of rx/tx queues */ + if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES || + vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) { + netdev_info(pdev, + "%s: Supports RX/TX Queue counts 1,2, and 4\n", + pdev->name); + return ERR_PTR(-EINVAL); + } + + if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && + adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) || + (adapter->num_rx_pools > IXGBE_MAX_MACVLANS)) + return ERR_PTR(-EBUSY); + + fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL); + if (!fwd_adapter) + return ERR_PTR(-ENOMEM); + + pool = find_first_zero_bit(&adapter->fwd_bitmask, 32); + adapter->num_rx_pools++; + set_bit(pool, &adapter->fwd_bitmask); + limit = find_last_bit(&adapter->fwd_bitmask, 32); + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED; + adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; + adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues; + + /* Force reinit of ring allocation with VMDQ enabled */ + err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); + if (err) + goto fwd_add_err; + fwd_adapter->pool = pool; + fwd_adapter->real_adapter = adapter; + + if (netif_running(pdev)) { + err = ixgbe_fwd_ring_up(vdev, fwd_adapter); + if (err) + goto fwd_add_err; + netif_tx_start_all_queues(vdev); + } + + return fwd_adapter; +fwd_add_err: + /* unwind counter and free adapter struct */ + netdev_info(pdev, + "%s: dfwd hardware acceleration failed\n", vdev->name); + clear_bit(pool, &adapter->fwd_bitmask); + adapter->num_rx_pools--; + kfree(fwd_adapter); + return ERR_PTR(err); +} + +static void ixgbe_fwd_del(struct net_device *pdev, void *priv) +{ + 
struct ixgbe_fwd_adapter *fwd_adapter = priv; + struct ixgbe_adapter *adapter = fwd_adapter->real_adapter; + unsigned int limit; + + clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask); + adapter->num_rx_pools--; + + limit = find_last_bit(&adapter->fwd_bitmask, 32); + adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; + ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter); + ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); + netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", + fwd_adapter->pool, adapter->num_rx_pools, + fwd_adapter->rx_base_queue, + fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool, + adapter->fwd_bitmask); + kfree(fwd_adapter); +} + +#define IXGBE_MAX_MAC_HDR_LEN 127 +#define IXGBE_MAX_NETWORK_HDR_LEN 511 + +static netdev_features_t +ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. 
+ */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + +static const struct net_device_ops ixgbe_netdev_ops = { + .ndo_open = ixgbe_open, + .ndo_stop = ixgbe_close, + .ndo_start_xmit = ixgbe_xmit_frame, + .ndo_select_queue = ixgbe_select_queue, + .ndo_set_rx_mode = ixgbe_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ixgbe_set_mac, + .ndo_change_mtu = ixgbe_change_mtu, + .ndo_tx_timeout = ixgbe_tx_timeout, + .ndo_set_tx_maxrate = ixgbe_tx_maxrate, + .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, + .ndo_do_ioctl = ixgbe_ioctl, + .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, + .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, + .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, + .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, + .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, + .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, + .ndo_get_vf_config = ixgbe_ndo_get_vf_config, + .ndo_get_stats64 = ixgbe_get_stats64, + .ndo_setup_tc = __ixgbe_setup_tc, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ixgbe_netpoll, +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = ixgbe_low_latency_recv, +#endif +#ifdef IXGBE_FCOE + .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, + .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, + .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, + .ndo_fcoe_enable = ixgbe_fcoe_enable, + .ndo_fcoe_disable = ixgbe_fcoe_disable, + .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, + .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo, +#endif /* IXGBE_FCOE */ + .ndo_set_features = ixgbe_set_features, + .ndo_fix_features = ixgbe_fix_features, + .ndo_fdb_add = ixgbe_ndo_fdb_add, + .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, + .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, + .ndo_dfwd_add_station = ixgbe_fwd_add, + .ndo_dfwd_del_station = ixgbe_fwd_del, + .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, + .ndo_udp_tunnel_del = 
ixgbe_del_udp_tunnel_port, + .ndo_features_check = ixgbe_features_check, +}; + +/** + * ixgbe_enumerate_functions - Get the number of ports this device has + * @adapter: adapter structure + * + * This function enumerates the physical functions co-located on a single slot, + * in order to determine how many ports a device has. This is most useful in + * determining the required GT/s of PCIe bandwidth necessary for optimal + * performance. + **/ +static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) +{ + struct pci_dev *entry, *pdev = adapter->pdev; + int physfns = 0; + + /* Some cards can not use the generic count PCIe functions method, + * because they are behind a parent switch, so we hardcode these with + * the correct number of functions. + */ + if (ixgbe_pcie_from_parent(&adapter->hw)) + physfns = 4; + + list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) { + /* don't count virtual functions */ + if (entry->is_virtfn) + continue; + + /* When the devices on the bus don't all match our device ID, + * we can't reliably determine the correct number of + * functions. This can occur if a function has been direct + * attached to a virtual machine using VT-d, for example. In + * this case, simply return -1 to indicate this. 
+ */ + if ((entry->vendor != pdev->vendor) || + (entry->device != pdev->device)) + return -1; + + physfns++; + } + + return physfns; +} + +/** + * ixgbe_wol_supported - Check whether device supports WoL + * @adapter: the adapter private structure + * @device_id: the device ID + * @subdev_id: the subsystem device ID + * + * This function is used by probe and ethtool to determine + * which devices have WoL support + * + **/ +bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + u16 subdevice_id) +{ + struct ixgbe_hw *hw = &adapter->hw; + u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; + + /* WOL not supported on 82598 */ + if (hw->mac.type == ixgbe_mac_82598EB) + return false; + + /* check eeprom to see if WOL is enabled for X540 and newer */ + if (hw->mac.type >= ixgbe_mac_X540) { + if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || + ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && + (hw->bus.func == 0))) + return true; + } + + /* WOL is determined based on device IDs for 82599 MACs */ + switch (device_id) { + case IXGBE_DEV_ID_82599_SFP: + /* Only these subdevices could supports WOL */ + switch (subdevice_id) { + case IXGBE_SUBDEV_ID_82599_560FLR: + case IXGBE_SUBDEV_ID_82599_LOM_SNAP6: + case IXGBE_SUBDEV_ID_82599_SFP_WOL0: + case IXGBE_SUBDEV_ID_82599_SFP_2OCP: + /* only support first port */ + if (hw->bus.func != 0) + break; + case IXGBE_SUBDEV_ID_82599_SP_560FLR: + case IXGBE_SUBDEV_ID_82599_SFP: + case IXGBE_SUBDEV_ID_82599_RNDC: + case IXGBE_SUBDEV_ID_82599_ECNA_DP: + case IXGBE_SUBDEV_ID_82599_SFP_1OCP: + case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1: + case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2: + return true; + } + break; + case IXGBE_DEV_ID_82599EN_SFP: + /* Only these subdevices support WOL */ + switch (subdevice_id) { + case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1: + return true; + } + break; + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + /* All except this subdevice support WOL */ + if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) + 
return true; + break; + case IXGBE_DEV_ID_82599_KX4: + return true; + default: + break; + } + + return false; +} + +/** + * ixgbe_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ixgbe_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ixgbe_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct ixgbe_adapter *adapter = NULL; + struct ixgbe_hw *hw; + const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; + int i, err, pci_using_dac, expected_gts; + unsigned int indices = MAX_TX_QUEUES; + u8 part_str[IXGBE_PBANUM_LENGTH]; + bool disable_dev = false; +#ifdef IXGBE_FCOE + u16 device_caps; +#endif + u32 eec; + + /* Catch broken hardware that put the wrong VF device ID in + * the PCIe SR-IOV capability. 
+ */ + if (pdev->is_virtfn) { + WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", + pci_name(pdev), pdev->vendor, pdev->device); + return -EINVAL; + } + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + pci_using_dac = 0; + } + + err = pci_request_mem_regions(pdev, ixgbe_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + pci_save_state(pdev); + + if (ii->mac == ixgbe_mac_82598EB) { +#ifdef CONFIG_IXGBE_DCB + /* 8 TC w/ 4 queues per TC */ + indices = 4 * MAX_TRAFFIC_CLASS; +#else + indices = IXGBE_MAX_RSS_INDICES; +#endif + } + + netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + adapter->io_addr = hw->hw_addr; + if (!hw->hw_addr) { + err = -EIO; + goto err_ioremap; + } + + netdev->netdev_ops = &ixgbe_netdev_ops; + ixgbe_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); + + /* Setup hw api */ + hw->mac.ops = *ii->mac_ops; + hw->mac.type = ii->mac; + hw->mvals = ii->mvals; + if (ii->link_ops) + hw->link.ops = *ii->link_ops; + + /* EEPROM */ + hw->eeprom.ops = *ii->eeprom_ops; + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + if (ixgbe_removed(hw->hw_addr)) { + err = -EIO; + 
goto err_ioremap; + } + /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ + if (!(eec & BIT(8))) + hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; + + /* PHY */ + hw->phy.ops = *ii->phy_ops; + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + /* ixgbe_identify_phy_generic will set prtad and mmds properly */ + hw->phy.mdio.prtad = MDIO_PRTAD_NONE; + hw->phy.mdio.mmds = 0; + hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; + hw->phy.mdio.dev = netdev; + hw->phy.mdio.mdio_read = ixgbe_mdio_read; + hw->phy.mdio.mdio_write = ixgbe_mdio_write; + + ii->get_invariants(hw); + + /* setup the private structure */ + err = ixgbe_sw_init(adapter); + if (err) + goto err_sw_init; + + /* Make sure the SWFW semaphore is in a valid state */ + if (hw->mac.ops.init_swfw_sync) + hw->mac.ops.init_swfw_sync(hw); + + /* Make it possible the adapter to be woken up via WOL */ + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + break; + default: + break; + } + + /* + * If there is a fan on this device and it has failed log the + * failure. 
+ */ + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + e_crit(probe, "Fan has stopped, replace the adapter\n"); + } + + if (allow_unsupported_sfp) + hw->allow_unsupported_sfp = allow_unsupported_sfp; + + /* reset_hw fills in the perm_addr as well */ + hw->phy.reset_if_overtemp = true; + err = hw->mac.ops.reset_hw(hw); + hw->phy.reset_if_overtemp = false; + if (err == IXGBE_ERR_SFP_NOT_PRESENT) { + err = 0; + } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { + e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n"); + e_dev_err("Reload the driver after installing a supported module.\n"); + goto err_sw_init; + } else if (err) { + e_dev_err("HW Init failed: %d\n", err); + goto err_sw_init; + } + +#ifdef CONFIG_PCI_IOV + /* SR-IOV not supported on the 82598 */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + goto skip_sriov; + /* Mailbox */ + ixgbe_init_mbx_params_pf(hw); + hw->mbx.ops = ii->mbx_ops; + pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT); + ixgbe_enable_sriov(adapter); +skip_sriov: + +#endif + netdev->features = NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM; + +#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | + IXGBE_GSO_PARTIAL_FEATURES; + + if (hw->mac.type >= ixgbe_mac_82599EB) + netdev->features |= NETIF_F_SCTP_CRC; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_RXALL | + NETIF_F_HW_L2FW_DOFFLOAD; + + if (hw->mac.type >= ixgbe_mac_82599EB) + netdev->hw_features |= 
NETIF_F_NTUPLE | + NETIF_F_HW_TC; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->hw_enc_features |= netdev->vlan_features; + netdev->mpls_features |= NETIF_F_HW_CSUM; + + /* set this bit last since it cannot be part of vlan_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + +#ifdef CONFIG_IXGBE_DCB + if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) + netdev->dcbnl_ops = &dcbnl_ops; +#endif + +#ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { + unsigned int fcoe_l; + + if (hw->mac.ops.get_device_caps) { + hw->mac.ops.get_device_caps(hw, &device_caps); + if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; + } + + + fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus()); + adapter->ring_feature[RING_F_FCOE].limit = fcoe_l; + + netdev->features |= NETIF_F_FSO | + NETIF_F_FCOE_CRC; + + netdev->vlan_features |= NETIF_F_FSO | + NETIF_F_FCOE_CRC | + NETIF_F_FCOE_MTU; + } +#endif /* IXGBE_FCOE */ + + if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) + netdev->hw_features |= NETIF_F_LRO; + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + netdev->features |= NETIF_F_LRO; + + /* make sure the EEPROM is good */ + if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { + e_dev_err("The EEPROM Checksum Is Not Valid\n"); + err = -EIO; + goto err_sw_init; + } + + eth_platform_get_mac_address(&adapter->pdev->dev, + adapter->hw.mac.perm_addr); + + memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + e_dev_err("invalid MAC address\n"); + err = -EIO; + goto err_sw_init; + } + + /* Set hw->mac.addr to permanent MAC address */ + ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); + ixgbe_mac_set_default_filter(adapter); + + 
setup_timer(&adapter->service_timer, &ixgbe_service_timer, + (unsigned long) adapter); + + if (ixgbe_removed(hw->hw_addr)) { + err = -EIO; + goto err_sw_init; + } + INIT_WORK(&adapter->service_task, ixgbe_service_task); + set_bit(__IXGBE_SERVICE_INITED, &adapter->state); + clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); + + err = ixgbe_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + + /* WOL not supported for all devices */ + adapter->wol = 0; + hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); + hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device, + pdev->subsystem_device); + if (hw->wol_enabled) + adapter->wol = IXGBE_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* save off EEPROM version number */ + hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh); + hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl); + + /* pick up the PCI bus settings for reporting later */ + if (ixgbe_pcie_from_parent(hw)) + ixgbe_get_parent_bus_info(adapter); + else + hw->mac.ops.get_bus_info(hw); + + /* calculate the expected PCIe bandwidth required for optimal + * performance. Note that some older parts will never have enough + * bandwidth due to being older generation PCIe parts. We clamp these + * parts to ensure no warning is displayed if it can't be fixed. 
+ */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16); + break; + default: + expected_gts = ixgbe_enumerate_functions(adapter) * 10; + break; + } + + /* don't check link if we failed to enumerate functions */ + if (expected_gts > 0) + ixgbe_check_minimum_link(adapter, expected_gts); + + err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str)); + if (err) + strlcpy(part_str, "Unknown", sizeof(part_str)); + if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) + e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, hw->phy.sfp_type, + part_str); + else + e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, part_str); + + e_dev_info("%pM\n", netdev->dev_addr); + + /* reset the hardware with the new settings */ + err = hw->mac.ops.start_hw(hw); + if (err == IXGBE_ERR_EEPROM_VERSION) { + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM. " + "Please be aware there may be issues associated " + "with your hardware. 
If you are experiencing " + "problems please contact your Intel or hardware " + "representative who provided you with this " + "hardware.\n"); + } + + /*2019/06/03, change OOB from eth2 to eth0, for pegatron fn-6524-dn-f, Peter5_Lin*/ + if(!strcmp("0000:03:00.0", pci_name(pdev))) + strcpy(netdev->name, "eth0"); + else if(!strcmp("0000:03:00.1", pci_name(pdev))) + strcpy(netdev->name, "eth1"); + else if(!strcmp("0000:02:00.0", pci_name(pdev))) + strcpy(netdev->name, "eth2"); + else if(!strcmp("0000:02:00.1", pci_name(pdev))) + strcpy(netdev->name, "eth3"); + + err = register_netdev(netdev); + if (err) + goto err_register; + + pci_set_drvdata(pdev, adapter); + + /* power down the optics for 82599 SFP+ fiber */ + if (hw->mac.ops.disable_tx_laser) + hw->mac.ops.disable_tx_laser(hw); + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + +#ifdef CONFIG_IXGBE_DCA + if (dca_add_requester(&pdev->dev) == 0) { + adapter->flags |= IXGBE_FLAG_DCA_ENABLED; + ixgbe_setup_dca(adapter); + } +#endif + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs); + for (i = 0; i < adapter->num_vfs; i++) + ixgbe_vf_configuration(pdev, (i | 0x10000000)); + } + + /* firmware requires driver version to be 0xFFFFFFFF + * since os does not support feature + */ + if (hw->mac.ops.set_fw_drv_ver) + hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, + sizeof(ixgbe_driver_version) - 1, + ixgbe_driver_version); + + /* add san mac addr to netdev */ + ixgbe_add_sanmac_netdev(netdev); + + e_dev_info("%s\n", ixgbe_default_device_descr); + +#ifdef CONFIG_IXGBE_HWMON + if (ixgbe_sysfs_init(adapter)) + e_err(probe, "failed to allocate sysfs resources\n"); +#endif /* CONFIG_IXGBE_HWMON */ + + ixgbe_dbg_adapter_init(adapter); + + /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */ + if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link) + 
hw->mac.ops.setup_link(hw, + IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, + true); + + return 0; + +err_register: + ixgbe_release_hw_control(adapter); + ixgbe_clear_interrupt_scheme(adapter); +err_sw_init: + ixgbe_disable_sriov(adapter); + adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; + iounmap(adapter->io_addr); + kfree(adapter->jump_tables[0]); + kfree(adapter->mac_table); +err_ioremap: + disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); + free_netdev(netdev); +err_alloc_etherdev: + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + if (!adapter || disable_dev) + pci_disable_device(pdev); + return err; +} + +/** + * ixgbe_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ixgbe_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void ixgbe_remove(struct pci_dev *pdev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev; + bool disable_dev; + int i; + + /* if !adapter then we already cleaned up in probe */ + if (!adapter) + return; + + netdev = adapter->netdev; + ixgbe_dbg_adapter_exit(adapter); + + set_bit(__IXGBE_REMOVING, &adapter->state); + cancel_work_sync(&adapter->service_task); + + +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; + dca_remove_requester(&pdev->dev); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, + IXGBE_DCA_CTRL_DCA_DISABLE); + } + +#endif +#ifdef CONFIG_IXGBE_HWMON + ixgbe_sysfs_exit(adapter); +#endif /* CONFIG_IXGBE_HWMON */ + + /* remove the added san mac */ + ixgbe_del_sanmac_netdev(netdev); + +#ifdef CONFIG_PCI_IOV + ixgbe_disable_sriov(adapter); +#endif + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); + + ixgbe_clear_interrupt_scheme(adapter); + + 
ixgbe_release_hw_control(adapter); + +#ifdef CONFIG_DCB + kfree(adapter->ixgbe_ieee_pfc); + kfree(adapter->ixgbe_ieee_ets); + +#endif + iounmap(adapter->io_addr); + pci_release_mem_regions(pdev); + + e_dev_info("complete\n"); + + for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) { + if (adapter->jump_tables[i]) { + kfree(adapter->jump_tables[i]->input); + kfree(adapter->jump_tables[i]->mask); + } + kfree(adapter->jump_tables[i]); + } + + kfree(adapter->mac_table); + disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); + free_netdev(netdev); + + pci_disable_pcie_error_reporting(pdev); + + if (disable_dev) + pci_disable_device(pdev); +} + +/** + * ixgbe_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + +#ifdef CONFIG_PCI_IOV + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *bdev, *vfdev; + u32 dw0, dw1, dw2, dw3; + int vf, pos; + u16 req_id, pf_func; + + if (adapter->hw.mac.type == ixgbe_mac_82598EB || + adapter->num_vfs == 0) + goto skip_bad_vf_detection; + + bdev = pdev->bus->self; + while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) + bdev = bdev->bus->self; + + if (!bdev) + goto skip_bad_vf_detection; + + pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR); + if (!pos) + goto skip_bad_vf_detection; + + dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG); + dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4); + dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8); + dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12); + if (ixgbe_removed(hw->hw_addr)) + goto skip_bad_vf_detection; + + req_id = dw1 
>> 16; + /* On the 82599 if bit 7 of the requestor ID is set then it's a VF */ + if (!(req_id & 0x0080)) + goto skip_bad_vf_detection; + + pf_func = req_id & 0x01; + if ((pf_func & 1) == (pdev->devfn & 1)) { + unsigned int device_id; + + vf = (req_id & 0x7F) >> 1; + e_dev_err("VF %d has caused a PCIe error\n", vf); + e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: " + "%8.8x\tdw3: %8.8x\n", + dw0, dw1, dw2, dw3); + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + device_id = IXGBE_82599_VF_DEVICE_ID; + break; + case ixgbe_mac_X540: + device_id = IXGBE_X540_VF_DEVICE_ID; + break; + case ixgbe_mac_X550: + device_id = IXGBE_DEV_ID_X550_VF; + break; + case ixgbe_mac_X550EM_x: + device_id = IXGBE_DEV_ID_X550EM_X_VF; + break; + case ixgbe_mac_x550em_a: + device_id = IXGBE_DEV_ID_X550EM_A_VF; + break; + default: + device_id = 0; + break; + } + + /* Find the pci device of the offending VF */ + vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL); + while (vfdev) { + if (vfdev->devfn == (req_id & 0xFF)) + break; + vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, + device_id, vfdev); + } + /* + * There's a slim chance the VF could have been hot plugged, + * so if it is no longer present we don't need to issue the + * VFLR. Just clean up the AER in that case. + */ + if (vfdev) { + ixgbe_issue_vf_flr(adapter, vfdev); + /* Free device reference count */ + pci_dev_put(vfdev); + } + + pci_cleanup_aer_uncorrect_error_status(pdev); + } + + /* + * Even though the error may have occurred on the other port + * we still need to increment the vf error reference count for + * both ports because the I/O resume function will be called + * for both of them. 
+ */ + adapter->vferr_refcount++; + + return PCI_ERS_RESULT_RECOVERED; + +skip_bad_vf_detection: +#endif /* CONFIG_PCI_IOV */ + if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) + return PCI_ERS_RESULT_DISCONNECT; + + rtnl_lock(); + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (netif_running(netdev)) + ixgbe_close_suspend(adapter); + + if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); + rtnl_unlock(); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * ixgbe_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + */ +static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + pci_ers_result_t result; + int err; + + if (pci_enable_device_mem(pdev)) { + e_err(probe, "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + smp_mb__before_atomic(); + clear_bit(__IXGBE_DISABLED, &adapter->state); + adapter->hw.hw_addr = adapter->io_addr; + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_wake_from_d3(pdev, false); + + ixgbe_reset(adapter); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) { + e_dev_err("pci_cleanup_aer_uncorrect_error_status " + "failed 0x%0x\n", err); + /* non-fatal, continue */ + } + + return result; +} + +/** + * ixgbe_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. 
+ */ +static void ixgbe_io_resume(struct pci_dev *pdev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + +#ifdef CONFIG_PCI_IOV + if (adapter->vferr_refcount) { + e_info(drv, "Resuming after VF err\n"); + adapter->vferr_refcount--; + return; + } + +#endif + rtnl_lock(); + if (netif_running(netdev)) + ixgbe_open(netdev); + + netif_device_attach(netdev); + rtnl_unlock(); +} + +static const struct pci_error_handlers ixgbe_err_handler = { + .error_detected = ixgbe_io_error_detected, + .slot_reset = ixgbe_io_slot_reset, + .resume = ixgbe_io_resume, +}; + +static struct pci_driver ixgbe_driver = { + .name = ixgbe_driver_name, + .id_table = ixgbe_pci_tbl, + .probe = ixgbe_probe, + .remove = ixgbe_remove, +#ifdef CONFIG_PM + .suspend = ixgbe_suspend, + .resume = ixgbe_resume, +#endif + .shutdown = ixgbe_shutdown, + .sriov_configure = ixgbe_pci_sriov_configure, + .err_handler = &ixgbe_err_handler +}; + +/** + * ixgbe_init_module - Driver Registration Routine + * + * ixgbe_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init ixgbe_init_module(void) +{ + int ret; + pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); + pr_info("%s\n", ixgbe_copyright); + + ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name); + if (!ixgbe_wq) { + pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name); + return -ENOMEM; + } + + ixgbe_dbg_init(); + + ret = pci_register_driver(&ixgbe_driver); + if (ret) { + destroy_workqueue(ixgbe_wq); + ixgbe_dbg_exit(); + return ret; + } + +#ifdef CONFIG_IXGBE_DCA + dca_register_notify(&dca_notifier); +#endif + + return 0; +} + +module_init(ixgbe_init_module); + +/** + * ixgbe_exit_module - Driver Exit Cleanup Routine + * + * ixgbe_exit_module is called just before the driver is removed + * from memory. 
+ **/ +static void __exit ixgbe_exit_module(void) +{ +#ifdef CONFIG_IXGBE_DCA + dca_unregister_notify(&dca_notifier); +#endif + pci_unregister_driver(&ixgbe_driver); + + ixgbe_dbg_exit(); + if (ixgbe_wq) { + destroy_workqueue(ixgbe_wq); + ixgbe_wq = NULL; + } +} + +#ifdef CONFIG_IXGBE_DCA +static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, + void *p) +{ + int ret_val; + + ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, + __ixgbe_notify_dca); + + return ret_val ? NOTIFY_BAD : NOTIFY_DONE; +} + +#endif /* CONFIG_IXGBE_DCA */ + +module_exit(ixgbe_exit_module); + +/* ixgbe_main.c */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.c new file mode 100644 index 000000000000..a0cb84381cd0 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.c @@ -0,0 +1,460 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2016 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include "ixgbe.h" +#include "ixgbe_mbx.h" + +/** + * ixgbe_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (!mbx->ops) + return IXGBE_ERR_MBX; + + return mbx->ops->read(hw, msg, size, mbx_id); +} + +/** + * ixgbe_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + if (size > mbx->size) + return IXGBE_ERR_MBX; + + if (!mbx->ops) + return IXGBE_ERR_MBX; + + return mbx->ops->write(hw, msg, size, mbx_id); +} + +/** + * ixgbe_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + if (!mbx->ops) + return IXGBE_ERR_MBX; + + return mbx->ops->check_for_msg(hw, mbx_id); +} + +/** + * ixgbe_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or 
else ERR_MBX + **/ +s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + if (!mbx->ops) + return IXGBE_ERR_MBX; + + return mbx->ops->check_for_ack(hw, mbx_id); +} + +/** + * ixgbe_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + if (!mbx->ops) + return IXGBE_ERR_MBX; + + return mbx->ops->check_for_rst(hw, mbx_id); +} + +/** + * ixgbe_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops) + return IXGBE_ERR_MBX; + + while (mbx->ops->check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + return IXGBE_ERR_MBX; + udelay(mbx->usec_delay); + } + + return 0; +} + +/** + * ixgbe_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops) + return IXGBE_ERR_MBX; + + while (mbx->ops->check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + return IXGBE_ERR_MBX; + udelay(mbx->usec_delay); + } + + return 0; +} + +/** + * ixgbe_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * 
returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val; + + if (!mbx->ops) + return IXGBE_ERR_MBX; + + ret_val = ixgbe_poll_for_msg(hw, mbx_id); + if (ret_val) + return ret_val; + + /* if ack received read message */ + return mbx->ops->read(hw, msg, size, mbx_id); +} + +/** + * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops || !mbx->timeout) + return IXGBE_ERR_MBX; + + /* send msg */ + ret_val = mbx->ops->write(hw, msg, size, mbx_id); + if (ret_val) + return ret_val; + + /* if msg sent wait until we receive an ack */ + return ixgbe_poll_for_ack(hw, mbx_id); +} + +static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) +{ + u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); + + if (mbvficr & mask) { + IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); + return 0; + } + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + s32 index = IXGBE_MBVFICR_INDEX(vf_number); + u32 vf_bit = vf_number % 16; + + if (!ixgbe_check_for_bit_pf(hw, 
IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, + index)) { + hw->mbx.stats.reqs++; + return 0; + } + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + s32 index = IXGBE_MBVFICR_INDEX(vf_number); + u32 vf_bit = vf_number % 16; + + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, + index)) { + hw->mbx.stats.acks++; + return 0; + } + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + u32 reg_offset = (vf_number < 32) ? 0 : 1; + u32 vf_shift = vf_number % 32; + u32 vflre = 0; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); + break; + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); + break; + default: + break; + } + + if (vflre & BIT(vf_shift)) { + IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift)); + hw->mbx.stats.rsts++; + return 0; + } + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + u32 p2v_mailbox; + + /* Take ownership of the buffer */ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); + if 
(p2v_mailbox & IXGBE_PFMAILBOX_PFU) + return 0; + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + return ret_val; + + /* flush msg and acks as we are overwriting the message buffer */ + ixgbe_check_for_msg_pf(hw, vf_number); + ixgbe_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + return 0; +} + +/** + * ixgbe_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
+ **/ +static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + return ret_val; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i); + + /* Acknowledge the message and release buffer */ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + + return 0; +} + +#ifdef CONFIG_PCI_IOV +/** + * ixgbe_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_x550em_a && + hw->mac.type != ixgbe_mac_X540) + return; + + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + mbx->size = IXGBE_VFMAILBOX_SIZE; +} +#endif /* CONFIG_PCI_IOV */ + +const struct ixgbe_mbx_operations mbx_ops_generic = { + .read = ixgbe_read_mbx_pf, + .write = ixgbe_write_mbx_pf, + .read_posted = ixgbe_read_posted_mbx, + .write_posted = ixgbe_write_posted_mbx, + .check_for_msg = ixgbe_check_for_msg_pf, + .check_for_ack = ixgbe_check_for_ack_pf, + .check_for_rst = ixgbe_check_for_rst_pf, +}; + diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.h new file mode 100644 index 000000000000..01c2667c0f92 --- /dev/null +++ 
b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.h @@ -0,0 +1,128 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2016 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_MBX_H_ +#define _IXGBE_MBX_H_ + +#include "ixgbe_type.h" + +#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define IXGBE_ERR_MBX -100 + +#define IXGBE_VFMAILBOX 0x002FC +#define IXGBE_VFMBMEM 0x00200 + +#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ +#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ +#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + + +/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is IXGBE_PF_*. 
+ * Message ACK's are the value or'd with 0xF0000000 + */ +#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with + * this are the ACK */ +#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with + * this are the NACK */ +#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + clear to send requests */ +#define IXGBE_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for exra info for certain messages */ +#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) + +/* definitions to support mailbox API version negotiation */ + +/* + * Each element denotes a version of the API; existing numbers may not + * change; any additions must go at the end + */ +enum ixgbe_pfvf_api_rev { + ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ + ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + /* This value should always be last */ + ixgbe_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API, legacy requests */ +#define IXGBE_VF_RESET 0x01 /* VF requests reset */ +#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ + +/* mailbox API, version 1.0 VF requests */ +#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ + +/* mailbox API, version 1.1 VF requests */ +#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ + +/* GET_QUEUES return data indices within the mailbox */ +#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define 
IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ + +/* mailbox API, version 1.2 VF requests */ +#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ + +#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c + +/* length of permanent address message returned from PF */ +#define IXGBE_VF_PERMADDR_MSG_LEN 4 +/* word in permanent address message with the current multicast type */ +#define IXGBE_VF_MC_TYPE_WORD 3 + +#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ + +#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); +s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); +s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); +#ifdef CONFIG_PCI_IOV +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); +#endif /* CONFIG_PCI_IOV */ + +extern const struct ixgbe_mbx_operations mbx_ops_generic; + +#endif /* _IXGBE_MBX_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_model.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_model.h new file mode 100644 index 000000000000..538a1c5475b6 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_model.h @@ -0,0 +1,121 @@ +/******************************************************************************* + * + * Intel 10 Gigabit PCI Express Linux drive + * Copyright(c) 2016 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _IXGBE_MODEL_H_ +#define _IXGBE_MODEL_H_ + +#include "ixgbe.h" +#include "ixgbe_type.h" + +struct ixgbe_mat_field { + unsigned int off; + int (*val)(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + u32 val, u32 m); + unsigned int type; +}; + +struct ixgbe_jump_table { + struct ixgbe_mat_field *mat; + struct ixgbe_fdir_filter *input; + union ixgbe_atr_input *mask; + u32 link_hdl; + unsigned long child_loc_map[32]; +}; + +#define IXGBE_MAX_HW_ENTRIES 2045 + +static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + u32 val, u32 m) +{ + input->filter.formatted.src_ip[0] = val; + mask->formatted.src_ip[0] = m; + return 0; +} + +static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + u32 val, u32 m) +{ + input->filter.formatted.dst_ip[0] = val; + mask->formatted.dst_ip[0] = m; + return 0; +} + +static struct ixgbe_mat_field ixgbe_ipv4_fields[] = { + { .off = 12, .val = ixgbe_mat_prgm_sip, + .type = IXGBE_ATR_FLOW_TYPE_IPV4}, + { .off = 16, .val = 
ixgbe_mat_prgm_dip, + .type = IXGBE_ATR_FLOW_TYPE_IPV4}, + { .val = NULL } /* terminal node */ +}; + +static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + u32 val, u32 m) +{ + input->filter.formatted.src_port = val & 0xffff; + mask->formatted.src_port = m & 0xffff; + input->filter.formatted.dst_port = val >> 16; + mask->formatted.dst_port = m >> 16; + + return 0; +}; + +static struct ixgbe_mat_field ixgbe_tcp_fields[] = { + {.off = 0, .val = ixgbe_mat_prgm_ports, + .type = IXGBE_ATR_FLOW_TYPE_TCPV4}, + { .val = NULL } /* terminal node */ +}; + +static struct ixgbe_mat_field ixgbe_udp_fields[] = { + {.off = 0, .val = ixgbe_mat_prgm_ports, + .type = IXGBE_ATR_FLOW_TYPE_UDPV4}, + { .val = NULL } /* terminal node */ +}; + +struct ixgbe_nexthdr { + /* offset, shift, and mask of position to next header */ + unsigned int o; + u32 s; + u32 m; + /* match criteria to make this jump*/ + unsigned int off; + u32 val; + u32 mask; + /* location of jump to make */ + struct ixgbe_mat_field *jump; +}; + +static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = { + { .o = 0, .s = 6, .m = 0xf, + .off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields}, + { .o = 0, .s = 6, .m = 0xf, + .off = 8, .val = 0x1100, .mask = 0xff00, .jump = ixgbe_udp_fields}, + { .jump = NULL } /* terminal node */ +}; +#endif /* _IXGBE_MODEL_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.c new file mode 100644 index 000000000000..d914b4070f92 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.c @@ -0,0 +1,2474 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2014 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include + +#include "ixgbe.h" +#include "ixgbe_phy.h" + +static void ixgbe_i2c_start(struct ixgbe_hw *hw); +static void ixgbe_i2c_stop(struct ixgbe_hw *hw); +static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); +static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); +static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); +static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); +static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); +static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); +static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); +static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); +static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); +static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); +static s32 ixgbe_identify_qsfp_module_generic(struct 
ixgbe_hw *hw);

/**
 * ixgbe_out_i2c_byte_ack - Send I2C byte with ack
 * @hw: pointer to the hardware structure
 * @byte: byte to send
 *
 * Returns an error code on error.
 **/
static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
{
	s32 status;

	/* Clock the byte out bit-banged, then sample the ACK slot. */
	status = ixgbe_clock_out_i2c_byte(hw, byte);
	if (status)
		return status;
	return ixgbe_get_i2c_ack(hw);
}

/**
 * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack
 * @hw: pointer to the hardware structure
 * @byte: pointer to a u8 to receive the byte
 *
 * Returns an error code on error.
 **/
static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
{
	s32 status;

	status = ixgbe_clock_in_i2c_byte(hw, byte);
	if (status)
		return status;
	/* ACK: drive SDA low for one clock to acknowledge the byte. */
	return ixgbe_clock_out_i2c_bit(hw, false);
}

/**
 * ixgbe_ones_comp_byte_add - Perform one's complement addition
 * @add1: addend 1
 * @add2: addend 2
 *
 * Returns one's complement 8-bit sum.
 **/
static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
{
	u16 sum = add1 + add2;

	/* End-around carry: fold any carry out of bit 7 back into the sum. */
	sum = (sum & 0xFF) + (sum >> 8);
	return sum & 0xFF;
}

/**
 * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation
 * @hw: pointer to the hardware structure
 * @addr: I2C bus address to read from
 * @reg: I2C device register to read from
 * @val: pointer to location to receive read value
 * @lock: true if to take and release semaphore
 *
 * Performs a combined-format I2C transfer (write register address +
 * checksum, repeated start, read 16-bit value + checksum), retrying up
 * to three times on any NAK/bit error.
 *
 * Returns an error code on error.
 */
s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
					u16 reg, u16 *val, bool lock)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	int max_retry = 3;
	int retry = 0;
	u8 csum_byte;
	u8 high_bits;
	u8 low_bits;
	u8 reg_high;
	u8 csum;

	reg_high = ((reg >> 7) & 0xFE) | 1;	/* Indicate read combined */
	/* One's-complement checksum over the address bytes, inverted. */
	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
	csum = ~csum;
	do {
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;
		ixgbe_i2c_start(hw);
		/* Device Address and write indication */
		if (ixgbe_out_i2c_byte_ack(hw, addr))
			goto fail;
		/* Write bits 14:8 */
		if (ixgbe_out_i2c_byte_ack(hw, reg_high))
			goto fail;
		/* Write bits 7:0 */
		if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
			goto fail;
		/* Write csum */
		if (ixgbe_out_i2c_byte_ack(hw, csum))
			goto fail;
		/* Re-start condition */
		ixgbe_i2c_start(hw);
		/* Device Address and read indication */
		if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
			goto fail;
		/* Get upper bits */
		if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
			goto fail;
		/* Get low bits */
		if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
			goto fail;
		/* Get csum.
		 * NOTE(review): csum_byte is clocked in to complete the
		 * transfer but never verified against a locally computed
		 * checksum — confirm this is intentional.
		 */
		if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
			goto fail;
		/* NACK */
		if (ixgbe_clock_out_i2c_bit(hw, false))
			goto fail;
		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		*val = (high_bits << 8) | low_bits;
		return 0;

fail:
		/* Recover the bus, drop the semaphore, and retry. */
		ixgbe_i2c_bus_clear(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte read combined error - Retry.\n");
		else
			hw_dbg(hw, "I2C byte read combined error.\n");
	} while (retry < max_retry);

	return IXGBE_ERR_I2C;
}

/**
 * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation
 * @hw: pointer to the hardware structure
 * @addr: I2C bus address to write to
 * @reg: I2C device register to write to
 * @val: value to write
 * @lock:
true if to take and release semaphore
 *
 * Returns an error code on error.
 */
s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
					 u16 reg, u16 val, bool lock)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	int max_retry = 1;
	int retry = 0;
	u8 reg_high;
	u8 csum;

	reg_high = (reg >> 7) & 0xFE;	/* Indicate write combined */
	/* One's-complement checksum over address and data bytes, inverted. */
	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
	csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
	csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
	csum = ~csum;
	do {
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;
		ixgbe_i2c_start(hw);
		/* Device Address and write indication */
		if (ixgbe_out_i2c_byte_ack(hw, addr))
			goto fail;
		/* Write bits 14:8 */
		if (ixgbe_out_i2c_byte_ack(hw, reg_high))
			goto fail;
		/* Write bits 7:0 */
		if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
			goto fail;
		/* Write data 15:8 */
		if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
			goto fail;
		/* Write data 7:0 */
		if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
			goto fail;
		/* Write csum */
		if (ixgbe_out_i2c_byte_ack(hw, csum))
			goto fail;
		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return 0;

fail:
		/* Recover the bus, drop the semaphore, and retry once. */
		ixgbe_i2c_bus_clear(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte write combined error - Retry.\n");
		else
			hw_dbg(hw, "I2C byte write combined error.\n");
	} while (retry < max_retry);

	return IXGBE_ERR_I2C;
}

/**
 * ixgbe_probe_phy - Probe a single address for a PHY
 * @hw: pointer to hardware structure
 * @phy_addr: PHY address to probe
 *
 * Side effect: sets hw->phy.mdio.prtad to @phy_addr and, on success,
 * fills in hw->phy.id and hw->phy.type.
 *
 * Returns true if PHY found
 **/
static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
{
	u16 ext_ability = 0;

	hw->phy.mdio.prtad = phy_addr;
	if (mdio45_probe(&hw->phy.mdio, phy_addr) != 0) {
		return false;
	}

	if (ixgbe_get_phy_id(hw)) {
		return false;
	}

	hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);

	if (hw->phy.type == ixgbe_phy_unknown) {
		/* ID not in the known table: use the PMA/PMD extended-ability
		 * bits to decide between generic copper and generic PHY.
		 */
		hw->phy.ops.read_reg(hw,
				     MDIO_PMA_EXTABLE,
				     MDIO_MMD_PMAPMD,
				     &ext_ability);
		if (ext_ability &
		    (MDIO_PMA_EXTABLE_10GBT |
		     MDIO_PMA_EXTABLE_1000BT))
			hw->phy.type = ixgbe_phy_cu_unknown;
		else
			hw->phy.type = ixgbe_phy_generic;
	}

	return true;
}

/**
 * ixgbe_identify_phy_generic - Get physical layer module
 * @hw: pointer to hardware structure
 *
 * Determines the physical layer module found on the current adapter.
 * If firmware pre-selected an MDIO address (nw_mng_if_sel) only that
 * address is probed; otherwise all IXGBE_MAX_PHY_ADDR addresses are
 * scanned.
 **/
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
{
	u32 phy_addr;
	/* NOTE(review): status holds IXGBE_ERR_PHY_ADDR_INVALID and is
	 * returned through an s32 return type, but is declared u32 here —
	 * confirm it should not be s32.
	 */
	u32 status = IXGBE_ERR_PHY_ADDR_INVALID;

	if (!hw->phy.phy_semaphore_mask) {
		/* Pick the per-LAN-port firmware semaphore bit. */
		if (hw->bus.lan_id)
			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
		else
			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
	}

	/* Already identified on a previous call — nothing to do. */
	if (hw->phy.type != ixgbe_phy_unknown)
		return 0;

	if (hw->phy.nw_mng_if_sel) {
		phy_addr = (hw->phy.nw_mng_if_sel &
			    IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
			   IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
		if (ixgbe_probe_phy(hw, phy_addr))
			return 0;
		else
			return IXGBE_ERR_PHY_ADDR_INVALID;
	}

	for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
		if (ixgbe_probe_phy(hw, phy_addr)) {
			status = 0;
			break;
		}
	}

	/* Certain media types do not have a phy so an address will not
	 * be found and the code will take this path.  Caller has to
	 * decide if it is an error or not.
	 */
	if (status)
		hw->phy.mdio.prtad = MDIO_PRTAD_NONE;

	return status;
}

/**
 * ixgbe_check_reset_blocked - check status of MNG FW veto bit
 * @hw: pointer to the hardware structure
 *
 * This function checks the MMNGC.MNG_VETO bit to see if there are
 * any constraints on link from manageability.  For MAC's that don't
 * have this bit just return false since the link can not be blocked
 * via this method.
 **/
bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
{
	u32 mmngc;

	/* If we don't have this bit, it can't be blocking */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return false;

	mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
	if (mmngc & IXGBE_MMNGC_MNG_VETO) {
		hw_dbg(hw, "MNG_VETO bit detected.\n");
		return true;
	}

	return false;
}

/**
 * ixgbe_get_phy_id - Get the phy type
 * @hw: pointer to hardware structure
 *
 * Reads the two PHY device-ID words from the PMA/PMD MMD and combines
 * them into hw->phy.id; the low word is split between ID and revision
 * bits via IXGBE_PHY_REVISION_MASK.
 **/
static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
{
	s32 status;
	u16 phy_id_high = 0;
	u16 phy_id_low = 0;

	status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
				      &phy_id_high);

	if (!status) {
		hw->phy.id = (u32)(phy_id_high << 16);
		status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
					      &phy_id_low);
		hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
		hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
	}
	return status;
}

/**
 * ixgbe_get_phy_type_from_id - Get the phy type
 * @phy_id: combined PHY device ID read by ixgbe_get_phy_id
 *
 * Maps a raw PHY ID to the driver's enum ixgbe_phy_type; returns
 * ixgbe_phy_unknown for IDs not in the table.
 **/
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
{
	enum ixgbe_phy_type phy_type;

	switch (phy_id) {
	case TN1010_PHY_ID:
		phy_type = ixgbe_phy_tn;
		break;
	case X550_PHY_ID2:
	case X550_PHY_ID3:
	case X540_PHY_ID:
		phy_type = ixgbe_phy_aq;
		break;
	case QT2022_PHY_ID:
		phy_type = ixgbe_phy_qt;
		break;
	case ATH_PHY_ID:
		phy_type = ixgbe_phy_nl;
		break;
	case X557_PHY_ID:
	case X557_PHY_ID2:
		phy_type = ixgbe_phy_x550em_ext_t;
		break;
	default:
		phy_type = ixgbe_phy_unknown;
		break;
	}

	return phy_type;
}

/**
 * ixgbe_reset_phy_generic - Performs a PHY reset
 * @hw: pointer to hardware structure
 *
 * Soft-resets the PHY via the PHY_XS control register and polls (up to
 * ~3 s) for the reset bit to self-clear.  Skipped when the PHY is shut
 * down for overtemperature or when manageability firmware vetoes reset.
 **/
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u16 ctrl = 0;
	s32 status = 0;

	if (hw->phy.type == ixgbe_phy_unknown)
		status = ixgbe_identify_phy_generic(hw);

	if (status != 0 || hw->phy.type == ixgbe_phy_none)
		return status;

	/* Don't reset PHY if it's shut down due to overtemp. */
	if (!hw->phy.reset_if_overtemp &&
	    (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
		return 0;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	/*
	 * Perform soft PHY reset to the PHY_XS.
	 * This will cause a soft reset to the PHY
	 */
	hw->phy.ops.write_reg(hw, MDIO_CTRL1,
			      MDIO_MMD_PHYXS,
			      MDIO_CTRL1_RESET);

	/*
	 * Poll for reset bit to self-clear indicating reset is complete.
	 * Some PHYs could take up to 3 seconds to complete and need about
	 * 1.7 usec delay after the reset is complete.
	 */
	for (i = 0; i < 30; i++) {
		msleep(100);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
			/* x550em_ext_t signals completion through a vendor
			 * alarm bit rather than the CTRL1 reset bit.
			 */
			status = hw->phy.ops.read_reg(hw,
						      IXGBE_MDIO_TX_VENDOR_ALARMS_3,
						      MDIO_MMD_PMAPMD, &ctrl);
			if (status)
				return status;

			if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
				udelay(2);
				break;
			}
		} else {
			status = hw->phy.ops.read_reg(hw, MDIO_CTRL1,
						      MDIO_MMD_PHYXS, &ctrl);
			if (status)
				return status;

			if (!(ctrl & MDIO_CTRL1_RESET)) {
				udelay(2);
				break;
			}
		}
	}

	if (ctrl & MDIO_CTRL1_RESET) {
		hw_dbg(hw, "PHY reset polling failed to complete.\n");
		return IXGBE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ixgbe_read_phy_mdio - Reads a value from a specified PHY register without
 * the SWFW lock.  This Clause 22 API is patched by Hilbert
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit address of PHY register to read
 * @device_type: 5 bit device type
 * @phy_data: Pointer to read data from PHY register
 **/
s32 ixgbe_read_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
			    u16 *phy_data)
{
	u32 i, data, command;

	/* Setup and write the read command */
	command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		  (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		  IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
		  IXGBE_MSCA_MDI_COMMAND;

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY address command did not complete.\n"); + return IXGBE_ERR_PHY; + } + + /* Read operation is complete. Get the data + * from MSRWD + */ + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data >>= IXGBE_MSRWD_READ_DATA_SHIFT; + *phy_data = (u16)(data); + + return 0; +} + +/** + * ixgbe_write_phy_reg_mdio - Writes a value to specified PHY register + * without SWFW lock. This Clause 22 API is patched by Hilbert + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 ixgbe_write_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + u32 i, command; + + /* Put the data in the MDI single read and write data register*/ + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); + + /* Setup and write the write command */ + command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | + IXGBE_MSCA_MDI_COMMAND; + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle completed. 
+ * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY write cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + + return 0; +} +/** + * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without + * the SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data) +{ + u32 i, data, command; + + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY address command did not complete.\n"); + return IXGBE_ERR_PHY; + } + + /* Address cycle complete, setup and write the read + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the address cycle + * completed. 
The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY read command didn't complete\n"); + return IXGBE_ERR_PHY; + } + + /* Read operation is complete. Get the data + * from MSRWD + */ + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data >>= IXGBE_MSRWD_READ_DATA_SHIFT; + *phy_data = (u16)(data); + + return 0; +} + +/** + * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register + * using the SWFW lock - this function is needed in most cases + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + s32 status; + u32 gssr = hw->phy.phy_semaphore_mask; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { + status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type, + phy_data); + hw->mac.ops.release_swfw_sync(hw, gssr); + } else { + return IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register + * without SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + u32 i, command; + + /* Put the data in the MDI single read and write data register*/ + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); + + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + 
(IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY address cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + + /* + * Address cycle complete, setup and write the write + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the address cycle + * completed. The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY write cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + + return 0; +} + +/** + * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register + * using SWFW lock- this function is needed in most cases + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + s32 status; + u32 gssr = hw->phy.phy_semaphore_mask; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { + status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, + phy_data); + hw->mac.ops.release_swfw_sync(hw, gssr); + } else { + return 
IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_setup_phy_link_generic - Set and restart autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. + **/ +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) +{ + s32 status = 0; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = false; + ixgbe_link_speed speed; + + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + /* Set or unset auto-negotiation 10G advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg); + + autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; + if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) && + (speed & IXGBE_LINK_SPEED_10GB_FULL)) + autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + + hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg); + + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + MDIO_MMD_AN, &autoneg_reg); + + if (hw->mac.type == ixgbe_mac_X550) { + /* Set or unset auto-negotiation 5G advertisement */ + autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE; + if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) && + (speed & IXGBE_LINK_SPEED_5GB_FULL)) + autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE; + + /* Set or unset auto-negotiation 2.5G advertisement */ + autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE; + if ((hw->phy.autoneg_advertised & + IXGBE_LINK_SPEED_2_5GB_FULL) && + (speed & IXGBE_LINK_SPEED_2_5GB_FULL)) + autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE; + } + + /* Set or unset auto-negotiation 1G advertisement */ + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; + if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) && + (speed & IXGBE_LINK_SPEED_1GB_FULL)) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + MDIO_MMD_AN, autoneg_reg); + + /* Set or unset auto-negotiation 100M advertisement */ + hw->phy.ops.read_reg(hw, 
MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg); + + autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF); + if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) && + (speed & IXGBE_LINK_SPEED_100_FULL)) + autoneg_reg |= ADVERTISE_100FULL; + + hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg); + + /* Blocked by MNG FW so don't reset PHY */ + if (ixgbe_check_reset_blocked(hw)) + return 0; + + /* Restart PHY autonegotiation and wait for completion */ + hw->phy.ops.read_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, &autoneg_reg); + + autoneg_reg |= MDIO_AN_CTRL1_RESTART; + + hw->phy.ops.write_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, autoneg_reg); + + return status; +} + +/** + * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + **/ +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + /* Clear autoneg_advertised and set new values based on input link + * speed. 
+ */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_5GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL; + + if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (speed & IXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + + if (speed & IXGBE_LINK_SPEED_10_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL; + + /* Setup link based on the new speed settings */ + if (hw->phy.ops.setup_link) + hw->phy.ops.setup_link(hw); + + return 0; +} + +/** + * ixgbe_get_copper_speeds_supported - Get copper link speed from phy + * @hw: pointer to hardware structure + * + * Determines the supported link capabilities by reading the PHY auto + * negotiation register. 
+ */ +static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) +{ + u16 speed_ability; + s32 status; + + status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, + &speed_ability); + if (status) + return status; + + if (speed_ability & MDIO_SPEED_10G) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed_ability & MDIO_PMA_SPEED_1000) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL; + if (speed_ability & MDIO_PMA_SPEED_100) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL; + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; + break; + default: + break; + } + + return 0; +} + +/** + * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + */ +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + s32 status = 0; + + *autoneg = true; + if (!hw->phy.speeds_supported) + status = ixgbe_get_copper_speeds_supported(hw); + + *speed = hw->phy.speeds_supported; + return status; +} + +/** + * ixgbe_check_phy_link_tnx - Determine link and speed status + * @hw: pointer to hardware structure + * + * Reads the VS1 register to determine if link is up and the current speed for + * the PHY. + **/ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + s32 status; + u32 time_out; + u32 max_time_out = 10; + u16 phy_link = 0; + u16 phy_speed = 0; + u16 phy_data = 0; + + /* Initialize speed and link to default case */ + *link_up = false; + *speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* + * Check current speed and link status of the PHY register. 
+ * This is a vendor specific register and may have to + * be changed for other copper PHYs. + */ + for (time_out = 0; time_out < max_time_out; time_out++) { + udelay(10); + status = hw->phy.ops.read_reg(hw, + MDIO_STAT1, + MDIO_MMD_VEND1, + &phy_data); + phy_link = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; + phy_speed = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; + if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { + *link_up = true; + if (phy_speed == + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + } + } + + return status; +} + +/** + * ixgbe_setup_phy_link_tnx - Set and restart autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. + * This function always returns success, this is nessary since + * it is called via a function pointer that could call other + * functions that could return an error. + **/ +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) +{ + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = false; + ixgbe_link_speed speed; + + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + /* Set or unset auto-negotiation 10G advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + + hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + + hw->phy.ops.write_reg(hw, 
IXGBE_MII_AUTONEG_XNP_TX_REG, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_100_FULL) { + /* Set or unset auto-negotiation 100M advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~(ADVERTISE_100FULL | + ADVERTISE_100HALF); + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + autoneg_reg |= ADVERTISE_100FULL; + + hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + autoneg_reg); + } + + /* Blocked by MNG FW so don't reset PHY */ + if (ixgbe_check_reset_blocked(hw)) + return 0; + + /* Restart PHY autonegotiation and wait for completion */ + hw->phy.ops.read_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, &autoneg_reg); + + autoneg_reg |= MDIO_AN_CTRL1_RESTART; + + hw->phy.ops.write_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, autoneg_reg); + return 0; +} + +/** + * ixgbe_reset_phy_nl - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) +{ + u16 phy_offset, control, eword, edata, block_crc; + bool end_data = false; + u16 list_offset, data_offset; + u16 phy_data = 0; + s32 ret_val; + u32 i; + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + return 0; + + hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data); + + /* reset the PHY and poll for completion */ + hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, + (phy_data | MDIO_CTRL1_RESET)); + + for (i = 0; i < 100; i++) { + hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, + &phy_data); + if ((phy_data & MDIO_CTRL1_RESET) == 0) + break; + usleep_range(10000, 20000); + } + + if ((phy_data & MDIO_CTRL1_RESET) != 0) { + hw_dbg(hw, "PHY reset did not complete.\n"); + return IXGBE_ERR_PHY; + } + + /* Get init offsets */ + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, + &data_offset); + if (ret_val) + return ret_val; + + ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); + data_offset++; + while (!end_data) { + /* + * 
Read control word from PHY init contents offset + */ + ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); + if (ret_val) + goto err_eeprom; + control = (eword & IXGBE_CONTROL_MASK_NL) >> + IXGBE_CONTROL_SHIFT_NL; + edata = eword & IXGBE_DATA_MASK_NL; + switch (control) { + case IXGBE_DELAY_NL: + data_offset++; + hw_dbg(hw, "DELAY: %d MS\n", edata); + usleep_range(edata * 1000, edata * 2000); + break; + case IXGBE_DATA_NL: + hw_dbg(hw, "DATA:\n"); + data_offset++; + ret_val = hw->eeprom.ops.read(hw, data_offset++, + &phy_offset); + if (ret_val) + goto err_eeprom; + for (i = 0; i < edata; i++) { + ret_val = hw->eeprom.ops.read(hw, data_offset, + &eword); + if (ret_val) + goto err_eeprom; + hw->phy.ops.write_reg(hw, phy_offset, + MDIO_MMD_PMAPMD, eword); + hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, + phy_offset); + data_offset++; + phy_offset++; + } + break; + case IXGBE_CONTROL_NL: + data_offset++; + hw_dbg(hw, "CONTROL:\n"); + if (edata == IXGBE_CONTROL_EOL_NL) { + hw_dbg(hw, "EOL\n"); + end_data = true; + } else if (edata == IXGBE_CONTROL_SOL_NL) { + hw_dbg(hw, "SOL\n"); + } else { + hw_dbg(hw, "Bad control value\n"); + return IXGBE_ERR_PHY; + } + break; + default: + hw_dbg(hw, "Bad control type\n"); + return IXGBE_ERR_PHY; + } + } + + return ret_val; + +err_eeprom: + hw_err(hw, "eeprom read at offset %d failed\n", data_offset); + return IXGBE_ERR_PHY; +} + +/** + * ixgbe_identify_module_generic - Identifies module type + * @hw: pointer to hardware structure + * + * Determines HW type and calls appropriate function. 
+ **/ +s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) +{ + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + return ixgbe_identify_sfp_module_generic(hw); + case ixgbe_media_type_fiber_qsfp: + return ixgbe_identify_qsfp_module_generic(hw); + default: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + return IXGBE_ERR_SFP_NOT_PRESENT; + } + + return IXGBE_ERR_SFP_NOT_PRESENT; +} + +/** + * ixgbe_identify_sfp_module_generic - Identifies SFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the SFP module and assigns appropriate PHY type. + **/ +s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + s32 status; + u32 vendor_oui = 0; + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; + u8 identifier = 0; + u8 comp_codes_1g = 0; + u8 comp_codes_10g = 0; + u8 oui_bytes[3] = {0, 0, 0}; + u8 cable_tech = 0; + u8 cable_spec = 0; + u16 enforce_sfp = 0; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + return IXGBE_ERR_SFP_NOT_PRESENT; + } + + /* LAN ID is needed for sfp_type determination */ + hw->mac.ops.set_lan_id(hw); + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_IDENTIFIER, + &identifier); + + if (status) + goto err_read_i2c_eeprom; + + if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_1GBE_COMP_CODES, + &comp_codes_1g); + + if (status) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, + &comp_codes_10g); + + if (status) + goto err_read_i2c_eeprom; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_CABLE_TECHNOLOGY, + &cable_tech); + + if (status) + goto err_read_i2c_eeprom; + + /* ID Module + * ========= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CORE0 - 
82599-specific + * 4 SFP_DA_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific + * 7 SFP_act_lmt_DA_CORE0 - 82599-specific + * 8 SFP_act_lmt_DA_CORE1 - 82599-specific + * 9 SFP_1g_cu_CORE0 - 82599-specific + * 10 SFP_1g_cu_CORE1 - 82599-specific + * 11 SFP_1g_sx_CORE0 - 82599-specific + * 12 SFP_1g_sx_CORE1 - 82599-specific + */ + if (hw->mac.type == ixgbe_mac_82598EB) { + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.sfp_type = ixgbe_sfp_type_da_cu; + else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + hw->phy.sfp_type = ixgbe_sfp_type_sr; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + hw->phy.sfp_type = ixgbe_sfp_type_lr; + else + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + } else { + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_cu_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_cu_core1; + } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { + hw->phy.ops.read_i2c_eeprom( + hw, IXGBE_SFF_CABLE_SPEC_COMP, + &cable_spec); + if (cable_spec & + IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core1; + } else { + hw->phy.sfp_type = + ixgbe_sfp_type_unknown; + } + } else if (comp_codes_10g & + (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_srlr_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_sx_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_sx_core1; + } else if 
(comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_lx_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_lx_core1; + } else { + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + } + } + + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = true; + + /* Determine if the SFP+ PHY is dual speed or not. */ + hw->phy.multispeed_fiber = false; + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + + /* Determine PHY vendor */ + if (hw->phy.type != ixgbe_phy_nl) { + hw->phy.id = identifier; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status != 0) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + switch (vendor_oui) { + case IXGBE_SFF_VENDOR_OUI_TYCO: + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_passive_tyco; + break; + case IXGBE_SFF_VENDOR_OUI_FTL: + if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = ixgbe_phy_sfp_ftl_active; + else + hw->phy.type = ixgbe_phy_sfp_ftl; + break; + case IXGBE_SFF_VENDOR_OUI_AVAGO: + hw->phy.type = ixgbe_phy_sfp_avago; + break; + case IXGBE_SFF_VENDOR_OUI_INTEL: + hw->phy.type = ixgbe_phy_sfp_intel; + break; + default: + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_passive_unknown; + else if (cable_tech 
& IXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_active_unknown; + else + hw->phy.type = ixgbe_phy_sfp_unknown; + break; + } + } + + /* Allow any DA cable vendor */ + if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | + IXGBE_SFF_DA_ACTIVE_CABLE)) + return 0; + + /* Verify supported 1G SFP modules */ + if (comp_codes_10g == 0 && + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + /* Anything else 82598-based is supported */ + if (hw->mac.type == ixgbe_mac_82598EB) + return 0; + + hw->mac.ops.get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_sfp_intel) + return 0; + if (hw->allow_unsupported_sfp) { + e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. 
Intel Corporation is not responsible for any harm caused by using untested modules.\n"); + return 0; + } + hw_dbg(hw, "SFP+ module not supported\n"); + hw->phy.type = ixgbe_phy_sfp_unsupported; + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + return 0; + +err_read_i2c_eeprom: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + if (hw->phy.type != ixgbe_phy_nl) { + hw->phy.id = 0; + hw->phy.type = ixgbe_phy_unknown; + } + return IXGBE_ERR_SFP_NOT_PRESENT; +} + +/** + * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the QSFP module and assigns appropriate PHY type + **/ +static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + s32 status; + u32 vendor_oui = 0; + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; + u8 identifier = 0; + u8 comp_codes_1g = 0; + u8 comp_codes_10g = 0; + u8 oui_bytes[3] = {0, 0, 0}; + u16 enforce_sfp = 0; + u8 connector = 0; + u8 cable_length = 0; + u8 device_tech = 0; + bool active_cable = false; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) { + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + return IXGBE_ERR_SFP_NOT_PRESENT; + } + + /* LAN ID is needed for sfp_type determination */ + hw->mac.ops.set_lan_id(hw); + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, + &identifier); + + if (status != 0) + goto err_read_i2c_eeprom; + + if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + hw->phy.id = identifier; + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP, + &comp_codes_10g); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP, + &comp_codes_1g); + + if (status != 0) + goto err_read_i2c_eeprom; + + if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) { + hw->phy.type = 
ixgbe_phy_qsfp_passive_unknown; + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0; + else + hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1; + } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1; + } else { + if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE) + active_cable = true; + + if (!active_cable) { + /* check for active DA cables that pre-date + * SFF-8436 v3.6 + */ + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_CONNECTOR, + &connector); + + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_CABLE_LENGTH, + &cable_length); + + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_DEVICE_TECH, + &device_tech); + + if ((connector == + IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) && + (cable_length > 0) && + ((device_tech >> 4) == + IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL)) + active_cable = true; + } + + if (active_cable) { + hw->phy.type = ixgbe_phy_qsfp_active_unknown; + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core1; + } else { + /* unsupported module type */ + hw->phy.type = ixgbe_phy_sfp_unsupported; + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + } + + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = true; + + /* Determine if the QSFP+ PHY is dual speed or not. 
*/ + hw->phy.multispeed_fiber = false; + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + + /* Determine PHY vendor for optical modules */ + if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status != 0) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL) + hw->phy.type = ixgbe_phy_qsfp_intel; + else + hw->phy.type = ixgbe_phy_qsfp_unknown; + + hw->mac.ops.get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_qsfp_intel) + return 0; + if (hw->allow_unsupported_sfp) { + e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. 
Intel Corporation is not responsible for any harm caused by using untested modules.\n"); + return 0; + } + hw_dbg(hw, "QSFP module not supported\n"); + hw->phy.type = ixgbe_phy_sfp_unsupported; + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + return 0; + } + return 0; + +err_read_i2c_eeprom: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + hw->phy.id = 0; + hw->phy.type = ixgbe_phy_unknown; + + return IXGBE_ERR_SFP_NOT_PRESENT; +} + +/** + * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence + * @hw: pointer to hardware structure + * @list_offset: offset to the SFP ID list + * @data_offset: offset to the SFP data block + * + * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if + * so it returns the offsets to the phy init sequence block. + **/ +s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + u16 *list_offset, + u16 *data_offset) +{ + u16 sfp_id; + u16 sfp_type = hw->phy.sfp_type; + + if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return IXGBE_ERR_SFP_NOT_PRESENT; + + if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) && + (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + /* + * Limiting active cables and 1G Phys must be initialized as + * SR modules + */ + if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || + sfp_type == ixgbe_sfp_type_1g_lx_core0 || + sfp_type == ixgbe_sfp_type_1g_cu_core0 || + sfp_type == ixgbe_sfp_type_1g_sx_core0) + sfp_type = ixgbe_sfp_type_srlr_core0; + else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || + sfp_type == ixgbe_sfp_type_1g_lx_core1 || + sfp_type == ixgbe_sfp_type_1g_cu_core1 || + sfp_type == ixgbe_sfp_type_1g_sx_core1) + sfp_type = ixgbe_sfp_type_srlr_core1; + + /* Read offset to PHY init contents */ + if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) { + hw_err(hw, "eeprom read at %d failed\n", + 
IXGBE_PHY_INIT_OFFSET_NL); + return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; + } + + if ((!*list_offset) || (*list_offset == 0xFFFF)) + return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; + + /* Shift offset to first ID word */ + (*list_offset)++; + + /* + * Find the matching SFP ID in the EEPROM + * and program the init sequence + */ + if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) + goto err_phy; + + while (sfp_id != IXGBE_PHY_INIT_END_NL) { + if (sfp_id == sfp_type) { + (*list_offset)++; + if (hw->eeprom.ops.read(hw, *list_offset, data_offset)) + goto err_phy; + if ((!*data_offset) || (*data_offset == 0xFFFF)) { + hw_dbg(hw, "SFP+ module not supported\n"); + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + break; + } + } else { + (*list_offset) += 2; + if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) + goto err_phy; + } + } + + if (sfp_id == IXGBE_PHY_INIT_END_NL) { + hw_dbg(hw, "No matching SFP+ module found\n"); + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + return 0; + +err_phy: + hw_err(hw, "eeprom read at offset %d failed\n", *list_offset); + return IXGBE_ERR_PHY; +} + +/** + * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. 
+ **/ +s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) +{ + return hw->phy.ops.read_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); +} + +/** + * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's SFF-8472 data over I2C + **/ +s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) +{ + return hw->phy.ops.read_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR2, + sff8472_data); +} + +/** + * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to write + * @eeprom_data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 eeprom_data) +{ + return hw->phy.ops.write_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); +} + +/** + * ixgbe_is_sfp_probe - Returns true if SFP is being detected + * @hw: pointer to hardware structure + * @offset: eeprom offset to be read + * @addr: I2C address to be read + */ +static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) +{ + if (addr == IXGBE_I2C_EEPROM_DEV_ADDR && + offset == IXGBE_SFF_IDENTIFIER && + hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return true; + return false; +} + +/** + * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * @lock: true if to take and release semaphore + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
+ */ +static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data, bool lock) +{ + s32 status; + u32 max_retry = 10; + u32 retry = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + bool nack = true; + + if (hw->mac.type >= ixgbe_mac_X550) + max_retry = 3; + if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) + max_retry = IXGBE_SFP_DETECT_RETRIES; + + *data = 0; + + do { + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + + ixgbe_i2c_start(hw); + + /* Device Address and write indication */ + status = ixgbe_clock_out_i2c_byte(hw, dev_addr); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, byte_offset); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + ixgbe_i2c_start(hw); + + /* Device Address and read indication */ + status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1)); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + status = ixgbe_clock_in_i2c_byte(hw, data); + if (status != 0) + goto fail; + + status = ixgbe_clock_out_i2c_bit(hw, nack); + if (status != 0) + goto fail; + + ixgbe_i2c_stop(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return 0; + +fail: + ixgbe_i2c_bus_clear(hw); + if (lock) { + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msleep(100); + } + retry++; + if (retry < max_retry) + hw_dbg(hw, "I2C byte read error - Retrying.\n"); + else + hw_dbg(hw, "I2C byte read error.\n"); + + } while (retry < max_retry); + + return status; +} + +/** + * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
+ */ +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, true); +} + +/** + * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + */ +s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, false); +} + +/** + * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * @lock: true if to take and release semaphore + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
+ */ +static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data, bool lock) +{ + s32 status; + u32 max_retry = 1; + u32 retry = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + + do { + ixgbe_i2c_start(hw); + + status = ixgbe_clock_out_i2c_byte(hw, dev_addr); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, byte_offset); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, data); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + ixgbe_i2c_stop(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return 0; + +fail: + ixgbe_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + hw_dbg(hw, "I2C byte write error - Retrying.\n"); + else + hw_dbg(hw, "I2C byte write error.\n"); + } while (retry < max_retry); + + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + return status; +} + +/** + * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + */ +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, true); +} + +/** + * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
+ */ +s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, false); +} + +/** + * ixgbe_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure + * + * Sets I2C start condition (High -> Low on SDA while SCL is High) + * Set bit-bang mode on X550 hardware. + **/ +static void ixgbe_i2c_start(struct ixgbe_hw *hw) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + + i2cctl |= IXGBE_I2C_BB_EN(hw); + + /* Start condition must begin with data and clock high */ + ixgbe_set_i2c_data(hw, &i2cctl, 1); + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for start condition (4.7us) */ + udelay(IXGBE_I2C_T_SU_STA); + + ixgbe_set_i2c_data(hw, &i2cctl, 0); + + /* Hold time for start condition (4us) */ + udelay(IXGBE_I2C_T_HD_STA); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + udelay(IXGBE_I2C_T_LOW); + +} + +/** + * ixgbe_i2c_stop - Sets I2C stop condition + * @hw: pointer to hardware structure + * + * Sets I2C stop condition (Low -> High on SDA while SCL is High) + * Disables bit-bang mode and negates data output enable on X550 + * hardware. 
+ **/ +static void ixgbe_i2c_stop(struct ixgbe_hw *hw) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw); + u32 bb_en_bit = IXGBE_I2C_BB_EN(hw); + + /* Stop condition must begin with data low and clock high */ + ixgbe_set_i2c_data(hw, &i2cctl, 0); + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for stop condition (4us) */ + udelay(IXGBE_I2C_T_SU_STO); + + ixgbe_set_i2c_data(hw, &i2cctl, 1); + + /* bus free time between stop and start (4.7us)*/ + udelay(IXGBE_I2C_T_BUF); + + if (bb_en_bit || data_oe_bit || clk_oe_bit) { + i2cctl &= ~bb_en_bit; + i2cctl |= data_oe_bit | clk_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } +} + +/** + * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte to clock in + * + * Clocks in one byte data via I2C data/clock + **/ +static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) +{ + s32 i; + bool bit = false; + + *data = 0; + for (i = 7; i >= 0; i--) { + ixgbe_clock_in_i2c_bit(hw, &bit); + *data |= bit << i; + } + + return 0; +} + +/** + * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte clocked out + * + * Clocks out one byte data via I2C data/clock + **/ +static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) +{ + s32 status; + s32 i; + u32 i2cctl; + bool bit = false; + + for (i = 7; i >= 0; i--) { + bit = (data >> i) & 0x1; + status = ixgbe_clock_out_i2c_bit(hw, bit); + + if (status != 0) + break; + } + + /* Release SDA line (set high) */ + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + i2cctl |= IXGBE_I2C_DATA_OUT(hw); + i2cctl |= IXGBE_I2C_DATA_OE_N_EN(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + + return status; +} + +/** + * ixgbe_get_i2c_ack - Polls for I2C ACK + * @hw: pointer to hardware structure + * + 
* Clocks in/out one bit via I2C data/clock + **/ +static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) +{ + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + s32 status = 0; + u32 i = 0; + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + u32 timeout = 10; + bool ack = true; + + if (data_oe_bit) { + i2cctl |= IXGBE_I2C_DATA_OUT(hw); + i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + udelay(IXGBE_I2C_T_HIGH); + + /* Poll for ACK. Note that ACK in I2C spec is + * transition from 1 to 0 */ + for (i = 0; i < timeout; i++) { + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + ack = ixgbe_get_i2c_data(hw, &i2cctl); + + udelay(1); + if (ack == 0) + break; + } + + if (ack == 1) { + hw_dbg(hw, "I2C ack was not received.\n"); + status = IXGBE_ERR_I2C; + } + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + udelay(IXGBE_I2C_T_LOW); + + return status; +} + +/** + * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: read data value + * + * Clocks in one bit via I2C data/clock + **/ +static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + + if (data_oe_bit) { + i2cctl |= IXGBE_I2C_DATA_OUT(hw); + i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + udelay(IXGBE_I2C_T_HIGH); + + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + *data = ixgbe_get_i2c_data(hw, &i2cctl); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + udelay(IXGBE_I2C_T_LOW); + + return 0; +} + +/** + * ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock + * @hw: pointer to hardware structure 
+ * @data: data value to write + * + * Clocks out one bit via I2C data/clock + **/ +static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) +{ + s32 status; + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + + status = ixgbe_set_i2c_data(hw, &i2cctl, data); + if (status == 0) { + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + udelay(IXGBE_I2C_T_HIGH); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us. + * This also takes care of the data hold time. + */ + udelay(IXGBE_I2C_T_LOW); + } else { + hw_dbg(hw, "I2C data was not set to %X\n", data); + return IXGBE_ERR_I2C; + } + + return 0; +} +/** + * ixgbe_raise_i2c_clk - Raises the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Raises the I2C clock line '0'->'1' + * Negates the I2C clock output enable on X550 hardware. + **/ +static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +{ + u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw); + u32 i = 0; + u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT; + u32 i2cctl_r = 0; + + if (clk_oe_bit) { + *i2cctl |= clk_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + } + + for (i = 0; i < timeout; i++) { + *i2cctl |= IXGBE_I2C_CLK_OUT(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + /* SCL rise time (1000ns) */ + udelay(IXGBE_I2C_T_RISE); + + i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + if (i2cctl_r & IXGBE_I2C_CLK_IN(hw)) + break; + } +} + +/** + * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Lowers the I2C clock line '1'->'0' + * Asserts the I2C clock output enable on X550 hardware. 
+ **/ +static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +{ + + *i2cctl &= ~IXGBE_I2C_CLK_OUT(hw); + *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN(hw); + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + + /* SCL fall time (300ns) */ + udelay(IXGBE_I2C_T_FALL); +} + +/** + * ixgbe_set_i2c_data - Sets the I2C data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * @data: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + * Asserts the I2C data output enable on X550 hardware. + **/ +static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) +{ + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + + if (data) + *i2cctl |= IXGBE_I2C_DATA_OUT(hw); + else + *i2cctl &= ~IXGBE_I2C_DATA_OUT(hw); + *i2cctl &= ~data_oe_bit; + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + + /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ + udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); + + if (!data) /* Can't verify data in this case */ + return 0; + if (data_oe_bit) { + *i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + } + + /* Verify data was set correctly */ + *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + if (data != ixgbe_get_i2c_data(hw, i2cctl)) { + hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); + return IXGBE_ERR_I2C; + } + + return 0; +} + +/** + * ixgbe_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + * Negates the I2C data output enable on X550 hardware. 
+ **/ +static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) +{ + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + + if (data_oe_bit) { + *i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + udelay(IXGBE_I2C_T_FALL); + } + + if (*i2cctl & IXGBE_I2C_DATA_IN(hw)) + return true; + return false; +} + +/** + * ixgbe_i2c_bus_clear - Clears the I2C bus + * @hw: pointer to hardware structure + * + * Clears the I2C bus by sending nine clock pulses. + * Used when data line is stuck low. + **/ +static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) +{ + u32 i2cctl; + u32 i; + + ixgbe_i2c_start(hw); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + + ixgbe_set_i2c_data(hw, &i2cctl, 1); + + for (i = 0; i < 9; i++) { + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Min high period of clock is 4us */ + udelay(IXGBE_I2C_T_HIGH); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Min low period of clock is 4.7us*/ + udelay(IXGBE_I2C_T_LOW); + } + + ixgbe_i2c_start(hw); + + /* Put the i2c bus back to default state */ + ixgbe_i2c_stop(hw); +} + +/** + * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. 
+ * @hw: pointer to hardware structure + * + * Checks if the LASI temp alarm status was triggered due to overtemp + **/ +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) +{ + u16 phy_data = 0; + + if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) + return 0; + + /* Check that the LASI temp alarm status was triggered */ + hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, + MDIO_MMD_PMAPMD, &phy_data); + + if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) + return 0; + + return IXGBE_ERR_OVERTEMP; +} + +/** ixgbe_set_copper_phy_power - Control power for copper phy + * @hw: pointer to hardware structure + * @on: true for on, false for off + **/ +s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) +{ + u32 status; + u16 reg; + + /* Bail if we don't have copper phy */ + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return 0; + + if (!on && ixgbe_mng_present(hw)) + return 0; + + status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, ®); + if (status) + return status; + + if (on) { + reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; + } else { + if (ixgbe_check_reset_blocked(hw)) + return 0; + reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; + } + + status = hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, reg); + return status; +} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.h new file mode 100644 index 000000000000..e9f94ee42c9f --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.h @@ -0,0 +1,205 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2016 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_PHY_H_ +#define _IXGBE_PHY_H_ + +#include "ixgbe_type.h" +#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 +#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 + +/* EEPROM byte offsets */ +#define IXGBE_SFF_IDENTIFIER 0x0 +#define IXGBE_SFF_IDENTIFIER_SFP 0x3 +#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 +#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 +#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 +#define IXGBE_SFF_1GBE_COMP_CODES 0x6 +#define IXGBE_SFF_10GBE_COMP_CODES 0x3 +#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 +#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C +#define IXGBE_SFF_SFF_8472_SWAP 0x5C +#define IXGBE_SFF_SFF_8472_COMP 0x5E +#define IXGBE_SFF_SFF_8472_OSCB 0x6E +#define IXGBE_SFF_SFF_8472_ESCB 0x76 +#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 +#define IXGBE_SFF_QSFP_CONNECTOR 0x82 +#define IXGBE_SFF_QSFP_10GBE_COMP 0x83 +#define IXGBE_SFF_QSFP_1GBE_COMP 0x86 +#define 
IXGBE_SFF_QSFP_CABLE_LENGTH 0x92 +#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93 + +/* Bitmasks */ +#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 +#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 +#define IXGBE_SFF_1GBASET_CAPABLE 0x8 +#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 +#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 +#define IXGBE_SFF_ADDRESSING_MODE 0x4 +#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 +#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 +#define IXGBE_I2C_EEPROM_READ_MASK 0x100 +#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 +#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 +#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 +#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 +#define IXGBE_CS4227 0xBE /* CS4227 address */ +#define IXGBE_CS4227_GLOBAL_ID_LSB 0 +#define IXGBE_CS4227_GLOBAL_ID_MSB 1 +#define IXGBE_CS4227_SCRATCH 2 +#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F +#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */ +#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */ +#define IXGBE_CS4227_RESET_PENDING 0x1357 +#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 +#define IXGBE_CS4227_RETRIES 15 +#define IXGBE_CS4227_EFUSE_STATUS 0x0181 +#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to set speed */ +#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to set EDC */ +#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to set speed */ +#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ +#define IXGBE_CS4227_EEPROM_STATUS 0x5001 +#define IXGBE_CS4227_EEPROM_LOAD_OK 0x0001 +#define IXGBE_CS4227_SPEED_1G 0x8000 +#define IXGBE_CS4227_SPEED_10G 0 +#define IXGBE_CS4227_EDC_MODE_CX1 0x0002 +#define 
IXGBE_CS4227_EDC_MODE_SR 0x0004 +#define IXGBE_CS4227_EDC_MODE_DIAG 0x0008 +#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */ +#define IXGBE_CS4227_RESET_DELAY 500 /* milliseconds */ +#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ +#define IXGBE_PE 0xE0 /* Port expander addr */ +#define IXGBE_PE_OUTPUT 1 /* Output reg offset */ +#define IXGBE_PE_CONFIG 3 /* Config reg offset */ +#define IXGBE_PE_BIT1 BIT(1) + +/* Flow control defines */ +#define IXGBE_TAF_SYM_PAUSE 0x400 +#define IXGBE_TAF_ASM_PAUSE 0x800 + +/* Bit-shift macros */ +#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 +#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 +#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 + +/* I2C SDA and SCL timing parameters for standard mode */ +#define IXGBE_I2C_T_HD_STA 4 +#define IXGBE_I2C_T_LOW 5 +#define IXGBE_I2C_T_HIGH 4 +#define IXGBE_I2C_T_SU_STA 5 +#define IXGBE_I2C_T_HD_DATA 5 +#define IXGBE_I2C_T_SU_DATA 1 +#define IXGBE_I2C_T_RISE 1 +#define IXGBE_I2C_T_FALL 1 +#define IXGBE_I2C_T_SU_STO 4 +#define IXGBE_I2C_T_BUF 5 + +#define IXGBE_SFP_DETECT_RETRIES 2 + +#define IXGBE_TN_LASI_STATUS_REG 0x9005 +#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 + +/* SFP+ SFF-8472 Compliance code */ +#define IXGBE_SFF_SFF_8472_UNSUP 0x00 + +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 
phy_data); +#if 1 //by hilbert +s32 ixgbe_read_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 ixgbe_write_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +#endif +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); +bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw); + +/* PHY specific */ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up); +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); + +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); +s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); +s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); +s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); +s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + u16 *list_offset, + u16 *data_offset); +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); +s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); +s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 eeprom_data); +s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, + u16 *val, bool lock); +s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, + 
u16 val, bool lock); +#endif /* _IXGBE_PHY_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ptp.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ptp.c new file mode 100644 index 000000000000..a92277683a64 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ptp.c @@ -0,0 +1,1343 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2016 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ +#include "ixgbe.h" +#include +#include + +/* + * The 82599 and the X540 do not have true 64bit nanosecond scale + * counter registers. Instead, SYSTIME is defined by a fixed point + * system which allows the user to define the scale counter increment + * value at every level change of the oscillator driving the SYSTIME + * value. 
For both devices the TIMINCA:IV field defines this + * increment. On the X540 device, 31 bits are provided. However on the + * 82599 only provides 24 bits. The time unit is determined by the + * clock frequency of the oscillator in combination with the TIMINCA + * register. When these devices link at 10Gb the oscillator has a + * period of 6.4ns. In order to convert the scale counter into + * nanoseconds the cyclecounter and timecounter structures are + * used. The SYSTIME registers need to be converted to ns values by use + * of only a right shift (division by power of 2). The following math + * determines the largest incvalue that will fit into the available + * bits in the TIMINCA register. + * + * PeriodWidth: Number of bits to store the clock period + * MaxWidth: The maximum width value of the TIMINCA register + * Period: The clock period for the oscillator + * round(): discard the fractional portion of the calculation + * + * Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ] + * + * For the X540, MaxWidth is 31 bits, and the base period is 6.4 ns + * For the 82599, MaxWidth is 24 bits, and the base period is 6.4 ns + * + * The period also changes based on the link speed: + * At 10Gb link or no link, the period remains the same. + * At 1Gb link, the period is multiplied by 10. (64ns) + * At 100Mb link, the period is multiplied by 100. (640ns) + * + * The calculated value allows us to right shift the SYSTIME register + * value in order to quickly convert it into a nanosecond clock, + * while allowing for the maximum possible adjustment value. 
+ * + * These diagrams are only for the 10Gb link period + * + * SYSTIMEH SYSTIMEL + * +--------------+ +--------------+ + * X540 | 32 | | 1 | 3 | 28 | + * *--------------+ +--------------+ + * \________ 36 bits ______/ fract + * + * +--------------+ +--------------+ + * 82599 | 32 | | 8 | 3 | 21 | + * *--------------+ +--------------+ + * \________ 43 bits ______/ fract + * + * The 36 bit X540 SYSTIME overflows every + * 2^36 * 10^-9 / 60 = 1.14 minutes or 69 seconds + * + * The 43 bit 82599 SYSTIME overflows every + * 2^43 * 10^-9 / 3600 = 2.4 hours + */ +#define IXGBE_INCVAL_10GB 0x66666666 +#define IXGBE_INCVAL_1GB 0x40000000 +#define IXGBE_INCVAL_100 0x50000000 + +#define IXGBE_INCVAL_SHIFT_10GB 28 +#define IXGBE_INCVAL_SHIFT_1GB 24 +#define IXGBE_INCVAL_SHIFT_100 21 + +#define IXGBE_INCVAL_SHIFT_82599 7 +#define IXGBE_INCPER_SHIFT_82599 24 + +#define IXGBE_OVERFLOW_PERIOD (HZ * 30) +#define IXGBE_PTP_TX_TIMEOUT (HZ * 15) + +/* half of a one second clock period, for use with PPS signal. We have to use + * this instead of something pre-defined like IXGBE_PTP_PPS_HALF_SECOND, in + * order to force at least 64bits of precision for shifting + */ +#define IXGBE_PTP_PPS_HALF_SECOND 500000000ULL + +/* In contrast, the X550 controller has two registers, SYSTIMEH and SYSTIMEL + * which contain measurements of seconds and nanoseconds respectively. This + * matches the standard linux representation of time in the kernel. In addition, + * the X550 also has a SYSTIMER register which represents residue, or + * subnanosecond overflow adjustments. To control clock adjustment, the TIMINCA + * register is used, but it is unlike the X540 and 82599 devices. TIMINCA + * represents units of 2^-32 nanoseconds, and uses 31 bits for this, with the + * high bit representing whether the adjustent is positive or negative. Every + * clock cycle, the X550 will add 12.5 ns + TIMINCA which can result in a range + * of 12 to 13 nanoseconds adjustment. 
Unlike the 82599 and X540 devices, the + * X550's clock for purposes of SYSTIME generation is constant and not dependent + * on the link speed. + * + * SYSTIMEH SYSTIMEL SYSTIMER + * +--------------+ +--------------+ +-------------+ + * X550 | 32 | | 32 | | 32 | + * *--------------+ +--------------+ +-------------+ + * \____seconds___/ \_nanoseconds_/ \__2^-32 ns__/ + * + * This results in a full 96 bits to represent the clock, with 32 bits for + * seconds, 32 bits for nanoseconds (largest value is 0d999999999 or just under + * 1 second) and an additional 32 bits to measure sub nanosecond adjustments for + * underflow of adjustments. + * + * The 32 bits of seconds for the X550 overflows every + * 2^32 / ( 365.25 * 24 * 60 * 60 ) = ~136 years. + * + * In order to adjust the clock frequency for the X550, the TIMINCA register is + * provided. This register represents a + or minus nearly 0.5 ns adjustment to + * the base frequency. It is measured in 2^-32 ns units, with the high bit being + * the sign bit. This register enables software to calculate frequency + * adjustments and apply them directly to the clock rate. + * + * The math for converting ppb into TIMINCA values is fairly straightforward. + * TIMINCA value = ( Base_Frequency * ppb ) / 1000000000ULL + * + * This assumes that ppb is never high enough to create a value bigger than + * TIMINCA's 31 bits can store. This is ensured by the stack. Calculating this + * value is also simple. + * Max ppb = ( Max Adjustment / Base Frequency ) / 1000000000ULL + * + * For the X550, the Max adjustment is +/- 0.5 ns, and the base frequency is + * 12.5 nanoseconds. This means that the Max ppb is 39999999 + * Note: We subtract one in order to ensure no overflow, because the TIMINCA + * register can only hold slightly under 0.5 nanoseconds. 
+ * + * Because TIMINCA is measured in 2^-32 ns units, we have to convert 12.5 ns + * into 2^-32 units, which is + * + * 12.5 * 2^32 = C80000000 + * + * Some revisions of hardware have a faster base frequency than the registers + * were defined for. To fix this, we use a timecounter structure with the + * proper mult and shift to convert the cycles into nanoseconds of time. + */ +#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL +#define INCVALUE_MASK 0x7FFFFFFF +#define ISGN 0x80000000 +#define MAX_TIMADJ 0x7FFFFFFF + +/** + * ixgbe_ptp_setup_sdp_x540 + * @hw: the hardware private structure + * + * this function enables or disables the clock out feature on SDP0 for + * the X540 device. It will create a 1second periodic output that can + * be used as the PPS (via an interrupt). + * + * It calculates when the systime will be on an exact second, and then + * aligns the start of the PPS signal to that value. The shift is + * necessary because it can change based on the link speed. + */ +static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int shift = adapter->hw_cc.shift; + u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem; + u64 ns = 0, clock_edge = 0; + + /* disable the pin first */ + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); + IXGBE_WRITE_FLUSH(hw); + + if (!(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED)) + return; + + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + + /* enable the SDP0 pin as output, and connected to the + * native function for Timesync (ClockOut) + */ + esdp |= IXGBE_ESDP_SDP0_DIR | + IXGBE_ESDP_SDP0_NATIVE; + + /* enable the Clock Out feature on SDP0, and allow + * interrupts to occur when the pin changes + */ + tsauxc = IXGBE_TSAUXC_EN_CLK | + IXGBE_TSAUXC_SYNCLK | + IXGBE_TSAUXC_SDP0_INT; + + /* clock period (or pulse length) */ + clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift); + clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32); + + /* Account for the cyclecounter 
wrap-around value by + * using the converted ns value of the current time to + * check for when the next aligned second would occur. + */ + clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; + ns = timecounter_cyc2time(&adapter->hw_tc, clock_edge); + + div_u64_rem(ns, IXGBE_PTP_PPS_HALF_SECOND, &rem); + clock_edge += ((IXGBE_PTP_PPS_HALF_SECOND - (u64)rem) << shift); + + /* specify the initial clock start time */ + trgttiml = (u32)clock_edge; + trgttimh = (u32)(clock_edge >> 32); + + IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); + IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); + + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); + + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_ptp_read_X550 - read cycle counter value + * @hw_cc: cyclecounter structure + * + * This function reads SYSTIME registers. It is called by the cyclecounter + * structure to convert from internal representation into nanoseconds. We need + * this for X550 since some skews do not have expected clock frequency and + * result of SYSTIME is 32bits of "billions of cycles" and 32 bits of + * "cycles", rather than seconds and nanoseconds. + */ +static cycle_t ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc) +{ + struct ixgbe_adapter *adapter = + container_of(hw_cc, struct ixgbe_adapter, hw_cc); + struct ixgbe_hw *hw = &adapter->hw; + struct timespec64 ts; + + /* storage is 32 bits of 'billions of cycles' and 32 bits of 'cycles'. + * Some revisions of hardware run at a higher frequency and so the + * cycles are not guaranteed to be nanoseconds. The timespec64 created + * here is used for its math/conversions but does not necessarily + * represent nominal time. 
+ * + * It should be noted that this cyclecounter will overflow at a + * non-bitmask field since we have to convert our billions of cycles + * into an actual cycles count. This results in some possible weird + * situations at high cycle counter stamps. However given that 32 bits + * of "seconds" is ~138 years this isn't a problem. Even at the + * increased frequency of some revisions, this is still ~103 years. + * Since the SYSTIME values start at 0 and we never write them, it is + * highly unlikely for the cyclecounter to overflow in practice. + */ + IXGBE_READ_REG(hw, IXGBE_SYSTIMR); + ts.tv_nsec = IXGBE_READ_REG(hw, IXGBE_SYSTIML); + ts.tv_sec = IXGBE_READ_REG(hw, IXGBE_SYSTIMH); + + return (u64)timespec64_to_ns(&ts); +} + +/** + * ixgbe_ptp_read_82599 - read raw cycle counter (to be used by time counter) + * @cc: the cyclecounter structure + * + * this function reads the cyclecounter registers and is called by the + * cyclecounter structure used to construct a ns counter from the + * arbitrary fixed point registers + */ +static cycle_t ixgbe_ptp_read_82599(const struct cyclecounter *cc) +{ + struct ixgbe_adapter *adapter = + container_of(cc, struct ixgbe_adapter, hw_cc); + struct ixgbe_hw *hw = &adapter->hw; + u64 stamp = 0; + + stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; + + return stamp; +} + +/** + * ixgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp + * @adapter: private adapter structure + * @hwtstamp: stack timestamp structure + * @systim: unsigned 64bit system time value + * + * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value + * which can be used by the stack's ptp functions. + * + * The lock is used to protect consistency of the cyclecounter and the SYSTIME + * registers. However, it does not need to protect against the Rx or Tx + * timestamp registers, as there can't be a new timestamp until the old one is + * unlatched by reading. 
+ * + * In addition to the timestamp in hardware, some controllers need a software + * overflow cyclecounter, and this function takes this into account as well. + **/ +static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamp, + u64 timestamp) +{ + unsigned long flags; + struct timespec64 systime; + u64 ns; + + memset(hwtstamp, 0, sizeof(*hwtstamp)); + + switch (adapter->hw.mac.type) { + /* X550 and later hardware supposedly represent time using a seconds + * and nanoseconds counter, instead of raw 64bits nanoseconds. We need + * to convert the timestamp into cycles before it can be fed to the + * cyclecounter. We need an actual cyclecounter because some revisions + * of hardware run at a higher frequency and thus the counter does + * not represent seconds/nanoseconds. Instead it can be thought of as + * cycles and billions of cycles. + */ + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + /* Upper 32 bits represent billions of cycles, lower 32 bits + * represent cycles. However, we use timespec64_to_ns for the + * correct math even though the units haven't been corrected + * yet. + */ + systime.tv_sec = timestamp >> 32; + systime.tv_nsec = timestamp & 0xFFFFFFFF; + + timestamp = timespec64_to_ns(&systime); + break; + default: + break; + } + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->hw_tc, timestamp); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + hwtstamp->hwtstamp = ns_to_ktime(ns); +} + +/** + * ixgbe_ptp_adjfreq_82599 + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. 
+ */ +static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + struct ixgbe_hw *hw = &adapter->hw; + u64 freq, incval; + u32 diff; + int neg_adj = 0; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + smp_mb(); + incval = ACCESS_ONCE(adapter->base_incval); + + freq = incval; + freq *= ppb; + diff = div_u64(freq, 1000000000ULL); + + incval = neg_adj ? (incval - diff) : (incval + diff); + + switch (hw->mac.type) { + case ixgbe_mac_X540: + if (incval > 0xFFFFFFFFULL) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, (u32)incval); + break; + case ixgbe_mac_82599EB: + if (incval > 0x00FFFFFFULL) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, + BIT(IXGBE_INCPER_SHIFT_82599) | + ((u32)incval & 0x00FFFFFFUL)); + break; + default: + break; + } + + return 0; +} + +/** + * ixgbe_ptp_adjfreq_X550 + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * adjust the frequency of the SYSTIME registers by the indicated ppb from base + * frequency + */ +static int ixgbe_ptp_adjfreq_X550(struct ptp_clock_info *ptp, s32 ppb) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + struct ixgbe_hw *hw = &adapter->hw; + int neg_adj = 0; + u64 rate = IXGBE_X550_BASE_PERIOD; + u32 inca; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + rate *= ppb; + rate = div_u64(rate, 1000000000ULL); + + /* warn if rate is too large */ + if (rate >= INCVALUE_MASK) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, inca); + + return 0; +} + +/** + * ixgbe_ptp_adjtime + * @ptp: the ptp clock structure + * @delta: offset to adjust the cycle counter by + * + * adjust the timer by resetting the timecounter structure. 
+ */ +static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_adjtime(&adapter->hw_tc, delta); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); + + return 0; +} + +/** + * ixgbe_ptp_gettime + * @ptp: the ptp clock structure + * @ts: timespec structure to hold the current time value + * + * read the timecounter and return the correct value on ns, + * after converting it into a struct timespec. + */ +static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + unsigned long flags; + u64 ns; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->hw_tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * ixgbe_ptp_settime + * @ptp: the ptp clock structure + * @ts: the timespec containing the new time for the cycle counter + * + * reset the timecounter to use a new base value instead of the kernel + * wall timer value. 
+ */ +static int ixgbe_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + unsigned long flags; + u64 ns = timespec64_to_ns(ts); + + /* reset the timecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); + return 0; +} + +/** + * ixgbe_ptp_feature_enable + * @ptp: the ptp clock structure + * @rq: the requested feature to change + * @on: whether to enable or disable the feature + * + * enable (or disable) ancillary features of the phc subsystem. + * our driver only supports the PPS feature on the X540 + */ +static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + + /** + * When PPS is enabled, unmask the interrupt for the ClockOut + * feature, so that the interrupt handler can send the PPS + * event when the clock SDP triggers. Clear mask when PPS is + * disabled + */ + if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp) + return -ENOTSUPP; + + if (on) + adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; + else + adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; + + adapter->ptp_setup_sdp(adapter); + return 0; +} + +/** + * ixgbe_ptp_check_pps_event + * @adapter: the private adapter structure + * + * This function is called by the interrupt routine when checking for + * interrupts. It will check and handle a pps event. + */ +void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ptp_clock_event event; + + event.type = PTP_CLOCK_PPS; + + /* this check is necessary in case the interrupt was enabled via some + * alternative means (ex. debug_fs). 
Better to check here than + * everywhere that calls this function. + */ + if (!adapter->ptp_clock) + return; + + switch (hw->mac.type) { + case ixgbe_mac_X540: + ptp_clock_event(adapter->ptp_clock, &event); + break; + default: + break; + } +} + +/** + * ixgbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow + * @adapter: private adapter struct + * + * this watchdog task periodically reads the timecounter + * in order to prevent missing when the system time registers wrap + * around. This needs to be run approximately twice a minute. + */ +void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) +{ + bool timeout = time_is_before_jiffies(adapter->last_overflow_check + + IXGBE_OVERFLOW_PERIOD); + struct timespec64 ts; + + if (timeout) { + ixgbe_ptp_gettime(&adapter->ptp_caps, &ts); + adapter->last_overflow_check = jiffies; + } +} + +/** + * ixgbe_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure + * + * this watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. 
+ */ +void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + struct ixgbe_ring *rx_ring; + unsigned long rx_event; + int n; + + /* if we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; + } + + /* determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } + + /* only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + IXGBE_READ_REG(hw, IXGBE_RXSTMPH); + adapter->last_rx_ptp_check = jiffies; + + adapter->rx_hwtstamp_cleared++; + e_warn(drv, "clearing RX Timestamp hang\n"); + } +} + +/** + * ixgbe_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state + * @adapter: the private adapter structure + * + * This function should be called whenever the state related to a Tx timestamp + * needs to be cleared. This helps ensure that all related bits are reset for + * the next Tx timestamp event. 
+ */ +static void ixgbe_ptp_clear_tx_timestamp(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + IXGBE_READ_REG(hw, IXGBE_TXSTMPH); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + } + clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); +} + +/** + * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: the private adapter struct + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval = 0; + + regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); + regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32; + + ixgbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval); + skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); + + ixgbe_ptp_clear_tx_timestamp(adapter); +} + +/** + * ixgbe_ptp_tx_hwtstamp_work + * @work: pointer to the work struct + * + * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware + * timestamp has been taken for the current skb. It is necessary, because the + * descriptor's "done" bit does not correlate with the timestamp event. 
+ */ +static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) +{ + struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter, + ptp_tx_work); + struct ixgbe_hw *hw = &adapter->hw; + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + IXGBE_PTP_TX_TIMEOUT); + u32 tsynctxctl; + + /* we have to have a valid skb to poll for a timestamp */ + if (!adapter->ptp_tx_skb) { + ixgbe_ptp_clear_tx_timestamp(adapter); + return; + } + + /* stop polling once we have a valid timestamp */ + tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID) { + ixgbe_ptp_tx_hwtstamp(adapter); + return; + } + + if (timeout) { + ixgbe_ptp_clear_tx_timestamp(adapter); + adapter->tx_hwtstamp_timeouts++; + e_warn(drv, "clearing Tx Timestamp hang\n"); + } else { + /* reschedule to keep checking if it's not available yet */ + schedule_work(&adapter->ptp_tx_work); + } +} + +/** + * ixgbe_ptp_rx_pktstamp - utility function to get RX time stamp from buffer + * @q_vector: structure containing interrupt and ring information + * @skb: the packet + * + * This function will be called by the Rx routine of the timestamp for this + * packet is stored in the buffer. The value is stored in little endian format + * starting at the end of the packet data. + */ +void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) +{ + __le64 regval; + + /* copy the bits out of the skb, and then trim the skb length */ + skb_copy_bits(skb, skb->len - IXGBE_TS_HDR_LEN, ®val, + IXGBE_TS_HDR_LEN); + __pskb_trim(skb, skb->len - IXGBE_TS_HDR_LEN); + + /* The timestamp is recorded in little endian format, and is stored at + * the end of the packet. 
+ * + * DWORD: N N + 1 N + 2 + * Field: End of Packet SYSTIMH SYSTIML + */ + ixgbe_ptp_convert_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb), + le64_to_cpu(regval)); +} + +/** + * ixgbe_ptp_rx_rgtstamp - utility function which checks for RX time stamp + * @q_vector: structure containing interrupt and ring information + * @skb: particular skb to send timestamp with + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) +{ + struct ixgbe_adapter *adapter; + struct ixgbe_hw *hw; + u64 regval = 0; + u32 tsyncrxctl; + + /* we cannot process timestamps on a ring without a q_vector */ + if (!q_vector || !q_vector->adapter) + return; + + adapter = q_vector->adapter; + hw = &adapter->hw; + + /* Read the tsyncrxctl register afterwards in order to prevent taking an + * I/O hit on every packet. + */ + + tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) + return; + + regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); + regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; + + ixgbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); +} + +int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, + sizeof(*config)) ? -EFAULT : 0; +} + +/** + * ixgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode + * @adapter: the private ixgbe adapter structure + * @config: the hwtstamp configuration requested + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. 
At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". + * + * Since hardware always timestamps Path delay packets when timestamping V2 + * packets, regardless of the type specified in the register, only use V2 + * Event mode. This more accurately tells the user what the hardware is going + * to do anyways. + * + * Note: this may modify the hwtstamp configuration towards a more general + * mode, if required to support the specifically requested mode. + */ +static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, + struct hwtstamp_config *config) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED; + u32 tsync_rx_mtrl = PTP_EV_PORT << 16; + bool is_l2 = false; + u32 regval; + + /* reserved for future extensions */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + tsync_rx_mtrl = 0; + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; + 
adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; + is_l2 = true; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_ALL: + /* The X550 controller is capable of timestamping all packets, + * which allows it to accept any filter. + */ + if (hw->mac.type >= ixgbe_mac_X550) { + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; + break; + } + /* fall through */ + default: + /* + * register RXMTRL must be set in order to do V1 packets, + * therefore it is not possible to time stamp both V1 Sync and + * Delay_Req messages and hardware does not support + * timestamping all packets => return error + */ + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + config->rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + if (hw->mac.type == ixgbe_mac_82598EB) { + adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + if (tsync_rx_ctl | tsync_tx_ctl) + return -ERANGE; + return 0; + } + + /* Per-packet timestamping only works if the filter is set to all + * packets. Since this is desired, always timestamp all packets as long + * as any Rx filter was configured. 
+ */ + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + /* enable timestamping all packets only if at least some + * packets were requested. Otherwise, play nice and disable + * timestamping + */ + if (config->rx_filter == HWTSTAMP_FILTER_NONE) + break; + + tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED | + IXGBE_TSYNCRXCTL_TYPE_ALL | + IXGBE_TSYNCRXCTL_TSIP_UT_EN; + config->rx_filter = HWTSTAMP_FILTER_ALL; + adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; + adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER; + is_l2 = true; + break; + default: + break; + } + + /* define ethertype filter for timestamping L2 packets */ + if (is_l2) + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), + (IXGBE_ETQF_FILTER_EN | /* enable filter */ + IXGBE_ETQF_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); + + /* enable/disable TX */ + regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + regval &= ~IXGBE_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, regval); + + /* enable/disable RX */ + regval = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + regval &= ~(IXGBE_TSYNCRXCTL_ENABLED | IXGBE_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, regval); + + /* define which PTP packets are time stamped */ + IXGBE_WRITE_REG(hw, IXGBE_RXMTRL, tsync_rx_mtrl); + + IXGBE_WRITE_FLUSH(hw); + + /* clear TX/RX time stamp registers, just to be sure */ + ixgbe_ptp_clear_tx_timestamp(adapter); + IXGBE_READ_REG(hw, IXGBE_RXSTMPH); + + return 0; +} + +/** + * ixgbe_ptp_set_ts_config - user entry point for timestamp mode + * @adapter: pointer to adapter struct + * @ifreq: ioctl data + * + * Set hardware to requested mode. If unsupported, return an error with no + * changes. Otherwise, store the mode for future reference. 
+ */ +int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = ixgbe_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static void ixgbe_ptp_link_speed_adjust(struct ixgbe_adapter *adapter, + u32 *shift, u32 *incval) +{ + /** + * Scale the NIC cycle counter by a large factor so that + * relatively small corrections to the frequency can be added + * or subtracted. The drawbacks of a large factor include + * (a) the clock register overflows more quickly, (b) the cycle + * counter structure must be able to convert the systime value + * to nanoseconds using only a multiplier and a right-shift, + * and (c) the value must fit within the timinca register space + * => math based on internal DMA clock rate and available bits + * + * Note that when there is no link, internal DMA clock is same as when + * link speed is 10Gb. Set the registers correctly even when link is + * down to preserve the clock setting + */ + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_100_FULL: + *shift = IXGBE_INCVAL_SHIFT_100; + *incval = IXGBE_INCVAL_100; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + *shift = IXGBE_INCVAL_SHIFT_1GB; + *incval = IXGBE_INCVAL_1GB; + break; + case IXGBE_LINK_SPEED_10GB_FULL: + default: + *shift = IXGBE_INCVAL_SHIFT_10GB; + *incval = IXGBE_INCVAL_10GB; + break; + } +} + +/** + * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw + * @adapter: pointer to the adapter structure + * + * This function should be called to set the proper values for the TIMINCA + * register and tell the cyclecounter structure what the tick rate of SYSTIME + * is. 
It does not directly modify SYSTIME registers or the timecounter + * structure. It should be called whenever a new TIMINCA value is necessary, + * such as during initialization or when the link speed changes. + */ +void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct cyclecounter cc; + unsigned long flags; + u32 incval = 0; + u32 tsauxc = 0; + u32 fuse0 = 0; + + /* For some of the boards below this mask is technically incorrect. + * The timestamp mask overflows at approximately 61bits. However the + * particular hardware does not overflow on an even bitmask value. + * Instead, it overflows due to conversion of upper 32bits billions of + * cycles. Timecounters are not really intended for this purpose so + * they do not properly function if the overflow point isn't 2^N-1. + * However, the actual SYSTIME values in question take ~138 years to + * overflow. In practice this means they won't actually overflow. A + * proper fix to this problem would require modification of the + * timecounter delta calculations. + */ + cc.mask = CLOCKSOURCE_MASK(64); + cc.mult = 1; + cc.shift = 0; + + switch (hw->mac.type) { + case ixgbe_mac_X550EM_x: + /* SYSTIME assumes X550EM_x board frequency is 300Mhz, and is + * designed to represent seconds and nanoseconds when this is + * the case. However, some revisions of hardware have a 400Mhz + * clock and we have to compensate for this frequency + * variation using corrected mult and shift values. 
+ */ + fuse0 = IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)); + if (!(fuse0 & IXGBE_FUSES0_300MHZ)) { + cc.mult = 3; + cc.shift = 2; + } + /* fallthrough */ + case ixgbe_mac_x550em_a: + case ixgbe_mac_X550: + cc.read = ixgbe_ptp_read_X550; + + /* enable SYSTIME counter */ + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0); + tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, + tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME); + IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS); + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC); + + IXGBE_WRITE_FLUSH(hw); + break; + case ixgbe_mac_X540: + cc.read = ixgbe_ptp_read_82599; + + ixgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); + break; + case ixgbe_mac_82599EB: + cc.read = ixgbe_ptp_read_82599; + + ixgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); + incval >>= IXGBE_INCVAL_SHIFT_82599; + cc.shift -= IXGBE_INCVAL_SHIFT_82599; + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, + BIT(IXGBE_INCPER_SHIFT_82599) | incval); + break; + default: + /* other devices aren't supported */ + return; + } + + /* update the base incval used to calculate frequency adjustment */ + ACCESS_ONCE(adapter->base_incval) = incval; + smp_mb(); + + /* need lock to prevent incorrect read while modifying cyclecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + memcpy(&adapter->hw_cc, &cc, sizeof(adapter->hw_cc)); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); +} + +/** + * ixgbe_ptp_reset + * @adapter: the ixgbe private board structure + * + * When the MAC resets, all the hardware bits for timesync are reset. This + * function is used to re-enable the device for PTP based on current settings. + * We do lose the current clock time, so just reset the cyclecounter to the + * system real clock time. 
+ * + * This function will maintain hwtstamp_config settings, and resets the SDP + * output if it was enabled. + */ +void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + unsigned long flags; + + /* reset the hardware timestamping mode */ + ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + + /* 82598 does not support PTP */ + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + ixgbe_ptp_start_cyclecounter(adapter); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, + ktime_to_ns(ktime_get_real())); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + adapter->last_overflow_check = jiffies; + + /* Now that the shift has been calculated and the systime + * registers reset, (re-)enable the Clock out feature + */ + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); +} + +/** + * ixgbe_ptp_create_clock + * @adapter: the ixgbe private adapter structure + * + * This function performs setup of the user entry point function table and + * initializes the PTP clock device, which is used to access the clock-like + * features of the PTP core. It will be called by ixgbe_ptp_init, and may + * reuse a previously initialized clock (such as during a suspend/resume + * cycle). 
+ */ +static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + long err; + + /* do nothing if we already have a clock device */ + if (!IS_ERR_OR_NULL(adapter->ptp_clock)) + return 0; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X540: + snprintf(adapter->ptp_caps.name, + sizeof(adapter->ptp_caps.name), + "%s", netdev->name); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 250000000; + adapter->ptp_caps.n_alarm = 0; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; + adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.settime64 = ixgbe_ptp_settime; + adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; + adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_x540; + break; + case ixgbe_mac_82599EB: + snprintf(adapter->ptp_caps.name, + sizeof(adapter->ptp_caps.name), + "%s", netdev->name); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 250000000; + adapter->ptp_caps.n_alarm = 0; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; + adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.settime64 = ixgbe_ptp_settime; + adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; + break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 30000000; + adapter->ptp_caps.n_alarm = 0; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550; + adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; + 
adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.settime64 = ixgbe_ptp_settime; + adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; + adapter->ptp_setup_sdp = NULL; + break; + default: + adapter->ptp_clock = NULL; + adapter->ptp_setup_sdp = NULL; + return -EOPNOTSUPP; + } + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + err = PTR_ERR(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_dev_err("ptp_clock_register failed\n"); + return err; + } else if (adapter->ptp_clock) + e_dev_info("registered PHC device on %s\n", netdev->name); + + /* set default timestamp mode to disabled here. We do this in + * create_clock instead of init, because we don't want to override the + * previous settings during a resume cycle. + */ + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + return 0; +} + +/** + * ixgbe_ptp_init + * @adapter: the ixgbe private adapter structure + * + * This function performs the required steps for enabling PTP + * support. If PTP support has already been loaded it simply calls the + * cyclecounter init routine and exits. 
+ */ +void ixgbe_ptp_init(struct ixgbe_adapter *adapter) +{ + /* initialize the spin lock first since we can't control when a user + * will call the entry functions once we have initialized the clock + * device + */ + spin_lock_init(&adapter->tmreg_lock); + + /* obtain a PTP device, or re-use an existing device */ + if (ixgbe_ptp_create_clock(adapter)) + return; + + /* we have a clock so we can initialize work now */ + INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work); + + /* reset the PTP related hardware bits */ + ixgbe_ptp_reset(adapter); + + /* enter the IXGBE_PTP_RUNNING state */ + set_bit(__IXGBE_PTP_RUNNING, &adapter->state); + + return; +} + +/** + * ixgbe_ptp_suspend - stop PTP work items + * @ adapter: pointer to adapter struct + * + * this function suspends PTP activity, and prevents more PTP work from being + * generated, but does not destroy the PTP clock device. + */ +void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter) +{ + /* Leave the IXGBE_PTP_RUNNING state. */ + if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); + + /* ensure that we cancel any pending PTP Tx work item in progress */ + cancel_work_sync(&adapter->ptp_tx_work); + ixgbe_ptp_clear_tx_timestamp(adapter); +} + +/** + * ixgbe_ptp_stop - close the PTP device + * @adapter: pointer to adapter struct + * + * completely destroy the PTP device, should only be called when the device is + * being fully closed. 
+ */ +void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) +{ + /* first, suspend PTP activity */ + ixgbe_ptp_suspend(adapter); + + /* disable the PTP clock device */ + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_dev_info("removed PHC on %s\n", + adapter->netdev->name); + } +} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.c new file mode 100644 index 000000000000..7e5d9850e4b2 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.c @@ -0,0 +1,1608 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef NETIF_F_HW_VLAN_CTAG_TX +#include +#endif + +#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_sriov.h" + +#ifdef CONFIG_PCI_IOV +static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int num_vf_macvlans, i; + struct vf_macvlans *mv_list; + + adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; + e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs); + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED; + if (!adapter->ring_feature[RING_F_VMDQ].limit) + adapter->ring_feature[RING_F_VMDQ].limit = 1; + adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs; + + num_vf_macvlans = hw->mac.num_rar_entries - + (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); + + adapter->mv_list = mv_list = kcalloc(num_vf_macvlans, + sizeof(struct vf_macvlans), + GFP_KERNEL); + if (mv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + for (i = 0; i < num_vf_macvlans; i++) { + mv_list->vf = -1; + mv_list->free = true; + list_add(&mv_list->l, &adapter->vf_mvs.l); + mv_list++; + } + } + + /* Initialize default switching mode VEB */ + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + adapter->bridge_mode = BRIDGE_MODE_VEB; + + /* If call to enable VFs succeeded then allocate memory + * for per VF control structures. 
+ */ + adapter->vfinfo = + kcalloc(adapter->num_vfs, + sizeof(struct vf_data_storage), GFP_KERNEL); + if (adapter->vfinfo) { + /* limit trafffic classes based on VFs enabled */ + if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && + (adapter->num_vfs < 16)) { + adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; + } else if (adapter->num_vfs < 32) { + adapter->dcb_cfg.num_tcs.pg_tcs = 4; + adapter->dcb_cfg.num_tcs.pfc_tcs = 4; + } else { + adapter->dcb_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + } + + /* Disable RSC when in SR-IOV mode */ + adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | + IXGBE_FLAG2_RSC_ENABLED); + + for (i = 0; i < adapter->num_vfs; i++) { + /* enable spoof checking for all VFs */ + adapter->vfinfo[i].spoofchk_enabled = true; + + /* We support VF RSS querying only for 82599 and x540 + * devices at the moment. These devices share RSS + * indirection table and RSS hash key with PF therefore + * we want to disable the querying by default. 
+ */ + adapter->vfinfo[i].rss_query_enabled = 0; + + /* Untrust all VFs */ + adapter->vfinfo[i].trusted = false; + + /* set the default xcast mode */ + adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE; + } + + return 0; + } + + return -ENOMEM; +} + +/** + * ixgbe_get_vfs - Find and take references to all vf devices + * @adapter: Pointer to adapter struct + */ +static void ixgbe_get_vfs(struct ixgbe_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + u16 vendor = pdev->vendor; + struct pci_dev *vfdev; + int vf = 0; + u16 vf_id; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return; + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + + vfdev = pci_get_device(vendor, vf_id, NULL); + for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) { + if (!vfdev->is_virtfn) + continue; + if (vfdev->physfn != pdev) + continue; + if (vf >= adapter->num_vfs) + continue; + pci_dev_get(vfdev); + adapter->vfinfo[vf].vfdev = vfdev; + ++vf; + } +} + +/* Note this function is called when the user wants to enable SR-IOV + * VFs using the now deprecated module parameter + */ +void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) +{ + int pre_existing_vfs = 0; + + pre_existing_vfs = pci_num_vf(adapter->pdev); + if (!pre_existing_vfs && !adapter->num_vfs) + return; + + /* If there are pre-existing VFs then we have to force + * use of that many - over ride any module parameter value. + * This may result from the user unloading the PF driver + * while VFs were assigned to guest VMs or because the VFs + * have been created via the new PCI SR-IOV sysfs interface. 
+ */ + if (pre_existing_vfs) { + adapter->num_vfs = pre_existing_vfs; + dev_warn(&adapter->pdev->dev, + "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n"); + } else { + int err; + /* + * The 82599 supports up to 64 VFs per physical function + * but this implementation limits allocation to 63 so that + * basic networking resources are still available to the + * physical function. If the user requests greater than + * 63 VFs then it is an error - reset to default of zero. + */ + adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT); + + err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (err) { + e_err(probe, "Failed to enable PCI sriov: %d\n", err); + adapter->num_vfs = 0; + return; + } + } + + if (!__ixgbe_enable_sriov(adapter)) { + ixgbe_get_vfs(adapter); + return; + } + + /* If we have gotten to this point then there is no memory available + * to manage the VF devices - print message and bail. 
+ */ + e_err(probe, "Unable to allocate memory for VF Data Storage - " + "SRIOV disabled\n"); + ixgbe_disable_sriov(adapter); +} + +#endif /* #ifdef CONFIG_PCI_IOV */ +int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) +{ + unsigned int num_vfs = adapter->num_vfs, vf; + struct ixgbe_hw *hw = &adapter->hw; + u32 gpie; + u32 vmdctl; + int rss; + + /* set num VFs to 0 to prevent access to vfinfo */ + adapter->num_vfs = 0; + + /* put the reference to all of the vf devices */ + for (vf = 0; vf < num_vfs; ++vf) { + struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; + + if (!vfdev) + continue; + adapter->vfinfo[vf].vfdev = NULL; + pci_dev_put(vfdev); + } + + /* free VF control structures */ + kfree(adapter->vfinfo); + adapter->vfinfo = NULL; + + /* free macvlan list */ + kfree(adapter->mv_list); + adapter->mv_list = NULL; + + /* if SR-IOV is already disabled then there is nothing to do */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return 0; + +#ifdef CONFIG_PCI_IOV + /* + * If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(adapter->pdev)) { + e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); + return -EPERM; + } + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); +#endif + + /* turn off device IOV mode */ + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0); + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie &= ~IXGBE_GPIE_VTMODE_MASK; + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + + /* set default pool back to 0 */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); + IXGBE_WRITE_FLUSH(hw); + + /* Disable VMDq flag so device will be set in VM mode */ + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { + adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; + adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; + rss = 
min_t(int, ixgbe_max_rss_indices(adapter), + num_online_cpus()); + } else { + rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus()); + } + + adapter->ring_feature[RING_F_VMDQ].offset = 0; + adapter->ring_feature[RING_F_RSS].limit = rss; + + /* take a breather then clean up driver data */ + msleep(100); + return 0; +} + +static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct ixgbe_adapter *adapter = pci_get_drvdata(dev); + int err = 0; + int i; + int pre_existing_vfs = pci_num_vf(dev); + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + err = ixgbe_disable_sriov(adapter); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + return num_vfs; + + if (err) + return err; + + /* While the SR-IOV capability structure reports total VFs to be 64, + * we have to limit the actual number allocated based on two factors. + * First, we reserve some transmit/receive resources for the PF. + * Second, VMDQ also uses the same pools that SR-IOV does. We need to + * account for this, so that we don't accidentally allocate more VFs + * than we have available pools. The PCI bus driver already checks for + * other values out of range. 
+ */ + if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VF_FUNCTIONS) + return -EPERM; + + adapter->num_vfs = num_vfs; + + err = __ixgbe_enable_sriov(adapter); + if (err) + return err; + + for (i = 0; i < adapter->num_vfs; i++) + ixgbe_vf_configuration(dev, (i | 0x10000000)); + + /* reset before enabling SRIOV to avoid mailbox issues */ + ixgbe_sriov_reinit(adapter); + + err = pci_enable_sriov(dev, num_vfs); + if (err) { + e_dev_warn("Failed to enable PCI sriov: %d\n", err); + return err; + } + ixgbe_get_vfs(adapter); + + return num_vfs; +#else + return 0; +#endif +} + +static int ixgbe_pci_sriov_disable(struct pci_dev *dev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(dev); + int err; +#ifdef CONFIG_PCI_IOV + u32 current_flags = adapter->flags; +#endif + + err = ixgbe_disable_sriov(adapter); + + /* Only reinit if no error and state changed */ +#ifdef CONFIG_PCI_IOV + if (!err && current_flags != adapter->flags) + ixgbe_sriov_reinit(adapter); +#endif + + return err; +} + +int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + if (num_vfs == 0) + return ixgbe_pci_sriov_disable(dev); + else + return ixgbe_pci_sriov_enable(dev, num_vfs); +} + +static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) + >> IXGBE_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + struct ixgbe_hw *hw = &adapter->hw; + int i; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; + u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + + /* only so many hash values supported */ + entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); + + /* + * salt away the number of multi cast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vfinfo->num_vf_mc_hashes = entries; + + /* + * VFs are limited to using the MTA hash table for their multicast + * addresses + */ + for (i = 0; i < 
entries; i++) { + vfinfo->vf_mc_hashes[i] = hash_list[i]; + } + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; + mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); + mta_reg |= BIT(vector_bit); + IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); + } + vmolr |= IXGBE_VMOLR_ROMPE; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + + return 0; +} + +#ifdef CONFIG_PCI_IOV +void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo; + int i, j; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; + + for (i = 0; i < adapter->num_vfs; i++) { + u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i)); + vfinfo = &adapter->vfinfo[i]; + for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { + hw->addr_ctrl.mta_in_use++; + vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; + mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); + mta_reg |= BIT(vector_bit); + IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); + } + + if (vfinfo->num_vf_mc_hashes) + vmolr |= IXGBE_VMOLR_ROMPE; + else + vmolr &= ~IXGBE_VMOLR_ROMPE; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr); + } + + /* Restore any VF macvlans */ + ixgbe_full_sync_mac_table(adapter); +} +#endif + +static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, + u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err; + + /* If VLAN overlaps with one the PF is currently monitoring make + * sure that we are able to allocate a VLVF entry. This may be + * redundant but it guarantees PF will maintain visibility to + * the VLAN. 
+ */ + if (add && test_bit(vid, adapter->active_vlans)) { + err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false); + if (err) + return err; + } + + err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false); + + if (add && !err) + return err; + + /* If we failed to add the VF VLAN or we are removing the VF VLAN + * we may need to drop the PF pool bit in order to allow us to free + * up the VLVF resources. + */ + if (test_bit(vid, adapter->active_vlans) || + (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + ixgbe_update_pf_promisc_vlvf(adapter, vid); + + return err; +} + +static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int max_frame = msgbuf[1]; + u32 max_frs; + + /* + * For 82599EB we have to keep all PFs and VFs operating with + * the same max_frame value in order to avoid sending an oversize + * frame to a VF. In order to guarantee this is handled correctly + * for all cases we have several special exceptions to take into + * account before we can enable the VF for receive + */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + u32 reg_offset, vf_shift, vfre; + s32 err = 0; + +#ifdef CONFIG_FCOE + if (dev->features & NETIF_F_FCOE_MTU) + pf_max_frame = max_t(int, pf_max_frame, + IXGBE_FCOE_JUMBO_FRAME_SIZE); + +#endif /* CONFIG_FCOE */ + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + /* + * Version 1.1 supports jumbo frames on VFs if PF has + * jumbo frames enabled which means legacy VFs are + * disabled + */ + if (pf_max_frame > ETH_FRAME_LEN) + break; + default: + /* + * If the PF or VF are running w/ jumbo frames enabled + * we need to shut down the VF Rx path as we cannot + * support jumbo frames on legacy VFs + */ + if ((pf_max_frame > ETH_FRAME_LEN) || + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) + err = -EINVAL; + break; + } + + /* determine VF receive 
enable location */ + vf_shift = vf % 32; + reg_offset = vf / 32; + + /* enable or disable receive depending on error */ + vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); + if (err) + vfre &= ~BIT(vf_shift); + else + vfre |= BIT(vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre); + + if (err) { + e_err(drv, "VF max_frame %d out of range\n", max_frame); + return err; + } + } + + /* MTU < 68 is an error and causes problems on some kernels */ + if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) { + e_err(drv, "VF max_frame %d out of range\n", max_frame); + return -EINVAL; + } + + /* pull current max frame size from hardware */ + max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); + max_frs &= IXGBE_MHADD_MFS_MASK; + max_frs >>= IXGBE_MHADD_MFS_SHIFT; + + if (max_frs < max_frame) { + max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs); + } + + e_info(hw, "VF requests change max MTU to %d\n", max_frame); + + return 0; +} + +static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) +{ + u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + vmolr |= IXGBE_VMOLR_BAM; + if (aupe) + vmolr |= IXGBE_VMOLR_AUPE; + else + vmolr &= ~IXGBE_VMOLR_AUPE; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); +} + +static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); +} + +static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlvfb_mask, pool_mask, i; + + /* create mask for VF and other pools */ + pool_mask = ~BIT(VMDQ_P(0) % 32); + vlvfb_mask = BIT(vf % 32); + + /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */ + for (i = IXGBE_VLVF_ENTRIES; i--;) { + u32 bits[2], vlvfb, vid, vfta, vlvf; + u32 word = i * 2 + vf / 32; + u32 mask; + + vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); + + /* if our bit isn't set we can skip it */ + if (!(vlvfb & vlvfb_mask)) + continue; + + /* clear 
our bit from vlvfb */ + vlvfb ^= vlvfb_mask; + + /* create 64b mask to chedk to see if we should clear VLVF */ + bits[word % 2] = vlvfb; + bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1)); + + /* if other pools are present, just remove ourselves */ + if (bits[(VMDQ_P(0) / 32) ^ 1] || + (bits[VMDQ_P(0) / 32] & pool_mask)) + goto update_vlvfb; + + /* if PF is present, leave VFTA */ + if (bits[0] || bits[1]) + goto update_vlvf; + + /* if we cannot determine VLAN just remove ourselves */ + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); + if (!vlvf) + goto update_vlvfb; + + vid = vlvf & VLAN_VID_MASK; + mask = BIT(vid % 32); + + /* clear bit from VFTA */ + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32)); + if (vfta & mask) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask); +update_vlvf: + /* clear POOL selection enable */ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0); + + if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + vlvfb = 0; +update_vlvfb: + /* clear pool bits */ + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb); + } +} + +static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + u8 num_tcs = netdev_get_num_tc(adapter->netdev); + + /* remove VLAN filters beloning to this VF */ + ixgbe_clear_vf_vlans(adapter, vf); + + /* add back PF assigned VLAN or VLAN 0 */ + ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); + + /* reset offloads to defaults */ + ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); + + /* set outgoing tags for VFs */ + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { + ixgbe_clear_vmvir(adapter, vf); + } else { + if (vfinfo->pf_qos || !num_tcs) + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + vfinfo->pf_qos, vf); + else + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + adapter->default_up, vf); + + if (vfinfo->spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + } + + /* reset multicast table array for vf 
*/ + adapter->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + ixgbe_set_rx_mode(adapter->netdev); + + ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + + /* reset VF api back to unknown */ + adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; +} + +static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, + int vf, unsigned char *mac_addr) +{ + ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); + ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + + return 0; +} + +static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, + int vf, int index, unsigned char *mac_addr) +{ + struct list_head *pos; + struct vf_macvlans *entry; + + if (index <= 1) { + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + entry->is_macvlan = false; + ixgbe_del_mac_filter(adapter, + entry->vf_macvlan, vf); + } + } + } + + /* + * If index was zero then we were asked to clear the uc list + * for the VF. We're done. + */ + if (!index) + return 0; + + entry = NULL; + + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->free) + break; + } + + /* + * If we traversed the entire list and didn't find a free entry + * then we're out of space on the RAR table. Also entry may + * be NULL because the original memory allocation for the list + * failed, which is not fatal but does mean we can't support + * VF requests for MACVLAN because we couldn't allocate + * memory for the list management required. 
+ */ + if (!entry || !entry->free) + return -ENOSPC; + + entry->free = false; + entry->is_macvlan = true; + entry->vf = vf; + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + + ixgbe_add_mac_filter(adapter, mac_addr, vf); + + return 0; +} + +int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + unsigned int vfn = (event_mask & 0x3f); + + bool enable = ((event_mask & 0x10000000U) != 0); + + if (enable) + eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses); + + return 0; +} + +static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf, + u32 qde) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + int i; + + for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) { + u32 reg; + + /* flush previous write */ + IXGBE_WRITE_FLUSH(hw); + + /* indicate to hardware that we want to set drop enable */ + reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE; + reg |= i << IXGBE_QDE_IDX_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); + } +} + +static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct ixgbe_hw *hw = &adapter->hw; + unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; + u32 reg, reg_offset, vf_shift; + u32 msgbuf[4] = {0, 0, 0, 0}; + u8 *addr = (u8 *)(&msgbuf[1]); + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + int i; + + e_info(probe, "VF Reset msg received from vf %d\n", vf); + + /* reset the filters for the device */ + ixgbe_vf_reset_event(adapter, vf); + + /* set vf mac address */ + if (!is_zero_ether_addr(vf_mac)) + ixgbe_set_vf_mac(adapter, vf, vf_mac); + + vf_shift = vf % 32; + reg_offset = vf / 32; + + /* enable transmit for vf */ + reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); + reg |= BIT(vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), 
reg); + + /* force drop enable for all VF Rx queues */ + ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); + + /* enable receive for vf */ + reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); + reg |= BIT(vf_shift); + /* + * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. + * For more info take a look at ixgbe_set_vf_lpe + */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + +#ifdef CONFIG_FCOE + if (dev->features & NETIF_F_FCOE_MTU) + pf_max_frame = max_t(int, pf_max_frame, + IXGBE_FCOE_JUMBO_FRAME_SIZE); + +#endif /* CONFIG_FCOE */ + if (pf_max_frame > ETH_FRAME_LEN) + reg &= ~BIT(vf_shift); + } + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); + + /* enable VF mailbox for further messages */ + adapter->vfinfo[vf].clear_to_send = true; + + /* Enable counting of spoofed packets in the SSVPC register */ + reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); + reg |= BIT(vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); + + /* + * Reset the VFs TDWBAL and TDWBAH registers + * which are not cleared by an FLR + */ + for (i = 0; i < q_per_pool; i++) { + IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0); + IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0); + } + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = IXGBE_VF_RESET; + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; + dev_warn(&adapter->pdev->dev, + "VF %d has no MAC address assigned, you may have to assign one manually\n", + vf); + } + + /* + * Piggyback the multicast filter type so VF can compute the + * correct vectors + */ + msgbuf[3] = hw->mac.mc_filter_type; + ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); + + return 0; +} + +static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u8 *new_mac = ((u8 
*)(&msgbuf[1])); + + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && + !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) { + e_warn(drv, + "VF %d attempted to override administratively set MAC address\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + + return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; +} + +static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; + u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (adapter->vfinfo[vf].pf_vlan || tcs) { + e_warn(drv, + "VF %d attempted to override administratively set VLAN configuration\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + return ixgbe_set_vf_vlan(adapter, add, vid, vf); +} + +static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> + IXGBE_VT_MSGINFO_SHIFT; + int err; + + if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + e_warn(drv, + "VF %d requested MACVLAN filter but is administratively denied\n", + vf); + return -1; + } + + /* An non-zero index indicates the VF is setting a filter */ + if (index) { + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + /* + * If the VF is allowed to set MAC filters then turn off + * anti-spoofing to avoid false positives. 
+ */ + if (adapter->vfinfo[vf].spoofchk_enabled) { + struct ixgbe_hw *hw = &adapter->hw; + + hw->mac.ops.set_mac_anti_spoofing(hw, false, vf); + hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); + } + } + + err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac); + if (err == -ENOSPC) + e_warn(drv, + "VF %d has requested a MACVLAN filter but there is no space for it\n", + vf); + + return err < 0; +} + +static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + int api = msgbuf[1]; + + switch (api) { + case ixgbe_mbox_api_10: + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + adapter->vfinfo[vf].vf_api = api; + return 0; + default: + break; + } + + e_info(drv, "VF %d requested invalid api version %u\n", vf, api); + + return -1; +} + +static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + unsigned int default_tc = 0; + u8 num_tcs = netdev_get_num_tc(dev); + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_20: + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + break; + default: + return -1; + } + + /* only allow 1 Tx queue for bandwidth limiting */ + msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + + /* if TCs > 1 determine which TC belongs to default user priority */ + if (num_tcs > 1) + default_tc = netdev_get_prio_tc_map(dev, adapter->default_up); + + /* notify VF of need for VLAN tag stripping, and correct queue */ + if (num_tcs) + msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs; + else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos) + msgbuf[IXGBE_VF_TRANS_VLAN] = 1; + else + msgbuf[IXGBE_VF_TRANS_VLAN] = 0; + + /* notify VF of default queue */ + msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc; + + return 0; +} + +static int 
ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) +{ + u32 i, j; + u32 *out_buf = &msgbuf[1]; + const u8 *reta = adapter->rss_indir_tbl; + u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter); + + /* Check if operation is permitted */ + if (!adapter->vfinfo[vf].rss_query_enabled) + return -EPERM; + + /* verify the PF is supporting the correct API */ + if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + return -EOPNOTSUPP; + + /* This mailbox command is supported (required) only for 82599 and x540 + * VFs which support up to 4 RSS queues. Therefore we will compress the + * RETA by saving only 2 bits from each entry. This way we will be able + * to transfer the whole RETA in a single mailbox operation. + */ + for (i = 0; i < reta_size / 16; i++) { + out_buf[i] = 0; + for (j = 0; j < 16; j++) + out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j); + } + + return 0; +} + +static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u32 *rss_key = &msgbuf[1]; + + /* Check if the operation is permitted */ + if (!adapter->vfinfo[vf].rss_query_enabled) + return -EPERM; + + /* verify the PF is supporting the correct API */ + if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + return -EOPNOTSUPP; + + memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key)); + + return 0; +} + +static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int xcast_mode = msgbuf[1]; + u32 vmolr, disable, enable; + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_12: + break; + default: + return -EOPNOTSUPP; + } + + if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI && + !adapter->vfinfo[vf].trusted) { + xcast_mode = IXGBEVF_XCAST_MODE_MULTI; + } + + if (adapter->vfinfo[vf].xcast_mode == xcast_mode) + goto out; + + switch (xcast_mode) { + case IXGBEVF_XCAST_MODE_NONE: + disable = IXGBE_VMOLR_BAM | 
IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; + enable = 0; + break; + case IXGBEVF_XCAST_MODE_MULTI: + disable = IXGBE_VMOLR_MPE; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE; + break; + case IXGBEVF_XCAST_MODE_ALLMULTI: + disable = 0; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; + break; + default: + return -EOPNOTSUPP; + } + + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + vmolr &= ~disable; + vmolr |= enable; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + + adapter->vfinfo[vf].xcast_mode = xcast_mode; + +out: + msgbuf[1] = xcast_mode; + + return 0; +} + +static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) +{ + u32 mbx_size = IXGBE_VFMAILBOX_SIZE; + u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; + struct ixgbe_hw *hw = &adapter->hw; + s32 retval; + + retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); + + if (retval) { + pr_err("Error receiving message from VF\n"); + return retval; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) + return 0; + + /* flush the ack before we write any messages back */ + IXGBE_WRITE_FLUSH(hw); + + if (msgbuf[0] == IXGBE_VF_RESET) + return ixgbe_vf_reset_msg(adapter, vf); + + /* + * until the vf completes a virtual function reset it should not be + * allowed to start any configuration. 
+ */ + if (!adapter->vfinfo[vf].clear_to_send) { + msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; + ixgbe_write_mbx(hw, msgbuf, 1, vf); + return 0; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case IXGBE_VF_SET_MAC_ADDR: + retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf); + break; + case IXGBE_VF_SET_MULTICAST: + retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf); + break; + case IXGBE_VF_SET_VLAN: + retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + case IXGBE_VF_SET_LPE: + retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf); + break; + case IXGBE_VF_SET_MACVLAN: + retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf); + break; + case IXGBE_VF_API_NEGOTIATE: + retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf); + break; + case IXGBE_VF_GET_QUEUES: + retval = ixgbe_get_vf_queues(adapter, msgbuf, vf); + break; + case IXGBE_VF_GET_RETA: + retval = ixgbe_get_vf_reta(adapter, msgbuf, vf); + break; + case IXGBE_VF_GET_RSS_KEY: + retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); + break; + case IXGBE_VF_UPDATE_XCAST_MODE: + retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); + break; + default: + e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); + retval = IXGBE_ERR_MBX; + break; + } + + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; + else + msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; + + msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; + + ixgbe_write_mbx(hw, msgbuf, mbx_size, vf); + + return retval; +} + +static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 msg = IXGBE_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!adapter->vfinfo[vf].clear_to_send) + ixgbe_write_mbx(hw, &msg, 1, vf); +} + +void ixgbe_msg_task(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vf; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + /* process any reset requests */ + if (!ixgbe_check_for_rst(hw, vf)) + 
ixgbe_vf_reset_event(adapter, vf); + + /* process any messages pending */ + if (!ixgbe_check_for_msg(hw, vf)) + ixgbe_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!ixgbe_check_for_ack(hw, vf)) + ixgbe_rcv_ack_from_vf(adapter, vf); + } +} + +void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + /* disable transmit and receive for all vfs */ + IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0); + + IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); +} + +static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 ping; + + ping = IXGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[vf].clear_to_send) + ping |= IXGBE_VT_MSGTYPE_CTS; + ixgbe_write_mbx(hw, &ping, 1, vf); +} + +void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 ping; + int i; + + for (i = 0 ; i < adapter->num_vfs; i++) { + ping = IXGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[i].clear_to_send) + ping |= IXGBE_VT_MSGTYPE_CTS; + ixgbe_write_mbx(hw, &ping, 1, i); + } +} + +int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) + return -EINVAL; + adapter->vfinfo[vf].pf_set_mac = true; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); + dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" + " change effective."); + if (test_bit(__IXGBE_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," + " but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, "Bring the PF device up before" + " attempting to use the VF device.\n"); + } + return ixgbe_set_vf_mac(adapter, vf, mac); +} + +static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, + u16 vlan, u8 qos) +{ + struct 
ixgbe_hw *hw = &adapter->hw; + int err; + + err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); + if (err) + goto out; + + /* Revoke tagless access via VLAN 0 */ + ixgbe_set_vf_vlan(adapter, false, 0, vf); + + ixgbe_set_vmvir(adapter, vlan, qos, vf); + ixgbe_set_vmolr(hw, vf, false); + + /* enable hide vlan on X550 */ + if (hw->mac.type >= ixgbe_mac_X550) + ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE | + IXGBE_QDE_HIDE_VLAN); + + adapter->vfinfo[vf].pf_vlan = vlan; + adapter->vfinfo[vf].pf_qos = qos; + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IXGBE_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + +out: + return err; +} + +static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err; + + err = ixgbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, vf); + /* Restore tagless access via VLAN 0 */ + ixgbe_set_vf_vlan(adapter, true, 0, vf); + ixgbe_clear_vmvir(adapter, vf); + ixgbe_set_vmolr(hw, vf, true); + + /* disable hide VLAN on X550 */ + if (hw->mac.type >= ixgbe_mac_X550) + ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); + + adapter->vfinfo[vf].pf_vlan = 0; + adapter->vfinfo[vf].pf_qos = 0; + + return err; +} + +int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 vlan_proto) +{ + int err = 0; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) + return -EINVAL; + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + if (vlan || qos) { + /* Check if there is already a port VLAN set, if so + * we have to delete the old one first before we + * can set the new one. 
The usage model had + * previously assumed the user would delete the + * old port VLAN before setting a new one but this + * is not necessarily the case. + */ + if (adapter->vfinfo[vf].pf_vlan) + err = ixgbe_disable_port_vlan(adapter, vf); + if (err) + goto out; + err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos); + } else { + err = ixgbe_disable_port_vlan(adapter, vf); + } + +out: + return err; +} + +int ixgbe_link_mbps(struct ixgbe_adapter *adapter) +{ + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_100_FULL: + return 100; + case IXGBE_LINK_SPEED_1GB_FULL: + return 1000; + case IXGBE_LINK_SPEED_10GB_FULL: + return 10000; + default: + return 0; + } +} + +static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf) +{ + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct ixgbe_hw *hw = &adapter->hw; + u32 bcnrc_val = 0; + u16 queue, queues_per_pool; + u16 tx_rate = adapter->vfinfo[vf].tx_rate; + + if (tx_rate) { + /* start with base link speed value */ + bcnrc_val = adapter->vf_rate_link_speed; + + /* Calculate the rate factor values to set */ + bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; + bcnrc_val /= tx_rate; + + /* clear everything but the rate factor */ + bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | + IXGBE_RTTBCNRC_RF_DEC_MASK; + + /* enable the rate scheduler */ + bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; + } + + /* + * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported + * and 0x004 otherwise. 
+ */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4); + break; + case ixgbe_mac_X540: + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14); + break; + default: + break; + } + + /* determine how many queues per pool based on VMDq mask */ + queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + /* write value for all Tx queues belonging to VF */ + for (queue = 0; queue < queues_per_pool; queue++) { + unsigned int reg_idx = (vf * queues_per_pool) + queue; + + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + } +} + +void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) +{ + int i; + + /* VF Tx rate limit was not set */ + if (!adapter->vf_rate_link_speed) + return; + + if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) { + adapter->vf_rate_link_speed = 0; + dev_info(&adapter->pdev->dev, + "Link speed has been changed. VF Transmit rate is disabled\n"); + } + + for (i = 0; i < adapter->num_vfs; i++) { + if (!adapter->vf_rate_link_speed) + adapter->vfinfo[i].tx_rate = 0; + + ixgbe_set_vf_rate_limit(adapter, i); + } +} + +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, + int max_tx_rate) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int link_speed; + + /* verify VF is active */ + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* verify link is up */ + if (!adapter->link_up) + return -EINVAL; + + /* verify we are linked at 10Gbps */ + link_speed = ixgbe_link_mbps(adapter); + if (link_speed != 10000) + return -EINVAL; + + if (min_tx_rate) + return -EINVAL; + + /* rate limit cannot be less than 10Mbs or greater than link speed */ + if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed))) + return -EINVAL; + + /* store values */ + adapter->vf_rate_link_speed = link_speed; + adapter->vfinfo[vf].tx_rate = max_tx_rate; + + /* update hardware configuration */ + ixgbe_set_vf_rate_limit(adapter, vf); + + return 0; +} + +int 
ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (vf >= adapter->num_vfs) + return -EINVAL; + + adapter->vfinfo[vf].spoofchk_enabled = setting; + + /* configure MAC spoofing */ + hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf); + + /* configure VLAN spoofing */ + hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf); + + /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be + * calling set_ethertype_anti_spoofing for each VF in loop below + */ + if (hw->mac.ops.set_ethertype_anti_spoofing) { + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), + (IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_TX_ANTISPOOF | + IXGBE_ETH_P_LLDP)); + + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), + (IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_TX_ANTISPOOF | + ETH_P_PAUSE)); + + hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf); + } + + return 0; +} + +int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, + bool setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* This operation is currently supported only for 82599 and x540 + * devices. + */ + if (adapter->hw.mac.type < ixgbe_mac_82599EB || + adapter->hw.mac.type >= ixgbe_mac_X550) + return -EOPNOTSUPP; + + if (vf >= adapter->num_vfs) + return -EINVAL; + + adapter->vfinfo[vf].rss_query_enabled = setting; + + return 0; +} + +int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* nothing to do */ + if (adapter->vfinfo[vf].trusted == setting) + return 0; + + adapter->vfinfo[vf].trusted = setting; + + /* reset VF to reconfigure features */ + adapter->vfinfo[vf].clear_to_send = false; + ixgbe_ping_vf(adapter, vf); + + e_info(drv, "VF %u is %strusted\n", vf, setting ? 
"" : "not "); + + return 0; +} + +int ixgbe_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + if (vf >= adapter->num_vfs) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); + ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate; + ivi->min_tx_rate = 0; + ivi->vlan = adapter->vfinfo[vf].pf_vlan; + ivi->qos = adapter->vfinfo[vf].pf_qos; + ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; + ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; + ivi->trusted = adapter->vfinfo[vf].trusted; + return 0; +} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.h new file mode 100644 index 000000000000..0c7977d27b71 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.h @@ -0,0 +1,73 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_SRIOV_H_ +#define _IXGBE_SRIOV_H_ + +/* ixgbe driver limit the max number of VFs could be enabled to + * 63 (IXGBE_MAX_VF_FUNCTIONS - 1) + */ +#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1) + +#ifdef CONFIG_PCI_IOV +void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); +#endif +void ixgbe_msg_task(struct ixgbe_adapter *adapter); +int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); +void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); +void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); +int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); +int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, + u8 qos, __be16 vlan_proto); +int ixgbe_link_mbps(struct ixgbe_adapter *adapter); +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, + int max_tx_rate); +int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, + bool setting); +int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); +int ixgbe_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi); +void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); +int ixgbe_disable_sriov(struct ixgbe_adapter *adapter); +#ifdef CONFIG_PCI_IOV +void ixgbe_enable_sriov(struct ixgbe_adapter *adapter); +#endif +int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs); + +static inline void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, + u16 vid, u16 qos, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vmvir = vid | 
(qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT; + + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir); +} + +#endif /* _IXGBE_SRIOV_H_ */ + diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sysfs.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sysfs.c new file mode 100644 index 000000000000..ef6df3d6437e --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sysfs.c @@ -0,0 +1,230 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "ixgbe_common.h" +#include "ixgbe_type.h" + +#include +#include +#include +#include +#include +#include +#include + +/* hwmon callback functions */ +static ssize_t ixgbe_hwmon_show_location(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + return sprintf(buf, "loc%u\n", + ixgbe_attr->sensor->location); +} + +static ssize_t ixgbe_hwmon_show_temp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value; + + /* reset the temp field */ + ixgbe_attr->hw->mac.ops.get_thermal_sensor_data(ixgbe_attr->hw); + + value = ixgbe_attr->sensor->temp; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t ixgbe_hwmon_show_cautionthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = ixgbe_attr->sensor->caution_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = ixgbe_attr->sensor->max_op_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +/** + * ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. 
+ * @adapter: pointer to the adapter structure + * @offset: offset in the eeprom sensor data table + * @type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a device_attribute + * This is included in our hwmon_attr struct that contains the references to + * the data structures we need to get the data to display. + */ +static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter, + unsigned int offset, int type) { + int rc; + unsigned int n_attr; + struct hwmon_attr *ixgbe_attr; + + n_attr = adapter->ixgbe_hwmon_buff->n_hwmon; + ixgbe_attr = &adapter->ixgbe_hwmon_buff->hwmon_list[n_attr]; + + switch (type) { + case IXGBE_HWMON_TYPE_LOC: + ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location; + snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), + "temp%u_label", offset + 1); + break; + case IXGBE_HWMON_TYPE_TEMP: + ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp; + snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), + "temp%u_input", offset + 1); + break; + case IXGBE_HWMON_TYPE_CAUTION: + ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh; + snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), + "temp%u_max", offset + 1); + break; + case IXGBE_HWMON_TYPE_MAX: + ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh; + snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), + "temp%u_crit", offset + 1); + break; + default: + rc = -EPERM; + return rc; + } + + /* These always the same regardless of type */ + ixgbe_attr->sensor = + &adapter->hw.mac.thermal_sensor_data.sensor[offset]; + ixgbe_attr->hw = &adapter->hw; + ixgbe_attr->dev_attr.store = NULL; + ixgbe_attr->dev_attr.attr.mode = S_IRUGO; + ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name; + sysfs_attr_init(&ixgbe_attr->dev_attr.attr); + + adapter->ixgbe_hwmon_buff->attrs[n_attr] = &ixgbe_attr->dev_attr.attr; + + ++adapter->ixgbe_hwmon_buff->n_hwmon; + + return 0; +} + +static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter) +{ +} + +/* 
called from ixgbe_main.c */ +void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter) +{ + ixgbe_sysfs_del_adapter(adapter); +} + +/* called from ixgbe_main.c */ +int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) +{ + struct hwmon_buff *ixgbe_hwmon; + struct device *hwmon_dev; + unsigned int i; + int rc = 0; + + /* If this method isn't defined we don't support thermals */ + if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) { + goto exit; + } + + /* Don't create thermal hwmon interface if no sensors present */ + if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)) + goto exit; + + ixgbe_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*ixgbe_hwmon), + GFP_KERNEL); + if (ixgbe_hwmon == NULL) { + rc = -ENOMEM; + goto exit; + } + adapter->ixgbe_hwmon_buff = ixgbe_hwmon; + + for (i = 0; i < IXGBE_MAX_SENSORS; i++) { + /* + * Only create hwmon sysfs entries for sensors that have + * meaningful data for. + */ + if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0) + continue; + + /* Bail if any hwmon attr struct fails to initialize */ + rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION); + if (rc) + goto exit; + rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC); + if (rc) + goto exit; + rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP); + if (rc) + goto exit; + rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX); + if (rc) + goto exit; + } + + ixgbe_hwmon->groups[0] = &ixgbe_hwmon->group; + ixgbe_hwmon->group.attrs = ixgbe_hwmon->attrs; + + hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev, + "ixgbe", + ixgbe_hwmon, + ixgbe_hwmon->groups); + if (IS_ERR(hwmon_dev)) + rc = PTR_ERR(hwmon_dev); +exit: + return rc; +} + diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_type.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_type.h new file mode 100644 index 
000000000000..531990b2f2fb --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_type.h @@ -0,0 +1,3820 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2016 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_TYPE_H_ +#define _IXGBE_TYPE_H_ + +#include +#include +#include + +/* Device IDs */ +#define IXGBE_DEV_ID_82598 0x10B6 +#define IXGBE_DEV_ID_82598_BX 0x1508 +#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 +#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 +#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB +#define IXGBE_DEV_ID_82598AT 0x10C8 +#define IXGBE_DEV_ID_82598AT2 0x150B +#define IXGBE_DEV_ID_82598EB_CX4 0x10DD +#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC +#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 +#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 +#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 +#define IXGBE_DEV_ID_82599_KX4 0x10F7 +#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 +#define IXGBE_DEV_ID_82599_KR 0x1517 +#define IXGBE_DEV_ID_82599_T3_LOM 0x151C +#define IXGBE_DEV_ID_82599_CX4 0x10F9 +#define IXGBE_DEV_ID_82599_SFP 0x10FB +#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a +#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 +#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 +#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071 +#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 +#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 +#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B +#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159 +#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D +#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008 +#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976 +#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE +#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 +#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 +#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D +#define IXGBE_DEV_ID_82599EN_SFP 0x1557 +#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001 +#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC +#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 +#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C +#define IXGBE_DEV_ID_82599_LS 0x154F +#define IXGBE_DEV_ID_X540T 0x1528 
+#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A +#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 +#define IXGBE_DEV_ID_X540T1 0x1560 + +#define IXGBE_DEV_ID_X550T 0x1563 +#define IXGBE_DEV_ID_X550T1 0x15D1 +#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA +#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB +#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC +#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD +#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE +#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0 +#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 +#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 +#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 +#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6 +#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 +#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 +#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE +#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 +#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 + +/* VF Device IDs */ +#define IXGBE_DEV_ID_82599_VF 0x10ED +#define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 + +#define IXGBE_CAT(r, m) IXGBE_##r##_##m + +#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, IDX)]) + +/* General Registers */ +#define IXGBE_CTRL 0x00000 +#define IXGBE_STATUS 0x00008 +#define IXGBE_CTRL_EXT 0x00018 +#define IXGBE_ESDP 0x00020 +#define IXGBE_EODSDP 0x00028 + +#define IXGBE_I2CCTL_8259X 0x00028 +#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_8259X +#define IXGBE_I2CCTL_X550 0x15F5C +#define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550 +#define IXGBE_I2CCTL_X550EM_a IXGBE_I2CCTL_X550 +#define IXGBE_I2CCTL(_hw) IXGBE_BY_MAC((_hw), I2CCTL) + +#define IXGBE_LEDCTL 0x00200 +#define IXGBE_FRTIMER 0x00048 +#define IXGBE_TCPTIMER 0x0004C +#define IXGBE_CORESPARE 0x00600 +#define IXGBE_EXVET 0x05078 + +/* NVM Registers */ +#define IXGBE_EEC_8259X 0x10010 +#define IXGBE_EEC_X540 IXGBE_EEC_8259X +#define IXGBE_EEC_X550 IXGBE_EEC_8259X +#define IXGBE_EEC_X550EM_x IXGBE_EEC_8259X +#define IXGBE_EEC_X550EM_a 0x15FF8 
+#define IXGBE_EEC(_hw) IXGBE_BY_MAC((_hw), EEC) +#define IXGBE_EERD 0x10014 +#define IXGBE_EEWR 0x10018 +#define IXGBE_FLA_8259X 0x1001C +#define IXGBE_FLA_X540 IXGBE_FLA_8259X +#define IXGBE_FLA_X550 IXGBE_FLA_8259X +#define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X +#define IXGBE_FLA_X550EM_a 0x15F68 +#define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA) +#define IXGBE_EEMNGCTL 0x10110 +#define IXGBE_EEMNGDATA 0x10114 +#define IXGBE_FLMNGCTL 0x10118 +#define IXGBE_FLMNGDATA 0x1011C +#define IXGBE_FLMNGCNT 0x10120 +#define IXGBE_FLOP 0x1013C +#define IXGBE_GRC_8259X 0x10200 +#define IXGBE_GRC_X540 IXGBE_GRC_8259X +#define IXGBE_GRC_X550 IXGBE_GRC_8259X +#define IXGBE_GRC_X550EM_x IXGBE_GRC_8259X +#define IXGBE_GRC_X550EM_a 0x15F64 +#define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC) + +/* General Receive Control */ +#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ +#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ + +#define IXGBE_VPDDIAG0 0x10204 +#define IXGBE_VPDDIAG1 0x10208 + +/* I2CCTL Bit Masks */ +#define IXGBE_I2C_CLK_IN_8259X 0x00000001 +#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN_8259X +#define IXGBE_I2C_CLK_IN_X550 0x00004000 +#define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550 +#define IXGBE_I2C_CLK_IN_X550EM_a IXGBE_I2C_CLK_IN_X550 +#define IXGBE_I2C_CLK_IN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN) + +#define IXGBE_I2C_CLK_OUT_8259X 0x00000002 +#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT_8259X +#define IXGBE_I2C_CLK_OUT_X550 0x00000200 +#define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550 +#define IXGBE_I2C_CLK_OUT_X550EM_a IXGBE_I2C_CLK_OUT_X550 +#define IXGBE_I2C_CLK_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT) + +#define IXGBE_I2C_DATA_IN_8259X 0x00000004 +#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN_8259X +#define IXGBE_I2C_DATA_IN_X550 0x00001000 +#define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550 +#define IXGBE_I2C_DATA_IN_X550EM_a IXGBE_I2C_DATA_IN_X550 +#define IXGBE_I2C_DATA_IN(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN) 
+ +#define IXGBE_I2C_DATA_OUT_8259X 0x00000008 +#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT_8259X +#define IXGBE_I2C_DATA_OUT_X550 0x00000400 +#define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550 +#define IXGBE_I2C_DATA_OUT_X550EM_a IXGBE_I2C_DATA_OUT_X550 +#define IXGBE_I2C_DATA_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT) + +#define IXGBE_I2C_DATA_OE_N_EN_8259X 0 +#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN_8259X +#define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800 +#define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550 +#define IXGBE_I2C_DATA_OE_N_EN_X550EM_a IXGBE_I2C_DATA_OE_N_EN_X550 +#define IXGBE_I2C_DATA_OE_N_EN(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN) + +#define IXGBE_I2C_BB_EN_8259X 0 +#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN_8259X +#define IXGBE_I2C_BB_EN_X550 0x00000100 +#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550 +#define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550 +#define IXGBE_I2C_BB_EN(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN) + +#define IXGBE_I2C_CLK_OE_N_EN_8259X 0 +#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN_8259X +#define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000 +#define IXGBE_I2C_CLK_OE_N_EN_X550EM_x IXGBE_I2C_CLK_OE_N_EN_X550 +#define IXGBE_I2C_CLK_OE_N_EN_X550EM_a IXGBE_I2C_CLK_OE_N_EN_X550 +#define IXGBE_I2C_CLK_OE_N_EN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN) + +#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500 + +#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define IXGBE_EMC_INTERNAL_DATA 0x00 +#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 +#define IXGBE_EMC_DIODE1_DATA 0x01 +#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19 +#define IXGBE_EMC_DIODE2_DATA 0x23 +#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A + +#define IXGBE_MAX_SENSORS 3 + +struct ixgbe_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct ixgbe_thermal_sensor_data { + struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS]; +}; + +/* Interrupt Registers */ +#define IXGBE_EICR 
0x00800 +#define IXGBE_EICS 0x00808 +#define IXGBE_EIMS 0x00880 +#define IXGBE_EIMC 0x00888 +#define IXGBE_EIAC 0x00810 +#define IXGBE_EIAM 0x00890 +#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4) +#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4) +#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4) +#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4) +/* + * 82598 EITR is 16 bits but set the limits based on the max + * supported by all ixgbe hardware. 82599 EITR is only 12 bits, + * with the lower 3 always zero. + */ +#define IXGBE_MAX_INT_RATE 488281 +#define IXGBE_MIN_INT_RATE 956 +#define IXGBE_MAX_EITR 0x00000FF8 +#define IXGBE_MIN_EITR 8 +#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \ + (0x012300 + (((_i) - 24) * 4))) +#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 +#define IXGBE_EITR_LLI_MOD 0x00008000 +#define IXGBE_EITR_CNT_WDIS 0x80000000 +#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ +#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */ +#define IXGBE_EITRSEL 0x00894 +#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ +#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ +#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4))) +#define IXGBE_GPIE 0x00898 + +/* Flow Control Registers */ +#define IXGBE_FCADBUL 0x03210 +#define IXGBE_FCADBUH 0x03214 +#define IXGBE_FCAMACL 0x04328 +#define IXGBE_FCAMACH 0x0432C +#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_PFCTOP 0x03008 +#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTV 0x032A0 +#define IXGBE_FCCFG 0x03D00 +#define IXGBE_TFCS 0x0CE00 + +/* Receive DMA Registers */ +#define IXGBE_RDBAL(_i) (((_i) < 64) ? 
(0x01000 + ((_i) * 0x40)) : \ + (0x0D000 + (((_i) - 64) * 0x40))) +#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ + (0x0D004 + (((_i) - 64) * 0x40))) +#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ + (0x0D008 + (((_i) - 64) * 0x40))) +#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ + (0x0D010 + (((_i) - 64) * 0x40))) +#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ + (0x0D018 + (((_i) - 64) * 0x40))) +#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ + (0x0D028 + (((_i) - 64) * 0x40))) +#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ + (0x0D02C + (((_i) - 64) * 0x40))) +#define IXGBE_RSCDBU 0x03028 +#define IXGBE_RDDCC 0x02F20 +#define IXGBE_RXMEMWRAP 0x03190 +#define IXGBE_STARCTRL 0x03024 +/* + * Split and Replication Receive Control Registers + * 00-15 : 0x02100 + n*4 + * 16-64 : 0x01014 + n*0x40 + * 64-127: 0x0D014 + (n-64)*0x40 + */ +#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ + (0x0D014 + (((_i) - 64) * 0x40)))) +/* + * Rx DCA Control Register: + * 00-15 : 0x02200 + n*4 + * 16-64 : 0x0100C + n*0x40 + * 64-127: 0x0D00C + (n-64)*0x40 + */ +#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ + (0x0D00C + (((_i) - 64) * 0x40)))) +#define IXGBE_RDRXCTL 0x02F00 +#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) + /* 8 of these 0x03C00 - 0x03C1C */ +#define IXGBE_RXCTRL 0x03000 +#define IXGBE_DROPEN 0x03D04 +#define IXGBE_RXPBSIZE_SHIFT 10 + +/* Receive Registers */ +#define IXGBE_RXCSUM 0x05000 +#define IXGBE_RFCTL 0x05008 +#define IXGBE_DRECCCTL 0x02F08 +#define IXGBE_DRECCCTL_DISABLE 0 +/* Multicast Table Array - 128 entries */ +#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) +#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x0A200 + ((_i) * 8))) +#define IXGBE_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ + (0x0A204 + ((_i) * 8))) +#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) +#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) +/* Packet split receive type */ +#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \ + (0x0EA00 + ((_i) * 4))) +/* array of 4096 1-bit vlan filters */ +#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) +/*array of 4096 4-bit vlan vmdq indices */ +#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) +#define IXGBE_FCTRL 0x05080 +#define IXGBE_VLNCTRL 0x05088 +#define IXGBE_MCSTCTRL 0x05090 +#define IXGBE_MRQC 0x05818 +#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */ +#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */ +#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */ +#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */ +#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */ +#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */ +#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */ +#define IXGBE_RQTC 0x0EC70 +#define IXGBE_MTQC 0x08120 +#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ +#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_PFFLPL 0x050B0 +#define IXGBE_PFFLPH 0x050B4 +#define IXGBE_VT_CTL 0x051B0 +#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ +#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */ +#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */ +#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */ +#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) +#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) +#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) +#define IXGBE_QDE 0x2F04 +#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 
total */ +#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ +#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) +#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) +#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) +#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) +#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */ +#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */ +#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ +#define IXGBE_RXFECCERR0 0x051B8 +#define IXGBE_LLITHRESH 0x0EC90 +#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIRVP 0x05AC0 +#define IXGBE_VMD_CTL 0x0581C +#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ +#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */ +#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ + +/* Registers for setting up RSS on X550 with SRIOV + * _p - pool number (0..63) + * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA) + */ +#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4)) +#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40)) +#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40)) + +/* Flow Director registers */ +#define IXGBE_FDIRCTRL 0x0EE00 +#define IXGBE_FDIRHKEY 0x0EE68 +#define IXGBE_FDIRSKEY 0x0EE6C +#define IXGBE_FDIRDIP4M 0x0EE3C +#define IXGBE_FDIRSIP4M 0x0EE40 +#define IXGBE_FDIRTCPM 0x0EE44 +#define IXGBE_FDIRUDPM 0x0EE48 +#define IXGBE_FDIRSCTPM 0x0EE78 +#define IXGBE_FDIRIP6M 0x0EE74 +#define IXGBE_FDIRM 0x0EE70 + +/* Flow Director Stats registers */ +#define IXGBE_FDIRFREE 0x0EE38 +#define IXGBE_FDIRLEN 0x0EE4C +#define IXGBE_FDIRUSTAT 0x0EE50 +#define IXGBE_FDIRFSTAT 0x0EE54 +#define IXGBE_FDIRMATCH 0x0EE58 +#define IXGBE_FDIRMISS 0x0EE5C + +/* Flow Director Programming registers */ +#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) 
*/ +#define IXGBE_FDIRIPSA 0x0EE18 +#define IXGBE_FDIRIPDA 0x0EE1C +#define IXGBE_FDIRPORT 0x0EE20 +#define IXGBE_FDIRVLAN 0x0EE24 +#define IXGBE_FDIRHASH 0x0EE28 +#define IXGBE_FDIRCMD 0x0EE2C + +/* Transmit DMA registers */ +#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/ +#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) +#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) +#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) +#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40)) +#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) +#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) +#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) +#define IXGBE_DTXCTL 0x07E00 + +#define IXGBE_DMATXCTL 0x04A80 +#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */ +#define IXGBE_PFDTXGSWC 0x08220 +#define IXGBE_DTXMXSZRQ 0x08100 +#define IXGBE_DTXTCPFLGL 0x04A88 +#define IXGBE_DTXTCPFLGH 0x04A8C +#define IXGBE_LBDRPEN 0x0CA00 +#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */ + +#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ +#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ +#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ +#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */ +#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */ +#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ + +#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ + +/* Anti-spoofing defines */ +#define IXGBE_SPOOF_MACAS_MASK 0xFF +#define IXGBE_SPOOF_VLANAS_MASK 0xFF00 +#define IXGBE_SPOOF_VLANAS_SHIFT 8 +#define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000 +#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16 +#define IXGBE_PFVFSPOOF_REG_COUNT 8 + +#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ +/* Tx DCA Control register : 128 of these (0-127) */ +#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) +#define IXGBE_TIPG 0x0CB00 +#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ 
+#define IXGBE_MNGTXMAP 0x0CD10 +#define IXGBE_TIPG_FIBER_DEFAULT 3 +#define IXGBE_TXPBSIZE_SHIFT 10 + +/* Wake up registers */ +#define IXGBE_WUC 0x05800 +#define IXGBE_WUFC 0x05808 +#define IXGBE_WUS 0x05810 +#define IXGBE_IPAV 0x05838 +#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ +#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ + +#define IXGBE_WUPL 0x05900 +#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ +#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */ +#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */ +#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host + * Filter Table */ + +/* masks for accessing VXLAN and GENEVE UDP ports */ +#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */ +#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */ +#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */ + +#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16 + +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 +#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128 +#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ +#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ +#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */ + +/* Wake Up Filter Control */ +#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IXGBE_WUFC_BC 0x00000010 /* Broadcast 
Wakeup Enable */ +#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */ + +#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ +#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ +#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ +#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */ +#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */ +#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ + +/* Wake Up Status */ +#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC +#define IXGBE_WUS_MAG IXGBE_WUFC_MAG +#define IXGBE_WUS_EX IXGBE_WUFC_EX +#define IXGBE_WUS_MC IXGBE_WUFC_MC +#define IXGBE_WUS_BC IXGBE_WUFC_BC +#define IXGBE_WUS_ARP IXGBE_WUFC_ARP +#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4 +#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6 +#define IXGBE_WUS_MNG IXGBE_WUFC_MNG +#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0 +#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1 +#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2 +#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3 +#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4 +#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5 +#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS + +/* Wake Up Packet Length */ +#define IXGBE_WUPL_LENGTH_MASK 0xFFFF + +/* DCB registers */ +#define MAX_TRAFFIC_CLASS 8 +#define X540_TRAFFIC_CLASS 4 +#define DEF_TRAFFIC_CLASS 1 +#define IXGBE_RMCS 0x03D00 +#define IXGBE_DPMCS 
0x07F40 +#define IXGBE_PDPMCS 0x0CD00 +#define IXGBE_RUPPBMR 0x050A0 +#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ + +/* Security Control Registers */ +#define IXGBE_SECTXCTRL 0x08800 +#define IXGBE_SECTXSTAT 0x08804 +#define IXGBE_SECTXBUFFAF 0x08808 +#define IXGBE_SECTXMINIFG 0x08810 +#define IXGBE_SECRXCTRL 0x08D00 +#define IXGBE_SECRXSTAT 0x08D04 + +/* Security Bit Fields and Masks */ +#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001 +#define IXGBE_SECTXCTRL_TX_DIS 0x00000002 +#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 + +#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 +#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002 + +#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 +#define IXGBE_SECRXCTRL_RX_DIS 0x00000002 + +#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 +#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002 + +/* LinkSec (MacSec) Registers */ +#define IXGBE_LSECTXCAP 0x08A00 +#define IXGBE_LSECRXCAP 0x08F00 +#define IXGBE_LSECTXCTRL 0x08A04 +#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */ +#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */ +#define IXGBE_LSECTXSA 0x08A10 +#define IXGBE_LSECTXPN0 0x08A14 +#define IXGBE_LSECTXPN1 0x08A18 +#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECRXCTRL 0x08F04 +#define IXGBE_LSECRXSCL 0x08F08 +#define IXGBE_LSECRXSCH 0x08F0C +#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) 
+ (4 * (_m)))) +#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */ +#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */ +#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */ +#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */ +#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */ +#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */ +#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */ +#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */ +#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */ +#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */ +#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */ +#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */ +#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */ +#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */ +#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */ +#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */ +#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */ +#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */ +#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */ + +/* LinkSec (MacSec) Bit Fields and Masks */ +#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000 +#define IXGBE_LSECTXCAP_SUM_SHIFT 16 +#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000 +#define IXGBE_LSECRXCAP_SUM_SHIFT 16 + +#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003 +#define IXGBE_LSECTXCTRL_DISABLE 0x0 +#define IXGBE_LSECTXCTRL_AUTH 0x1 +#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2 +#define IXGBE_LSECTXCTRL_AISCI 0x00000020 +#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C +#define IXGBE_LSECRXCTRL_EN_SHIFT 2 +#define IXGBE_LSECRXCTRL_DISABLE 0x0 +#define IXGBE_LSECRXCTRL_CHECK 0x1 +#define IXGBE_LSECRXCTRL_STRICT 0x2 +#define IXGBE_LSECRXCTRL_DROP 0x3 +#define IXGBE_LSECRXCTRL_PLSH 0x00000040 +#define IXGBE_LSECRXCTRL_RP 0x00000080 +#define IXGBE_LSECRXCTRL_RSV_MASK 
0xFFFFFF33 + +/* IpSec Registers */ +#define IXGBE_IPSTXIDX 0x08900 +#define IXGBE_IPSTXSALT 0x08904 +#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXIDX 0x08E00 +#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSPI 0x08E14 +#define IXGBE_IPSRXIPIDX 0x08E18 +#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSALT 0x08E2C +#define IXGBE_IPSRXMOD 0x08E30 + +#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 + +/* DCB registers */ +#define IXGBE_RTRPCS 0x02430 +#define IXGBE_RTTDCS 0x04900 +#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_RTTPCS 0x0CD00 +#define IXGBE_RTRUP2TC 0x03020 +#define IXGBE_RTTUP2TC 0x0C800 +#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDQSEL 0x04904 +#define IXGBE_RTTDT1C 0x04908 +#define IXGBE_RTTDT1S 0x0490C +#define IXGBE_RTTQCNCR 0x08B00 +#define IXGBE_RTTQCNTG 0x04A90 +#define IXGBE_RTTBCNRD 0x0498C +#define IXGBE_RTTQCNRR 0x0498C +#define IXGBE_RTTDTECC 0x04990 +#define IXGBE_RTTDTECC_NO_BCN 0x00000100 +#define IXGBE_RTTBCNRC 0x04984 +#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 +#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14 +#define IXGBE_RTTBCNRC_RF_INT_MASK \ + (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) +#define IXGBE_RTTBCNRM 0x04980 +#define IXGBE_RTTQCNRM 0x04980 + +/* FCoE Direct DMA Context */ +#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) 
+ ((_j) * 0x10)) +/* FCoE DMA Context Registers */ +#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ +#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */ +#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ +#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ +#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */ +#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4)) +#define IXGBE_FCBUFF_VALID BIT(0) /* DMA Context Valid */ +#define IXGBE_FCBUFF_BUFFSIZE (3u << 3) /* User Buffer Size */ +#define IXGBE_FCBUFF_WRCONTX BIT(7) /* 0: Initiator, 1: Target */ +#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ +#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ +#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 +#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 +#define IXGBE_FCBUFF_OFFSET_SHIFT 16 +#define IXGBE_FCDMARW_WE BIT(14) /* Write enable */ +#define IXGBE_FCDMARW_RE BIT(15) /* Read enable */ +#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ +#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ +#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 + +/* FCoE SOF/EOF */ +#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */ +#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ +#define IXGBE_REOFF 0x05158 /* Rx FC EOF */ +#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ +/* FCoE Direct Filter Context */ +#define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10)) +#define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4)) +/* FCoE Filter Context Registers */ +#define IXGBE_FCFLT 0x05108 /* FC FLT Context */ +#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ +#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ +#define IXGBE_FCFLT_VALID BIT(0) /* Filter Context Valid */ +#define IXGBE_FCFLT_FIRST BIT(1) /* Filter First */ +#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ +#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ +#define IXGBE_FCFLTRW_RVALDT BIT(13) /* Fast Re-Validation */ +#define 
IXGBE_FCFLTRW_WE BIT(14) /* Write Enable */ +#define IXGBE_FCFLTRW_RE BIT(15) /* Read Enable */ +/* FCoE Receive Control */ +#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ +#define IXGBE_FCRXCTRL_FCOELLI BIT(0) /* Low latency interrupt */ +#define IXGBE_FCRXCTRL_SAVBAD BIT(1) /* Save Bad Frames */ +#define IXGBE_FCRXCTRL_FRSTRDH BIT(2) /* EN 1st Read Header */ +#define IXGBE_FCRXCTRL_LASTSEQH BIT(3) /* EN Last Header in Seq */ +#define IXGBE_FCRXCTRL_ALLH BIT(4) /* EN All Headers */ +#define IXGBE_FCRXCTRL_FRSTSEQH BIT(5) /* EN 1st Seq. Header */ +#define IXGBE_FCRXCTRL_ICRC BIT(6) /* Ignore Bad FC CRC */ +#define IXGBE_FCRXCTRL_FCCRCBO BIT(7) /* FC CRC Byte Ordering */ +#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ +#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 +/* FCoE Redirection */ +#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */ +#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */ +#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */ +#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ +#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ +#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ +#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */ +/* Higher 7 bits for the queue index */ +#define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000 +#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16 + +/* Stats registers */ +#define IXGBE_CRCERRS 0x04000 +#define IXGBE_ILLERRC 0x04004 +#define IXGBE_ERRBC 0x04008 +#define IXGBE_MSPDC 0x04010 +#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ +#define IXGBE_MLFC 0x04034 +#define IXGBE_MRFC 0x04038 +#define IXGBE_RLEC 0x04040 +#define IXGBE_LXONTXC 0x03F60 +#define IXGBE_LXONRXC 0x0CF60 +#define IXGBE_LXOFFTXC 0x03F68 +#define IXGBE_LXOFFRXC 0x0CF68 +#define IXGBE_LXONRXCNT 0x041A4 +#define IXGBE_LXOFFRXCNT 0x041A8 +#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXOFFRXCNT(_i) 
(0x04160 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ +#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ +#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ +#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ +#define IXGBE_PRC64 0x0405C +#define IXGBE_PRC127 0x04060 +#define IXGBE_PRC255 0x04064 +#define IXGBE_PRC511 0x04068 +#define IXGBE_PRC1023 0x0406C +#define IXGBE_PRC1522 0x04070 +#define IXGBE_GPRC 0x04074 +#define IXGBE_BPRC 0x04078 +#define IXGBE_MPRC 0x0407C +#define IXGBE_GPTC 0x04080 +#define IXGBE_GORCL 0x04088 +#define IXGBE_GORCH 0x0408C +#define IXGBE_GOTCL 0x04090 +#define IXGBE_GOTCH 0x04094 +#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ +#define IXGBE_RUC 0x040A4 +#define IXGBE_RFC 0x040A8 +#define IXGBE_ROC 0x040AC +#define IXGBE_RJC 0x040B0 +#define IXGBE_MNGPRC 0x040B4 +#define IXGBE_MNGPDC 0x040B8 +#define IXGBE_MNGPTC 0x0CF90 +#define IXGBE_TORL 0x040C0 +#define IXGBE_TORH 0x040C4 +#define IXGBE_TPR 0x040D0 +#define IXGBE_TPT 0x040D4 +#define IXGBE_PTC64 0x040D8 +#define IXGBE_PTC127 0x040DC +#define IXGBE_PTC255 0x040E0 +#define IXGBE_PTC511 0x040E4 +#define IXGBE_PTC1023 0x040E8 +#define IXGBE_PTC1522 0x040EC +#define IXGBE_MPTC 0x040F0 +#define IXGBE_BPTC 0x040F4 +#define IXGBE_XEC 0x04120 +#define IXGBE_SSVPC 0x08780 + +#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) +#define IXGBE_TQSMR(_i) (((_i) <= 7) ? 
(0x07300 + ((_i) * 4)) : \ + (0x08600 + ((_i) * 4))) +#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) + +#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_FCCRC 0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */ +#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */ +#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */ +#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */ +#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ +#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ +#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ +#define IXGBE_O2BGPTC 0x041C4 +#define IXGBE_O2BSPC 0x087B0 +#define IXGBE_B2OSPC 0x041C0 +#define IXGBE_B2OGPRC 0x02F90 +#define IXGBE_PCRC8ECL 0x0E810 +#define IXGBE_PCRC8ECH 0x0E811 +#define IXGBE_PCRC8ECH_MASK 0x1F +#define IXGBE_LDPCECL 0x0E820 +#define IXGBE_LDPCECH 0x0E821 + +/* MII clause 22/28 definitions */ +#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 + +#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register */ +#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */ + +#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */ + +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */ +#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */ +#define 
IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s F Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */ + +#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */ +#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ +#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400 +#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800 +#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ +#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ +#define IXGBE_MII_RESTART 0x200 +#define IXGBE_MII_AUTONEG_COMPLETE 0x20 +#define IXGBE_MII_AUTONEG_LINK_UP 0x04 +#define IXGBE_MII_AUTONEG_REG 0x0 + +/* Management */ +#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MANC 0x05820 +#define IXGBE_MFVAL 0x05824 +#define IXGBE_MANC2H 0x05860 +#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ +#define 
IXGBE_MIPAF 0x058B0 +#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ +#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_LSWFW 0x15014 + +/* Management Bit Fields and Masks */ +#define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */ + +/* Firmware Semaphore Register */ +#define IXGBE_FWSM_MODE_MASK 0xE +#define IXGBE_FWSM_FW_MODE_PT 0x4 + +/* ARC Subsystem registers */ +#define IXGBE_HICR 0x15F00 +#define IXGBE_FWSTS 0x15F0C +#define IXGBE_HSMC0R 0x15F04 +#define IXGBE_HSMC1R 0x15F08 +#define IXGBE_SWSR 0x15F10 +#define IXGBE_HFDR 0x15FE8 +#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */ + +#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define IXGBE_HICR_C 0x02 +#define IXGBE_HICR_SV 0x04 /* Status Validity */ +#define IXGBE_HICR_FW_RESET_ENABLE 0x40 +#define IXGBE_HICR_FW_RESET 0x80 + +/* PCI-E registers */ +#define IXGBE_GCR 0x11000 +#define IXGBE_GTV 0x11004 +#define IXGBE_FUNCTAG 0x11008 +#define IXGBE_GLT 0x1100C +#define IXGBE_GSCL_1 0x11010 +#define IXGBE_GSCL_2 0x11014 +#define IXGBE_GSCL_3 0x11018 +#define IXGBE_GSCL_4 0x1101C +#define IXGBE_GSCN_0 0x11020 +#define IXGBE_GSCN_1 0x11024 +#define IXGBE_GSCN_2 0x11028 +#define IXGBE_GSCN_3 0x1102C +#define IXGBE_FACTPS_8259X 0x10150 +#define IXGBE_FACTPS_X540 IXGBE_FACTPS_8259X +#define IXGBE_FACTPS_X550 IXGBE_FACTPS_8259X +#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS_8259X +#define IXGBE_FACTPS_X550EM_a 0x15FEC +#define IXGBE_FACTPS(_hw) IXGBE_BY_MAC((_hw), FACTPS) + +#define IXGBE_PCIEANACTL 0x11040 +#define IXGBE_SWSM_8259X 0x10140 +#define IXGBE_SWSM_X540 IXGBE_SWSM_8259X +#define IXGBE_SWSM_X550 IXGBE_SWSM_8259X +#define IXGBE_SWSM_X550EM_x IXGBE_SWSM_8259X +#define 
IXGBE_SWSM_X550EM_a 0x15F70 +#define IXGBE_SWSM(_hw) IXGBE_BY_MAC((_hw), SWSM) +#define IXGBE_FWSM_8259X 0x10148 +#define IXGBE_FWSM_X540 IXGBE_FWSM_8259X +#define IXGBE_FWSM_X550 IXGBE_FWSM_8259X +#define IXGBE_FWSM_X550EM_x IXGBE_FWSM_8259X +#define IXGBE_FWSM_X550EM_a 0x15F74 +#define IXGBE_FWSM(_hw) IXGBE_BY_MAC((_hw), FWSM) +#define IXGBE_GSSR 0x10160 +#define IXGBE_MREVID 0x11064 +#define IXGBE_DCA_ID 0x11070 +#define IXGBE_DCA_CTRL 0x11074 +#define IXGBE_SWFW_SYNC_8259X IXGBE_GSSR +#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC_8259X +#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC_8259X +#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC_8259X +#define IXGBE_SWFW_SYNC_X550EM_a 0x15F78 +#define IXGBE_SWFW_SYNC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC) + +/* PCIe registers 82599-specific */ +#define IXGBE_GCR_EXT 0x11050 +#define IXGBE_GSCL_5_82599 0x11030 +#define IXGBE_GSCL_6_82599 0x11034 +#define IXGBE_GSCL_7_82599 0x11038 +#define IXGBE_GSCL_8_82599 0x1103C +#define IXGBE_PHYADR_82599 0x11040 +#define IXGBE_PHYDAT_82599 0x11044 +#define IXGBE_PHYCTL_82599 0x11048 +#define IXGBE_PBACLR_82599 0x11068 + +#define IXGBE_CIAA_8259X 0x11088 +#define IXGBE_CIAA_X540 IXGBE_CIAA_8259X +#define IXGBE_CIAA_X550 0x11508 +#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550 +#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550 +#define IXGBE_CIAA(_hw) IXGBE_BY_MAC((_hw), CIAA) + +#define IXGBE_CIAD_8259X 0x1108C +#define IXGBE_CIAD_X540 IXGBE_CIAD_8259X +#define IXGBE_CIAD_X550 0x11510 +#define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550 +#define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550 +#define IXGBE_CIAD(_hw) IXGBE_BY_MAC((_hw), CIAD) + +#define IXGBE_PICAUSE 0x110B0 +#define IXGBE_PIENA 0x110B8 +#define IXGBE_CDQ_MBR_82599 0x110B4 +#define IXGBE_PCIESPARE 0x110BC +#define IXGBE_MISC_REG_82599 0x110F0 +#define IXGBE_ECC_CTRL_0_82599 0x11100 +#define IXGBE_ECC_CTRL_1_82599 0x11104 +#define IXGBE_ECC_STATUS_82599 0x110E0 +#define IXGBE_BAR_CTRL_82599 0x110F4 + +/* PCI Express Control */ +#define 
IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 +#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define IXGBE_GCR_CAP_VER2 0x00040000 + +#define IXGBE_GCR_EXT_MSIX_EN 0x80000000 +#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000 +#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001 +#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 +#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 +#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ + IXGBE_GCR_EXT_VT_MODE_64) + +/* Time Sync Registers */ +#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ +#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ +#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */ +#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */ +#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */ +#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */ +#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */ +#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */ +#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ +#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ +#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ +#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */ +#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ +#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ +#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ +#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */ +#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */ +#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ +#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ +#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ +#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */ +#define 
IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */ +#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ +#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ +#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ +#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ +#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ +#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ +#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */ + +/* Diagnostic Registers */ +#define IXGBE_RDSTATCTL 0x02C20 +#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ +#define IXGBE_RDHMPN 0x02F08 +#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) +#define IXGBE_RDPROBE 0x02F20 +#define IXGBE_RDMAM 0x02F30 +#define IXGBE_RDMAD 0x02F34 +#define IXGBE_TDSTATCTL 0x07C20 +#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ +#define IXGBE_TDHMPN 0x07F08 +#define IXGBE_TDHMPN2 0x082FC +#define IXGBE_TXDESCIC 0x082CC +#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) +#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) +#define IXGBE_TDPROBE 0x07F20 +#define IXGBE_TXBUFCTRL 0x0C600 +#define IXGBE_TXBUFDATA(_i) (0x0C610 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_RXBUFCTRL 0x03600 +#define IXGBE_RXBUFDATA(_i) (0x03610 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_RFVAL 0x050A4 +#define IXGBE_MDFTC1 0x042B8 +#define IXGBE_MDFTC2 0x042C0 +#define IXGBE_MDFTFIFO1 0x042C4 +#define IXGBE_MDFTFIFO2 0x042C8 +#define IXGBE_MDFTS 0x042CC +#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ +#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ +#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ +#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 
4)) /* 8 of these 3730-373C*/ +#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ +#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ +#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ +#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ +#define IXGBE_PCIEECCCTL 0x1106C +#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/ +#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/ +#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/ +#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/ +#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/ +#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/ +#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/ +#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/ +#define IXGBE_PCIEECCCTL0 0x11100 +#define IXGBE_PCIEECCCTL1 0x11104 +#define IXGBE_RXDBUECC 0x03F70 +#define IXGBE_TXDBUECC 0x0CF70 +#define IXGBE_RXDBUEST 0x03F74 +#define IXGBE_TXDBUEST 0x0CF74 +#define IXGBE_PBTXECC 0x0C300 +#define IXGBE_PBRXECC 0x03300 +#define IXGBE_GHECCR 0x110B0 + +/* MAC Registers */ +#define IXGBE_PCS1GCFIG 0x04200 +#define IXGBE_PCS1GLCTL 0x04208 +#define IXGBE_PCS1GLSTA 0x0420C +#define IXGBE_PCS1GDBG0 0x04210 +#define IXGBE_PCS1GDBG1 0x04214 +#define IXGBE_PCS1GANA 0x04218 +#define IXGBE_PCS1GANLP 0x0421C +#define IXGBE_PCS1GANNP 0x04220 +#define IXGBE_PCS1GANLPNP 0x04224 +#define IXGBE_HLREG0 0x04240 +#define IXGBE_HLREG1 0x04244 +#define IXGBE_PAP 0x04248 +#define IXGBE_MACA 0x0424C +#define IXGBE_APAE 0x04250 +#define IXGBE_ARD 0x04254 +#define IXGBE_AIS 0x04258 +#define IXGBE_MSCA 0x0425C +#define IXGBE_MSRWD 0x04260 +#define IXGBE_MLADD 0x04264 +#define IXGBE_MHADD 0x04268 +#define IXGBE_MAXFRS 0x04268 +#define IXGBE_TREG 0x0426C +#define IXGBE_PCSS1 
0x04288 +#define IXGBE_PCSS2 0x0428C +#define IXGBE_XPCSS 0x04290 +#define IXGBE_MFLCN 0x04294 +#define IXGBE_SERDESC 0x04298 +#define IXGBE_MAC_SGMII_BUSY 0x04298 +#define IXGBE_MACS 0x0429C +#define IXGBE_AUTOC 0x042A0 +#define IXGBE_LINKS 0x042A4 +#define IXGBE_LINKS2 0x04324 +#define IXGBE_AUTOC2 0x042A8 +#define IXGBE_AUTOC3 0x042AC +#define IXGBE_ANLP1 0x042B0 +#define IXGBE_ANLP2 0x042B4 +#define IXGBE_MACC 0x04330 +#define IXGBE_ATLASCTL 0x04800 +#define IXGBE_MMNGC 0x042D0 +#define IXGBE_ANLPNP1 0x042D4 +#define IXGBE_ANLPNP2 0x042D8 +#define IXGBE_KRPCSFC 0x042E0 +#define IXGBE_KRPCSS 0x042E4 +#define IXGBE_FECS1 0x042E8 +#define IXGBE_FECS2 0x042EC +#define IXGBE_SMADARCTL 0x14F10 +#define IXGBE_MPVC 0x04318 +#define IXGBE_SGMIIC 0x04314 + +/* Statistics Registers */ +#define IXGBE_RXNFGPC 0x041B0 +#define IXGBE_RXNFGBCL 0x041B4 +#define IXGBE_RXNFGBCH 0x041B8 +#define IXGBE_RXDGPC 0x02F50 +#define IXGBE_RXDGBCL 0x02F54 +#define IXGBE_RXDGBCH 0x02F58 +#define IXGBE_RXDDGPC 0x02F5C +#define IXGBE_RXDDGBCL 0x02F60 +#define IXGBE_RXDDGBCH 0x02F64 +#define IXGBE_RXLPBKGPC 0x02F68 +#define IXGBE_RXLPBKGBCL 0x02F6C +#define IXGBE_RXLPBKGBCH 0x02F70 +#define IXGBE_RXDLPBKGPC 0x02F74 +#define IXGBE_RXDLPBKGBCL 0x02F78 +#define IXGBE_RXDLPBKGBCH 0x02F7C +#define IXGBE_TXDGPC 0x087A0 +#define IXGBE_TXDGBCL 0x087A4 +#define IXGBE_TXDGBCH 0x087A8 + +#define IXGBE_RXDSTATCTRL 0x02F40 + +/* Copper Pond 2 link timeout */ +#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 + +/* Omer CORECTL */ +#define IXGBE_CORECTL 0x014F00 +/* BARCTRL */ +#define IXGBE_BARCTRL 0x110F4 +#define IXGBE_BARCTRL_FLSIZE 0x0700 +#define IXGBE_BARCTRL_FLSIZE_SHIFT 8 +#define IXGBE_BARCTRL_CSRSIZE 0x2000 + +/* RSCCTL Bit Masks */ +#define IXGBE_RSCCTL_RSCEN 0x01 +#define IXGBE_RSCCTL_MAXDESC_1 0x00 +#define IXGBE_RSCCTL_MAXDESC_4 0x04 +#define IXGBE_RSCCTL_MAXDESC_8 0x08 +#define IXGBE_RSCCTL_MAXDESC_16 0x0C + +/* RSCDBU Bit Masks */ +#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F +#define 
IXGBE_RSCDBU_RSCACKDIS 0x00000080 + +/* RDRXCTL Bit Masks */ +#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */ +#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ +#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad small packet */ +#define IXGBE_RDRXCTL_MVMEN 0x00000020 +#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ +#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ +#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */ +#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */ +#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */ +#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */ +#define IXGBE_RDRXCTL_MBINTEN 0x10000000 +#define IXGBE_RDRXCTL_MDP_EN 0x20000000 + +/* RQTC Bit Masks and Shifts */ +#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) +#define IXGBE_RQTC_TC0_MASK (0x7 << 0) +#define IXGBE_RQTC_TC1_MASK (0x7 << 4) +#define IXGBE_RQTC_TC2_MASK (0x7 << 8) +#define IXGBE_RQTC_TC3_MASK (0x7 << 12) +#define IXGBE_RQTC_TC4_MASK (0x7 << 16) +#define IXGBE_RQTC_TC5_MASK (0x7 << 20) +#define IXGBE_RQTC_TC6_MASK (0x7 << 24) +#define IXGBE_RQTC_TC7_MASK (0x7 << 28) + +/* PSRTYPE.RQPL Bit masks and shift */ +#define IXGBE_PSRTYPE_RQPL_MASK 0x7 +#define IXGBE_PSRTYPE_RQPL_SHIFT 29 + +/* CTRL Bit Masks */ +#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ +#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. 
*/ +#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ +#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST) + +/* FACTPS */ +#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageblility Clock Gated */ +#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ + +/* MHADD Bit Masks */ +#define IXGBE_MHADD_MFS_MASK 0xFFFF0000 +#define IXGBE_MHADD_MFS_SHIFT 16 + +/* Extended Device Control */ +#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */ +#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */ +#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ + +/* Direct Cache Access (DCA) definitions */ +#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */ +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */ +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */ +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */ +#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */ +#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */ +#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */ + +#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */ +#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax 
Order */ +#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */ +#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ + +/* MSCA Bit Masks */ +#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Address (new protocol) */ +#define IXGBE_MSCA_NP_ADDR_SHIFT 0 +#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Device Type (new protocol) */ +#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old protocol */ +#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */ +#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/ +#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */ +#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ +#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ +#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */ +#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (read) */ +#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (read, auto inc)*/ +#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ +#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ +#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */ +#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old protocol) */ +#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */ +#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */ + +/* MSRWD bit masks */ +#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF +#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 +#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 +#define IXGBE_MSRWD_READ_DATA_SHIFT 16 + +/* Atlas registers */ +#define IXGBE_ATLAS_PDN_LPBK 0x24 +#define IXGBE_ATLAS_PDN_10G 0xB +#define IXGBE_ATLAS_PDN_1G 0xC +#define IXGBE_ATLAS_PDN_AN 0xD + +/* Atlas bit masks */ +#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000 +#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10 +#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0 +#define 
IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 + +/* Omer bit masks */ +#define IXGBE_CORECTL_WRITE_CMD 0x00010000 + +/* MDIO definitions */ + +#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0 +#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 +#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 +#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 +#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ +#define IXGBE_TWINAX_DEV 1 + +#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ + +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 + +#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */ +#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */ +#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ +#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */ + +#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */ +#define IXGBE_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG Rx LP Status Reg */ +#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT */ +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */ +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */ +#define 
IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */ +#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK 0xFF00 /* int std mask */ +#define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG 0xFC00 /* chip std int flag */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide mask */ +#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT 0x0010 /* device fault */ +#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* global fault msg */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP 0x8007 /* high temp failure */ +#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */ +/* autoneg vendor alarm int enable */ +#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 +#define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */ +#define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */ +#define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */ +#define IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN 0x0010 /*int dev fault enable */ + +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Stat Reg */ +#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */ +#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */ +#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Tx Dis Reg */ +#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Tx Dis */ + +/* MII clause 22/28 definitions */ +#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define 
IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define IXGBE_MII_AUTONEG_REG 0x0 + +#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 +#define IXGBE_MAX_PHY_ADDR 32 + +/* PHY IDs*/ +#define TN1010_PHY_ID 0x00A19410 +#define TNX_FW_REV 0xB +#define X540_PHY_ID 0x01540200 +#define X550_PHY_ID2 0x01540223 +#define X550_PHY_ID3 0x01540221 +#define X557_PHY_ID 0x01540240 +#define X557_PHY_ID2 0x01540250 +#define QT2022_PHY_ID 0x0043A400 +#define ATH_PHY_ID 0x03429050 +#define AQ_FW_REV 0x20 + +/* PHY Types */ +#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 + +/* Special PHY Init Routine */ +#define IXGBE_PHY_INIT_OFFSET_NL 0x002B +#define IXGBE_PHY_INIT_END_NL 0xFFFF +#define IXGBE_CONTROL_MASK_NL 0xF000 +#define IXGBE_DATA_MASK_NL 0x0FFF +#define IXGBE_CONTROL_SHIFT_NL 12 +#define IXGBE_DELAY_NL 0 +#define IXGBE_DATA_NL 1 +#define IXGBE_CONTROL_NL 0x000F +#define IXGBE_CONTROL_EOL_NL 0x0FFF +#define IXGBE_CONTROL_SOL_NL 0x0000 + +/* General purpose Interrupt Enable */ +#define IXGBE_SDP0_GPIEN_8259X 0x00000001 /* SDP0 */ +#define IXGBE_SDP1_GPIEN_8259X 0x00000002 /* SDP1 */ +#define IXGBE_SDP2_GPIEN_8259X 0x00000004 /* SDP2 */ +#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */ +#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */ +#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */ +#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_X550EM_a IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550EM_a IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550EM_a IXGBE_SDP2_GPIEN_X540 
+#define IXGBE_SDP0_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN) +#define IXGBE_SDP1_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN) +#define IXGBE_SDP2_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN) + +#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ +#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ +#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ +#define IXGBE_GPIE_EIAME 0x40000000 +#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 +#define IXGBE_GPIE_RSC_DELAY_SHIFT 11 +#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ +#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ +#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ +#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */ + +/* Packet Buffer Initialization */ +#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */ +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ +#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ +#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer*/ +#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer*/ + +#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ +#define IXGBE_MAX_PB 8 + +/* Packet buffer allocation strategies */ +enum { + PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ +#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL + PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ +#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED +}; + +/* Transmit Flow Control status */ +#define IXGBE_TFCS_TXOFF 0x00000001 +#define IXGBE_TFCS_TXOFF0 0x00000100 +#define IXGBE_TFCS_TXOFF1 0x00000200 +#define IXGBE_TFCS_TXOFF2 0x00000400 +#define IXGBE_TFCS_TXOFF3 0x00000800 +#define IXGBE_TFCS_TXOFF4 0x00001000 +#define IXGBE_TFCS_TXOFF5 0x00002000 
+#define IXGBE_TFCS_TXOFF6 0x00004000 +#define IXGBE_TFCS_TXOFF7 0x00008000 + +/* TCP Timer */ +#define IXGBE_TCPTIMER_KS 0x00000100 +#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200 +#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400 +#define IXGBE_TCPTIMER_LOOP 0x00000800 +#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF + +/* HLREG0 Bit Masks */ +#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */ +#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */ +#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */ +#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */ +#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */ +#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */ +#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */ +#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */ +#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */ +#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */ +#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */ +#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */ +#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */ +#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */ +#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */ + +/* VMD_CTL bitmasks */ +#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001 +#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 + +/* VT_CTL bitmasks */ +#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */ +#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */ +#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */ +#define IXGBE_VT_CTL_POOL_SHIFT 7 +#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) + +/* VMOLR bitmasks */ +#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */ +#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */ +#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ +#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ +#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ +#define IXGBE_VMOLR_BAM 
0x08000000 /* accept broadcast packets */ +#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */ + +/* VFRE bitmask */ +#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF + +#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +/* RDHMPN and TDHMPN bitmasks */ +#define IXGBE_RDHMPN_RDICADDR 0x007FF800 +#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 +#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 +#define IXGBE_TDHMPN_TDICADDR 0x003FF800 +#define IXGBE_TDHMPN_TDICRDREQ 0x00800000 +#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 + +#define IXGBE_RDMAM_MEM_SEL_SHIFT 13 +#define IXGBE_RDMAM_DWORD_SHIFT 9 +#define IXGBE_RDMAM_DESC_COMP_FIFO 1 +#define IXGBE_RDMAM_DFC_CMD_FIFO 2 +#define IXGBE_RDMAM_TCN_STATUS_RAM 4 +#define IXGBE_RDMAM_WB_COLL_FIFO 5 +#define IXGBE_RDMAM_QSC_CNT_RAM 6 +#define IXGBE_RDMAM_QSC_QUEUE_CNT 8 +#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA +#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135 +#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4 +#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48 +#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7 +#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256 +#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9 +#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8 +#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4 +#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64 +#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8 + +#define IXGBE_TXDESCIC_READY 0x80000000 + +/* Receive Checksum Control */ +#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* FCRTL Bit Masks */ +#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */ +#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */ + +/* PAP bit masks*/ +#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ + +/* RMCS Bit Masks */ +#define IXGBE_RMCS_RRM 0x00000002 /* Receive 
Recycle Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define IXGBE_RMCS_RAC 0x00000004 +#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ +#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */ +#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */ +#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + +/* FCCFG Bit Masks */ +#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */ +#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */ + +/* Interrupt register bitmasks */ + +/* Extended Interrupt Cause Read */ +#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ +#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */ +#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */ +#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */ +#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ +#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ +#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ +#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ +#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ +#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */ +#define IXGBE_EICR_GPI_SDP0_8259X 0x01000000 /* Gen Purpose INT on SDP0 */ +#define IXGBE_EICR_GPI_SDP1_8259X 0x02000000 /* Gen Purpose INT on SDP1 */ +#define IXGBE_EICR_GPI_SDP2_8259X 0x04000000 /* Gen Purpose INT on SDP2 */ +#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 +#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 +#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 +#define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0_X550EM_x IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550EM_x IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550EM_x IXGBE_EICR_GPI_SDP2_X540 
+#define IXGBE_EICR_GPI_SDP0_X550EM_a IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550EM_a IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550EM_a IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0) +#define IXGBE_EICR_GPI_SDP1(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1) +#define IXGBE_EICR_GPI_SDP2(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2) + +#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ +#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ +#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ +#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ + +/* Extended Interrupt Cause Set */ +#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */ +#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EICS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) +#define IXGBE_EICS_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw) +#define IXGBE_EICS_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw) +#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ +#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Set */ +#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define 
IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermel Sensor Event */ +#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EIMS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) +#define IXGBE_EIMS_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw) +#define IXGBE_EIMS_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw) +#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ +#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Clear */ +#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EIMC_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) +#define IXGBE_EIMC_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw) +#define IXGBE_EIMC_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw) +#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ +#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define 
IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +#define IXGBE_EIMS_ENABLE_MASK ( \ + IXGBE_EIMS_RTX_QUEUE | \ + IXGBE_EIMS_LSC | \ + IXGBE_EIMS_TCP_TIMER | \ + IXGBE_EIMS_OTHER) + +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ +#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass check of control bits */ +#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */ +#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */ +#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ +#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */ +#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */ + +#define IXGBE_MAX_FTQF_FILTERS 128 +#define IXGBE_FTQF_PROTOCOL_MASK 
0x00000003 +#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000 +#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001 +#define IXGBE_FTQF_PROTOCOL_SCTP 2 +#define IXGBE_FTQF_PRIORITY_MASK 0x00000007 +#define IXGBE_FTQF_PRIORITY_SHIFT 2 +#define IXGBE_FTQF_POOL_MASK 0x0000003F +#define IXGBE_FTQF_POOL_SHIFT 8 +#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F +#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 +#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E +#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D +#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B +#define IXGBE_FTQF_DEST_PORT_MASK 0x17 +#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F +#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 +#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 + +/* Interrupt clear mask */ +#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF + +/* Interrupt Vector Allocation Registers */ +#define IXGBE_IVAR_REG_NUM 25 +#define IXGBE_IVAR_REG_NUM_82599 64 +#define IXGBE_IVAR_TXRX_ENTRY 96 +#define IXGBE_IVAR_RX_ENTRY 64 +#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) +#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) +#define IXGBE_IVAR_TX_ENTRY 32 + +#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ +#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ + +#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) + +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +/* ETYPE Queue Filter/Select Bit Masks */ +#define IXGBE_MAX_ETQF_FILTERS 8 +#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ +#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ +#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */ +#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ +#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ +#define IXGBE_ETQF_POOL_ENABLE BIT(26) /* bit 26 */ +#define IXGBE_ETQF_POOL_SHIFT 20 + +#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ +#define IXGBE_ETQS_RX_QUEUE_SHIFT 16 +#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */ +#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */ + +/* + * ETQF filter list: one static filter per filter consumer. 
This is + * to avoid filter collisions later. Add new filters + * here!! + * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + * FCoE (0x8906): Filter 2 + * 1588 (0x88f7): Filter 3 + * FIP (0x8914): Filter 4 + * LLDP (0x88CC): Filter 5 + * LACP (0x8809): Filter 6 + * FC (0x8808): Filter 7 + */ +#define IXGBE_ETQF_FILTER_EAPOL 0 +#define IXGBE_ETQF_FILTER_FCOE 2 +#define IXGBE_ETQF_FILTER_1588 3 +#define IXGBE_ETQF_FILTER_FIP 4 +#define IXGBE_ETQF_FILTER_LLDP 5 +#define IXGBE_ETQF_FILTER_LACP 6 +#define IXGBE_ETQF_FILTER_FC 7 + +/* VLAN Control Bit Masks */ +#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ +#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ +#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ +#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ +#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ + +/* VLAN pool filtering masks */ +#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ +#define IXGBE_VLVF_ENTRIES 64 +#define IXGBE_VLVF_VLANID_MASK 0x00000FFF + +/* Per VF Port VLAN insertion rules */ +#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ + +/* STATUS Bit Masks */ +#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ +#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/ +#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */ + +#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ +#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ + +/* ESDP Bit Masks */ +#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */ +#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */ +#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */ +#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */ +#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ +#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ +#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data 
Value */ +#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ +#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */ +#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ +#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ +#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */ +#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */ + +/* LEDCTL Bit Masks */ +#define IXGBE_LED_IVRT_BASE 0x00000040 +#define IXGBE_LED_BLINK_BASE 0x00000080 +#define IXGBE_LED_MODE_MASK_BASE 0x0000000F +#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) +#define IXGBE_LED_MODE_SHIFT(_i) (8 * (_i)) +#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) +#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) +#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) +#define IXGBE_X557_LED_MANUAL_SET_MASK BIT(8) +#define IXGBE_X557_MAX_LED_INDEX 3 +#define IXGBE_X557_LED_PROVISIONING 0xC430 + +/* LED modes */ +#define IXGBE_LED_LINK_UP 0x0 +#define IXGBE_LED_LINK_10G 0x1 +#define IXGBE_LED_MAC 0x2 +#define IXGBE_LED_FILTER 0x3 +#define IXGBE_LED_LINK_ACTIVE 0x4 +#define IXGBE_LED_LINK_1G 0x5 +#define IXGBE_LED_ON 0xE +#define IXGBE_LED_OFF 0xF + +/* AUTOC Bit Masks */ +#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000 +#define IXGBE_AUTOC_KX4_SUPP 0x80000000 +#define IXGBE_AUTOC_KX_SUPP 0x40000000 +#define IXGBE_AUTOC_PAUSE 0x30000000 +#define IXGBE_AUTOC_ASM_PAUSE 0x20000000 +#define IXGBE_AUTOC_SYM_PAUSE 0x10000000 +#define IXGBE_AUTOC_RF 0x08000000 +#define IXGBE_AUTOC_PD_TMR 0x06000000 +#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 +#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 +#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 +#define IXGBE_AUTOC_FECA 0x00040000 +#define IXGBE_AUTOC_FECR 0x00020000 +#define IXGBE_AUTOC_KR_SUPP 0x00010000 +#define IXGBE_AUTOC_AN_RESTART 0x00001000 +#define IXGBE_AUTOC_FLU 0x00000001 +#define IXGBE_AUTOC_LMS_SHIFT 13 +#define 
IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200 +#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 +#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 +#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 +#define IXGBE_AUTOC_10G_XAUI (0u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_KX4 (1u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_CX4 (2u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_BX (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_SFI (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX_BX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 +#define IXGBE_AUTOC2_10G_KR (0u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_XFI (1u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_SFI (2u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000 +#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 + +#define IXGBE_MACC_FLU 0x00000001 +#define 
IXGBE_MACC_FSV_10G 0x00030000 +#define IXGBE_MACC_FS 0x00040000 +#define IXGBE_MAC_RX2TX_LPBK 0x00000002 + +/* Veto Bit definition */ +#define IXGBE_MMNGC_MNG_VETO 0x00000001 + +/* LINKS Bit Masks */ +#define IXGBE_LINKS_KX_AN_COMP 0x80000000 +#define IXGBE_LINKS_UP 0x40000000 +#define IXGBE_LINKS_SPEED 0x20000000 +#define IXGBE_LINKS_MODE 0x18000000 +#define IXGBE_LINKS_RX_MODE 0x06000000 +#define IXGBE_LINKS_TX_MODE 0x01800000 +#define IXGBE_LINKS_XGXS_EN 0x00400000 +#define IXGBE_LINKS_SGMII_EN 0x02000000 +#define IXGBE_LINKS_PCS_1G_EN 0x00200000 +#define IXGBE_LINKS_1G_AN_EN 0x00100000 +#define IXGBE_LINKS_KX_AN_IDLE 0x00080000 +#define IXGBE_LINKS_1G_SYNC 0x00040000 +#define IXGBE_LINKS_10G_ALIGN 0x00020000 +#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 +#define IXGBE_LINKS_TL_FAULT 0x00001000 +#define IXGBE_LINKS_SIGNAL 0x00000F00 + +#define IXGBE_LINKS_SPEED_NON_STD 0x08000000 +#define IXGBE_LINKS_SPEED_82599 0x30000000 +#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 +#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 +#define IXGBE_LINKS_SPEED_100_82599 0x10000000 +#define IXGBE_LINKS_SPEED_10_X550EM_A 0 +#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ +#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ + +#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 + +/* PCS1GLSTA Bit Masks */ +#define IXGBE_PCS1GLSTA_LINK_OK 1 +#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 +#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000 +#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000 +#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000 +#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 +#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000 + +#define IXGBE_PCS1GANA_SYM_PAUSE 0x80 +#define IXGBE_PCS1GANA_ASM_PAUSE 0x100 + +/* PCS1GLCTL Bit Masks */ +#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */ +#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1 +#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20 +#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40 +#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 +#define IXGBE_PCS1GLCTL_AN_RESTART 
0x20000 + +/* ANLP1 Bit Masks */ +#define IXGBE_ANLP1_PAUSE 0x0C00 +#define IXGBE_ANLP1_SYM_PAUSE 0x0400 +#define IXGBE_ANLP1_ASM_PAUSE 0x0800 +#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 + +/* SW Semaphore Register bitmasks */ +#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ + +/* SW_FW_SYNC/GSSR definitions */ +#define IXGBE_GSSR_EEP_SM 0x0001 +#define IXGBE_GSSR_PHY0_SM 0x0002 +#define IXGBE_GSSR_PHY1_SM 0x0004 +#define IXGBE_GSSR_MAC_CSR_SM 0x0008 +#define IXGBE_GSSR_FLASH_SM 0x0010 +#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200 +#define IXGBE_GSSR_SW_MNG_SM 0x0400 +#define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */ +#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */ +#define IXGBE_GSSR_I2C_MASK 0x1800 +#define IXGBE_GSSR_NVM_PHY_MASK 0xF + +/* FW Status register bitmask */ +#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ + +/* EEC Register */ +#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ +#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ +#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ +#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ +#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */ +#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define IXGBE_EEC_FWE_SHIFT 4 +#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ +#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ +#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ +#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ +#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ +#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */ +#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ +/* EEPROM 
Addressing bits based on type (0-small, 1-large) */ +#define IXGBE_EEC_ADDR_SIZE 0x00000400 +#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ +#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. */ + +#define IXGBE_EEC_SIZE_SHIFT 11 +#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 +#define IXGBE_EEPROM_OPCODE_BITS 8 + +/* Part Number String Length */ +#define IXGBE_PBANUM_LENGTH 11 + +/* Checksum and EEPROM pointers */ +#define IXGBE_PBANUM_PTR_GUARD 0xFAFA +#define IXGBE_EEPROM_CHECKSUM 0x3F +#define IXGBE_EEPROM_SUM 0xBABA +#define IXGBE_EEPROM_CTRL_4 0x45 +#define IXGBE_EE_CTRL_4_INST_ID 0x10 +#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4 +#define IXGBE_PCIE_ANALOG_PTR 0x03 +#define IXGBE_ATLAS0_CONFIG_PTR 0x04 +#define IXGBE_PHY_PTR 0x04 +#define IXGBE_ATLAS1_CONFIG_PTR 0x05 +#define IXGBE_OPTION_ROM_PTR 0x05 +#define IXGBE_PCIE_GENERAL_PTR 0x06 +#define IXGBE_PCIE_CONFIG0_PTR 0x07 +#define IXGBE_PCIE_CONFIG1_PTR 0x08 +#define IXGBE_CORE0_PTR 0x09 +#define IXGBE_CORE1_PTR 0x0A +#define IXGBE_MAC0_PTR 0x0B +#define IXGBE_MAC1_PTR 0x0C +#define IXGBE_CSR0_CONFIG_PTR 0x0D +#define IXGBE_CSR1_CONFIG_PTR 0x0E +#define IXGBE_PCIE_ANALOG_PTR_X550 0x02 +#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000 +#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24 +#define IXGBE_PCIE_CONFIG_SIZE 0x08 +#define IXGBE_EEPROM_LAST_WORD 0x41 +#define IXGBE_FW_PTR 0x0F +#define IXGBE_PBANUM0_PTR 0x15 +#define IXGBE_PBANUM1_PTR 0x16 +#define IXGBE_FREE_SPACE_PTR 0X3E + +/* External Thermal Sensor Config */ +#define IXGBE_ETS_CFG 0x26 +#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0 +#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6 +#define IXGBE_ETS_TYPE_MASK 0x0038 +#define IXGBE_ETS_TYPE_SHIFT 3 +#define IXGBE_ETS_TYPE_EMC 0x000 +#define IXGBE_ETS_TYPE_EMC_SHIFTED 0x000 +#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007 +#define IXGBE_ETS_DATA_LOC_MASK 0x3C00 +#define IXGBE_ETS_DATA_LOC_SHIFT 10 +#define IXGBE_ETS_DATA_INDEX_MASK 0x0300 +#define IXGBE_ETS_DATA_INDEX_SHIFT 8 +#define IXGBE_ETS_DATA_HTHRESH_MASK 
0x00FF + +#define IXGBE_SAN_MAC_ADDR_PTR 0x28 +#define IXGBE_DEVICE_CAPS 0x2C +#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 +#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 +#define IXGBE_MAX_MSIX_VECTORS_82599 0x40 +#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 +#define IXGBE_MAX_MSIX_VECTORS_82598 0x13 + +/* MSI-X capability fields masks */ +#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF + +/* Legacy EEPROM word offsets */ +#define IXGBE_ISCSI_BOOT_CAPS 0x0033 +#define IXGBE_ISCSI_SETUP_PORT_0 0x0030 +#define IXGBE_ISCSI_SETUP_PORT_1 0x0034 + +/* EEPROM Commands - SPI */ +#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ +#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 +#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ +#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ +/* EEPROM reset Write Enable latch */ +#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 +#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ +#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ +#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Read Register */ +#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ +#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ +#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ +#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */ + +#define NVM_INIT_CTRL_3 0x38 +#define NVM_INIT_CTRL_3_LPLU 0x8 +#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40 +#define 
NVM_INIT_CTRL_3_D10GMP_PORT1 0x100 + +#define IXGBE_EEPROM_PAGE_SIZE_MAX 128 +#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */ +#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */ + +#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ +#define IXGBE_EEPROM_CCD_BIT 2 /* EEPROM Core Clock Disable bit */ + +#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS +#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif + +#ifndef IXGBE_EERD_EEWR_ATTEMPTS +/* Number of 5 microseconds we wait for EERD read and + * EERW write to complete */ +#define IXGBE_EERD_EEWR_ATTEMPTS 100000 +#endif + +#ifndef IXGBE_FLUDONE_ATTEMPTS +/* # attempts we wait for flush update to complete */ +#define IXGBE_FLUDONE_ATTEMPTS 20000 +#endif + +#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ +#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ +#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ +#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ + +#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR BIT(7) +#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 +#define IXGBE_FW_LESM_STATE_1 0x1 +#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ +#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define IXGBE_FW_PATCH_VERSION_4 0x7 +#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ +#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ +#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ +#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. 
SAN MAC capability */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */ + +#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ +#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ +#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ + +/* PCI Bus Info */ +#define IXGBE_PCI_DEVICE_STATUS 0xAA +#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define IXGBE_PCI_LINK_STATUS 0xB2 +#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 +#define IXGBE_PCI_LINK_WIDTH 0x3F0 +#define IXGBE_PCI_LINK_WIDTH_1 0x10 +#define IXGBE_PCI_LINK_WIDTH_2 0x20 +#define IXGBE_PCI_LINK_WIDTH_4 0x40 +#define IXGBE_PCI_LINK_WIDTH_8 0x80 +#define IXGBE_PCI_LINK_SPEED 0xF +#define IXGBE_PCI_LINK_SPEED_2500 0x1 +#define IXGBE_PCI_LINK_SPEED_5000 0x2 +#define IXGBE_PCI_LINK_SPEED_8000 0x3 +#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E +#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 + +#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf +#define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0 +#define IXGBE_PCIDEVCTRL2_50_100us 0x1 +#define IXGBE_PCIDEVCTRL2_1_2ms 0x2 +#define IXGBE_PCIDEVCTRL2_16_32ms 0x5 +#define IXGBE_PCIDEVCTRL2_65_130ms 0x6 +#define IXGBE_PCIDEVCTRL2_260_520ms 0x9 +#define IXGBE_PCIDEVCTRL2_1_2s 0xa +#define IXGBE_PCIDEVCTRL2_4_8s 0xd +#define IXGBE_PCIDEVCTRL2_17_34s 0xe + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* RAH */ +#define IXGBE_RAH_VIND_MASK 0x003C0000 +#define IXGBE_RAH_VIND_SHIFT 18 +#define IXGBE_RAH_AV 
0x80000000 +#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF + +/* Header split receive */ +#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 +#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 +#define IXGBE_RFCTL_RSC_DIS 0x00000020 +#define IXGBE_RFCTL_NFSW_DIS 0x00000040 +#define IXGBE_RFCTL_NFSR_DIS 0x00000080 +#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 +#define IXGBE_RFCTL_NFS_VER_SHIFT 8 +#define IXGBE_RFCTL_NFS_VER_2 0 +#define IXGBE_RFCTL_NFS_VER_3 1 +#define IXGBE_RFCTL_NFS_VER_4 2 +#define IXGBE_RFCTL_IPV6_DIS 0x00000400 +#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 +#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 +#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Transmit Config masks */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */ +#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ +/* Enable short packet padding to 64 bytes */ +#define IXGBE_TX_PAD_ENABLE 0x00000400 +#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ +/* This allows for 16K packets + 4k for vlan */ +#define IXGBE_MAX_FRAME_SZ 0x40040000 + +#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ +#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ + +/* Receive Config masks */ +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. 
write-back flushing */ +#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ +#define IXGBE_RXDCTL_RLPML_EN 0x00008000 +#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ + +#define IXGBE_TSAUXC_EN_CLK 0x00000004 +#define IXGBE_TSAUXC_SYNCLK 0x00000008 +#define IXGBE_TSAUXC_SDP0_INT 0x00000040 +#define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000 + +#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */ + +#define IXGBE_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define IXGBE_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IXGBE_TSYNCRXCTL_TYPE_ALL 0x08 +#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */ +#define IXGBE_TSYNCRXCTL_TSIP_UT_EN 0x00800000 /* Rx Timestamp in Packet */ + +#define IXGBE_TSIM_TXTS 0x00000002 + +#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF +#define IXGBE_RXMTRL_V1_SYNC_MSG 0x00 +#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG 0x01 +#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG 0x02 +#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03 +#define IXGBE_RXMTRL_V1_MGMT_MSG 0x04 + +#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00 +#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000 +#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100 +#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200 +#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300 +#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800 +#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900 +#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00 +#define IXGBE_RXMTRL_V2_SIGNALING_MSG 0x0C00 +#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00 + +#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ +#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ +#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast 
Promiscuous Ena */ +#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ +#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ +#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ +/* Receive Priority Flow Control Enable */ +#define IXGBE_FCTRL_RPFCE 0x00004000 +#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ +#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */ +#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ +#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ +#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ +#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Receive FC Mask */ + +#define IXGBE_MFLCN_RPFCE_SHIFT 4 + +/* Multiple Receive Queue Control */ +#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ +#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ +#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */ +#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */ +#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */ +#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */ +#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */ +#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */ +#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */ +#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 +#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000 +#define 
IXGBE_MRQC_L3L4TXSWEN 0x00008000 + +#define IXGBE_FWSM_TS_ENABLED 0x1 + +/* Queue Drop Enable */ +#define IXGBE_QDE_ENABLE 0x00000001 +#define IXGBE_QDE_HIDE_VLAN 0x00000002 +#define IXGBE_QDE_IDX_MASK 0x00007F00 +#define IXGBE_QDE_IDX_SHIFT 8 +#define IXGBE_QDE_WRITE 0x00010000 + +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ + +#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000 +#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 +/* Multiple Transmit Queue Command Register */ +#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ +#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ +#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ +#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ +#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ +#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ +#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA or 4 TQ if VT_ENA */ + +/* Receive Descriptor bit definitions */ +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ +#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 +#define 
IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */ +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ +#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */ +#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ +#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ +#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ +#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */ +#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */ +#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ +#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ +#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */ +#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */ +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE 
0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 + +#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ +#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ +#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ +#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ +#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ +#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. 
FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ +#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE 1588 Time Stamp */ + +/* PSRTYPE bit definitions */ +#define IXGBE_PSRTYPE_TCPHDR 0x00000010 +#define IXGBE_PSRTYPE_UDPHDR 0x00000020 +#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 +#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +#define IXGBE_PSRTYPE_L2HDR 0x00001000 + +/* SRRCTL bit definitions */ +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_RDMTS_SHIFT 22 +#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 +#define IXGBE_SRRCTL_DROP_EN 0x10000000 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 + +#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 + +/* RSS Hash results */ +#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define 
IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor. */ +#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ +#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */ +#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ + +/* Security Processing bit Indication */ +#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 + +/* Masks to determine if packets should be dropped due to frame errors */ +#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXD_ERR_CE | \ + IXGBE_RXD_ERR_LE | \ + IXGBE_RXD_ERR_PE | \ + IXGBE_RXD_ERR_OSE | \ + IXGBE_RXD_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXDADV_ERR_CE | \ + IXGBE_RXDADV_ERR_LE | \ + IXGBE_RXDADV_ERR_PE | \ + IXGBE_RXDADV_ERR_OSE | \ + 
IXGBE_RXDADV_ERR_USE) + +/* Multicast bit mask */ +#define IXGBE_MCSTCTRL_MFE 0x4 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 + +/* Vlan-specific macros */ +#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ +#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT + +/* SR-IOV specific macros */ +#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) +#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4)) +#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600)) +#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) +/* Translated register #defines */ +#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) +#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) +#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) +#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) + +#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index))) + +#define IXGBE_PVFTDHN(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDTN(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index))) + +enum ixgbe_fdir_pballoc_type { + IXGBE_FDIR_PBALLOC_NONE = 0, + IXGBE_FDIR_PBALLOC_64K = 1, + IXGBE_FDIR_PBALLOC_128K = 2, + IXGBE_FDIR_PBALLOC_256K = 3, +}; +#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16 + +/* Flow Director register values */ +#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001 +#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002 +#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003 
+#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008 +#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010 +#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020 +#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 +#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 +#define IXGBE_FDIRCTRL_FLEX_SHIFT 16 +#define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000 +#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21 +#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */ +#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */ +#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 +#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 +#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 +#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28 + +#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16 +#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16 +#define IXGBE_FDIRIP6M_DIPM_SHIFT 16 +#define IXGBE_FDIRM_VLANID 0x00000001 +#define IXGBE_FDIRM_VLANP 0x00000002 +#define IXGBE_FDIRM_POOL 0x00000004 +#define IXGBE_FDIRM_L4P 0x00000008 +#define IXGBE_FDIRM_FLEX 0x00000010 +#define IXGBE_FDIRM_DIPv6 0x00000020 + +#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF +#define IXGBE_FDIRFREE_FREE_SHIFT 0 +#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000 +#define IXGBE_FDIRFREE_COLL_SHIFT 16 +#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F +#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0 +#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000 +#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16 +#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF +#define IXGBE_FDIRUSTAT_ADD_SHIFT 0 +#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000 +#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16 +#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF +#define IXGBE_FDIRFSTAT_FADD_SHIFT 0 +#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00 +#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8 +#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16 +#define IXGBE_FDIRVLAN_FLEX_SHIFT 16 +#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15 +#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16 + +#define IXGBE_FDIRCMD_CMD_MASK 0x00000003 +#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001 +#define 
IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002 +#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003 +#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004 +#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008 +#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010 +#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020 +#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040 +#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060 +#define IXGBE_FDIRCMD_IPV6 0x00000080 +#define IXGBE_FDIRCMD_CLEARHT 0x00000100 +#define IXGBE_FDIRCMD_DROP 0x00000200 +#define IXGBE_FDIRCMD_INT 0x00000400 +#define IXGBE_FDIRCMD_LAST 0x00000800 +#define IXGBE_FDIRCMD_COLLISION 0x00001000 +#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 +#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 +#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 +#define IXGBE_FDIRCMD_RX_TUNNEL_FILTER_SHIFT 23 +#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 +#define IXGBE_FDIR_INIT_DONE_POLL 10 +#define IXGBE_FDIRCMD_CMD_POLL 10 +#define IXGBE_FDIRCMD_TUNNEL_FILTER 0x00800000 + +#define IXGBE_FDIR_DROP_QUEUE 127 + +/* Manageablility Host Interface defines */ +#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ +#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */ +#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ +#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */ + +/* CEM Support */ +#define FW_CEM_HDR_LEN 0x4 +#define FW_CEM_CMD_DRIVER_INFO 0xDD +#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 +#define FW_CEM_CMD_RESERVED 0x0 +#define FW_CEM_UNUSED_VER 0x0 +#define FW_CEM_MAX_RETRIES 3 +#define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */ +#define FW_READ_SHADOW_RAM_CMD 0x31 +#define FW_READ_SHADOW_RAM_LEN 0x6 +#define FW_WRITE_SHADOW_RAM_CMD 0x33 +#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */ +#define 
FW_SHADOW_RAM_DUMP_CMD 0x36 +#define FW_SHADOW_RAM_DUMP_LEN 0 +#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ +#define FW_NVM_DATA_OFFSET 3 +#define FW_MAX_READ_BUFFER_SIZE 1024 +#define FW_DISABLE_RXEN_CMD 0xDE +#define FW_DISABLE_RXEN_LEN 0x1 +#define FW_PHY_MGMT_REQ_CMD 0x20 +#define FW_PHY_TOKEN_REQ_CMD 0x0A +#define FW_PHY_TOKEN_REQ_LEN 2 +#define FW_PHY_TOKEN_REQ 0 +#define FW_PHY_TOKEN_REL 1 +#define FW_PHY_TOKEN_OK 1 +#define FW_PHY_TOKEN_RETRY 0x80 +#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */ +#define FW_PHY_TOKEN_WAIT 5 /* seconds */ +#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY) +#define FW_INT_PHY_REQ_CMD 0xB +#define FW_INT_PHY_REQ_LEN 10 +#define FW_INT_PHY_REQ_READ 0 +#define FW_INT_PHY_REQ_WRITE 1 +#define FW_PHY_ACT_REQ_CMD 5 +#define FW_PHY_ACT_DATA_COUNT 4 +#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT) +#define FW_PHY_ACT_INIT_PHY 1 +#define FW_PHY_ACT_SETUP_LINK 2 +#define FW_PHY_ACT_LINK_SPEED_10 BIT(0) +#define FW_PHY_ACT_LINK_SPEED_100 BIT(1) +#define FW_PHY_ACT_LINK_SPEED_1G BIT(2) +#define FW_PHY_ACT_LINK_SPEED_2_5G BIT(3) +#define FW_PHY_ACT_LINK_SPEED_5G BIT(4) +#define FW_PHY_ACT_LINK_SPEED_10G BIT(5) +#define FW_PHY_ACT_LINK_SPEED_20G BIT(6) +#define FW_PHY_ACT_LINK_SPEED_25G BIT(7) +#define FW_PHY_ACT_LINK_SPEED_40G BIT(8) +#define FW_PHY_ACT_LINK_SPEED_50G BIT(9) +#define FW_PHY_ACT_LINK_SPEED_100G BIT(10) +#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16 +#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3 << \ + HW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) +#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u +#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u +#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u +#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u +#define FW_PHY_ACT_SETUP_LINK_LP BIT(18) +#define FW_PHY_ACT_SETUP_LINK_HP BIT(19) +#define FW_PHY_ACT_SETUP_LINK_EEE BIT(20) +#define FW_PHY_ACT_SETUP_LINK_AN BIT(22) +#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN BIT(0) +#define FW_PHY_ACT_GET_LINK_INFO 3 +#define 
FW_PHY_ACT_GET_LINK_INFO_EEE BIT(19) +#define FW_PHY_ACT_GET_LINK_INFO_FC_TX BIT(20) +#define FW_PHY_ACT_GET_LINK_INFO_FC_RX BIT(21) +#define FW_PHY_ACT_GET_LINK_INFO_POWER BIT(22) +#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE BIT(24) +#define FW_PHY_ACT_GET_LINK_INFO_TEMP BIT(25) +#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX BIT(28) +#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX BIT(29) +#define FW_PHY_ACT_FORCE_LINK_DOWN 4 +#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF BIT(0) +#define FW_PHY_ACT_PHY_SW_RESET 5 +#define FW_PHY_ACT_PHY_HW_RESET 6 +#define FW_PHY_ACT_GET_PHY_INFO 7 +#define FW_PHY_ACT_UD_2 0x1002 +#define FW_PHY_ACT_UD_2_10G_KR_EEE BIT(6) +#define FW_PHY_ACT_UD_2_10G_KX4_EEE BIT(5) +#define FW_PHY_ACT_UD_2_1G_KX_EEE BIT(4) +#define FW_PHY_ACT_UD_2_10G_T_EEE BIT(3) +#define FW_PHY_ACT_UD_2_1G_T_EEE BIT(2) +#define FW_PHY_ACT_UD_2_100M_TX_EEE BIT(1) +#define FW_PHY_ACT_RETRIES 50 +#define FW_PHY_INFO_SPEED_MASK 0xFFFu +#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u +#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu + +/* Host Interface Command Structures */ +struct ixgbe_hic_hdr { + u8 cmd; + u8 buf_len; + union { + u8 cmd_resv; + u8 ret_status; + } cmd_or_resp; + u8 checksum; +}; + +struct ixgbe_hic_hdr2_req { + u8 cmd; + u8 buf_lenh; + u8 buf_lenl; + u8 checksum; +}; + +struct ixgbe_hic_hdr2_rsp { + u8 cmd; + u8 buf_lenl; + u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ + u8 checksum; +}; + +union ixgbe_hic_hdr2 { + struct ixgbe_hic_hdr2_req req; + struct ixgbe_hic_hdr2_rsp rsp; +}; + +struct ixgbe_hic_drv_info { + struct ixgbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + u8 pad; /* end spacing to ensure length is mult. of dword */ + u16 pad2; /* end spacing to ensure length is mult. 
of dword2 */ +}; + +struct ixgbe_hic_drv_info2 { + struct ixgbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + char driver_string[FW_CEM_DRIVER_VERSION_SIZE]; +}; + +/* These need to be dword aligned */ +struct ixgbe_hic_read_shadow_ram { + union ixgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_write_shadow_ram { + union ixgbe_hic_hdr2 hdr; + __be32 address; + __be16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_disable_rxen { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 pad2; + u16 pad3; +}; + +struct ixgbe_hic_phy_token_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 command_type; + u16 pad; +}; + +struct ixgbe_hic_internal_phy_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 command_type; + __be16 address; + u16 rsv1; + __be32 write_data; + u16 pad; +} __packed; + +struct ixgbe_hic_internal_phy_resp { + struct ixgbe_hic_hdr hdr; + __be32 read_data; +}; + +struct ixgbe_hic_phy_activity_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 pad; + __le16 activity_id; + __be32 data[FW_PHY_ACT_DATA_COUNT]; +}; + +struct ixgbe_hic_phy_activity_resp { + struct ixgbe_hic_hdr hdr; + __be32 data[FW_PHY_ACT_DATA_COUNT]; +}; + +/* Transmit Descriptor - Advanced */ +union ixgbe_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor - Advanced */ +union ixgbe_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + 
__le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct ixgbe_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ +#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ +#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE 1588 Time Stamp */ +#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ +#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ +#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ +#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define 
IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */ +#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ +#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ +#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ +#define IXGBE_ADVTXD_FCOEF_SOF (BIT(2) << 10) /* FC SOF index */ +#define IXGBE_ADVTXD_FCOEF_PARINC (BIT(3) << 10) /* Rel_Off in F_CTL */ +#define IXGBE_ADVTXD_FCOEF_ORIE (BIT(4) << 10) /* Orientation: End */ +#define IXGBE_ADVTXD_FCOEF_ORIS (BIT(5) << 10) /* Orientation: Start */ +#define IXGBE_ADVTXD_FCOEF_EOF_N (0u << 10) /* 00: EOFn */ +#define IXGBE_ADVTXD_FCOEF_EOF_T (1u << 10) /* 01: EOFt */ +#define IXGBE_ADVTXD_FCOEF_EOF_NI (2u << 10) /* 10: EOFni */ +#define IXGBE_ADVTXD_FCOEF_EOF_A (3u << 10) /* 11: EOFa 
*/ +#define IXGBE_ADVTXD_FCOEF_EOF_MASK (3u << 10) /* FC EOF index */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +/* Autonegotiation advertised speeds */ +typedef u32 ixgbe_autoneg_advertised; +/* Link speed */ +typedef u32 ixgbe_link_speed; +#define IXGBE_LINK_SPEED_UNKNOWN 0 +#define IXGBE_LINK_SPEED_10_FULL 0x0002 +#define IXGBE_LINK_SPEED_100_FULL 0x0008 +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 +#define IXGBE_LINK_SPEED_5GB_FULL 0x0800 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) +#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ + IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) + +/* Flow Control Data Sheet defined values + * Calculation and defines taken from 802.1bb Annex O + */ + +/* BitTimes (BT) conversion */ +#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) +#define IXGBE_B2BT(BT) (BT * 8) + +/* Calculate Delay to respond to PFC */ +#define IXGBE_PFC_D 672 + +/* Calculate Cable Delay */ +#define IXGBE_CABLE_DC 5556 /* Delay Copper */ +#define IXGBE_CABLE_DO 5000 /* Delay Optical */ + +/* Calculate Interface Delay X540 */ +#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */ +#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ +#define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */ + +#define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC) + +/* Calculate Interface Delay 82598, 82599 */ +#define IXGBE_PHY_D 12800 +#define IXGBE_MAC_D 4096 +#define IXGBE_XAUI_D (2 * 1024) + +#define IXGBE_ID (IXGBE_MAC_D + IXGBE_XAUI_D + IXGBE_PHY_D) + +/* Calculate Delay incurred from higher layer */ +#define IXGBE_HD 6144 + +/* Calculate PCI Bus delay for low thresholds */ +#define IXGBE_PCI_DELAY 10000 + +/* Calculate X540 delay value in bit times */ +#define 
IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (IXGBE_B2BT(_max_frame_link) + \ + IXGBE_PFC_D + \ + (2 * IXGBE_CABLE_DC) + \ + (2 * IXGBE_ID_X540) + \ + IXGBE_HD) / 25 + 1) + \ + 2 * IXGBE_B2BT(_max_frame_tc)) + +/* Calculate 82599, 82598 delay value in bit times */ +#define IXGBE_DV(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (IXGBE_B2BT(_max_frame_link) + \ + IXGBE_PFC_D + \ + (2 * IXGBE_CABLE_DC) + \ + (2 * IXGBE_ID) + \ + IXGBE_HD) / 25 + 1) + \ + 2 * IXGBE_B2BT(_max_frame_tc)) + +/* Calculate low threshold delay values */ +#define IXGBE_LOW_DV_X540(_max_frame_tc) \ + (2 * IXGBE_B2BT(_max_frame_tc) + \ + (36 * IXGBE_PCI_DELAY / 25) + 1) +#define IXGBE_LOW_DV(_max_frame_tc) \ + (2 * IXGBE_LOW_DV_X540(_max_frame_tc)) + +/* Software ATR hash keys */ +#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + +/* Software ATR input stream values and masks */ +#define IXGBE_ATR_HASH_MASK 0x7fff +#define IXGBE_ATR_L4TYPE_MASK 0x3 +#define IXGBE_ATR_L4TYPE_UDP 0x1 +#define IXGBE_ATR_L4TYPE_TCP 0x2 +#define IXGBE_ATR_L4TYPE_SCTP 0x3 +#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define IXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 +enum ixgbe_atr_flow_type { + IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, +}; + +/* Flow Director ATR input struct. 
*/ +union ixgbe_atr_input { + /* + * Byte layout in order, all values with MSB first: + * + * vm_pool - 1 byte + * flow_type - 1 byte + * vlan_id - 2 bytes + * src_ip - 16 bytes + * dst_ip - 16 bytes + * src_port - 2 bytes + * dst_port - 2 bytes + * flex_bytes - 2 bytes + * bkt_hash - 2 bytes + */ + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 bkt_hash; + } formatted; + __be32 dword_stream[11]; +}; + +/* Flow Director compressed ATR hash input struct */ +union ixgbe_atr_hash_dword { + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + } formatted; + __be32 ip; + struct { + __be16 src; + __be16 dst; + } port; + __be16 flex_bytes; + __be32 dword; +}; + +#define IXGBE_MVALS_INIT(m) \ + IXGBE_CAT(EEC, m), \ + IXGBE_CAT(FLA, m), \ + IXGBE_CAT(GRC, m), \ + IXGBE_CAT(FACTPS, m), \ + IXGBE_CAT(SWSM, m), \ + IXGBE_CAT(SWFW_SYNC, m), \ + IXGBE_CAT(FWSM, m), \ + IXGBE_CAT(SDP0_GPIEN, m), \ + IXGBE_CAT(SDP1_GPIEN, m), \ + IXGBE_CAT(SDP2_GPIEN, m), \ + IXGBE_CAT(EICR_GPI_SDP0, m), \ + IXGBE_CAT(EICR_GPI_SDP1, m), \ + IXGBE_CAT(EICR_GPI_SDP2, m), \ + IXGBE_CAT(CIAA, m), \ + IXGBE_CAT(CIAD, m), \ + IXGBE_CAT(I2C_CLK_IN, m), \ + IXGBE_CAT(I2C_CLK_OUT, m), \ + IXGBE_CAT(I2C_DATA_IN, m), \ + IXGBE_CAT(I2C_DATA_OUT, m), \ + IXGBE_CAT(I2C_DATA_OE_N_EN, m), \ + IXGBE_CAT(I2C_BB_EN, m), \ + IXGBE_CAT(I2C_CLK_OE_N_EN, m), \ + IXGBE_CAT(I2CCTL, m) + +enum ixgbe_mvals { + IXGBE_MVALS_INIT(IDX), + IXGBE_MVALS_IDX_LIMIT +}; + +enum ixgbe_eeprom_type { + ixgbe_eeprom_uninitialized = 0, + ixgbe_eeprom_spi, + ixgbe_flash, + ixgbe_eeprom_none /* No NVM support */ +}; + +enum ixgbe_mac_type { + ixgbe_mac_unknown = 0, + ixgbe_mac_82598EB, + ixgbe_mac_82599EB, + ixgbe_mac_X540, + ixgbe_mac_X550, + ixgbe_mac_X550EM_x, + ixgbe_mac_x550em_a, + ixgbe_num_macs +}; + +enum ixgbe_phy_type { + ixgbe_phy_unknown = 0, + ixgbe_phy_none, + ixgbe_phy_tn, + ixgbe_phy_aq, + 
ixgbe_phy_x550em_kr, + ixgbe_phy_x550em_kx4, + ixgbe_phy_x550em_xfi, + ixgbe_phy_x550em_ext_t, + ixgbe_phy_ext_1g_t, + ixgbe_phy_cu_unknown, + ixgbe_phy_qt, + ixgbe_phy_xaui, + ixgbe_phy_nl, + ixgbe_phy_sfp_passive_tyco, + ixgbe_phy_sfp_passive_unknown, + ixgbe_phy_sfp_active_unknown, + ixgbe_phy_sfp_avago, + ixgbe_phy_sfp_ftl, + ixgbe_phy_sfp_ftl_active, + ixgbe_phy_sfp_unknown, + ixgbe_phy_sfp_intel, + ixgbe_phy_qsfp_passive_unknown, + ixgbe_phy_qsfp_active_unknown, + ixgbe_phy_qsfp_intel, + ixgbe_phy_qsfp_unknown, + ixgbe_phy_sfp_unsupported, + ixgbe_phy_sgmii, + ixgbe_phy_fw, + ixgbe_phy_generic +}; + +/* + * SFP+ module type IDs: + * + * ID Module Type + * ============= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CU_CORE0 - 82599-specific + * 4 SFP_DA_CU_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific + */ +enum ixgbe_sfp_type { + ixgbe_sfp_type_da_cu = 0, + ixgbe_sfp_type_sr = 1, + ixgbe_sfp_type_lr = 2, + ixgbe_sfp_type_da_cu_core0 = 3, + ixgbe_sfp_type_da_cu_core1 = 4, + ixgbe_sfp_type_srlr_core0 = 5, + ixgbe_sfp_type_srlr_core1 = 6, + ixgbe_sfp_type_da_act_lmt_core0 = 7, + ixgbe_sfp_type_da_act_lmt_core1 = 8, + ixgbe_sfp_type_1g_cu_core0 = 9, + ixgbe_sfp_type_1g_cu_core1 = 10, + ixgbe_sfp_type_1g_sx_core0 = 11, + ixgbe_sfp_type_1g_sx_core1 = 12, + ixgbe_sfp_type_1g_lx_core0 = 13, + ixgbe_sfp_type_1g_lx_core1 = 14, + ixgbe_sfp_type_not_present = 0xFFFE, + ixgbe_sfp_type_unknown = 0xFFFF +}; + +enum ixgbe_media_type { + ixgbe_media_type_unknown = 0, + ixgbe_media_type_fiber, + ixgbe_media_type_fiber_qsfp, + ixgbe_media_type_fiber_lco, + ixgbe_media_type_copper, + ixgbe_media_type_backplane, + ixgbe_media_type_cx4, + ixgbe_media_type_virtual +}; + +/* Flow Control Settings */ +enum ixgbe_fc_mode { + ixgbe_fc_none = 0, + ixgbe_fc_rx_pause, + ixgbe_fc_tx_pause, + ixgbe_fc_full, + ixgbe_fc_default +}; + +/* Smart Speed Settings */ +#define IXGBE_SMARTSPEED_MAX_RETRIES 3 +enum ixgbe_smart_speed { + 
ixgbe_smart_speed_auto = 0, + ixgbe_smart_speed_on, + ixgbe_smart_speed_off +}; + +/* PCI bus types */ +enum ixgbe_bus_type { + ixgbe_bus_type_unknown = 0, + ixgbe_bus_type_pci_express, + ixgbe_bus_type_internal, + ixgbe_bus_type_reserved +}; + +/* PCI bus speeds */ +enum ixgbe_bus_speed { + ixgbe_bus_speed_unknown = 0, + ixgbe_bus_speed_33 = 33, + ixgbe_bus_speed_66 = 66, + ixgbe_bus_speed_100 = 100, + ixgbe_bus_speed_120 = 120, + ixgbe_bus_speed_133 = 133, + ixgbe_bus_speed_2500 = 2500, + ixgbe_bus_speed_5000 = 5000, + ixgbe_bus_speed_8000 = 8000, + ixgbe_bus_speed_reserved +}; + +/* PCI bus widths */ +enum ixgbe_bus_width { + ixgbe_bus_width_unknown = 0, + ixgbe_bus_width_pcie_x1 = 1, + ixgbe_bus_width_pcie_x2 = 2, + ixgbe_bus_width_pcie_x4 = 4, + ixgbe_bus_width_pcie_x8 = 8, + ixgbe_bus_width_32 = 32, + ixgbe_bus_width_64 = 64, + ixgbe_bus_width_reserved +}; + +struct ixgbe_addr_filter_info { + u32 num_mc_addrs; + u32 rar_used_count; + u32 mta_in_use; + u32 overflow_promisc; + bool uc_set_promisc; + bool user_set_promisc; +}; + +/* Bus parameters */ +struct ixgbe_bus_info { + enum ixgbe_bus_speed speed; + enum ixgbe_bus_width width; + enum ixgbe_bus_type type; + + u8 func; + u8 lan_id; + u8 instance_id; +}; + +/* Flow control parameters */ +struct ixgbe_fc_info { + u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */ + u32 low_water[MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + bool disable_fc_autoneg; /* Do not autonegotiate FC */ + bool fc_was_autonegged; /* Is current_mode the result of autonegging? 
*/ + enum ixgbe_fc_mode current_mode; /* FC mode in effect */ + enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +/* Statistics counters collected by the MAC */ +struct ixgbe_hw_stats { + u64 crcerrs; + u64 illerrc; + u64 errbc; + u64 mspdc; + u64 mpctotal; + u64 mpc[8]; + u64 mlfc; + u64 mrfc; + u64 rlec; + u64 lxontxc; + u64 lxonrxc; + u64 lxofftxc; + u64 lxoffrxc; + u64 pxontxc[8]; + u64 pxonrxc[8]; + u64 pxofftxc[8]; + u64 pxoffrxc[8]; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc[8]; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mngprc; + u64 mngpdc; + u64 mngptc; + u64 tor; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 xec; + u64 rqsmr[16]; + u64 tqsmr[8]; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; + u64 qbtc[16]; + u64 qprdc[16]; + u64 pxon2offc[8]; + u64 fdirustat_add; + u64 fdirustat_remove; + u64 fdirfstat_fadd; + u64 fdirfstat_fremove; + u64 fdirmatch; + u64 fdirmiss; + u64 fccrc; + u64 fcoerpdc; + u64 fcoeprc; + u64 fcoeptc; + u64 fcoedwrc; + u64 fcoedwtc; + u64 fcoe_noddp; + u64 fcoe_noddp_ext_buff; + u64 b2ospc; + u64 b2ogprc; + u64 o2bgptc; + u64 o2bspc; +}; + +/* forward declaration */ +struct ixgbe_hw; + +/* iterator type for walking multicast address lists */ +typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, + u32 *vmdq); + +/* Function pointer table */ +struct ixgbe_eeprom_operations { + s32 (*init_params)(struct ixgbe_hw *); + s32 (*read)(struct ixgbe_hw *, u16, u16 *); + s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + s32 (*write)(struct ixgbe_hw *, u16, u16); + s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); + s32 (*update_checksum)(struct ixgbe_hw *); + s32 (*calc_checksum)(struct ixgbe_hw *); +}; + 
+struct ixgbe_mac_operations { + s32 (*init_hw)(struct ixgbe_hw *); + s32 (*reset_hw)(struct ixgbe_hw *); + s32 (*start_hw)(struct ixgbe_hw *); + s32 (*clear_hw_cntrs)(struct ixgbe_hw *); + enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); + s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); + s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *); + s32 (*stop_adapter)(struct ixgbe_hw *); + s32 (*get_bus_info)(struct ixgbe_hw *); + void (*set_lan_id)(struct ixgbe_hw *); + s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); + s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); + s32 (*setup_sfp)(struct ixgbe_hw *); + s32 (*disable_rx_buff)(struct ixgbe_hw *); + s32 (*enable_rx_buff)(struct ixgbe_hw *); + s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); + s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); + void (*release_swfw_sync)(struct ixgbe_hw *, u32); + void (*init_swfw_sync)(struct ixgbe_hw *); + s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); + s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); + + /* Link */ + void (*disable_tx_laser)(struct ixgbe_hw *); + void (*enable_tx_laser)(struct ixgbe_hw *); + void (*flap_tx_laser)(struct ixgbe_hw *); + void (*stop_link_on_d3)(struct ixgbe_hw *); + s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); + s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, + bool *); + void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed); + + /* Packet Buffer Manipulation */ + void (*set_rxpba)(struct ixgbe_hw *, int, u32, int); + + /* LED */ + s32 (*led_on)(struct ixgbe_hw *, u32); + s32 (*led_off)(struct ixgbe_hw *, u32); + s32 (*blink_led_start)(struct ixgbe_hw *, u32); + s32 (*blink_led_stop)(struct ixgbe_hw *, u32); + s32 
(*init_led_link_act)(struct ixgbe_hw *); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); + s32 (*clear_rar)(struct ixgbe_hw *, u32); + s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32); + s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*init_rx_addrs)(struct ixgbe_hw *); + s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); + s32 (*enable_mc)(struct ixgbe_hw *); + s32 (*disable_mc)(struct ixgbe_hw *); + s32 (*clear_vfta)(struct ixgbe_hw *); + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool); + s32 (*init_uta_tables)(struct ixgbe_hw *); + void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); + void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); + + /* Flow Control */ + s32 (*fc_enable)(struct ixgbe_hw *); + s32 (*setup_fc)(struct ixgbe_hw *); + void (*fc_autoneg)(struct ixgbe_hw *); + + /* Manageability interface */ + s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16, + const char *); + s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); + s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + void (*disable_rx)(struct ixgbe_hw *hw); + void (*enable_rx)(struct ixgbe_hw *hw); + void (*set_source_address_pruning)(struct ixgbe_hw *, bool, + unsigned int); + void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int); + + /* DMA Coalescing */ + s32 (*dmac_config)(struct ixgbe_hw *hw); + s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); + s32 (*dmac_config_tcs)(struct ixgbe_hw *hw); + s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *); + s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32); +}; + +struct ixgbe_phy_operations { + s32 (*identify)(struct ixgbe_hw *); + s32 (*identify_sfp)(struct ixgbe_hw *); + s32 (*init)(struct ixgbe_hw *); + s32 (*reset)(struct ixgbe_hw *); + s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); + s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); + s32 (*read_reg_mdi)(struct 
ixgbe_hw *, u32, u32, u16 *); + s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16); + s32 (*setup_link)(struct ixgbe_hw *); + s32 (*setup_internal_link)(struct ixgbe_hw *); + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); + s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); + s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); + s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); + s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); + s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); + s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); + s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); + s32 (*check_overtemp)(struct ixgbe_hw *); + s32 (*set_phy_power)(struct ixgbe_hw *, bool on); + s32 (*enter_lplu)(struct ixgbe_hw *); + s32 (*handle_lasi)(struct ixgbe_hw *hw); + s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, + u8 *value); + s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, + u8 value); +}; + +struct ixgbe_link_operations { + s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); + s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, + u16 *val); + s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); + s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, + u16 val); +}; + +struct ixgbe_link_info { + struct ixgbe_link_operations ops; + u8 addr; +}; + +struct ixgbe_eeprom_info { + struct ixgbe_eeprom_operations ops; + enum ixgbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; + u16 address_bits; + u16 word_page_size; + u16 ctrl_word_3; +}; + +#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 +struct ixgbe_mac_info { + struct ixgbe_mac_operations ops; + enum ixgbe_mac_type type; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; + /* prefix for World 
Wide Node Name (WWNN) */ + u16 wwnn_prefix; + /* prefix for World Wide Port Name (WWPN) */ + u16 wwpn_prefix; + u16 max_msix_vectors; +#define IXGBE_MAX_MTA 128 + u32 mta_shadow[IXGBE_MAX_MTA]; + s32 mc_filter_type; + u32 mcft_size; + u32 vft_size; + u32 num_rar_entries; + u32 rar_highwater; + u32 rx_pb_size; + u32 max_tx_queues; + u32 max_rx_queues; + u32 orig_autoc; + u32 orig_autoc2; + bool orig_link_settings_stored; + bool autotry_restart; + u8 flags; + u8 san_mac_rar_index; + struct ixgbe_thermal_sensor_data thermal_sensor_data; + bool set_lben; + u8 led_link_act; +}; + +struct ixgbe_phy_info { + struct ixgbe_phy_operations ops; + struct mdio_if_info mdio; + enum ixgbe_phy_type type; + u32 id; + enum ixgbe_sfp_type sfp_type; + bool sfp_setup_needed; + u32 revision; + enum ixgbe_media_type media_type; + u32 phy_semaphore_mask; + bool reset_disable; + ixgbe_autoneg_advertised autoneg_advertised; + ixgbe_link_speed speeds_supported; + ixgbe_link_speed eee_speeds_supported; + ixgbe_link_speed eee_speeds_advertised; + enum ixgbe_smart_speed smart_speed; + bool smart_speed_active; + bool multispeed_fiber; + bool reset_if_overtemp; + bool qsfp_shared_i2c_bus; + u32 nw_mng_if_sel; +}; + +#include "ixgbe_mbx.h" + +struct ixgbe_mbx_operations { + s32 (*init_params)(struct ixgbe_hw *hw); + s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct ixgbe_hw *, u16); + s32 (*check_for_ack)(struct ixgbe_hw *, u16); + s32 (*check_for_rst)(struct ixgbe_hw *, u16); +}; + +struct ixgbe_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct ixgbe_mbx_info { + const struct ixgbe_mbx_operations *ops; + struct ixgbe_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u32 v2p_mailbox; + u16 size; +}; + +struct ixgbe_hw { + u8 __iomem *hw_addr; + void *back; + 
struct ixgbe_mac_info mac; + struct ixgbe_addr_filter_info addr_ctrl; + struct ixgbe_fc_info fc; + struct ixgbe_phy_info phy; + struct ixgbe_link_info link; + struct ixgbe_eeprom_info eeprom; + struct ixgbe_bus_info bus; + struct ixgbe_mbx_info mbx; + const u32 *mvals; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; + bool force_full_reset; + bool allow_unsupported_sfp; + bool wol_enabled; + bool need_crosstalk_fix; +}; + +struct ixgbe_info { + enum ixgbe_mac_type mac; + s32 (*get_invariants)(struct ixgbe_hw *); + const struct ixgbe_mac_operations *mac_ops; + const struct ixgbe_eeprom_operations *eeprom_ops; + const struct ixgbe_phy_operations *phy_ops; + const struct ixgbe_mbx_operations *mbx_ops; + const struct ixgbe_link_operations *link_ops; + const u32 *mvals; +}; + + +/* Error Codes */ +#define IXGBE_ERR_EEPROM -1 +#define IXGBE_ERR_EEPROM_CHECKSUM -2 +#define IXGBE_ERR_PHY -3 +#define IXGBE_ERR_CONFIG -4 +#define IXGBE_ERR_PARAM -5 +#define IXGBE_ERR_MAC_TYPE -6 +#define IXGBE_ERR_UNKNOWN_PHY -7 +#define IXGBE_ERR_LINK_SETUP -8 +#define IXGBE_ERR_ADAPTER_STOPPED -9 +#define IXGBE_ERR_INVALID_MAC_ADDR -10 +#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 +#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 +#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 +#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 +#define IXGBE_ERR_RESET_FAILED -15 +#define IXGBE_ERR_SWFW_SYNC -16 +#define IXGBE_ERR_PHY_ADDR_INVALID -17 +#define IXGBE_ERR_I2C -18 +#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 +#define IXGBE_ERR_SFP_NOT_PRESENT -20 +#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 +#define IXGBE_ERR_NO_SAN_ADDR_PTR -22 +#define IXGBE_ERR_FDIR_REINIT_FAILED -23 +#define IXGBE_ERR_EEPROM_VERSION -24 +#define IXGBE_ERR_NO_SPACE -25 +#define IXGBE_ERR_OVERTEMP -26 +#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 +#define IXGBE_ERR_FC_NOT_SUPPORTED -28 +#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 +#define IXGBE_ERR_PBA_SECTION -31 
+#define IXGBE_ERR_INVALID_ARGUMENT -32 +#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 +#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38 +#define IXGBE_ERR_FW_RESP_INVALID -39 +#define IXGBE_ERR_TOKEN_RETRY -40 +#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF + +#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) +#define IXGBE_FUSES0_300MHZ BIT(5) +#define IXGBE_FUSES0_REV_MASK (3u << 6) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) +#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200) +#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) +#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) +#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248) +#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0) +#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C) +#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) +#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00) +#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00) +#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054) +#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) +#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 
0x9A00 : 0x5A00) + +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR (0x2 << 20) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN BIT(25) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN BIT(26) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN BIT(27) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10M ~(0x7 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M BIT(28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G (0x2 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G (0x3 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN (0x4 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G (0x7 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK (0x7 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART BIT(31) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11) + +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (7u << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2u << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4u << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN BIT(12) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN BIT(13) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ BIT(14) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC BIT(15) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX BIT(16) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26) +#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE BIT(28) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART BIT(31) + +#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE BIT(28) +#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE BIT(29) + +#define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0) +#define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1) + +#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE BIT(10) +#define 
IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE BIT(11) +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12) +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19) + +#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN BIT(6) +#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN BIT(15) +#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN BIT(16) + +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL BIT(4) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS BIT(2) + +#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (3u << 16) + +#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN BIT(1) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN BIT(2) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3) +#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31) + +#define IXGBE_KX4_LINK_CNTL_1 0x4C +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX BIT(16) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 BIT(17) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX BIT(24) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 BIT(25) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE BIT(29) +#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP BIT(30) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART BIT(31) + +#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 +#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 + +#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0 +#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18 +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \ + (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT) +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20 +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \ + (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT) +#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28 +#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7 +#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31 +#define IXGBE_SB_IOSF_CTRL_BUSY BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) +#define IXGBE_SB_IOSF_TARGET_KR_PHY 0 +#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1 +#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2 +#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3 + 
+#define IXGBE_NW_MNG_IF_SEL 0x00011178 +#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M BIT(17) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M BIT(18) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G BIT(19) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G BIT(20) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G BIT(21) +#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE BIT(25) +#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) /* X552 only */ +#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 +#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ + (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) +#endif /* _IXGBE_TYPE_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.c new file mode 100644 index 000000000000..6ea0d6a5fb90 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.c @@ -0,0 +1,943 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2016 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include + +#include "ixgbe.h" +#include "ixgbe_phy.h" +#include "ixgbe_x540.h" + +#define IXGBE_X540_MAX_TX_QUEUES 128 +#define IXGBE_X540_MAX_RX_QUEUES 128 +#define IXGBE_X540_RAR_ENTRIES 128 +#define IXGBE_X540_MC_TBL_SIZE 128 +#define IXGBE_X540_VFT_TBL_SIZE 128 +#define IXGBE_X540_RX_PB_SIZE 384 + +static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); +static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); +static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); +static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); + +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) +{ + return ixgbe_media_type_copper; +} + +s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + + /* set_phy_power was set by default to NULL */ + phy->ops.set_phy_power = ixgbe_set_copper_phy_power; + + mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; + mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + return 0; +} + +/** + * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilitires + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + **/ +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + return hw->phy.ops.setup_link_speed(hw, speed, + autoneg_wait_to_complete); +} + +/** + * ixgbe_reset_hw_X540 - Perform hardware reset 
+ * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. + **/ +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) +{ + s32 status; + u32 ctrl, i; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status) + return status; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + +mac_reset_top: + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status) { + hw_dbg(hw, "semaphore failed with %d", status); + return IXGBE_ERR_SWFW_SYNC; + } + + ctrl = IXGBE_CTRL_RST; + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + usleep_range(1000, 1200); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + udelay(1); + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + hw_dbg(hw, "Reset polling failed to complete.\n"); + } + msleep(100); + + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* Set the Rx packet buffer size. */ + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT); + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. 
Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES; + hw->mac.ops.init_rx_addrs(hw); + + /* Store the permanent SAN mac address */ + hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ + if (is_valid_ether_addr(hw->mac.san_addr)) { + /* Save the SAN MAC RAR index */ + hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + + hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, + IXGBE_CLEAR_VMDQ_ALL); + + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } + + /* Store the alternative WWNN/WWPN prefix */ + hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, + &hw->mac.wwpn_prefix); + + return status; +} + +/** + * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. + **/ +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) +{ + s32 ret_val; + + ret_val = ixgbe_start_hw_generic(hw); + if (ret_val) + return ret_val; + + return ixgbe_start_hw_gen2(hw); +} + +/** + * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. 
+ **/ +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ixgbe_flash; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = BIT(eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + + hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + + return 0; +} + +/** + * ixgbe_read_eerd_X540- Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + s32 status; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; + + status = ixgbe_read_eerd_generic(hw, offset, data); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the EERD register. 
+ **/ +static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; + + status = ixgbe_read_eerd_buffer_generic(hw, offset, words, data); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the EEWR register. + **/ +static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + s32 status; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; + + status = ixgbe_write_eewr_generic(hw, offset, data); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the EEWR register. + **/ +static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; + + status = ixgbe_write_eewr_buffer_generic(hw, offset, words, data); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum + * + * This function does not use synchronization for EERD and EEWR. It can + * be used internally by function which utilize ixgbe_acquire_swfw_sync_X540. 
+ * + * @hw: pointer to hardware structure + **/ +static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) +{ + u16 i; + u16 j; + u16 checksum = 0; + u16 length = 0; + u16 pointer = 0; + u16 word = 0; + u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM; + u16 ptr_start = IXGBE_PCIE_ANALOG_PTR; + + /* + * Do not use hw->eeprom.ops.read because we do not want to take + * the synchronization semaphores here. Instead use + * ixgbe_read_eerd_generic + */ + + /* Include 0x0-0x3F in the checksum */ + for (i = 0; i < checksum_last_word; i++) { + if (ixgbe_read_eerd_generic(hw, i, &word)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + checksum += word; + } + + /* + * Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers. + */ + for (i = ptr_start; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; + + if (ixgbe_read_eerd_generic(hw, i, &pointer)) { + hw_dbg(hw, "EEPROM read failed\n"); + break; + } + + /* Skip pointer section if the pointer is invalid. */ + if (pointer == 0xFFFF || pointer == 0 || + pointer >= hw->eeprom.word_size) + continue; + + if (ixgbe_read_eerd_generic(hw, pointer, &length)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + break; + } + + /* Skip pointer section if length is invalid. */ + if (length == 0xFFFF || length == 0 || + (pointer + length) >= hw->eeprom.word_size) + continue; + + for (j = pointer + 1; j <= pointer + length; j++) { + if (ixgbe_read_eerd_generic(hw, j, &word)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + checksum += word; + } + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return (s32)checksum; +} + +/** + * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. 
If the + * caller does not need checksum_val, the value can be NULL. + **/ +static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + goto out; + + checksum = (u16)(status & 0xffff); + + /* Do not use hw->eeprom.ops.read because we do not want to take + * the synchronization semaphores twice here. + */ + status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + goto out; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + hw_dbg(hw, "Invalid EEPROM checksum"); + status = IXGBE_ERR_EEPROM_CHECKSUM; + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + return status; +} + +/** + * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum; + + /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + goto out; + + checksum = (u16)(status & 0xffff); + + /* Do not use hw->eeprom.ops.write because we do not want to + * take the synchronization semaphores twice here. + */ + status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum); + if (status) + goto out; + + status = ixgbe_update_flash_X540(hw); + +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy + * EEPROM from shadow RAM to the flash device. 
+ **/ +static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) +{ + u32 flup; + s32 status; + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == IXGBE_ERR_EEPROM) { + hw_dbg(hw, "Flash update time out\n"); + return status; + } + + flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)) | IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup); + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == 0) + hw_dbg(hw, "Flash update complete\n"); + else + hw_dbg(hw, "Flash update time out\n"); + + if (hw->revision_id == 0) { + flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + + if (flup & IXGBE_EEC_SEC1VAL) { + flup |= IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup); + } + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == 0) + hw_dbg(hw, "Flash update complete\n"); + else + hw_dbg(hw, "Flash update time out\n"); + } + + return status; +} + +/** + * ixgbe_poll_flash_update_done_X540 - Poll flash update status + * @hw: pointer to hardware structure + * + * Polls the FLUDONE (bit 26) of the EEC Register to determine when the + * flash update is done. 
+ **/ +static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) +{ + u32 i; + u32 reg; + + for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + if (reg & IXGBE_EEC_FLUDONE) + return 0; + udelay(5); + } + return IXGBE_ERR_EEPROM; +} + +/** + * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore thought the SW_FW_SYNC register for + * the specified function (CSR, PHY0, PHY1, NVM, Flash) + **/ +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) +{ + u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK; + u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK; + u32 fwmask = swmask << 5; + u32 timeout = 200; + u32 hwmask = 0; + u32 swfw_sync; + u32 i; + + if (swmask & IXGBE_GSSR_EEP_SM) + hwmask = IXGBE_GSSR_FLASH_SM; + + /* SW only mask does not have FW bit pair */ + if (mask & IXGBE_GSSR_SW_MNG_SM) + swmask |= IXGBE_GSSR_SW_MNG_SM; + + swmask |= swi2c_mask; + fwmask |= swi2c_mask << 2; + for (i = 0; i < timeout; i++) { + /* SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ + if (ixgbe_get_swfw_sync_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); + if (!(swfw_sync & (fwmask | swmask | hwmask))) { + swfw_sync |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); + usleep_range(5000, 6000); + return 0; + } + /* Firmware currently using resource (fwmask), hardware + * currently using resource (hwmask), or other software + * thread currently using resource (swmask) + */ + ixgbe_release_swfw_sync_semaphore(hw); + usleep_range(5000, 10000); + } + + /* Failed to get SW only semaphore */ + if (swmask == IXGBE_GSSR_SW_MNG_SM) { + hw_dbg(hw, "Failed to get SW only semaphore\n"); + return IXGBE_ERR_SWFW_SYNC; + } + + /* If the resource is not released by the FW/HW the 
SW can assume that + * the FW/HW malfunctions. In that case the SW should set the SW bit(s) + * of the requested resource(s) while ignoring the corresponding FW/HW + * bits in the SW_FW_SYNC register. + */ + if (ixgbe_get_swfw_sync_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); + if (swfw_sync & (fwmask | hwmask)) { + swfw_sync |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); + usleep_range(5000, 6000); + return 0; + } + /* If the resource is not released by other SW the SW can assume that + * the other SW malfunctions. In that case the SW should clear all SW + * flags that it does not own and then repeat the whole process once + * again. + */ + if (swfw_sync & swmask) { + u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | + IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM; + + if (swi2c_mask) + rmask |= IXGBE_GSSR_I2C_MASK; + ixgbe_release_swfw_sync_X540(hw, rmask); + ixgbe_release_swfw_sync_semaphore(hw); + return IXGBE_ERR_SWFW_SYNC; + } + ixgbe_release_swfw_sync_semaphore(hw); + + return IXGBE_ERR_SWFW_SYNC; +} + +/** + * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the SW_FW_SYNC register + * for the specified function (CSR, PHY0, PHY1, EVM, Flash) + **/ +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) +{ + u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM); + u32 swfw_sync; + + if (mask & IXGBE_GSSR_I2C_MASK) + swmask |= mask & IXGBE_GSSR_I2C_MASK; + ixgbe_get_swfw_sync_semaphore(hw); + + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); + swfw_sync &= ~swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); + + ixgbe_release_swfw_sync_semaphore(hw); + usleep_range(5000, 6000); +} + +/** + * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore + * @hw: pointer to 
hardware structure + * + * Sets the hardware semaphores so SW/FW can gain control of shared resources + */ +static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) +{ + u32 timeout = 2000; + u32 i; + u32 swsm; + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); + if (!(swsm & IXGBE_SWSM_SMBI)) + break; + usleep_range(50, 100); + } + + if (i == timeout) { + hw_dbg(hw, + "Software semaphore SMBI between device drivers not granted.\n"); + return IXGBE_ERR_EEPROM; + } + + /* Now get the semaphore between SW/FW through the REGSMP bit */ + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); + if (!(swsm & IXGBE_SWFW_REGSMP)) + return 0; + + usleep_range(50, 100); + } + + /* Release semaphores and return error if SW NVM semaphore + * was not granted because we do not have access to the EEPROM + */ + hw_dbg(hw, "REGSMP Software NVM semaphore not granted\n"); + ixgbe_release_swfw_sync_semaphore(hw); + return IXGBE_ERR_EEPROM; +} + +/** + * ixgbe_release_nvm_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. 
+ **/ +static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) +{ + u32 swsm; + + /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ + + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); + swsm &= ~IXGBE_SWFW_REGSMP; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swsm); + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); + swsm &= ~IXGBE_SWSM_SMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); + + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_init_swfw_sync_X540 - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function reset hardware semaphore bits for a semaphore that may + * have be left locked due to a catastrophic failure. + **/ +void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) +{ + /* First try to grab the semaphore but we don't need to bother + * looking to see whether we got the lock or not since we do + * the same thing regardless of whether we got the lock or not. + * We got the lock - we release it. + * We timeout trying to get the lock - we force its release. + */ + ixgbe_get_swfw_sync_semaphore(hw); + ixgbe_release_swfw_sync_semaphore(hw); +} + +/** + * ixgbe_blink_led_start_X540 - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink + * + * Devices that implement the version 2 interface: + * X540 + **/ +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) +{ + u32 macc_reg; + u32 ledctl_reg; + ixgbe_link_speed speed; + bool link_up; + + if (index > 3) + return IXGBE_ERR_PARAM; + + /* Link should be up in order for the blink bit in the LED control + * register to work. Force link and speed in the MAC if link is down. + * This will be reversed when we stop the blinking. + */ + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) { + macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); + macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); + } + /* Set the LED to LINK_UP + BLINK. 
*/ + ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); + ledctl_reg |= IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index. + * @hw: pointer to hardware structure + * @index: led number to stop blinking + * + * Devices that implement the version 2 interface: + * X540 + **/ +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) +{ + u32 macc_reg; + u32 ledctl_reg; + + if (index > 3) + return IXGBE_ERR_PARAM; + + /* Restore the LED to its default value. */ + ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); + ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); + ledctl_reg &= ~IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); + + /* Unforce link and speed in the MAC. */ + macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); + macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS); + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} +static const struct ixgbe_mac_operations mac_ops_X540 = { + .init_hw = &ixgbe_init_hw_generic, + .reset_hw = &ixgbe_reset_hw_X540, + .start_hw = &ixgbe_start_hw_X540, + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, + .get_media_type = &ixgbe_get_media_type_X540, + .enable_rx_dma = &ixgbe_enable_rx_dma_generic, + .get_mac_addr = &ixgbe_get_mac_addr_generic, + .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, + .get_device_caps = &ixgbe_get_device_caps_generic, + .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, + .stop_adapter = &ixgbe_stop_adapter_generic, + .get_bus_info = &ixgbe_get_bus_info_generic, + .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, + .read_analog_reg8 = NULL, + .write_analog_reg8 = NULL, + .setup_link = &ixgbe_setup_mac_link_X540, + .set_rxpba = &ixgbe_set_rxpba_generic, + .check_link = 
&ixgbe_check_mac_link_generic, + .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, + .init_led_link_act = ixgbe_init_led_link_act_generic, + .blink_led_start = &ixgbe_blink_led_start_X540, + .blink_led_stop = &ixgbe_blink_led_stop_X540, + .set_rar = &ixgbe_set_rar_generic, + .clear_rar = &ixgbe_clear_rar_generic, + .set_vmdq = &ixgbe_set_vmdq_generic, + .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, + .clear_vmdq = &ixgbe_clear_vmdq_generic, + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, + .enable_mc = &ixgbe_enable_mc_generic, + .disable_mc = &ixgbe_disable_mc_generic, + .clear_vfta = &ixgbe_clear_vfta_generic, + .set_vfta = &ixgbe_set_vfta_generic, + .fc_enable = &ixgbe_fc_enable_generic, + .setup_fc = ixgbe_setup_fc_generic, + .fc_autoneg = ixgbe_fc_autoneg, + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, + .init_uta_tables = &ixgbe_init_uta_tables_generic, + .setup_sfp = NULL, + .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, + .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, + .release_swfw_sync = &ixgbe_release_swfw_sync_X540, + .init_swfw_sync = &ixgbe_init_swfw_sync_X540, + .disable_rx_buff = &ixgbe_disable_rx_buff_generic, + .enable_rx_buff = &ixgbe_enable_rx_buff_generic, + .get_thermal_sensor_data = NULL, + .init_thermal_sensor_thresh = NULL, + .prot_autoc_read = &prot_autoc_read_generic, + .prot_autoc_write = &prot_autoc_write_generic, + .enable_rx = &ixgbe_enable_rx_generic, + .disable_rx = &ixgbe_disable_rx_generic, +}; + +static const struct ixgbe_eeprom_operations eeprom_ops_X540 = { + .init_params = &ixgbe_init_eeprom_params_X540, + .read = &ixgbe_read_eerd_X540, + .read_buffer = &ixgbe_read_eerd_buffer_X540, + .write = &ixgbe_write_eewr_X540, + .write_buffer = &ixgbe_write_eewr_buffer_X540, + .calc_checksum = 
&ixgbe_calc_eeprom_checksum_X540, + .validate_checksum = &ixgbe_validate_eeprom_checksum_X540, + .update_checksum = &ixgbe_update_eeprom_checksum_X540, +}; + +static const struct ixgbe_phy_operations phy_ops_X540 = { + .identify = &ixgbe_identify_phy_generic, + .identify_sfp = &ixgbe_identify_sfp_module_generic, + .init = NULL, + .reset = NULL, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, + .setup_link = &ixgbe_setup_phy_link_generic, + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, + .read_i2c_byte = &ixgbe_read_i2c_byte_generic, + .write_i2c_byte = &ixgbe_write_i2c_byte_generic, + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, + .check_overtemp = &ixgbe_tn_check_overtemp, + .set_phy_power = &ixgbe_set_copper_phy_power, +}; + +static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(X540) +}; + +const struct ixgbe_info ixgbe_X540_info = { + .mac = ixgbe_mac_X540, + .get_invariants = &ixgbe_get_invariants_X540, + .mac_ops = &mac_ops_X540, + .eeprom_ops = &eeprom_ops_X540, + .phy_ops = &phy_ops_X540, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_X540, +}; diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.h new file mode 100644 index 000000000000..e21cd48491d3 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.h @@ -0,0 +1,40 @@ +/******************************************************************************* + * + * Intel 10 Gigabit PCI Express Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include "ixgbe_type.h" + +s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw); +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw); +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.c new file mode 100644 index 000000000000..3236248bdb52 --- /dev/null +++ 
b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.c @@ -0,0 +1,4123 @@ +/******************************************************************************* + * + * Intel 10 Gigabit PCI Express Linux driver + * Copyright(c) 1999 - 2016 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ +#include "ixgbe_x540.h" +#include "ixgbe_type.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed); +static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *); +static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *); +static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *); +static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *); + +static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + struct ixgbe_link_info *link = &hw->link; + + /* Start with X540 invariants, since so simular */ + ixgbe_get_invariants_X540(hw); + + if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) + phy->ops.set_phy_power = NULL; + + link->addr = IXGBE_CS4227; + + return 0; +} + +static s32 
ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + + /* Start with X540 invariants, since so similar */ + ixgbe_get_invariants_X540(hw); + + phy->ops.set_phy_power = NULL; + + return 0; +} + +static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + + /* Start with X540 invariants, since so simular */ + ixgbe_get_invariants_X540(hw); + + if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) + phy->ops.set_phy_power = NULL; + + return 0; +} + +static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + + /* Start with X540 invariants, since so similar */ + ixgbe_get_invariants_X540(hw); + + phy->ops.set_phy_power = NULL; + + return 0; +} + +/** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control + * @hw: pointer to hardware structure + **/ +static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) +{ + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + + if (hw->bus.lan_id) { + esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); + esdp |= IXGBE_ESDP_SDP1_DIR; + } + esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_read_cs4227 - Read CS4227 register + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: pointer to receive value read + * + * Returns status code + */ +static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) +{ + return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value); +} + +/** + * ixgbe_write_cs4227 - Write CS4227 register + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: value to write to register + * + * Returns status code + */ +static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) +{ + return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value); +} 
+ +/** + * ixgbe_read_pe - Read register from port expander + * @hw: pointer to hardware structure + * @reg: register number to read + * @value: pointer to receive read value + * + * Returns status code + */ +static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) +{ + s32 status; + + status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value); + if (status) + hw_err(hw, "port expander access failed with %d\n", status); + return status; +} + +/** + * ixgbe_write_pe - Write register to port expander + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: value to write + * + * Returns status code + */ +static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) +{ + s32 status; + + status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, + value); + if (status) + hw_err(hw, "port expander access failed with %d\n", status); + return status; +} + +/** + * ixgbe_reset_cs4227 - Reset CS4227 using port expander + * @hw: pointer to hardware structure + * + * This function assumes that the caller has acquired the proper semaphore. + * Returns error code + */ +static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) +{ + s32 status; + u32 retry; + u16 value; + u8 reg; + + /* Trigger hard reset. 
 */ + status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg); + if (status) + return status; + reg |= IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); + if (status) + return status; + + status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg); + if (status) + return status; + reg &= ~IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg); + if (status) + return status; + + status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg); + if (status) + return status; + reg &= ~IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); + if (status) + return status; + + usleep_range(IXGBE_CS4227_RESET_HOLD, IXGBE_CS4227_RESET_HOLD + 100); + + status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg); + if (status) + return status; + reg |= IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); + if (status) + return status; + + /* Wait for the reset to complete. */ + msleep(IXGBE_CS4227_RESET_DELAY); + for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS, + &value); + if (!status && value == IXGBE_CS4227_EEPROM_LOAD_OK) + break; + msleep(IXGBE_CS4227_CHECK_DELAY); + } + if (retry == IXGBE_CS4227_RETRIES) { + hw_err(hw, "CS4227 reset did not complete\n"); + return IXGBE_ERR_PHY; + } + + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value); + if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) { + hw_err(hw, "CS4227 EEPROM did not load successfully\n"); + return IXGBE_ERR_PHY; + } + + return 0; +} + +/** + * ixgbe_check_cs4227 - Check CS4227 and reset as needed + * @hw: pointer to hardware structure + */ +static void ixgbe_check_cs4227(struct ixgbe_hw *hw) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + s32 status; + u16 value; + u8 retry; + + for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status) { + hw_err(hw, "semaphore failed with %d\n", status); + msleep(IXGBE_CS4227_CHECK_DELAY); + continue; + } + + /* 
Get status of reset flow. */ + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); + if (!status && value == IXGBE_CS4227_RESET_COMPLETE) + goto out; + + if (status || value != IXGBE_CS4227_RESET_PENDING) + break; + + /* Reset is pending. Wait and check again. */ + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msleep(IXGBE_CS4227_CHECK_DELAY); + } + /* If still pending, assume other instance failed. */ + if (retry == IXGBE_CS4227_RETRIES) { + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status) { + hw_err(hw, "semaphore failed with %d\n", status); + return; + } + } + + /* Reset the CS4227. */ + status = ixgbe_reset_cs4227(hw); + if (status) { + hw_err(hw, "CS4227 reset failed: %d", status); + goto out; + } + + /* Reset takes so long, temporarily release semaphore in case the + * other driver instance is waiting for the reset indication. + */ + ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, + IXGBE_CS4227_RESET_PENDING); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + usleep_range(10000, 12000); + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status) { + hw_err(hw, "semaphore failed with %d", status); + return; + } + + /* Record completion for next time. 
*/ + status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, + IXGBE_CS4227_RESET_COMPLETE); + +out: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msleep(hw->eeprom.semaphore_delay); +} + +/** ixgbe_identify_phy_x550em - Get PHY type based on device id + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) +{ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SFP: + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + return ixgbe_identify_module_generic(hw); + case IXGBE_DEV_ID_X550EM_X_SFP: + /* set up for CS4227 usage */ + hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + ixgbe_setup_mux_ctl(hw); + ixgbe_check_cs4227(hw); + /* Fallthrough */ + case IXGBE_DEV_ID_X550EM_A_SFP_N: + return ixgbe_identify_module_generic(hw); + case IXGBE_DEV_ID_X550EM_X_KX4: + hw->phy.type = ixgbe_phy_x550em_kx4; + break; + case IXGBE_DEV_ID_X550EM_X_XFI: + hw->phy.type = ixgbe_phy_x550em_xfi; + break; + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + hw->phy.type = ixgbe_phy_x550em_kr; + break; + case IXGBE_DEV_ID_X550EM_A_10G_T: + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + /* Fallthrough */ + case IXGBE_DEV_ID_X550EM_X_10G_T: + return ixgbe_identify_phy_generic(hw); + case IXGBE_DEV_ID_X550EM_X_1G_T: + hw->phy.type = ixgbe_phy_ext_1g_t; + break; + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + hw->phy.type = ixgbe_phy_fw; + hw->phy.ops.read_reg = NULL; + hw->phy.ops.write_reg = NULL; + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; + break; + default: + break; + } + return 0; +} + +static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, 
u16 *phy_data) +{ + return IXGBE_NOT_IMPLEMENTED; +} + +static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + return IXGBE_NOT_IMPLEMENTED; +} + +/** + * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + **/ +static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); +} + +/** + * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + **/ +static s32 +ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); +} + +/** + * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. + **/ +static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); +} + +/** + * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. 
+ **/ +static s32 +ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); +} + +/** + * ixgbe_fw_phy_activity - Perform an activity on a PHY + * @hw: pointer to hardware structure + * @activity: activity to perform + * @data: Pointer to 4 32-bit words of data + */ +s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, + u32 (*data)[FW_PHY_ACT_DATA_COUNT]) +{ + union { + struct ixgbe_hic_phy_activity_req cmd; + struct ixgbe_hic_phy_activity_resp rsp; + } hic; + u16 retries = FW_PHY_ACT_RETRIES; + s32 rc; + u32 i; + + do { + memset(&hic, 0, sizeof(hic)); + hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD; + hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN; + hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + hic.cmd.port_number = hw->bus.lan_id; + hic.cmd.activity_id = cpu_to_le16(activity); + for (i = 0; i < ARRAY_SIZE(hic.cmd.data); ++i) + hic.cmd.data[i] = cpu_to_be32((*data)[i]); + + rc = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (rc) + return rc; + if (hic.rsp.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) { + for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) + (*data)[i] = be32_to_cpu(hic.rsp.data[i]); + return 0; + } + usleep_range(20, 30); + --retries; + } while (retries > 0); + + return IXGBE_ERR_HOST_INTERFACE_COMMAND; +} + +static const struct { + u16 fw_speed; + ixgbe_link_speed phy_speed; +} ixgbe_fw_map[] = { + { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL }, + { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL }, + { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL }, + { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL }, + { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL }, + { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL }, +}; + +/** + * ixgbe_get_phy_id_fw - Get the phy ID via firmware command + * @hw: pointer to hardware structure + * + * Returns error 
code + */ +static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) +{ + u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; + u16 phy_speeds; + u16 phy_id_lo; + s32 rc; + u16 i; + + if (hw->phy.id) + return 0; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info); + if (rc) + return rc; + + hw->phy.speeds_supported = 0; + phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK; + for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { + if (phy_speeds & ixgbe_fw_map[i].fw_speed) + hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed; + } + + hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK; + phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK; + hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK; + hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; + if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) + return IXGBE_ERR_PHY_ADDR_INVALID; + + hw->phy.autoneg_advertised = hw->phy.speeds_supported; + hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; + return 0; +} + +static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +/** + * ixgbe_identify_phy_fw - Get PHY type based on firmware command + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) +{ + s32 rc; + u16 value=0; + + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + +#if 0 /* Try also to get PHY ID through MDIO by using C22 in read_reg op. + * By hilbert + */ + rc = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, &value); + hw_err(hw, "####rc:%x, PHY ID-1:%x\n", rc, value); +#endif + + hw->phy.type = ixgbe_phy_fw; +#if 0 /* We still need read/write ops later, don't NULL it. 
By hilbert */ + hw->phy.ops.read_reg = NULL; + hw->phy.ops.write_reg = NULL; +#endif + return ixgbe_get_phy_id_fw(hw); +} + +/** + * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) +{ + u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; + + setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF; + return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup); +} + +/** + * ixgbe_setup_fw_link - Setup firmware-controlled PHYs + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) +{ + u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; + s32 rc; + u16 i; + + if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) + return 0; + + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + hw_err(hw, "rx_pause not valid in strict IEEE mode\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + switch (hw->fc.requested_mode) { + case ixgbe_fc_full: + setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX << + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; + break; + case ixgbe_fc_rx_pause: + setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX << + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; + break; + case ixgbe_fc_tx_pause: + setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX << + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; + break; + default: + break; + } + + for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { + if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed) + setup[0] |= ixgbe_fw_map[i].fw_speed; + } + setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN; + + if (hw->phy.eee_speeds_advertised) + setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup); + if (rc) + return rc; + if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN) + return IXGBE_ERR_OVERTEMP; + return 0; +} + +/** + * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs + * @hw: pointer to hardware structure + * + 
* Called at init time to set up flow control. + */ +static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) +{ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + return ixgbe_setup_fw_link(hw); +} + +/** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ixgbe_flash; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = BIT(eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + + hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + + return 0; +} + +/** + * ixgbe_iosf_wait - Wait for IOSF command completion + * @hw: pointer to hardware structure + * @ctrl: pointer to location to receive final IOSF control value + * + * Return: failing status on timeout + * + * Note: ctrl can be NULL if the IOSF control register value is not needed + */ +static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) +{ + u32 i, command; + + /* Check every 10 usec to see if the address cycle completed. + * The SB IOSF BUSY bit will clear when the operation is + * complete. 
+ */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); + if (!(command & IXGBE_SB_IOSF_CTRL_BUSY)) + break; + udelay(10); + } + if (ctrl) + *ctrl = command; + if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { + hw_dbg(hw, "IOSF wait timed out\n"); + return IXGBE_ERR_PHY; + } + + return 0; +} + +/** ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the + * IOSF device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @phy_data: Pointer to read data from the register + **/ +static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data) +{ + u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; + u32 command, error; + s32 ret; + + ret = hw->mac.ops.acquire_swfw_sync(hw, gssr); + if (ret) + return ret; + + ret = ixgbe_iosf_wait(hw, NULL); + if (ret) + goto out; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + ret = ixgbe_iosf_wait(hw, &command); + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + hw_dbg(hw, "Failed to read, error %x\n", error); + return IXGBE_ERR_PHY; + } + + if (!ret) + *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); + +out: + hw->mac.ops.release_swfw_sync(hw, gssr); + return ret; +} + +/** + * ixgbe_get_phy_token - Get the token for shared PHY access + * @hw: Pointer to hardware structure + */ +static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) +{ + struct ixgbe_hic_phy_token_req token_cmd; + s32 status; + + token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; + token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; + token_cmd.hdr.cmd_or_resp.cmd_resv = 0; + token_cmd.hdr.checksum = 
FW_DEFAULT_CHECKSUM; + token_cmd.port_number = hw->bus.lan_id; + token_cmd.command_type = FW_PHY_TOKEN_REQ; + token_cmd.pad = 0; + status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (status) + return status; + if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) + return 0; + if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) + return IXGBE_ERR_FW_RESP_INVALID; + + return IXGBE_ERR_TOKEN_RETRY; +} + +/** + * ixgbe_put_phy_token - Put the token for shared PHY access + * @hw: Pointer to hardware structure + */ +static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) +{ + struct ixgbe_hic_phy_token_req token_cmd; + s32 status; + + token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; + token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; + token_cmd.hdr.cmd_or_resp.cmd_resv = 0; + token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + token_cmd.port_number = hw->bus.lan_id; + token_cmd.command_type = FW_PHY_TOKEN_REL; + token_cmd.pad = 0; + status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (status) + return status; + if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) + return 0; + return IXGBE_ERR_FW_RESP_INVALID; +} + +/** + * ixgbe_write_iosf_sb_reg_x550a - Write to IOSF PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Data to write to the register + **/ +static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + __always_unused u32 device_type, + u32 data) +{ + struct ixgbe_hic_internal_phy_req write_cmd; + + memset(&write_cmd, 0, sizeof(write_cmd)); + write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; + write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; + write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + write_cmd.port_number = hw->bus.lan_id; + write_cmd.command_type = FW_INT_PHY_REQ_WRITE; + write_cmd.address = cpu_to_be16(reg_addr); + 
write_cmd.write_data = cpu_to_be32(data); + + return ixgbe_host_interface_command(hw, &write_cmd, sizeof(write_cmd), + IXGBE_HI_COMMAND_TIMEOUT, false); +} + +/** + * ixgbe_read_iosf_sb_reg_x550a - Read from IOSF PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Pointer to read data from the register + **/ +static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + __always_unused u32 device_type, + u32 *data) +{ + union { + struct ixgbe_hic_internal_phy_req cmd; + struct ixgbe_hic_internal_phy_resp rsp; + } hic; + s32 status; + + memset(&hic, 0, sizeof(hic)); + hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; + hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; + hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + hic.cmd.port_number = hw->bus.lan_id; + hic.cmd.command_type = FW_INT_PHY_REQ_READ; + hic.cmd.address = cpu_to_be16(reg_addr); + + status = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), + IXGBE_HI_COMMAND_TIMEOUT, true); + + /* Extract the register value from the response. */ + *data = be32_to_cpu(hic.rsp.read_data); + + return status; +} + +/** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the hostif. + **/ +static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; + struct ixgbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; + s32 status; + u32 i; + + /* Take semaphore for the entire operation. 
*/ + status = hw->mac.ops.acquire_swfw_sync(hw, mask); + if (status) { + hw_dbg(hw, "EEPROM read buffer - semaphore failed\n"); + return status; + } + + while (words) { + if (words > FW_MAX_READ_BUFFER_SIZE / 2) + words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; + else + words_to_read = words; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = cpu_to_be32((offset + current_word) * 2); + buffer.length = cpu_to_be16(words_to_read * 2); + buffer.pad2 = 0; + buffer.pad3 = 0; + + status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT); + if (status) { + hw_dbg(hw, "Host interface command failed\n"); + goto out; + } + + for (i = 0; i < words_to_read; i++) { + u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) + + 2 * i; + u32 value = IXGBE_READ_REG(hw, reg); + + data[current_word] = (u16)(value & 0xffff); + current_word++; + i++; + if (i < words_to_read) { + value >>= 16; + data[current_word] = (u16)(value & 0xffff); + current_word++; + } + } + words -= words_to_read; + } + +out: + hw->mac.ops.release_swfw_sync(hw, mask); + return status; +} + +/** ixgbe_checksum_ptr_x550 - Checksum one pointer region + * @hw: pointer to hardware structure + * @ptr: pointer offset in eeprom + * @size: size of section pointed by ptr, if 0 first word will be used as size + * @csum: address of checksum to update + * + * Returns error status for any failure + **/ +static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, + u16 size, u16 *csum, u16 *buffer, + u32 buffer_size) +{ + u16 buf[256]; + s32 status; + u16 length, bufsz, i, start; + u16 *local_buffer; + + bufsz = sizeof(buf) / sizeof(buf[0]); + + /* Read a chunk at the pointer location */ + if (!buffer) { + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); + if (status) { + hw_dbg(hw, 
"Failed to read EEPROM image\n"); + return status; + } + local_buffer = buf; + } else { + if (buffer_size < ptr) + return IXGBE_ERR_PARAM; + local_buffer = &buffer[ptr]; + } + + if (size) { + start = 0; + length = size; + } else { + start = 1; + length = local_buffer[0]; + + /* Skip pointer section if length is invalid. */ + if (length == 0xFFFF || length == 0 || + (ptr + length) >= hw->eeprom.word_size) + return 0; + } + + if (buffer && ((u32)start + (u32)length > buffer_size)) + return IXGBE_ERR_PARAM; + + for (i = start; length; i++, length--) { + if (i == bufsz && !buffer) { + ptr += bufsz; + i = 0; + if (length < bufsz) + bufsz = length; + + /* Read a chunk at the pointer location */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, + bufsz, buf); + if (status) { + hw_dbg(hw, "Failed to read EEPROM image\n"); + return status; + } + } + *csum += local_buffer[i]; + } + return 0; +} + +/** ixgbe_calc_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * @buffer: pointer to buffer containing calculated checksum + * @buffer_size: size of buffer + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, + u32 buffer_size) +{ + u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; + u16 *local_buffer; + s32 status; + u16 checksum = 0; + u16 pointer, i, size; + + hw->eeprom.ops.init_params(hw); + + if (!buffer) { + /* Read pointer area */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, 0, + IXGBE_EEPROM_LAST_WORD + 1, + eeprom_ptrs); + if (status) { + hw_dbg(hw, "Failed to read EEPROM image\n"); + return status; + } + local_buffer = eeprom_ptrs; + } else { + if (buffer_size < IXGBE_EEPROM_LAST_WORD) + return IXGBE_ERR_PARAM; + local_buffer = buffer; + } + + /* For X550 hardware include 0x0-0x41 in the checksum, skip the + * checksum word itself + */ + for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++) + if (i != IXGBE_EEPROM_CHECKSUM) + checksum += 
local_buffer[i]; + + /* Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers. + */ + for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; + + pointer = local_buffer[i]; + + /* Skip pointer section if the pointer is invalid. */ + if (pointer == 0xFFFF || pointer == 0 || + pointer >= hw->eeprom.word_size) + continue; + + switch (i) { + case IXGBE_PCIE_GENERAL_PTR: + size = IXGBE_IXGBE_PCIE_GENERAL_SIZE; + break; + case IXGBE_PCIE_CONFIG0_PTR: + case IXGBE_PCIE_CONFIG1_PTR: + size = IXGBE_PCIE_CONFIG_SIZE; + break; + default: + size = 0; + break; + } + + status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum, + buffer, buffer_size); + if (status) + return status; + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return (s32)checksum; +} + +/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + return ixgbe_calc_checksum_X550(hw, NULL, 0); +} + +/** ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. 
+ **/ +static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; + struct ixgbe_hic_read_shadow_ram buffer; + s32 status; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = cpu_to_be32(offset * 2); + /* one word */ + buffer.length = cpu_to_be16(sizeof(u16)); + + status = hw->mac.ops.acquire_swfw_sync(hw, mask); + if (status) + return status; + + status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT); + if (!status) { + *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, + FW_NVM_DATA_OFFSET); + } + + hw->mac.ops.release_swfw_sync(hw, mask); + return status; +} + +/** ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + return status; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = IXGBE_ERR_EEPROM_CHECKSUM; + hw_dbg(hw, "Invalid EEPROM checksum"); + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status; + struct ixgbe_hic_write_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = cpu_to_be16(sizeof(u16)); + buffer.data = data; + buffer.address = cpu_to_be32(offset * 2); + + status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + return status; +} + +/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. 
+ **/ +static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + s32 status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + hw_dbg(hw, "write ee hostif failed to get semaphore"); + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. + **/ +static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) +{ + s32 status = 0; + union ixgbe_hic_hdr2 buffer; + + buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; + buffer.req.buf_lenh = 0; + buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; + buffer.req.checksum = FW_DEFAULT_CHECKSUM; + + status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + return status; +} + +/** + * ixgbe_get_bus_info_X550em - Set PCI bus info + * @hw: pointer to hardware structure + * + * Sets bus link width and speed to unknown because X550em is + * not a PCI device. 
+ **/ +static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) +{ + hw->bus.type = ixgbe_bus_type_internal; + hw->bus.width = ixgbe_bus_width_unknown; + hw->bus.speed = ixgbe_bus_speed_unknown; + + hw->mac.ops.set_lan_id(hw); + + return 0; +} + +/** ixgbe_disable_rx_x550 - Disable RX unit + * + * Enables the Rx DMA unit for x550 + **/ +static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) +{ + u32 rxctrl, pfdtxgswc; + s32 status; + struct ixgbe_hic_disable_rxen fw_cmd; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + + fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; + fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; + fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + fw_cmd.port_number = hw->bus.lan_id; + + status = ixgbe_host_interface_command(hw, &fw_cmd, + sizeof(struct ixgbe_hic_disable_rxen), + IXGBE_HI_COMMAND_TIMEOUT, true); + + /* If we fail - disable RX using register write */ + if (status) { + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } + } + } +} + +/** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum = 0; + + /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + status = ixgbe_calc_eeprom_checksum_X550(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + checksum); + if (status) + return status; + + status = ixgbe_update_flash_X550(hw); + + return status; +} + +/** ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * + * Write a 16 bit word(s) to the EEPROM using the hostif. + **/ +static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u32 i = 0; + + /* Take semaphore for the entire operation. 
*/ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + if (status) { + hw_dbg(hw, "EEPROM write buffer - semaphore failed\n"); + return status; + } + + for (i = 0; i < words; i++) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset + i, + data[i]); + if (status) { + hw_dbg(hw, "Eeprom buffered write failed\n"); + break; + } + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + return status; +} + +/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the + * IOSF device + * + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Data to write to the register + **/ +static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data) +{ + u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; + u32 command, error; + s32 ret; + + ret = hw->mac.ops.acquire_swfw_sync(hw, gssr); + if (ret) + return ret; + + ret = ixgbe_iosf_wait(hw, NULL); + if (ret) + goto out; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + /* Write IOSF data register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data); + + ret = ixgbe_iosf_wait(hw, &command); + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + hw_dbg(hw, "Failed to write, error %x\n", error); + return IXGBE_ERR_PHY; + } + +out: + hw->mac.ops.release_swfw_sync(hw, gssr); + return ret; +} + +/** + * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration + * @hw: pointer to hardware structure + * + * iXfI configuration needed for ixgbe_mac_X550EM_x devices. + **/ +static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) +{ + s32 status; + u32 reg_val; + + /* Disable training protocol FSM. 
*/ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Disable Flex from training TXFFE. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Enable override for coefficients. 
*/ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + return status; +} + +/** + * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the + * internal PHY + * @hw: pointer to hardware structure + **/ +static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u32 link_ctrl; + + /* Restart auto-negotiation. */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl); + + if (status) { + hw_dbg(hw, "Auto-negotiation did not complete\n"); + return status; + } + + link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl); + + if (hw->mac.type == ixgbe_mac_x550em_a) { + u32 flx_mask_st20; + + /* Indicate to FW that AN restart has been asserted */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20); + + if (status) { + hw_dbg(hw, "Auto-negotiation did not complete\n"); + return status; + } + + flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20); + } + + return status; +} + +/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. 
+ * @hw: pointer to hardware structure + * @speed: the link speed to force + * + * Configures the integrated KR PHY to use iXFI mode. Used to connect an + * internal and external PHY at a specific speed, without autonegotiation. + **/ +static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) +{ + struct ixgbe_mac_info *mac = &hw->mac; + s32 status; + u32 reg_val; + + /* iXFI is only supported with X552 */ + if (mac->type != ixgbe_mac_X550EM_x) + return IXGBE_ERR_LINK_SETUP; + + /* Disable AN and force speed to 10G Serial. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + + /* Select forced link speed for internal PHY. */ + switch (*speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + break; + default: + /* Other link speeds are not supported by internal KR PHY. */ + return IXGBE_ERR_LINK_SETUP; + } + + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Additional configuration needed for x550em_x */ + if (hw->mac.type == ixgbe_mac_X550EM_x) { + status = ixgbe_setup_ixfi_x550em_x(hw); + if (status) + return status; + } + + /* Toggle port SW reset by AN reset. 
*/ + status = ixgbe_restart_an_internal_phy_x550em(hw); + + return status; +} + +/** + * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported + * @hw: pointer to hardware structure + * @linear: true if SFP module is linear + */ +static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) +{ + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_not_present: + return IXGBE_ERR_SFP_NOT_PRESENT; + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + *linear = true; + break; + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + case ixgbe_sfp_type_da_act_lmt_core0: + case ixgbe_sfp_type_da_act_lmt_core1: + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + case ixgbe_sfp_type_1g_lx_core0: + case ixgbe_sfp_type_1g_lx_core1: + *linear = false; + break; + case ixgbe_sfp_type_unknown: + case ixgbe_sfp_type_1g_cu_core0: + case ixgbe_sfp_type_1g_cu_core1: + default: + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + return 0; +} + +/** + * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP. + * @hw: pointer to hardware structure + * + * Configures the extern PHY and the integrated KR PHY for SFP support. + */ +static s32 +ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) +{ + s32 status; + u16 reg_slice, reg_val; + bool setup_linear = false; + + /* Check if SFP module is supported and linear */ + status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * there is no reason to configure CS4227 and SFP not present error is + * not accepted in the setup MAC link flow. + */ + if (status == IXGBE_ERR_SFP_NOT_PRESENT) + return 0; + + if (status) + return status; + + /* Configure internal PHY for KR/KX. */ + ixgbe_setup_kr_speed_x550em(hw, speed); + + /* Configure CS4227 LINE side to proper mode. 
*/ + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); + if (setup_linear) + reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; + else + reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + + status = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, + reg_val); + + return status; +} + +/** + * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode + * @hw: pointer to hardware structure + * @speed: the link speed to force + * + * Configures the integrated PHY for native SFI mode. Used to connect the + * internal PHY directly to an SFP cage, without autonegotiation. + **/ +static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) +{ + struct ixgbe_mac_info *mac = &hw->mac; + s32 status; + u32 reg_val; + + /* Disable all AN and force speed to 10G Serial. */ + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; + + /* Select forced link speed for internal PHY. */ + switch (*speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; + break; + default: + /* Other link speeds are not supported by internal PHY. */ + return IXGBE_ERR_LINK_SETUP; + } + + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + /* Toggle port SW reset by AN reset. */ + status = ixgbe_restart_an_internal_phy_x550em(hw); + + return status; +} + +/** + * ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP + * @hw: pointer to hardware structure + * + * Configure the the integrated PHY for native SFP support. 
+ */ +static s32 +ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) +{ + bool setup_linear = false; + u32 reg_phy_int; + s32 ret_val; + + /* Check if SFP module is supported and linear */ + ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * SFP not present error is not excepted in the setup MAC link flow. + */ + if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) + return 0; + + if (ret_val) + return ret_val; + + /* Configure internal PHY for native SFI based on module type */ + ret_val = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_phy_int); + if (ret_val) + return ret_val; + + reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA; + if (!setup_linear) + reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR; + + ret_val = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int); + if (ret_val) + return ret_val; + + /* Setup SFI internal link. */ + return ixgbe_setup_sfi_x550a(hw, &speed); +} + +/** + * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP + * @hw: pointer to hardware structure + * + * Configure the the integrated PHY for SFP support. + */ +static s32 +ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) +{ + u32 reg_slice, slice_offset; + bool setup_linear = false; + u16 reg_phy_ext; + s32 ret_val; + + /* Check if SFP module is supported and linear */ + ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * SFP not present error is not excepted in the setup MAC link flow. 
+ */ + if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) + return 0; + + if (ret_val) + return ret_val; + + /* Configure internal PHY for KR/KX. */ + ixgbe_setup_kr_speed_x550em(hw, speed); + + if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE) + return IXGBE_ERR_PHY_ADDR_INVALID; + + /* Get external PHY SKU id */ + ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU, + IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); + if (ret_val) + return ret_val; + + /* When configuring quad port CS4223, the MAC instance is part + * of the slice offset. + */ + if (reg_phy_ext == IXGBE_CS4223_SKU_ID) + slice_offset = (hw->bus.lan_id + + (hw->bus.instance_id << 1)) << 12; + else + slice_offset = hw->bus.lan_id << 12; + + /* Configure CS4227/CS4223 LINE side to proper mode. */ + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; + + ret_val = hw->phy.ops.read_reg(hw, reg_slice, + IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); + if (ret_val) + return ret_val; + + reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) | + (IXGBE_CS4227_EDC_MODE_SR << 1)); + + if (setup_linear) + reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; + else + reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; + + ret_val = hw->phy.ops.write_reg(hw, reg_slice, + IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext); + if (ret_val) + return ret_val; + + /* Flush previous write with a read */ + return hw->phy.ops.read_reg(hw, reg_slice, + IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); +} + +/** + * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Setup internal/external PHY link speed based on link speed, then set + * external PHY auto advertised link speed. 
+ * + * Returns error status for any failure + **/ +static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait) +{ + s32 status; + ixgbe_link_speed force_speed; + + /* Setup internal/external PHY link speed to iXFI (10G), unless + * only 1G is auto advertised then setup KX link. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + else + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* If X552 and internal link mode is XFI, then setup XFI internal link. + */ + if (hw->mac.type == ixgbe_mac_X550EM_x && + !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + status = ixgbe_setup_ixfi_x550em(hw, &force_speed); + + if (status) + return status; + } + + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); +} + +/** ixgbe_check_link_t_X550em - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Check that both the MAC and X557 external PHY have link. + **/ +static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, + bool link_up_wait_to_complete) +{ + u32 status; + u16 i, autoneg_status; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; + + status = ixgbe_check_mac_link_generic(hw, speed, link_up, + link_up_wait_to_complete); + + /* If check link fails or MAC link is not up, then return */ + if (status || !(*link_up)) + return status; + + /* MAC link is up, so check external PHY link. + * Link status is latching low, and can only be used to detect link + * drop, and not the current status of the link without performing + * back-to-back reads. 
+ */ + for (i = 0; i < 2; i++) { + status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, + &autoneg_status); + + if (status) + return status; + } + + /* If external PHY link is not up, then indicate link not up */ + if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) + *link_up = false; + + return 0; +} + +/** + * ixgbe_setup_sgmii - Set up link for sgmii + * @hw: pointer to hardware structure + */ +static s32 +ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) +{ + struct ixgbe_mac_info *mac = &hw->mac; + u32 lval, sval, flx_val; + s32 rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); + if (rc) + return rc; + + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); + if (rc) + return rc; + + sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; + sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, sval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); + if (rc) + return rc; + + flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; + flx_val &= 
~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); + if (rc) + return rc; + + rc = ixgbe_restart_an_internal_phy_x550em(hw); + return rc; +} + +/** + * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait) +{ + struct ixgbe_mac_info *mac = &hw->mac; + u32 lval, sval, flx_val; + s32 rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); + if (rc) + return rc; + + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); + if (rc) + return rc; + + sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; + sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, sval); + if (rc) + return rc; + + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); + if (rc) + return rc; + + flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; 
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); + if (rc) + return rc; + + ixgbe_restart_an_internal_phy_x550em(hw); + + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); +} + +/** + * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + */ +static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; + u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; + ixgbe_link_speed speed; + bool link_up; + + /* AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. + */ + if (hw->fc.disable_fc_autoneg) + goto out; + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) + goto out; + + /* Check if auto-negotiation has completed */ + status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); + if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { + status = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } + + /* Negotiate the flow control */ + status = ixgbe_negotiate_fc(hw, info[0], info[0], + FW_PHY_ACT_GET_LINK_INFO_FC_RX, + FW_PHY_ACT_GET_LINK_INFO_FC_TX, + FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, + FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); + +out: + if (!status) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + +/** ixgbe_init_mac_link_ops_X550em_a - Init mac link function pointers + * @hw: pointer to hardware structure + **/ +static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + switch 
(mac->ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + mac->ops.setup_fc = NULL; + mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; + break; + case ixgbe_media_type_copper: + if (hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T && + hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T_L) { + mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + break; + } + mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; + mac->ops.setup_fc = ixgbe_fc_autoneg_fw; + mac->ops.setup_link = ixgbe_setup_sgmii_fw; + mac->ops.check_link = ixgbe_check_mac_link_generic; + break; + case ixgbe_media_type_backplane: + mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; + mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; + break; + default: + break; + } +} + +/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers + * @hw: pointer to hardware structure + **/ +static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + mac->ops.setup_fc = ixgbe_setup_fc_x550em; + + switch (mac->ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + /* CS4227 does not support autoneg, so disable the laser control + * functions for SFP+ fiber + */ + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; + mac->ops.flap_tx_laser = NULL; + mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SFP_N: + mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n; + break; + case IXGBE_DEV_ID_X550EM_A_SFP: + mac->ops.setup_mac_link = + ixgbe_setup_mac_link_sfp_x550a; + break; + default: + mac->ops.setup_mac_link = + ixgbe_setup_mac_link_sfp_x550em; + break; + } + mac->ops.set_rate_select_speed = + ixgbe_set_soft_rate_select_speed; + break; + case ixgbe_media_type_copper: + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) + break; + mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + mac->ops.setup_fc = ixgbe_setup_fc_generic; + mac->ops.check_link = 
ixgbe_check_link_t_X550em; + break; + case ixgbe_media_type_backplane: + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || + hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) + mac->ops.setup_link = ixgbe_setup_sgmii; + break; + default: + break; + } + + /* Additional modification for X550em_a devices */ + if (hw->mac.type == ixgbe_mac_x550em_a) + ixgbe_init_mac_link_ops_X550em_a(hw); +} + +/** ixgbe_setup_sfp_modules_X550em - Setup SFP module + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) +{ + s32 status; + bool linear; + + /* Check if SFP module is supported */ + status = ixgbe_supported_sfp_modules_X550em(hw, &linear); + if (status) + return status; + + ixgbe_init_mac_link_ops_X550em(hw); + hw->phy.ops.reset = NULL; + + return 0; +} + +/** ixgbe_get_link_capabilities_x550em - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + **/ +static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + if (hw->phy.type == ixgbe_phy_fw) { + *autoneg = true; + *speed = hw->phy.speeds_supported; + return 0; + } + + /* SFP */ + if (hw->phy.media_type == ixgbe_media_type_fiber) { + /* CS4227 SFP must not enable auto-negotiation */ + *autoneg = false; + + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + return 0; + } + + /* Link capabilities are based on SFP */ + if (hw->phy.multispeed_fiber) + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + } else { + switch (hw->phy.type) { + case ixgbe_phy_x550em_kx4: + *speed = IXGBE_LINK_SPEED_1GB_FULL | + IXGBE_LINK_SPEED_2_5GB_FULL | + IXGBE_LINK_SPEED_10GB_FULL; + break; + case ixgbe_phy_x550em_xfi: + *speed = IXGBE_LINK_SPEED_1GB_FULL | + 
IXGBE_LINK_SPEED_10GB_FULL; + break; + case ixgbe_phy_ext_1g_t: + case ixgbe_phy_sgmii: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case ixgbe_phy_x550em_kr: + if (hw->mac.type == ixgbe_mac_x550em_a) { + /* check different backplane modes */ + if (hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + break; + } else if (hw->device_id == + IXGBE_DEV_ID_X550EM_A_KR_L) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + } + } + /* fall through */ + default: + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + break; + } + *autoneg = true; + } + return 0; +} + +/** + * ixgbe_get_lasi_ext_t_x550em - Determime external Base T PHY interrupt cause + * @hw: pointer to hardware structure + * @lsc: pointer to boolean flag which indicates whether external Base T + * PHY interrupt is lsc + * + * Determime if external Base T PHY interrupt cause is high temperature + * failure alarm or link status change. + * + * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature + * failure alarm, else return PHY access status. 
+ **/ +static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) +{ + u32 status; + u16 reg; + + *lsc = false; + + /* Vendor alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, + MDIO_MMD_VEND1, + ®); + + if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN)) + return status; + + /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG, + MDIO_MMD_VEND1, + ®); + + if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | + IXGBE_MDIO_GLOBAL_ALARM_1_INT))) + return status; + + /* Global alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, + MDIO_MMD_VEND1, + ®); + + if (status) + return status; + + /* If high temperature failure, then return over temp error and exit */ + if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) { + /* power down the PHY in case the PHY FW didn't already */ + ixgbe_set_copper_phy_power(hw, false); + return IXGBE_ERR_OVERTEMP; + } + if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { + /* device fault alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, + MDIO_MMD_VEND1, + ®); + if (status) + return status; + + /* if device fault was due to high temp alarm handle and exit */ + if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) { + /* power down the PHY in case the PHY FW didn't */ + ixgbe_set_copper_phy_power(hw, false); + return IXGBE_ERR_OVERTEMP; + } + } + + /* Vendor alarm 2 triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, + MDIO_MMD_AN, ®); + + if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT)) + return status; + + /* link connect/disconnect event occurred */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2, + MDIO_MMD_AN, ®); + + if (status) + return status; + + /* Indicate LSC */ + if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC) + *lsc = true; + + return 0; +} + +/** + * 
ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts + * @hw: pointer to hardware structure + * + * Enable link status change and temperature failure alarm for the external + * Base T PHY + * + * Returns PHY access status + **/ +static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) +{ + u32 status; + u16 reg; + bool lsc; + + /* Clear interrupt flags */ + status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); + + /* Enable link status change alarm */ + + /* Enable the LASI interrupts on X552 devices to receive notifications + * of the link configurations of the external PHY and correspondingly + * support the configuration of the internal iXFI link, since iXFI does + * not support auto-negotiation. This is not required for X553 devices + * having KR support, which performs auto-negotiations and which is used + * as the internal link to the external PHY. Hence adding a check here + * to avoid enabling LASI interrupts for X553 devices. + */ + if (hw->mac.type != ixgbe_mac_x550em_a) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, + MDIO_MMD_AN, ®); + if (status) + return status; + + reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; + + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, + MDIO_MMD_AN, reg); + if (status) + return status; + } + + /* Enable high temperature failure and global fault alarms */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, + MDIO_MMD_VEND1, + ®); + if (status) + return status; + + reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN | + IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN); + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, + MDIO_MMD_VEND1, + reg); + if (status) + return status; + + /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, + MDIO_MMD_VEND1, + ®); + if (status) + return status; + + reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | + 
IXGBE_MDIO_GLOBAL_ALARM_1_INT); + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, + MDIO_MMD_VEND1, + reg); + if (status) + return status; + + /* Enable chip-wide vendor alarm */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, + MDIO_MMD_VEND1, + ®); + if (status) + return status; + + reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN; + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, + MDIO_MMD_VEND1, + reg); + + return status; +} + +/** + * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt + * @hw: pointer to hardware structure + * + * Handle external Base T PHY interrupt. If high temperature + * failure alarm then return error, else if link status change + * then setup internal/external PHY link + * + * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature + * failure alarm, else return PHY access status. + **/ +static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + bool lsc; + u32 status; + + status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); + if (status) + return status; + + if (lsc && phy->ops.setup_internal_link) + return phy->ops.setup_internal_link(hw); + + return 0; +} + +/** + * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed. + * @hw: pointer to hardware structure + * @speed: link speed + * + * Configures the integrated KR PHY. + **/ +static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed) +{ + s32 status; + u32 reg_val; + + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | + IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); + + /* Advertise 10G support. 
*/ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR; + + /* Advertise 1G support. */ + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; + + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + if (hw->mac.type == ixgbe_mac_x550em_a) { + /* Set lane mode to KR auto negotiation */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + + if (status) + return status; + + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; + reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; + reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + } + + return ixgbe_restart_an_internal_phy_x550em(hw); +} + +/** + * ixgbe_setup_kr_x550em - Configure the KR PHY + * @hw: pointer to hardware structure + **/ +static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) +{ + /* leave link alone for 2.5G */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) + return 0; + + if (ixgbe_check_reset_blocked(hw)) + return 0; + + return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); +} + +/** ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status + * @hw: address of hardware structure + * @link_up: address of boolean to indicate link status + * + * Returns error code if unable to get link status. 
**/
static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
{
	u32 ret;
	u16 autoneg_status;

	/* Default to link down; only set true on a successful, positive read */
	*link_up = false;

	/* read this twice back to back to indicate current status */
	/* NOTE(review): presumably the first read returns/clears a latched
	 * value so the second read reflects the current link state - confirm
	 * against the X557 datasheet (IEEE 802.3 Clause 45 latched bits).
	 */
	ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
				   &autoneg_status);
	if (ret)
		return ret;

	ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
				   &autoneg_status);
	if (ret)
		return ret;

	/* Report link up only when the AN link-status bit is set */
	*link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);

	return 0;
}

/** ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
 * @hw: point to hardware structure
 *
 * Configures the link between the integrated KR PHY and the external X557 PHY
 * The driver will call this function when it gets a link status change
 * interrupt from the X557 PHY. This function configures the link speed
 * between the PHYs to match the link speed of the BASE-T link.
 *
 * A return of a non-zero value indicates an error, and the base driver should
 * not report link up.
+ **/ +static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) +{ + ixgbe_link_speed force_speed; + bool link_up; + u32 status; + u16 speed; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; + + if (!(hw->mac.type == ixgbe_mac_X550EM_x && + !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) { + speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + return ixgbe_setup_kr_speed_x550em(hw, speed); + } + + /* If link is not up, then there is no setup necessary so return */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status) + return status; + + if (!link_up) + return 0; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + MDIO_MMD_AN, + &speed); + if (status) + return status; + + /* If link is not still up, then no setup is necessary so return */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status) + return status; + + if (!link_up) + return 0; + + /* clear everything but the speed and duplex bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; + + switch (speed) { + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL: + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL: + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + default: + /* Internal PHY does not support anything else */ + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + return ixgbe_setup_ixfi_x550em(hw, &force_speed); +} + +/** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI + * @hw: pointer to hardware structure + **/ +static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) +{ + s32 status; + + status = ixgbe_reset_phy_generic(hw); + + if (status) + return status; + + /* Configure Link Status Alarm and Temperature Threshold interrupts */ + return ixgbe_enable_lasi_ext_t_x550em(hw); +} + +/** + * ixgbe_led_on_t_x550em - Turns on the software controllable LEDs. 
 * @hw: pointer to hardware structure
 * @led_idx: led number to turn on
 **/
static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
	u16 phy_data;

	/* Only the X557's provisioned LED indices are software controllable */
	if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
		return IXGBE_ERR_PARAM;

	/* To turn on the LED, set mode to ON. */
	hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
			     MDIO_MMD_VEND1, &phy_data);
	phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
	hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
			      MDIO_MMD_VEND1, phy_data);

	return 0;
}

/**
 * ixgbe_led_off_t_x550em - Turns off the software controllable LEDs.
 * @hw: pointer to hardware structure
 * @led_idx: led number to turn off
 **/
static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
	u16 phy_data;

	if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
		return IXGBE_ERR_PARAM;

	/* To turn off the LED, clear the manual-set mode bits. */
	hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
			     MDIO_MMD_VEND1, &phy_data);
	phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
	hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
			      MDIO_MMD_VEND1, phy_data);

	return 0;
}

/**
 * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
 * @hw: pointer to the HW structure
 * @maj: driver version major number
 * @min: driver version minor number
 * @build: driver version build number
 * @sub: driver version sub build number
 * @len: length of driver_ver string
 * @driver_ver: driver string
 *
 * Sends driver version number to firmware through the manageability
 * block. On success return 0
 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/ +static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub, u16 len, + const char *driver_ver) +{ + struct ixgbe_hic_drv_info2 fw_cmd; + s32 ret_val; + int i; + + if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string))) + return IXGBE_ERR_INVALID_ARGUMENT; + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + memcpy(fw_cmd.driver_string, driver_ver, len); + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (ret_val) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status != + FW_CEM_RESP_STATUS_SUCCESS) + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + return 0; + } + + return ret_val; +} + +/** ixgbe_get_lcd_x550em - Determine lowest common denominator + * @hw: pointer to hardware structure + * @lcd_speed: pointer to lowest common link speed + * + * Determine lowest common link speed with link partner. 
+ **/ +static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed *lcd_speed) +{ + u16 an_lp_status; + s32 status; + u16 word = hw->eeprom.ctrl_word_3; + + *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; + + status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, + MDIO_MMD_AN, + &an_lp_status); + if (status) + return status; + + /* If link partner advertised 1G, return 1G */ + if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) { + *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL; + return status; + } + + /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */ + if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) || + (word & NVM_INIT_CTRL_3_D10GMP_PORT0)) + return status; + + /* Link partner not capable of lower speeds, return 10G */ + *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL; + return status; +} + +/** + * ixgbe_setup_fc_x550em - Set up flow control + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) +{ + bool pause, asm_dir; + u32 reg_val; + s32 rc = 0; + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + /* 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* Determine PAUSE and ASM_DIR bits. */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + pause = false; + asm_dir = false; + break; + case ixgbe_fc_tx_pause: + pause = false; + asm_dir = true; + break; + case ixgbe_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is + * disabled by software override. 
Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + /* Fallthrough */ + case ixgbe_fc_full: + pause = true; + asm_dir = true; + break; + default: + hw_err(hw, "Flow control param set incorrectly\n"); + return IXGBE_ERR_CONFIG; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + rc = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + ®_val); + if (rc) + return rc; + + reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); + if (pause) + reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; + if (asm_dir) + reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + rc = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + reg_val); + + /* This device does not fully support AN. */ + hw->fc.disable_fc_autoneg = true; + break; + case IXGBE_DEV_ID_X550EM_X_XFI: + hw->fc.disable_fc_autoneg = true; + break; + default: + break; + } + return rc; +} + +/** + * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + **/ +static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) +{ + u32 link_s1, lp_an_page_low, an_cntl_1; + s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; + ixgbe_link_speed speed; + bool link_up; + + /* AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. 
+ */ + if (hw->fc.disable_fc_autoneg) { + hw_err(hw, "Flow control autoneg is disabled"); + goto out; + } + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) { + hw_err(hw, "The link is down"); + goto out; + } + + /* Check at auto-negotiation has completed */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_S1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1); + + if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) { + hw_dbg(hw, "Auto-Negotiation did not complete\n"); + status = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } + + /* Read the 10g AN autoc and LP ability registers and resolve + * local flow control settings accordingly + */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1); + + if (status) { + hw_dbg(hw, "Auto-Negotiation did not complete\n"); + goto out; + } + + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low); + + if (status) { + hw_dbg(hw, "Auto-Negotiation did not complete\n"); + goto out; + } + + status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low, + IXGBE_KRM_AN_CNTL_1_SYM_PAUSE, + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE, + IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE, + IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE); + +out: + if (!status) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + +/** + * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings + * @hw: pointer to hardware structure + **/ +static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw) +{ + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; +} + +/** ixgbe_enter_lplu_x550em - Transition to low power states + * @hw: pointer to hardware structure + * + * Configures Low Power Link Up on transition to low power states + * (from D0 to non-D0). 
Link is required to enter LPLU so avoid resetting + * the X557 PHY immediately prior to entering LPLU. + **/ +static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) +{ + u16 an_10g_cntl_reg, autoneg_reg, speed; + s32 status; + ixgbe_link_speed lcd_speed; + u32 save_autoneg; + bool link_up; + + /* If blocked by MNG FW, then don't restart AN */ + if (ixgbe_check_reset_blocked(hw)) + return 0; + + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status) + return status; + + status = hw->eeprom.ops.read(hw, NVM_INIT_CTRL_3, + &hw->eeprom.ctrl_word_3); + if (status) + return status; + + /* If link is down, LPLU disabled in NVM, WoL disabled, or + * manageability disabled, then force link down by entering + * low power mode. + */ + if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) || + !(hw->wol_enabled || ixgbe_mng_present(hw))) + return ixgbe_set_copper_phy_power(hw, false); + + /* Determine LCD */ + status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed); + if (status) + return status; + + /* If no valid LCD link speed, then force link down and exit. */ + if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN) + return ixgbe_set_copper_phy_power(hw, false); + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + MDIO_MMD_AN, + &speed); + if (status) + return status; + + /* If no link now, speed is invalid so take link down */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status) + return ixgbe_set_copper_phy_power(hw, false); + + /* clear everything but the speed bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK; + + /* If current speed is already LCD, then exit. 
*/ + if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) && + (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) || + ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) && + (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL))) + return status; + + /* Clear AN completed indication */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, + MDIO_MMD_AN, + &autoneg_reg); + if (status) + return status; + + status = hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + &an_10g_cntl_reg); + if (status) + return status; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + MDIO_MMD_AN, + &autoneg_reg); + if (status) + return status; + + save_autoneg = hw->phy.autoneg_advertised; + + /* Setup link at least common link speed */ + status = hw->mac.ops.setup_link(hw, lcd_speed, false); + + /* restore autoneg from before setting lplu speed */ + hw->phy.autoneg_advertised = save_autoneg; + + return status; +} + +/** + * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs + * @hw: pointer to hardware structure + */ +static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) +{ + u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; + s32 rc; + + if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) + return 0; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store); + if (rc) + return rc; + memset(store, 0, sizeof(store)); + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store); + if (rc) + return rc; + + return ixgbe_setup_fw_link(hw); +} + +/** + * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp + * @hw: pointer to hardware structure + */ +static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) +{ + u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; + s32 rc; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store); + if (rc) + return rc; + + if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) { + ixgbe_shutdown_fw_phy(hw); + return IXGBE_ERR_OVERTEMP; + } + return 0; +} + +/** + * ixgbe_read_mng_if_sel_x550em - 
Read NW_MNG_IF_SEL register
 * @hw: pointer to hardware structure
 *
 * Read NW_MNG_IF_SEL register and save field values.
 */
static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
{
	/* Save NW management interface connected on board. This is used
	 * to determine internal PHY mode.
	 */
	hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);

	/* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
	 * PHY address. This register field has only been used for X552.
	 */
	if (hw->mac.type == ixgbe_mac_x550em_a &&
	    hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
		hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
				      IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
				     IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
#if 1 /* Since by Intel FW(LEK8),LAN controller 1 default set port 0 use phy address 0
       * and port 1 use phy address 1, we swap it for Porsche2 platform.
       * By hilbert.
       */
	/* NOTE(review): board-specific vendor hack - unconditionally swaps
	 * the PHY address derived from NW_MNG_IF_SEL for copper media.
	 * This diverges from upstream ixgbe and should be guarded by a
	 * platform check (or Kconfig) rather than "#if 1"; verify it is
	 * correct for every board this driver build targets.
	 */
		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
			/*hw_err(hw, "####swap phy address used for different lan id in LAN conroller-1\n");*/
			hw->phy.mdio.prtad = (hw->bus.lan_id == 0) ? (1) : (0);
			/*hw_err(hw, "####lan id: %d, phy address:%d\n",
				hw->bus.lan_id,
				hw->phy.mdio.prtad);*/
		}
#endif
	}
}

/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known. Perform the SFP init if necessary.
+ **/ +static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + + hw->mac.ops.set_lan_id(hw); + + ixgbe_read_mng_if_sel_x550em(hw); + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { + phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + ixgbe_setup_mux_ctl(hw); + } + + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + + /* Setup function pointers based on detected hardware */ + ixgbe_init_mac_link_ops_X550em(hw); + if (phy->sfp_type != ixgbe_sfp_type_unknown) + phy->ops.reset = NULL; + + /* Set functions pointers based on phy type */ + switch (hw->phy.type) { + case ixgbe_phy_x550em_kx4: + phy->ops.setup_link = NULL; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_kr: + phy->ops.setup_link = ixgbe_setup_kr_x550em; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_xfi: + /* link is managed by HW */ + phy->ops.setup_link = NULL; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_ext_t: + /* Save NW management interface connected on board. This is used + * to determine internal PHY mode + */ + phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); + + /* If internal link mode is XFI, then setup iXFI internal link, + * else setup KR now. 
+ */ + phy->ops.setup_internal_link = + ixgbe_setup_internal_phy_t_x550em; + + /* setup SW LPLU only for first revision */ + if (hw->mac.type == ixgbe_mac_X550EM_x && + !(IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)) & + IXGBE_FUSES0_REV_MASK)) + phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em; + + phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; + phy->ops.reset = ixgbe_reset_phy_t_X550em; + break; + case ixgbe_phy_sgmii: + phy->ops.setup_link = NULL; + break; + case ixgbe_phy_fw: + phy->ops.setup_link = ixgbe_setup_fw_link; + phy->ops.reset = ixgbe_reset_phy_fw; + break; + case ixgbe_phy_ext_1g_t: + phy->ops.setup_link = NULL; + phy->ops.read_reg = NULL; + phy->ops.write_reg = NULL; + phy->ops.reset = NULL; + break; + default: + break; + } + + return ret_val; +} + +/** ixgbe_get_media_type_X550em - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + * + */ +static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + /* Detect if there is a copper PHY attached. 
*/ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: + hw->phy.type = ixgbe_phy_sgmii; + /* Fallthrough */ + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_KX4: + case IXGBE_DEV_ID_X550EM_X_XFI: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_X550EM_X_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP_N: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } + return media_type; +} + +/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. + ** @hw: pointer to hardware structure + **/ +static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u16 reg; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, + MDIO_MMD_PMAPMD, + ®); + if (status) + return status; + + /* If PHY FW reset completed bit is set then this is the first + * SW instance after a power on so the PHY FW must be un-stalled. 
 */
	if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
		status = hw->phy.ops.read_reg(hw,
					      IXGBE_MDIO_GLOBAL_RES_PR_10,
					      MDIO_MMD_VEND1,
					      &reg);
		if (status)
			return status;

		/* Clear the power-up stall bit so the PHY FW can proceed */
		reg &= ~IXGBE_MDIO_POWER_UP_STALL;

		status = hw->phy.ops.write_reg(hw,
					       IXGBE_MDIO_GLOBAL_RES_PR_10,
					       MDIO_MMD_VEND1,
					       reg);
		if (status)
			return status;
	}

	return status;
}

/**
 * ixgbe_set_mdio_speed - Set MDIO clock speed
 * @hw: pointer to hardware structure
 *
 * Selects the MDC clock speed in HLREG0 based on the device ID, so the
 * first MDIO PHY access after reset uses the correct timing.
 */
static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
{
	u32 hlreg0;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_X550EM_X_10G_T:
	case IXGBE_DEV_ID_X550EM_A_SGMII:
	case IXGBE_DEV_ID_X550EM_A_SGMII_L:
	case IXGBE_DEV_ID_X550EM_A_10G_T:
	case IXGBE_DEV_ID_X550EM_A_SFP:
		/* Config MDIO clock speed before the first MDIO PHY access */
		hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
		break;
	case IXGBE_DEV_ID_X550EM_A_1G_T:
	case IXGBE_DEV_ID_X550EM_A_1G_T_L:
		/* Select fast MDIO clock speed for these devices */
		hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		hlreg0 |= IXGBE_HLREG0_MDCSPD;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
		break;
	default:
		/* All other devices keep their default MDC speed */
		break;
	}
}

/** ixgbe_reset_hw_X550em - Perform hardware reset
 ** @hw: pointer to hardware structure
 **
 ** Resets the hardware by resetting the transmit and receive units, masks
 ** and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 ** reset.
+ **/ +static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) +{ + ixgbe_link_speed link_speed; + s32 status; + u32 ctrl = 0; + u32 i; + bool link_up = false; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + + /* Call adapter stop to disable Tx/Rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status) + return status; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + + /* PHY ops must be identified and initialized prior to reset */ + + /* Identify PHY and related function pointers */ + status = hw->phy.ops.init(hw); + + /* start the external PHY */ + if (hw->phy.type == ixgbe_phy_x550em_ext_t) { + status = ixgbe_init_ext_t_x550em(hw); + if (status) + return status; + } + + /* Setup SFP module if there is one present. */ + if (hw->phy.sfp_setup_needed) { + status = hw->mac.ops.setup_sfp(hw); + hw->phy.sfp_setup_needed = false; + } + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + return status; + + /* Reset PHY */ + if (!hw->phy.reset_disable && hw->phy.ops.reset) + hw->phy.ops.reset(hw); + +mac_reset_top: + /* Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. 
+ */ + ctrl = IXGBE_CTRL_LNK_RST; + + if (!hw->force_full_reset) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up) + ctrl = IXGBE_CTRL_RST; + } + + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status) { + hw_dbg(hw, "semaphore failed with %d", status); + return IXGBE_ERR_SWFW_SYNC; + } + + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + usleep_range(1000, 1200); + + /* Poll for reset bit to self-clear meaning reset is complete */ + for (i = 0; i < 10; i++) { + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + udelay(1); + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + hw_dbg(hw, "Reset polling failed to complete.\n"); + } + + msleep(50); + + /* Double resets are required for recovery from certain error + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. 
 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	ixgbe_set_mdio_speed(hw);

	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
		ixgbe_setup_mux_ctl(hw);

	return status;
}

/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype
 *	anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for Ethertype anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
 **/
static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
						   bool enable, int vf)
{
	/* Eight VFs share each PFVFSPOOF register: vf>>3 selects the
	 * register, vf%8 (offset by the Ethertype field shift) the bit.
	 */
	int vf_target_reg = vf >> 3;
	int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
	u32 pfvfspoof;

	/* Read-modify-write only the bit for this VF */
	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
	if (enable)
		pfvfspoof |= BIT(vf_target_shift);
	else
		pfvfspoof &= ~BIT(vf_target_shift);

	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}

/** ixgbe_set_source_address_pruning_X550 - Enable/Disable src address pruning
 * @hw: pointer to hardware structure
 * @enable: enable or disable source address pruning
 * @pool: Rx pool to set source address pruning for
 **/
static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
						  bool enable,
						  unsigned int pool)
{
	u64 pfflp;

	/* max rx pool is 63 */
	if (pool > 63)
		return;

	/* The 64 per-pool enable bits span two 32-bit registers (low/high) */
	pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
	pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;

	if (enable)
		pfflp |= (1ULL << pool);
	else
		pfflp &= ~(1ULL << pool);

	IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
	IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
}

/**
 * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
 * @hw: pointer to hardware structure
 *
 * Called at init time to set up flow control.
+ **/ +static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) +{ + s32 status = 0; + u32 an_cntl = 0; + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* Set up the 1G and 10G flow control advertisement registers so the + * HW will be able to do FC autoneg once the cable is plugged in. If + * we link at 10G, the 1G advertisement is harmless and vice versa. + */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl); + + if (status) { + hw_dbg(hw, "Auto-Negotiation did not complete\n"); + return status; + } + + /* The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + /* Flow control completely disabled by software override. */ + an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); + break; + case ixgbe_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; + break; + case ixgbe_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is + * disabled by software override. 
Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + break; + default: + hw_err(hw, "Flow control param set incorrectly\n"); + return IXGBE_ERR_CONFIG; + } + + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl); + + /* Restart auto-negotiation. */ + status = ixgbe_restart_an_internal_phy_x550em(hw); + + return status; +} + +/** + * ixgbe_set_mux - Set mux for port 1 access with CS4227 + * @hw: pointer to hardware structure + * @state: set mux if 1, clear if 0 + */ +static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state) +{ + u32 esdp; + + if (!hw->bus.lan_id) + return; + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (state) + esdp |= IXGBE_ESDP_SDP1; + else + esdp &= ~IXGBE_ESDP_SDP1; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore and sets the I2C MUX + */ +static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) +{ + s32 status; + + status = ixgbe_acquire_swfw_sync_X540(hw, mask); + if (status) + return status; + + if (mask & IXGBE_GSSR_I2C_MASK) + ixgbe_set_mux(hw, 1); + + return 0; +} + +/** + * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore and sets the I2C MUX + */ +static void ixgbe_release_swfw_sync_X550em(struct 
ixgbe_hw *hw, u32 mask) +{ + if (mask & IXGBE_GSSR_I2C_MASK) + ixgbe_set_mux(hw, 0); + + ixgbe_release_swfw_sync_X540(hw, mask); +} + +/** + * ixgbe_acquire_swfw_sync_x550em_a - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore and get the shared PHY token as needed + */ +static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) +{ + u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; + int retries = FW_PHY_TOKEN_RETRIES; + s32 status; + + while (--retries) { + status = 0; + if (hmask) + status = ixgbe_acquire_swfw_sync_X540(hw, hmask); + if (status) + return status; + if (!(mask & IXGBE_GSSR_TOKEN_SM)) + return 0; + + status = ixgbe_get_phy_token(hw); + if (!status) + return 0; + if (hmask) + ixgbe_release_swfw_sync_X540(hw, hmask); + if (status != IXGBE_ERR_TOKEN_RETRY) + return status; + msleep(FW_PHY_TOKEN_DELAY); + } + + return status; +} + +/** + * ixgbe_release_swfw_sync_x550em_a - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Release the SWFW semaphore and puts the shared PHY token as needed + */ +static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) +{ + u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; + + if (mask & IXGBE_GSSR_TOKEN_SM) + ixgbe_put_phy_token(hw); + + if (hmask) + ixgbe_release_swfw_sync_X540(hw, hmask); +} + +/** + * ixgbe_read_phy_reg_x550a - Reads specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + * + * Reads a value from a specified PHY register using the SWFW lock and PHY + * Token. The PHY Token is needed since the MDIO is shared between to MAC + * instances. 
+ */ +static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; + s32 status; + + if (hw->mac.ops.acquire_swfw_sync(hw, mask)) + return IXGBE_ERR_SWFW_SYNC; + + status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); + + hw->mac.ops.release_swfw_sync(hw, mask); + + return status; +} + +/** + * ixgbe_write_phy_reg_x550a - Writes specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + * + * Writes a value to specified PHY register using the SWFW lock and PHY Token. + * The PHY Token is needed since the MDIO is shared between to MAC instances. + */ +static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; + s32 status; + + if (hw->mac.ops.acquire_swfw_sync(hw, mask)) + return IXGBE_ERR_SWFW_SYNC; + +#if 0 /* To use C22 MDI access function created by our own. 
+ * By hilbert + */ + status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data); +#else + status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, phy_data); +#endif + hw->mac.ops.release_swfw_sync(hw, mask); + + return status; +} + +#define X550_COMMON_MAC \ + .init_hw = &ixgbe_init_hw_generic, \ + .start_hw = &ixgbe_start_hw_X540, \ + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, \ + .enable_rx_dma = &ixgbe_enable_rx_dma_generic, \ + .get_mac_addr = &ixgbe_get_mac_addr_generic, \ + .get_device_caps = &ixgbe_get_device_caps_generic, \ + .stop_adapter = &ixgbe_stop_adapter_generic, \ + .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, \ + .read_analog_reg8 = NULL, \ + .write_analog_reg8 = NULL, \ + .set_rxpba = &ixgbe_set_rxpba_generic, \ + .check_link = &ixgbe_check_mac_link_generic, \ + .blink_led_start = &ixgbe_blink_led_start_X540, \ + .blink_led_stop = &ixgbe_blink_led_stop_X540, \ + .set_rar = &ixgbe_set_rar_generic, \ + .clear_rar = &ixgbe_clear_rar_generic, \ + .set_vmdq = &ixgbe_set_vmdq_generic, \ + .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, \ + .clear_vmdq = &ixgbe_clear_vmdq_generic, \ + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, \ + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, \ + .enable_mc = &ixgbe_enable_mc_generic, \ + .disable_mc = &ixgbe_disable_mc_generic, \ + .clear_vfta = &ixgbe_clear_vfta_generic, \ + .set_vfta = &ixgbe_set_vfta_generic, \ + .fc_enable = &ixgbe_fc_enable_generic, \ + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_x550, \ + .init_uta_tables = &ixgbe_init_uta_tables_generic, \ + .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ + .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ + .set_source_address_pruning = \ + &ixgbe_set_source_address_pruning_X550, \ + .set_ethertype_anti_spoofing = \ + &ixgbe_set_ethertype_anti_spoofing_X550, \ + .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \ + .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ + .get_thermal_sensor_data = 
NULL, \ + .init_thermal_sensor_thresh = NULL, \ + .enable_rx = &ixgbe_enable_rx_generic, \ + .disable_rx = &ixgbe_disable_rx_x550, \ + +static const struct ixgbe_mac_operations mac_ops_X550 = { + X550_COMMON_MAC + .led_on = ixgbe_led_on_generic, + .led_off = ixgbe_led_off_generic, + .init_led_link_act = ixgbe_init_led_link_act_generic, + .reset_hw = &ixgbe_reset_hw_X540, + .get_media_type = &ixgbe_get_media_type_X540, + .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, + .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, + .setup_link = &ixgbe_setup_mac_link_X540, + .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, + .get_bus_info = &ixgbe_get_bus_info_generic, + .setup_sfp = NULL, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, + .release_swfw_sync = &ixgbe_release_swfw_sync_X540, + .init_swfw_sync = &ixgbe_init_swfw_sync_X540, + .prot_autoc_read = prot_autoc_read_generic, + .prot_autoc_write = prot_autoc_write_generic, + .setup_fc = ixgbe_setup_fc_generic, + .fc_autoneg = ixgbe_fc_autoneg, +}; + +static const struct ixgbe_mac_operations mac_ops_X550EM_x = { + X550_COMMON_MAC + .led_on = ixgbe_led_on_t_x550em, + .led_off = ixgbe_led_off_t_x550em, + .init_led_link_act = ixgbe_init_led_link_act_generic, + .reset_hw = &ixgbe_reset_hw_X550em, + .get_media_type = &ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, + .get_wwn_prefix = NULL, + .setup_link = &ixgbe_setup_mac_link_X540, + .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, + .get_bus_info = &ixgbe_get_bus_info_X550em, + .setup_sfp = ixgbe_setup_sfp_modules_X550em, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, + .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, + .init_swfw_sync = &ixgbe_init_swfw_sync_X540, + .setup_fc = NULL, /* defined later */ + .fc_autoneg = ixgbe_fc_autoneg, + .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550, + .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, +}; + +static const struct ixgbe_mac_operations mac_ops_X550EM_x_fw = 
{ + X550_COMMON_MAC + .led_on = NULL, + .led_off = NULL, + .init_led_link_act = NULL, + .reset_hw = &ixgbe_reset_hw_X550em, + .get_media_type = &ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, + .get_wwn_prefix = NULL, + .setup_link = &ixgbe_setup_mac_link_X540, + .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, + .get_bus_info = &ixgbe_get_bus_info_X550em, + .setup_sfp = ixgbe_setup_sfp_modules_X550em, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, + .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, + .init_swfw_sync = &ixgbe_init_swfw_sync_X540, + .setup_fc = NULL, + .fc_autoneg = ixgbe_fc_autoneg, + .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550, + .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, +}; + +static struct ixgbe_mac_operations mac_ops_x550em_a = { + X550_COMMON_MAC + .led_on = ixgbe_led_on_t_x550em, + .led_off = ixgbe_led_off_t_x550em, + .init_led_link_act = ixgbe_init_led_link_act_generic, + .reset_hw = ixgbe_reset_hw_X550em, + .get_media_type = ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, + .get_wwn_prefix = NULL, + .setup_link = &ixgbe_setup_mac_link_X540, + .get_link_capabilities = ixgbe_get_link_capabilities_X550em, + .get_bus_info = ixgbe_get_bus_info_X550em, + .setup_sfp = ixgbe_setup_sfp_modules_X550em, + .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, + .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, + .setup_fc = ixgbe_setup_fc_x550em, + .fc_autoneg = ixgbe_fc_autoneg, + .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, + .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, +}; + +static struct ixgbe_mac_operations mac_ops_x550em_a_fw = { + X550_COMMON_MAC + .led_on = ixgbe_led_on_generic, + .led_off = ixgbe_led_off_generic, + .init_led_link_act = ixgbe_init_led_link_act_generic, + .reset_hw = ixgbe_reset_hw_X550em, + .get_media_type = ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, + .get_wwn_prefix = NULL, + .setup_link = NULL, /* defined later */ + 
.get_link_capabilities = ixgbe_get_link_capabilities_X550em, + .get_bus_info = ixgbe_get_bus_info_X550em, + .setup_sfp = ixgbe_setup_sfp_modules_X550em, + .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, + .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, + .setup_fc = ixgbe_setup_fc_x550em, + .fc_autoneg = ixgbe_fc_autoneg, + .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, + .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, +}; + +#define X550_COMMON_EEP \ + .read = &ixgbe_read_ee_hostif_X550, \ + .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \ + .write = &ixgbe_write_ee_hostif_X550, \ + .write_buffer = &ixgbe_write_ee_hostif_buffer_X550, \ + .validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \ + .update_checksum = &ixgbe_update_eeprom_checksum_X550, \ + .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \ + +static const struct ixgbe_eeprom_operations eeprom_ops_X550 = { + X550_COMMON_EEP + .init_params = &ixgbe_init_eeprom_params_X550, +}; + +static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { + X550_COMMON_EEP + .init_params = &ixgbe_init_eeprom_params_X540, +}; + +#define X550_COMMON_PHY \ + .identify_sfp = &ixgbe_identify_module_generic, \ + .reset = NULL, \ + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, \ + .read_i2c_byte = &ixgbe_read_i2c_byte_generic, \ + .write_i2c_byte = &ixgbe_write_i2c_byte_generic, \ + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \ + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ + .setup_link = &ixgbe_setup_phy_link_generic, \ + .set_phy_power = NULL, + +static const struct ixgbe_phy_operations phy_ops_X550 = { + X550_COMMON_PHY + .check_overtemp = &ixgbe_tn_check_overtemp, + .init = NULL, + .identify = &ixgbe_identify_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, +}; + +static const struct ixgbe_phy_operations phy_ops_X550EM_x = { + X550_COMMON_PHY + 
.check_overtemp = &ixgbe_tn_check_overtemp, + .init = &ixgbe_init_phy_ops_X550em, + .identify = &ixgbe_identify_phy_x550em, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, +}; + +static const struct ixgbe_phy_operations phy_ops_x550em_x_fw = { + X550_COMMON_PHY + .check_overtemp = NULL, + .init = ixgbe_init_phy_ops_X550em, + .identify = ixgbe_identify_phy_x550em, + .read_reg = NULL, + .write_reg = NULL, + .read_reg_mdi = NULL, + .write_reg_mdi = NULL, +}; + +static const struct ixgbe_phy_operations phy_ops_x550em_a = { + X550_COMMON_PHY + .check_overtemp = &ixgbe_tn_check_overtemp, + .init = &ixgbe_init_phy_ops_X550em, + .identify = &ixgbe_identify_phy_x550em, + .read_reg = &ixgbe_read_phy_reg_x550a, + .write_reg = &ixgbe_write_phy_reg_x550a, + .read_reg_mdi = &ixgbe_read_phy_reg_mdi, + .write_reg_mdi = &ixgbe_write_phy_reg_mdi, +}; + +static const struct ixgbe_phy_operations phy_ops_x550em_a_fw = { + X550_COMMON_PHY + .check_overtemp = ixgbe_check_overtemp_fw, + .init = ixgbe_init_phy_ops_X550em, + .identify = ixgbe_identify_phy_fw, +#if 0 /* Declare C22 MDI directly access functions. 
By hilbert */ + .read_reg = NULL, + .write_reg = NULL, + .read_reg_mdi = NULL, + .write_reg_mdi = NULL, +#else + .read_reg = &ixgbe_read_phy_reg_x550a, + .write_reg = &ixgbe_write_phy_reg_x550a, + .read_reg_mdi = &ixgbe_read_phy_reg_mdio, + .write_reg_mdi = &ixgbe_write_phy_reg_mdio, +#endif +}; + +static const struct ixgbe_link_operations link_ops_x550em_x = { + .read_link = &ixgbe_read_i2c_combined_generic, + .read_link_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, + .write_link = &ixgbe_write_i2c_combined_generic, + .write_link_unlocked = &ixgbe_write_i2c_combined_generic_unlocked, +}; + +static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(X550) +}; + +static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(X550EM_x) +}; + +static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(X550EM_a) +}; + +const struct ixgbe_info ixgbe_X550_info = { + .mac = ixgbe_mac_X550, + .get_invariants = &ixgbe_get_invariants_X540, + .mac_ops = &mac_ops_X550, + .eeprom_ops = &eeprom_ops_X550, + .phy_ops = &phy_ops_X550, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_X550, +}; + +const struct ixgbe_info ixgbe_X550EM_x_info = { + .mac = ixgbe_mac_X550EM_x, + .get_invariants = &ixgbe_get_invariants_X550_x, + .mac_ops = &mac_ops_X550EM_x, + .eeprom_ops = &eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_X550EM_x, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_X550EM_x, + .link_ops = &link_ops_x550em_x, +}; + +const struct ixgbe_info ixgbe_x550em_x_fw_info = { + .mac = ixgbe_mac_X550EM_x, + .get_invariants = ixgbe_get_invariants_X550_x_fw, + .mac_ops = &mac_ops_X550EM_x_fw, + .eeprom_ops = &eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_x550em_x_fw, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_X550EM_x, +}; + +const struct ixgbe_info ixgbe_x550em_a_info = { + .mac = ixgbe_mac_x550em_a, + .get_invariants = &ixgbe_get_invariants_X550_a, + .mac_ops = &mac_ops_x550em_a, + .eeprom_ops = 
&eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_x550em_a, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_x550em_a, +}; + +const struct ixgbe_info ixgbe_x550em_a_fw_info = { + .mac = ixgbe_mac_x550em_a, + .get_invariants = ixgbe_get_invariants_X550_a_fw, + .mac_ops = &mac_ops_x550em_a_fw, + .eeprom_ops = &eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_x550em_a_fw, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_x550em_a, +}; diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_psu.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_psu.c new file mode 100644 index 000000000000..700817c7a513 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_psu.c @@ -0,0 +1,329 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef PEGA_DEBUG +/*#define pega_DEBUG*/ +#ifdef PEGA_DEBUG +#define DBG(x) x +#else +#define DBG(x) +#endif /* DEBUG */ + +#define PSU_58_ADDRESS 0x58 +#define PSU_59_ADDRESS 0x59 +#define PSU_VOUT_REG 0x7A +#define PSU_IOUT_REG 0x7B +#define PSU_IPUT_REG 0x7C +#define PSU_TEMP_REG 0x7D +#define PSU_VOUT_OVER_VOLTAGE_BIT 7 +#define PSU_IOUT_OVER_CURRENT_FAULT_BIT 7 +#define PSU_IOUT_OVER_CURRENT_WARNING_BIT 5 +#define PSU_IPUT_OVER_CURRENT_WARNING_BIT 1 +#define PSU_IPUT_INSUFFICIENT_BIT 3 +#define PSU_TEMP_OVER_TEMP_FAULT_BIT 7 +#define PSU_TEMP_OVER_TEMP_WARNING_BIT 6 + +#define GET_BIT(data, bit, value) value = (data >> bit) & 0x1 +#define SET_BIT(data, bit) data |= (1 << bit) +#define CLEAR_BIT(data, bit) data &= ~(1 << bit) + +struct psu_client_node { + struct i2c_client *client; + struct list_head list; +}; + +static const unsigned short normal_i2c[] = { PSU_58_ADDRESS, PSU_59_ADDRESS, I2C_CLIENT_END }; +static LIST_HEAD(psu_client_list); +static struct mutex list_lock; + +static int pega_fn_6254_dn_f_psu_read(unsigned short addr, u8 reg) +{ + struct list_head *list_node = NULL; + struct psu_client_node *psu_node = NULL; + int data = -EPERM; + + mutex_lock(&list_lock); + + list_for_each(list_node, &psu_client_list) + { + psu_node = list_entry(list_node, struct psu_client_node, list); + + if (psu_node->client->addr == addr) { + data = i2c_smbus_read_byte_data(psu_node->client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, addr, reg, data)); + break; + } + } + + mutex_unlock(&list_lock); + + return data; +} + +static ssize_t read_psu_vout_over_voltage(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = PSU_VOUT_REG, val = 0; + + data = pega_fn_6254_dn_f_psu_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: 
%x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, PSU_VOUT_OVER_VOLTAGE_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t read_psu_iout_over_current_fault(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = PSU_IOUT_REG, val = 0; + + data = pega_fn_6254_dn_f_psu_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, PSU_IOUT_OVER_CURRENT_FAULT_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t read_psu_iout_over_current_warning(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = PSU_IOUT_REG, val = 0; + + data = pega_fn_6254_dn_f_psu_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, PSU_IOUT_OVER_CURRENT_WARNING_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t read_psu_iput_over_current_warning(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = PSU_IPUT_REG, val = 0; + + data = pega_fn_6254_dn_f_psu_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, PSU_IPUT_OVER_CURRENT_WARNING_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t read_psu_iput_insufficient(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = PSU_IPUT_REG, val = 0; + + data = pega_fn_6254_dn_f_psu_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, PSU_IPUT_INSUFFICIENT_BIT, val); + + return sprintf(buf, "%d\n", val); +} 
+ +static ssize_t read_psu_temp_over_temp_fault(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = PSU_TEMP_REG, val = 0; + + data = pega_fn_6254_dn_f_psu_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, PSU_TEMP_OVER_TEMP_FAULT_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t read_psu_temp_over_temp_warning(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = PSU_TEMP_REG, val = 0; + + data = pega_fn_6254_dn_f_psu_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, PSU_TEMP_OVER_TEMP_WARNING_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static SENSOR_DEVICE_ATTR(vout_over_voltage, S_IRUGO, read_psu_vout_over_voltage, NULL, 0); +static SENSOR_DEVICE_ATTR(iout_over_current_fault, S_IRUGO, read_psu_iout_over_current_fault, NULL, 0); +static SENSOR_DEVICE_ATTR(iout_over_current_warning, S_IRUGO, read_psu_iout_over_current_warning, NULL, 0); +static SENSOR_DEVICE_ATTR(iput_over_current_warning, S_IRUGO, read_psu_iput_over_current_warning, NULL, 0); +static SENSOR_DEVICE_ATTR(iput_insufficient, S_IRUGO, read_psu_iput_insufficient, NULL, 0); +static SENSOR_DEVICE_ATTR(temp_over_temp_fault, S_IRUGO, read_psu_temp_over_temp_fault, NULL, 0); +static SENSOR_DEVICE_ATTR(temp_over_temp_warning, S_IRUGO, read_psu_temp_over_temp_warning, NULL, 0); + +static struct attribute *pega_fn_6254_dn_f_psu_attributes[] = { + &sensor_dev_attr_vout_over_voltage.dev_attr.attr, + &sensor_dev_attr_iout_over_current_fault.dev_attr.attr, + &sensor_dev_attr_iout_over_current_warning.dev_attr.attr, + &sensor_dev_attr_iput_over_current_warning.dev_attr.attr, + &sensor_dev_attr_iput_insufficient.dev_attr.attr, + 
&sensor_dev_attr_temp_over_temp_fault.dev_attr.attr, + &sensor_dev_attr_temp_over_temp_warning.dev_attr.attr, + NULL +}; + +static const struct attribute_group pega_fn_6254_dn_f_psu_group = { .attrs = pega_fn_6254_dn_f_psu_attributes}; + +static void pega_fn_6254_dn_f_psu_add_client(struct i2c_client *client) +{ + struct psu_client_node *node = kzalloc(sizeof(struct psu_client_node), GFP_KERNEL); + + if (!node) { + dev_dbg(&client->dev, "Can't allocate psu_client_node (0x%x)\n", client->addr); + return; + } + + node->client = client; + + mutex_lock(&list_lock); + list_add(&node->list, &psu_client_list); + mutex_unlock(&list_lock); +} + +static void pega_fn_6254_dn_f_psu_remove_client(struct i2c_client *client) +{ + struct list_head *list_node = NULL; + struct psu_client_node *psu_node = NULL; + int found = 0; + + mutex_lock(&list_lock); + + list_for_each(list_node, &psu_client_list) + { + psu_node = list_entry(list_node, struct psu_client_node, list); + + if (psu_node->client == client) { + found = 1; + break; + } + } + + if (found) { + list_del(list_node); + kfree(psu_node); + } + + mutex_unlock(&list_lock); +} + +static int pega_fn_6254_dn_f_psu_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { + dev_dbg(&client->dev, "i2c_check_functionality failed (0x%x)\n", client->addr); + status = -EIO; + goto exit; + } + + /* Register sysfs hooks */ + switch(client->addr) + { + case PSU_58_ADDRESS: + case PSU_59_ADDRESS: + status = sysfs_create_group(&client->dev.kobj, &pega_fn_6254_dn_f_psu_group); + break; + default: + dev_dbg(&client->dev, "i2c_check_psu failed (0x%x)\n", client->addr); + status = -EIO; + goto exit; + break; + } + + if (status) { + goto exit; + } + + dev_info(&client->dev, "chip found\n"); + pega_fn_6254_dn_f_psu_add_client(client); + + return 0; + +exit: + return status; +} + +static int pega_fn_6254_dn_f_psu_remove(struct i2c_client 
*client) +{ + switch(client->addr) + { + case PSU_58_ADDRESS: + case PSU_59_ADDRESS: + sysfs_remove_group(&client->dev.kobj, &pega_fn_6254_dn_f_psu_group); + break; + default: + dev_dbg(&client->dev, "i2c_remove_psu failed (0x%x)\n", client->addr); + break; + } + + pega_fn_6254_dn_f_psu_remove_client(client); + return 0; +} + +static const struct i2c_device_id pega_fn_6254_dn_f_psu_id[] = { + { "fn_6254_dn_f_psu", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, pega_fn_6254_dn_f_psu_id); + +static struct i2c_driver pega_fn_6254_dn_f_psu_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "pegatron_fn_6254_dn_f_psu", + }, + .probe = pega_fn_6254_dn_f_psu_probe, + .remove = pega_fn_6254_dn_f_psu_remove, + .id_table = pega_fn_6254_dn_f_psu_id, + .address_list = normal_i2c, +}; + +static int __init pega_fn_6254_dn_f_psu_init(void) +{ + mutex_init(&list_lock); + + return i2c_add_driver(&pega_fn_6254_dn_f_psu_driver); +} + +static void __exit pega_fn_6254_dn_f_psu_exit(void) +{ + i2c_del_driver(&pega_fn_6254_dn_f_psu_driver); +} + +MODULE_AUTHOR("Peter5 Lin "); +MODULE_DESCRIPTION("pega_fn_6254_dn_f_psu driver"); +MODULE_LICENSE("GPL"); + +module_init(pega_fn_6254_dn_f_psu_init); +module_exit(pega_fn_6254_dn_f_psu_exit); diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/scripts/sensors b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/scripts/sensors index 6bc6097bc17a..6cd3af6feb23 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/scripts/sensors +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/scripts/sensors @@ -2,6 +2,6 @@ docker exec -i pmon sensors "$@" #To probe sensors not part of lm-sensors -if [ -r /usr/local/bin/fn_6254_dn_f_sensors.py ]; then - python /usr/local/bin/fn_6254_dn_f_sensors.py get_sensors +if [ -r /usr/local/bin/pegatron_fn_6254_dn_f_sensors.py ]; then + python /usr/local/bin/pegatron_fn_6254_dn_f_sensors.py get_sensors fi diff --git 
a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-init.service b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-init.service index 2cdd391d1556..209edc0af6e7 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-init.service +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-init.service @@ -1,6 +1,6 @@ [Unit] Description=Pegastron fn-6254-dn-f Platform initialization service -After=local-fs.target +Before=network.target DefaultDependencies=no [Service] diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-status.service b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-status.service new file mode 100644 index 000000000000..bfbfcb5e7067 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-status.service @@ -0,0 +1,12 @@ +[Unit] +Description=Pegastron fn-6254-dn-f Platform status service +After=fn_6254_dn_f-platform-init.service +DefaultDependencies=no + +[Service] +Type=simple +ExecStart=/usr/local/bin/pegatron_fn_6254_dn_f_status.py run +ExecStop=/usr/local/bin/pegatron_fn_6254_dn_f_status.py stop + +[Install] +WantedBy=multi-user.target diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/fn_6254_dn_f_sensors.py b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_sensors.py similarity index 86% rename from platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/fn_6254_dn_f_sensors.py rename to platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_sensors.py index 40e23ef01b7e..5e3f511cd4d5 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/fn_6254_dn_f_sensors.py +++ 
b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_sensors.py @@ -8,10 +8,10 @@ sensors_path = '/sys/bus/i2c/devices/5-0070/' sensors_nodes = {'fan_rpm': ['_inner_rpm', '_outer_rpm'], 'fan_vol': ['ADC8_vol', 'ADC7_vol','ADC6_vol', 'ADC5_vol','ADC4_vol', 'ADC3_vol'], - 'temp':['lm75_49_temp', 'lm75_48_temp', 'SA56004_local_temp','SA56004_remote_temp']} + 'temp':['lm75_48_temp', 'lm75_49_temp', 'lm75_4a_temp']} sensors_type = {'fan_rpm': ['Inner RPM', 'Outer RPM'], 'fan_vol': ['P0.2', 'P0.6','P0.1', 'P1.5','P0.7', 'P1.6'], - 'temp':['lm75_49_temp', 'lm75_48_temp', 'SA56004_local_temp','SA56004_remote_temp']} + 'temp':['lm75_48_temp', 'lm75_49_temp', 'lm75_4a_temp']} # Get sysfs attribute def get_attr_value(attr_path): @@ -81,12 +81,12 @@ def get_fan(): return def get_hwmon(): + temp_type = sensors_type['temp'] print " " - string = get_attr_value(sensors_path + "lm75_48_temp") - print "Sensor A: " + string + " C" - string = get_attr_value(sensors_path + "lm75_49_temp") - print "Sensor B: " + string + " C" + for types in temp_type: + string = get_attr_value(sensors_path + types) + print types + ": " + string + " C" return @@ -121,10 +121,10 @@ def main(): if arg == 'fan_init': init_fan() elif arg == 'get_sensors': - ver = get_attr_value(sensors_path + "fb_hw_version") - print 'HW Version: ' + ver + ver = get_attr_value(sensors_path + "mb_fw_version") + print 'MB-SW Version: ' + ver ver = get_attr_value(sensors_path + "fb_fw_version") - print 'SW Version: ' + ver + print 'FB-SW Version: ' + ver get_fan() get_hwmon() get_voltage() diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_status.py b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_status.py new file mode 100755 index 000000000000..9ad398052c8e --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_status.py @@ -0,0 +1,164 @@ 
+#!/usr/bin/env python +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import sys, getopt +import logging +import os +import commands +import threading +import time +import syslog + +DEBUG = False +STOP = False +FAN_NUM = 5 +PASS = 0 +FAIL = 1 + +i2c_prefix = '/sys/bus/i2c/devices/' +status_nodes = {'fan': ['5-0070'], + 'psu':['2-0058', '3-0059']} +system_led_node = '7-0075' +status_alert = {'fan': ['wrongAirflow_alert', 'outerRPMOver_alert', 'outerRPMUnder_alert', 'outerRPMZero_alert', 'innerRPMOver_alert', 'innerRPMUnder_alert', 'innerRPMZero_alert', 'notconnect_alert'], + 'psu': ['vout_over_voltage', 'iout_over_current_fault', 'iout_over_current_warning', 'iput_over_current_warning', 'iput_insufficient', 'temp_over_temp_fault', 'temp_over_temp_warning']} + +led_command = {'sys_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'pwr_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'loc_led': {'on':'0', 'off':'1', 'blink':'2'}, + 'fan_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'cpld_allled_ctrl': {'off':'0', 'mix':'1', 'amber':'2', 'normal':'3'}, + 'serial_led_enable': {'disable':'0', 'enable':'1'}} + +pre_fan_led_status = 0 +pre_pwr_led_status = 0 + +def dbg_print(string): + if DEBUG == True: + print string + return + +def do_cmd(cmd, show): + logging.info('Run :' + cmd) + status, output = 
commands.getstatusoutput(cmd) + dbg_print(cmd + "with result:" + str(status)) + dbg_print("output:" + output) + if status: + logging.info('Failed :' + cmd) + if show: + print('Failed :' + cmd) + return status, output + +def read_file(path): + try: + file = open(path) + except IOError as e: + print "Error: unable to open file: %s" % str(e) + return False + + value = int(file.readline().rstrip()) + file.close() + + return value + +def write_file(path, value): + try: + file = open(path, "r+") + except IOError as e: + print "Error: unable to open file: %s" % str(e) + return False + + file.seek(0) + file.write(str(value)) + file.close() + + return + +def check_platform_fan(): + global pre_fan_led_status + fan_result = 0 + fan_status_node = status_nodes['fan'] + fan_alert = status_alert['fan'] + fan_led = led_command['fan_led'] + + status, output = do_cmd("ls " + i2c_prefix, 1) + if output.find(fan_status_node[0]) != -1: + for num in range(0,FAN_NUM): + for alert_type in fan_alert: + path = i2c_prefix + fan_status_node[0] + "/fan" + str(num+1) + "_" + alert_type + fan_result += read_file(path) + + if fan_result != PASS: + if pre_fan_led_status != fan_led["blink_amber"]: + path = i2c_prefix + system_led_node + "/fan_led" + write_file(path, fan_led["blink_amber"]) + pre_fan_led_status = fan_led["blink_amber"] + syslog.syslog(syslog.LOG_ERR, 'FAN Status Error !!!') + return FAIL + + if pre_fan_led_status != fan_led["green"]: + path = i2c_prefix + system_led_node + "/fan_led" + write_file(path, fan_led["green"]) + pre_fan_led_status = fan_led["green"] + syslog.syslog(syslog.LOG_WARNING, 'FAN Status Normal !!!') + return PASS + +def check_platform_psu(): + global pre_pwr_led_status + psu_result = 0 + psu_status_node = status_nodes['psu'] + psu_alert = status_alert['psu'] + psu_led = led_command['pwr_led'] + + status, output = do_cmd("ls " + i2c_prefix, 1) + if output.find(psu_status_node[0]) != -1 and output.find(psu_status_node[1]) != -1: + for nodes in psu_status_node: + 
for alert_type in psu_alert: + path = i2c_prefix + nodes + "/" + alert_type + psu_result += read_file(path) + + if psu_result != PASS: + if pre_pwr_led_status != psu_led["blink_amber"]: + path = i2c_prefix + system_led_node + "/pwr_led" + write_file(path, psu_led["blink_amber"]) + pre_pwr_led_status = psu_led["blink_amber"] + syslog.syslog(syslog.LOG_ERR, 'PSU Status Error !!!') + return FAIL + + if pre_pwr_led_status != psu_led["green"]: + path = i2c_prefix + system_led_node + "/pwr_led" + write_file(path, psu_led["green"]) + pre_pwr_led_status = psu_led["green"] + syslog.syslog(syslog.LOG_WARNING, 'PSU Status Normal !!!') + return PASS + +def pega_check_platform_status(): + while(True): + total_result = 0 + if STOP == True: + return + total_result += check_platform_fan() + total_result += check_platform_psu() + time.sleep(1) + return + +def main(): + for arg in sys.argv[1:]: + if arg == 'run': + pega_check_platform_status() + elif arg == 'stop': + STOP = True + +if __name__ == "__main__": + main() diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_util.py b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_util.py index 55e6114b11c8..fb108f12af60 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_util.py +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_util.py @@ -20,6 +20,7 @@ import os import commands import threading +import time DEBUG = False @@ -29,12 +30,12 @@ CPLDB_SFP_NUM = 12 CPLDC_SFP_NUM = 18 -kernel_module = ['i2c_dev', 'i2c-mux-pca954x force_deselect_on_exit=1', 'at24', 'pegatron_fn_6254_dn_f_cpld', 'pegatron_hwmon_mcu', 'pegatron_fn_6254_dn_f_sfp'] -moduleID = ['pca9544', 'pca9544', '24c02', 'pega_hwmon_mcu', 'fn_6254_dn_f_cpld', 'fn_6254_dn_f_cpld', 'fn_6254_dn_f_cpld', 'fn_6254_dn_f_sfpA', 'fn_6254_dn_f_sfpB', 'fn_6254_dn_f_sfpC'] +kernel_module = ['i2c_dev', 
'i2c-mux-pca954x force_deselect_on_exit=1', 'at24', 'pegatron_fn_6254_dn_f_cpld', 'pegatron_hwmon_mcu', 'pegatron_fn_6254_dn_f_psu', 'pegatron_fn_6254_dn_f_sfp', 'pegatron_fn_6254_dn_f_ixgbe'] +moduleID = ['pca9544', 'pca9544', 'fn_6254_dn_f_psu', 'fn_6254_dn_f_psu', '24c02', 'pega_hwmon_mcu', 'fn_6254_dn_f_cpld', 'fn_6254_dn_f_cpld', 'fn_6254_dn_f_cpld', 'fn_6254_dn_f_sfpA', 'fn_6254_dn_f_sfpB', 'fn_6254_dn_f_sfpC'] i2c_check_node = ['i2c-0', 'i2c-1'] uninstall_check_node = ['-0072', '-0073'] -device_address = ['0x72', '0x73', '0x54', '0x70', '0x74', '0x75', '0x76', '0x50', '0x50', '0x50'] -device_node= ['i2c-2', 'i2c-6', 'i2c-4', 'i2c-5', 'i2c-6', 'i2c-7', 'i2c-8', 'i2c-6', 'i2c-7', 'i2c-8'] +device_address = ['0x72', '0x73', '0x58', '0x59', '0x54', '0x70', '0x74', '0x75', '0x76', '0x50', '0x50', '0x50'] +device_node= ['i2c-2', 'i2c-6', 'i2c-2', 'i2c-3', 'i2c-4', 'i2c-5', 'i2c-6', 'i2c-7', 'i2c-8', 'i2c-6', 'i2c-7', 'i2c-8'] i2c_prefix = '/sys/bus/i2c/devices/' cpld_bus = ['6-0074', '7-0075', '8-0076'] @@ -58,7 +59,7 @@ def do_cmd(cmd, show): def install_driver(): status, output = do_cmd("depmod -a", 1) - + for i in range(0, len(kernel_module)): status, output = do_cmd("modprobe " + kernel_module[i], 1) if status: @@ -101,7 +102,6 @@ def do_install(): check_driver() install_device() - return def do_uninstall(): @@ -121,22 +121,24 @@ def do_uninstall(): return led_command = {'sys_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, - 'pwr_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, - 'loc_led': {'on':'0', 'off':'1', 'blink':'2'}, - 'fan_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, - 'cpld_allled_ctrl': {'off':'0', 'mix':'1', 'amber':'2', 'normal':'3'}, - 'serial_led_enable': {'disable':'0', 'enable':'1'}} + 'pwr_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'loc_led': {'on':'0', 'off':'1', 'blink':'2'}, + 'fan_led': 
{'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'cpld_allled_ctrl': {'off':'0', 'mix':'1', 'amber':'2', 'normal':'3'}, + 'serial_led_enable': {'disable':'0', 'enable':'1'}} def set_led(args): """ - Usage: %(scriptName)s set led object command - - object: - sys_led : set SYS led [command: off|green|amber|blink_green|blink_amber] - pwr_led : set PWR led [command: off|green|amber|blink_green|blink_amber] - loc_led : set LOCATOR led [command: off|on|blink] - fan_led : set FAN led [command: off|green|amber|blink_green|blink_amber] + command: + sys_led : set SYS led [off | green | amber | blink_green | blink_amber] + pwr_led : set PWR led [off | green | amber | blink_green | blink_amber] + loc_led : set LOCATOR led [off | on | blink] + fan_led : set FAN led [off | green | amber | blink_green | blink_amber] """ + if len(args) < 1: + print set_led.__doc__ + sys.exit(0) + if args[0] not in led_command: print set_led.__doc__ sys.exit(0) @@ -154,15 +156,16 @@ def set_led(args): def set_device(args): """ - Usage: %(scriptName)s command object - command: - led : set status led sys_led|pwr_led|loc_led|mst_led|fan_led|digit_led + led : set status led """ + + if len(args[0:]) < 1: + print set_device.__doc__ + sys.exit(0) if args[0] == 'led': set_led(args[1:]) - return else: print set_device.__doc__ @@ -188,46 +191,75 @@ def pega_init(): dbg_print("SFP_TX_DISABLE NODES: " + nodes) status, output = do_cmd("echo 0 > "+ nodes, 1) + #set QSFP reset to normal for x in range(SFP_MAX_NUM, TOTAL_PORT_NUM): nodes = i2c_prefix + cpld_bus[2] + '/sfp' + str(x+1) + '_reset' dbg_print("SFP_RESET NODES: " + nodes) - status, output = do_cmd("echo 3 > "+ nodes, 1) + status, output = do_cmd("echo 1 > "+ nodes, 1) + + #set QSFP I2c enable + for x in range(SFP_MAX_NUM, TOTAL_PORT_NUM): + nodes = i2c_prefix + cpld_bus[2] + '/sfp' + str(x+1) + '_modeseln' + dbg_print("SFP_MODSEL NODES: " + nodes) + status, output = do_cmd("echo 0 > "+ nodes, 1) + return + +def 
pega_cmd(args): + """ + command: + locate : blink locate LED for searching + """ + + if len(args) < 1: + print pega_cmd.__doc__ + sys.exit(0) + if args[0] == 'locate': + set_led(['loc_led', 'blink']) + time.sleep(20) + set_led(['loc_led', 'off']) + else: + print pega_cmd.__doc__ + sys.exit(0) return def main(): """ - Usage: %(scriptName)s command object - command: - install : install drivers and generate related sysfs nodes - uninstall : uninstall drivers and remove related sysfs nodes - set : change board setting [led] - debug : debug info [on/off] + install : install drivers + uninstall : uninstall drivers + set : change board settings + cmd : do command + debug : show debug info [on/off] """ - - if len(sys.argv)<2: + + if len(sys.argv[1:]) < 1: print main.__doc__ + sys.exit(0) - for arg in sys.argv[1:]: - if arg == 'install': - do_install() - pega_init() - elif arg == 'uninstall': - do_uninstall() - elif arg == 'set': - if len(sys.argv[2:])<1: - print main.__doc__ - else: - set_device(sys.argv[2:]) - return - elif arg == 'debug': - if sys.argv[2] == 'on': - DEBUG = True - else: - DEBUG = False - else: + arg = sys.argv[1] + if arg == 'install': + do_install() + pega_init() + elif arg == 'uninstall': + do_uninstall() + elif arg == 'set': + set_device(sys.argv[2:]) + elif arg == 'cmd': + pega_cmd(sys.argv[2:]) + elif arg == 'debug': + if len(sys.argv[2:]) < 1: print main.__doc__ + sys.exit(0) + if sys.argv[2] == 'on': + DEBUG = True + else: + DEBUG = False + else: + print main.__doc__ + sys.exit(0) + + return if __name__ == "__main__": main() From 73efdfa25d6a7d7267847b63e45c8187cf154102 Mon Sep 17 00:00:00 2001 From: PeterLin Date: Mon, 24 Jun 2019 16:55:26 +0800 Subject: [PATCH 15/20] change ixgbe driver from Linux 4.4.0-k to Intel 5.2.4 --- .../pegatron_fn_6254_dn_f_ixgbe/Makefile | 47 +- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe.h | 985 ++- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.c | 703 +- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.h | 43 + 
.../pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.c | 1473 ++-- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.h | 55 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.c | 1624 ++++ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.h | 213 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.c | 168 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.h | 31 + .../ixgbe_common.c | 2756 ++++-- .../ixgbe_common.h | 159 +- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.c | 610 +- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.h | 228 +- .../ixgbe_dcb_82598.c | 178 +- .../ixgbe_dcb_82598.h | 137 +- .../ixgbe_dcb_82599.c | 469 +- .../ixgbe_dcb_82599.h | 139 +- .../ixgbe_dcb_nl.c | 423 +- .../ixgbe_debugfs.c | 25 +- .../ixgbe_ethtool.c | 2422 ++++-- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.c | 347 +- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.h | 29 +- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.c | 210 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.h | 51 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_lib.c | 473 +- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_main.c | 7678 ++++++++++------- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.c | 498 +- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.h | 131 +- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_model.h | 121 - .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep.h | 200 + .../ixgbe_osdep2.h | 68 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_param.c | 1256 +++ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.c | 1699 ++-- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.h | 194 +- .../ixgbe_procfs.c | 938 ++ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_ptp.c | 348 +- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.c | 841 +- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.h | 67 +- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_sysfs.c | 129 +- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_type.h | 5233 ++++++----- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.c | 739 +- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.h | 78 +- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.c | 6272 ++++++++------ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.h | 115 + 
.../pegatron_fn_6254_dn_f_ixgbe/kcompat.c | 2375 +++++ .../pegatron_fn_6254_dn_f_ixgbe/kcompat.h | 5610 ++++++++++++ .../kcompat_ethtool.c | 1169 +++ 48 files changed, 35482 insertions(+), 14275 deletions(-) create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_model.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep2.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_param.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_procfs.c create mode 100644 
platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat.c create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat.h create mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat_ethtool.c diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/Makefile b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/Makefile index 2e4a507e6b1e..e759751a0fb3 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/Makefile +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/Makefile @@ -1,7 +1,7 @@ ################################################################################ # -# Intel 10 Gigabit PCI Express Linux driver -# Copyright(c) 1999 - 2013 Intel Corporation. +# Intel(R) 10GbE PCI Express Linux Network Driver +# Copyright(c) 1999 - 2017 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# # The full GNU General Public License is included in this distribution in # the file called "COPYING". 
# @@ -26,19 +22,38 @@ # ################################################################################ -# -# Makefile for the Intel(R) 10GbE PCI Express ethernet driver -# - obj-m += pegatron_fn_6254_dn_f_ixgbe.o -pegatron_fn_6254_dn_f_ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ - ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o +define pegatron_fn_6254_dn_f_ixgbe-y + ixgbe_main.o + ixgbe_api.o + ixgbe_common.o + ixgbe_dcb.o + ixgbe_dcb_82598.o + ixgbe_dcb_82599.o + ixgbe_ethtool.o + ixgbe_lib.o + ixgbe_mbx.o + ixgbe_sriov.o + ixgbe_param.o + ixgbe_phy.o + ixgbe_procfs.o + ixgbe_82598.o + ixgbe_82599.o + ixgbe_x540.o + ixgbe_x550.o +endef +pegatron_fn_6254_dn_f_ixgbe-y := $(strip ${pegatron_fn_6254_dn_f_ixgbe-y}) -pegatron_fn_6254_dn_f_ixgbe-y += ixgbe_dcb.o ixgbe_dcb_82598.o \ - ixgbe_dcb_82599.o ixgbe_dcb_nl.o +pegatron_fn_6254_dn_f_ixgbe-y += ixgbe_dcb_nl.o -pegatron_fn_6254_dn_f_ixgbe-y += ixgbe_sysfs.o pegatron_fn_6254_dn_f_ixgbe-y += ixgbe_debugfs.o + pegatron_fn_6254_dn_f_ixgbe-y += ixgbe_fcoe.o + +pegatron_fn_6254_dn_f_ixgbe-y += ixgbe_ptp.o + +pegatron_fn_6254_dn_f_ixgbe-y += ixgbe_sysfs.o + +pegatron_fn_6254_dn_f_ixgbe-y += kcompat.o + diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe.h index 255ec3b9c021..33be88cc7eea 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe.h +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe.h @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. 
+ Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -29,71 +25,151 @@ #ifndef _IXGBE_H_ #define _IXGBE_H_ -#include -#include +#include + #include #include -#include -#include -#include -#include - -#include -#include -#include +#include -#include "ixgbe_type.h" -#include "ixgbe_common.h" -#include "ixgbe_dcb.h" -#if IS_ENABLED(CONFIG_FCOE) -#define IXGBE_FCOE -#include "ixgbe_fcoe.h" -#endif /* IS_ENABLED(CONFIG_FCOE) */ -#ifdef CONFIG_IXGBE_DCA +#ifdef SIOCETHTOOL +#include +#endif +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) +#include +#endif +/* Can't use IS_ENABLED until after kcompat is loaded */ +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#define IXGBE_DCA #include #endif +#include "ixgbe_dcb.h" -#include +#include "kcompat.h" #ifdef CONFIG_NET_RX_BUSY_POLL +#include +#ifdef HAVE_NDO_BUSY_POLL #define BP_EXTENDED_STATS #endif -/* common prefix used by pr_<> macros */ -#undef pr_fmt -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#endif /* CONFIG_NET_RX_BUSY_POLL */ -/* TX/RX descriptor defines */ -#define IXGBE_DEFAULT_TXD 512 -#define IXGBE_DEFAULT_TX_WORK 256 -#define IXGBE_MAX_TXD 4096 -#define IXGBE_MIN_TXD 64 +#ifdef HAVE_SCTP +#include +#endif -#if (PAGE_SIZE < 8192) -#define IXGBE_DEFAULT_RXD 512 -#else -#define IXGBE_DEFAULT_RXD 128 +#ifdef HAVE_INCLUDE_LINUX_MDIO_H +#include +#endif + +#if IS_ENABLED(CONFIG_FCOE) +#include 
"ixgbe_fcoe.h" +#endif /* CONFIG_FCOE */ + +#include "ixgbe_api.h" + +#include "ixgbe_common.h" + +#define PFX "ixgbe: " +#define DPRINTK(nlevel, klevel, fmt, args...) \ + ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ + printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ + __func__ , ## args))) + +#ifdef HAVE_PTP_1588_CLOCK +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include +#endif /* HAVE_INCLUDE_TIMECOUNTER_H */ +#include +#include +#include #endif -#define IXGBE_MAX_RXD 4096 -#define IXGBE_MIN_RXD 64 -#define IXGBE_ETH_P_LLDP 0x88CC +/* TX/RX descriptor defines */ +#define IXGBE_DEFAULT_TXD 512 +#define IXGBE_DEFAULT_TX_WORK 256 +#define IXGBE_MAX_TXD 4096 +#define IXGBE_MIN_TXD 64 + +#define IXGBE_DEFAULT_RXD 512 +#define IXGBE_DEFAULT_RX_WORK 256 +#define IXGBE_MAX_RXD 4096 +#define IXGBE_MIN_RXD 64 + +#define IXGBE_ETH_P_LLDP 0x88CC /* flow control */ -#define IXGBE_MIN_FCRTL 0x40 +#define IXGBE_MIN_FCRTL 0x40 #define IXGBE_MAX_FCRTL 0x7FF80 -#define IXGBE_MIN_FCRTH 0x600 +#define IXGBE_MIN_FCRTH 0x600 #define IXGBE_MAX_FCRTH 0x7FFF0 -#define IXGBE_DEFAULT_FCPAUSE 0xFFFF -#define IXGBE_MIN_FCPAUSE 0 -#define IXGBE_MAX_FCPAUSE 0xFFFF +#define IXGBE_DEFAULT_FCPAUSE 0xFFFF +#define IXGBE_MIN_FCPAUSE 0 +#define IXGBE_MAX_FCPAUSE 0xFFFF /* Supported Rx Buffer Sizes */ -#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ -#define IXGBE_RXBUFFER_1536 1536 -#define IXGBE_RXBUFFER_2K 2048 -#define IXGBE_RXBUFFER_3K 3072 -#define IXGBE_RXBUFFER_4K 4096 -#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ +#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ +#define IXGBE_RXBUFFER_1536 1536 +#define IXGBE_RXBUFFER_2K 2048 +#define IXGBE_RXBUFFER_3K 3072 +#define IXGBE_RXBUFFER_4K 4096 +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT +#define IXGBE_RXBUFFER_7K 7168 +#define IXGBE_RXBUFFER_8K 8192 +#define IXGBE_RXBUFFER_15K 15360 +#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ +#define IXGBE_MAX_RXBUFFER 
16384 /* largest size for single descriptor */ + +/* Attempt to maximize the headroom available for incoming frames. We + * use a 2K buffer for receives and need 1536/1534 to store the data for + * the frame. This leaves us with 512 bytes of room. From that we need + * to deduct the space needed for the shared info and the padding needed + * to IP align the frame. + * + * Note: For cache line sizes 256 or larger this value is going to end + * up negative. In these cases we should fall back to the 3K + * buffers. + */ +#if (PAGE_SIZE < 8192) +#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN) +#define IXGBE_2K_TOO_SMALL_WITH_PADDING \ +((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K)) + +static inline int ixgbe_compute_pad(int rx_buf_len) +{ + int page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline int ixgbe_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. + * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. + */ + if (IXGBE_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = IXGBE_RXBUFFER_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + + return ixgbe_compute_pad(rx_buf_len); +} + +#define IXGBE_SKB_PAD ixgbe_skb_pad() +#else +#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif /* * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we @@ -103,11 +179,27 @@ * Since netdev_alloc_skb now allocates a page fragment we can use a value * of 256 and the resultant skb will have a truesize of 960 or less. 
*/ -#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 +#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 + +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#ifdef HAVE_STRUCT_DMA_ATTRS +#define IXGBE_RX_DMA_ATTR NULL +#else +#define IXGBE_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#endif + +/* assume the kernel supports 8021p to avoid stripping vlan tags */ +#ifdef IXGBE_DISABLE_8021P_SUPPORT +#ifndef HAVE_8021P_SUPPORT +#define HAVE_8021P_SUPPORT +#endif +#endif /* IXGBE_DISABLE_8021P_SUPPORT */ + enum ixgbe_tx_flags { /* cmd_type flags */ IXGBE_TX_FLAGS_HW_VLAN = 0x01, @@ -127,41 +219,76 @@ enum ixgbe_tx_flags { /* VLAN info */ #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 -#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 -#define IXGBE_MAX_VF_MC_ENTRIES 30 -#define IXGBE_MAX_VF_FUNCTIONS 64 -#define IXGBE_MAX_VFTA_ENTRIES 128 -#define MAX_EMULATION_MAC_ADDRS 16 -#define IXGBE_MAX_PF_MACVLANS 15 -#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) -#define IXGBE_82599_VF_DEVICE_ID 0x10ED -#define IXGBE_X540_VF_DEVICE_ID 0x1515 +#define IXGBE_MAX_RX_DESC_POLL 10 + +#define IXGBE_MAX_VF_MC_ENTRIES 30 +#define IXGBE_MAX_VF_FUNCTIONS 64 +#define IXGBE_MAX_VFTA_ENTRIES 128 +#define MAX_EMULATION_MAC_ADDRS 16 +#define IXGBE_MAX_PF_MACVLANS 15 +/* must account for pools assigned to VFs. 
*/ +#ifdef CONFIG_PCI_IOV +#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) +#else +#define VMDQ_P(p) (p) +#endif + +#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ + { \ + u32 current_counter = IXGBE_READ_REG(hw, reg); \ + if (current_counter < last_counter) \ + counter += 0x100000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFFF00000000LL; \ + counter |= current_counter; \ + } + +#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ + { \ + u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ + u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ + u64 current_counter = (current_counter_msb << 32) | \ + current_counter_lsb; \ + if (current_counter < last_counter) \ + counter += 0x1000000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFF000000000LL; \ + counter |= current_counter; \ + } + +struct vf_stats { + u64 gprc; + u64 gorc; + u64 gptc; + u64 gotc; + u64 mprc; +}; struct vf_data_storage { struct pci_dev *vfdev; unsigned char vf_mac_addresses[ETH_ALEN]; u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; u16 num_vf_mc_hashes; bool clear_to_send; + struct vf_stats vfstats; + struct vf_stats last_vfstats; + struct vf_stats saved_rst_vfstats; bool pf_set_mac; u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ u16 pf_qos; u16 tx_rate; u8 spoofchk_enabled; +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN bool rss_query_enabled; +#endif u8 trusted; int xcast_mode; unsigned int vf_api; }; -enum ixgbevf_xcast_modes { - IXGBEVF_XCAST_MODE_NONE = 0, - IXGBEVF_XCAST_MODE_MULTI, - IXGBEVF_XCAST_MODE_ALLMULTI, -}; - struct vf_macvlans { struct list_head l; int vf; @@ -171,11 +298,17 @@ struct vf_macvlans { }; #define IXGBE_MAX_TXD_PWR 14 -#define IXGBE_MAX_DATA_PER_TXD (1u << IXGBE_MAX_TXD_PWR) +#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) /* Tx Descriptors needed, worst case */ -#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) -#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) +#ifndef MAX_SKB_FRAGS +#define DESC_NEEDED 4 +#elif (MAX_SKB_FRAGS < 16) +#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) +#else +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#endif /* wrapper around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ @@ -194,8 +327,15 @@ struct ixgbe_tx_buffer { struct ixgbe_rx_buffer { struct sk_buff *skb; dma_addr_t dma; +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT struct page *page; - unsigned int page_offset; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; +#endif }; struct ixgbe_queue_stats { @@ -224,26 +364,26 @@ struct ixgbe_rx_queue_stats { }; #define IXGBE_TS_HDR_LEN 8 - enum ixgbe_ring_state_t { +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + __IXGBE_RX_3K_BUFFER, + __IXGBE_RX_BUILD_SKB_ENABLED, +#endif + __IXGBE_RX_RSC_ENABLED, + __IXGBE_RX_CSUM_UDP_ZERO_ERR, +#if IS_ENABLED(CONFIG_FCOE) + __IXGBE_RX_FCOE, +#endif __IXGBE_TX_FDIR_INIT_DONE, __IXGBE_TX_XPS_INIT_DONE, __IXGBE_TX_DETECT_HANG, __IXGBE_HANG_CHECK_ARMED, - __IXGBE_RX_RSC_ENABLED, - __IXGBE_RX_CSUM_UDP_ZERO_ERR, - __IXGBE_RX_FCOE, -}; - -struct ixgbe_fwd_adapter { - unsigned long 
active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - struct net_device *netdev; - struct ixgbe_adapter *real_adapter; - unsigned int tx_base_queue; - unsigned int rx_base_queue; - int pool; }; +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT +#define ring_uses_build_skb(ring) \ + test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) +#endif #define check_for_tx_hang(ring) \ test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) #define set_check_for_tx_hang(ring) \ @@ -256,12 +396,15 @@ struct ixgbe_fwd_adapter { set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) #define clear_ring_rsc_enabled(ring) \ clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +#define netdev_ring(ring) (ring->netdev) +#define ring_queue_index(ring) (ring->queue_index) + + struct ixgbe_ring { struct ixgbe_ring *next; /* pointer to next ring in q_vector */ struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */ struct net_device *netdev; /* netdev ring belongs to */ struct device *dev; /* device for DMA mapping */ - struct ixgbe_fwd_adapter *l2_accel_priv; void *desc; /* descriptor ring memory */ union { struct ixgbe_tx_buffer *tx_buffer_info; @@ -283,10 +426,16 @@ struct ixgbe_ring { u16 next_to_use; u16 next_to_clean; +#ifdef HAVE_PTP_1588_CLOCK unsigned long last_rx_timestamp; +#endif union { +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + u16 rx_buf_len; +#else u16 next_to_alloc; +#endif struct { u8 atr_sample_rate; u8 atr_count; @@ -295,7 +444,9 @@ struct ixgbe_ring { u8 dcb_tc; struct ixgbe_queue_stats stats; +#ifdef HAVE_NDO_GET_STATS64 struct u64_stats_sync syncp; +#endif union { struct ixgbe_tx_queue_stats tx_stats; struct ixgbe_rx_queue_stats rx_stats; @@ -307,61 +458,68 @@ enum ixgbe_ring_f_enum { RING_F_VMDQ, /* SR-IOV uses the same ring feature */ RING_F_RSS, RING_F_FDIR, -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) RING_F_FCOE, -#endif /* IXGBE_FCOE */ - - RING_F_ARRAY_SIZE /* must be last in enum set */ +#endif /* CONFIG_FCOE */ + RING_F_ARRAY_SIZE /* must be last in enum set */ }; +#define 
IXGBE_MAX_DCB_INDICES 8 #define IXGBE_MAX_RSS_INDICES 16 #define IXGBE_MAX_RSS_INDICES_X550 63 #define IXGBE_MAX_VMDQ_INDICES 64 -#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */ -#define IXGBE_MAX_FCOE_INDICES 8 -#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) -#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) -#define IXGBE_MAX_L2A_QUEUES 4 -#define IXGBE_BAD_L2A_QUEUE 3 -#define IXGBE_MAX_MACVLANS 31 -#define IXGBE_MAX_DCBMACVLANS 8 - +#define IXGBE_MAX_FDIR_INDICES 63 +#if IS_ENABLED(CONFIG_FCOE) +#define IXGBE_MAX_FCOE_INDICES 8 +#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) +#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) +#else +#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#endif /* CONFIG_FCOE */ struct ixgbe_ring_feature { u16 limit; /* upper limit on feature indices */ u16 indices; /* current value of indices */ u16 mask; /* Mask used for feature to ring mapping */ u16 offset; /* offset to start of feature */ -} ____cacheline_internodealigned_in_smp; +}; #define IXGBE_82599_VMDQ_8Q_MASK 0x78 #define IXGBE_82599_VMDQ_4Q_MASK 0x7C #define IXGBE_82599_VMDQ_2Q_MASK 0x7E +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT /* * FCoE requires that all Rx buffers be over 2200 bytes in length. Since * this is twice the size of a half page we need to double the page order * for FCoE enabled Rx queues. */ -static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring) +static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring __maybe_unused *ring) { -#ifdef IXGBE_FCOE - if (test_bit(__IXGBE_RX_FCOE, &ring->state)) - return (PAGE_SIZE < 8192) ? 
IXGBE_RXBUFFER_4K : - IXGBE_RXBUFFER_3K; +#if MAX_SKB_FRAGS < 8 + return ALIGN(IXGBE_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024); +#else + if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) + return IXGBE_RXBUFFER_3K; +#if (PAGE_SIZE < 8192) + if (ring_uses_build_skb(ring)) + return IXGBE_MAX_2K_FRAME_BUILD_SKB; #endif return IXGBE_RXBUFFER_2K; +#endif } -static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) +static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring __maybe_unused *ring) { -#ifdef IXGBE_FCOE - if (test_bit(__IXGBE_RX_FCOE, &ring->state)) - return (PAGE_SIZE < 8192) ? 1 : 0; +#if (PAGE_SIZE < 8192) + if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) + return 1; #endif return 0; } #define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring)) +#endif struct ixgbe_ring_container { struct ixgbe_ring *ring; /* pointer to linked list of rings */ unsigned int total_bytes; /* total bytes processed this int */ @@ -375,39 +533,43 @@ struct ixgbe_ring_container { #define ixgbe_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ - ? 8 : 1) -#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ + ? 8 : 1) +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS -/* MAX_Q_VECTORS of these are allocated, +/* MAX_MSIX_Q_VECTORS of these are allocated, * but we only use one per queue-specific vector. 
*/ struct ixgbe_q_vector { struct ixgbe_adapter *adapter; -#ifdef CONFIG_IXGBE_DCA - int cpu; /* CPU for DCA */ -#endif - u16 v_idx; /* index of q_vector within array, also used for - * finding the bit in EICR and friends that - * represents the vector for this ring */ - u16 itr; /* Interrupt throttle rate written to EITR */ + int cpu; /* CPU for DCA */ + u16 v_idx; /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ + u16 itr; /* Interrupt throttle rate written to EITR */ struct ixgbe_ring_container rx, tx; struct napi_struct napi; +#ifndef HAVE_NETDEV_NAPI_LIST + struct net_device poll_dev; +#endif +#ifdef HAVE_IRQ_AFFINITY_HINT cpumask_t affinity_mask; +#endif int numa_node; struct rcu_head rcu; /* to avoid race with update stats on free */ char name[IFNAMSIZ + 9]; + bool netpoll_rx; -#ifdef CONFIG_NET_RX_BUSY_POLL +#ifdef HAVE_NDO_BUSY_POLL atomic_t state; -#endif /* CONFIG_NET_RX_BUSY_POLL */ +#endif /* HAVE_NDO_BUSY_POLL */ /* for dynamic allocation of rings associated with this q_vector */ struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; }; -#ifdef CONFIG_NET_RX_BUSY_POLL +#ifdef HAVE_NDO_BUSY_POLL enum ixgbe_qv_state_t { IXGBE_QV_STATE_IDLE = 0, IXGBE_QV_STATE_NAPI, @@ -454,7 +616,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) IXGBE_QV_STATE_POLL); #ifdef BP_EXTENDED_STATS if (rc != IXGBE_QV_STATE_IDLE) - q_vector->rx.ring->stats.yields++; + q_vector->tx.ring->stats.yields++; #endif return rc == IXGBE_QV_STATE_IDLE; } @@ -483,44 +645,8 @@ static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) return rc == IXGBE_QV_STATE_IDLE; } -#else /* CONFIG_NET_RX_BUSY_POLL */ -static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) -{ -} - -static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) -{ - return true; -} - -static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) -{ - 
return false; -} - -static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) -{ - return false; -} - -static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) -{ - return false; -} - -static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) -{ - return false; -} - -static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) -{ - return true; -} - -#endif /* CONFIG_NET_RX_BUSY_POLL */ - -#ifdef CONFIG_IXGBE_HWMON +#endif /* HAVE_NDO_BUSY_POLL */ +#ifdef IXGBE_HWMON #define IXGBE_HWMON_TYPE_LOC 0 #define IXGBE_HWMON_TYPE_TEMP 1 @@ -535,13 +661,11 @@ struct hwmon_attr { }; struct hwmon_buff { - struct attribute_group group; - const struct attribute_group *groups[2]; - struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1]; - struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4]; + struct device *device; + struct hwmon_attr *hwmon_list; unsigned int n_hwmon; }; -#endif /* CONFIG_IXGBE_HWMON */ +#endif /* IXGBE_HWMON */ /* * microsecond values for various ITR rates shifted by 2 to fit itr register @@ -550,6 +674,7 @@ struct hwmon_buff { #define IXGBE_MIN_RSC_ITR 24 #define IXGBE_100K_ITR 40 #define IXGBE_20K_ITR 200 +#define IXGBE_16K_ITR 248 #define IXGBE_12K_ITR 336 /* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */ @@ -559,6 +684,7 @@ static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc, return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); } +/* ixgbe_desc_unused - calculate if we have unused descriptors */ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) { u16 ntc = ring->next_to_clean; @@ -567,26 +693,25 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; } -#define IXGBE_RX_DESC(R, i) \ +#define IXGBE_RX_DESC(R, i) \ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) -#define IXGBE_TX_DESC(R, i) \ +#define IXGBE_TX_DESC(R, i) \ (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) -#define IXGBE_TX_CTXTDESC(R, i) \ +#define IXGBE_TX_CTXTDESC(R, i) \ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) -#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ -#ifdef IXGBE_FCOE -/* Use 3K as the baby jumbo frame size for FCoE */ -#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 -#endif /* IXGBE_FCOE */ +#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 +#if IS_ENABLED(CONFIG_FCOE) +/* use 3K as the baby jumbo frame size for FCoE */ +#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 +#endif /* CONFIG_FCOE */ -#define OTHER_VECTOR 1 -#define NON_Q_VECTORS (OTHER_VECTOR) +#define TCP_TIMER_VECTOR 0 +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR) -#define MAX_MSIX_VECTORS_82599 64 -#define MAX_Q_VECTORS_82599 64 -#define MAX_MSIX_VECTORS_82598 18 -#define MAX_Q_VECTORS_82598 16 +#define IXGBE_MAX_MSIX_Q_VECTORS_82599 64 +#define IXGBE_MAX_MSIX_Q_VECTORS_82598 16 struct ixgbe_mac_addr { u8 addr[ETH_ALEN]; @@ -598,19 +723,38 @@ struct ixgbe_mac_addr { #define IXGBE_MAC_STATE_MODIFIED 0x2 #define IXGBE_MAC_STATE_IN_USE 0x4 -#define MAX_Q_VECTORS MAX_Q_VECTORS_82599 -#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599 +#ifdef IXGBE_PROCFS +struct ixgbe_therm_proc_data { + struct ixgbe_hw *hw; + struct ixgbe_thermal_diode_data *sensor_data; +}; + +#endif /* IXGBE_PROCFS */ +/* + * Only for array allocations in our adapter struct. On 82598, there will be + * unused entries in the array, but that's not a big deal. Also, in 82599, + * we can actually assign 64 queue vectors based on our extended-extended + * interrupt registers. This is different than 82598, which is limited to 16. 
+ */ +#define MAX_MSIX_Q_VECTORS IXGBE_MAX_MSIX_Q_VECTORS_82599 +#define MAX_MSIX_COUNT IXGBE_MAX_MSIX_VECTORS_82599 -#define MIN_MSIX_Q_VECTORS 1 -#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) /* default to trying for four seconds */ -#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) -#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ +#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) +#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ /* board specific private data structure */ struct ixgbe_adapter { +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) +#ifdef HAVE_VLAN_RX_REGISTER + struct vlan_group *vlgrp; /* must be first, see ixgbe_receive_skb */ +#else unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +#endif +#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ /* OS defined structs */ struct net_device *netdev; struct pci_dev *pdev; @@ -621,63 +765,95 @@ struct ixgbe_adapter { * thus the additional *_CAPABLE flags. 
*/ u32 flags; -#define IXGBE_FLAG_MSI_ENABLED BIT(1) -#define IXGBE_FLAG_MSIX_ENABLED BIT(3) -#define IXGBE_FLAG_RX_1BUF_CAPABLE BIT(4) -#define IXGBE_FLAG_RX_PS_CAPABLE BIT(5) -#define IXGBE_FLAG_RX_PS_ENABLED BIT(6) -#define IXGBE_FLAG_DCA_ENABLED BIT(8) -#define IXGBE_FLAG_DCA_CAPABLE BIT(9) -#define IXGBE_FLAG_IMIR_ENABLED BIT(10) -#define IXGBE_FLAG_MQ_CAPABLE BIT(11) -#define IXGBE_FLAG_DCB_ENABLED BIT(12) -#define IXGBE_FLAG_VMDQ_CAPABLE BIT(13) -#define IXGBE_FLAG_VMDQ_ENABLED BIT(14) -#define IXGBE_FLAG_FAN_FAIL_CAPABLE BIT(15) -#define IXGBE_FLAG_NEED_LINK_UPDATE BIT(16) -#define IXGBE_FLAG_NEED_LINK_CONFIG BIT(17) -#define IXGBE_FLAG_FDIR_HASH_CAPABLE BIT(18) -#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE BIT(19) -#define IXGBE_FLAG_FCOE_CAPABLE BIT(20) -#define IXGBE_FLAG_FCOE_ENABLED BIT(21) -#define IXGBE_FLAG_SRIOV_CAPABLE BIT(22) -#define IXGBE_FLAG_SRIOV_ENABLED BIT(23) -#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24) -#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25) -#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26) -#define IXGBE_FLAG_DCB_CAPABLE BIT(27) -#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE BIT(28) +#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0) +#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) +#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2) +#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) +#ifndef IXGBE_NO_LLI +#define IXGBE_FLAG_LLI_PUSH (u32)(1 << 4) +#endif + +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 6) +#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 7) +#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)(1 << 8) +#else +#define IXGBE_FLAG_DCA_ENABLED (u32)0 +#define IXGBE_FLAG_DCA_CAPABLE (u32)0 +#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)0 +#endif +#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 9) +#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 10) +#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 11) +#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 12) +#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 13) +#define 
IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 14) +#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 15) +#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 16) +#if IS_ENABLED(CONFIG_FCOE) +#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 17) +#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 18) +#endif /* CONFIG_FCOE */ +#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 19) +#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 20) +#define IXGBE_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 21) +#define IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 22) +#define IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE (u32)(1 << 23) +#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED (u32)(1 << 24) +#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE (u32)(1 << 25) +#define IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE (u32)(1 << 26) +#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER (u32)(1 << 27) +#define IXGBE_FLAG_MDD_ENABLED (u32)(1 << 29) +#define IXGBE_FLAG_DCB_CAPABLE (u32)(1 << 30) +#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE BIT(31) + +/* preset defaults */ +#define IXGBE_FLAGS_82598_INIT (IXGBE_FLAG_MSI_CAPABLE | \ + IXGBE_FLAG_MSIX_CAPABLE | \ + IXGBE_FLAG_MQ_CAPABLE) + +#define IXGBE_FLAGS_82599_INIT (IXGBE_FLAGS_82598_INIT | \ + IXGBE_FLAG_SRIOV_CAPABLE) + +#define IXGBE_FLAGS_X540_INIT IXGBE_FLAGS_82599_INIT + +#define IXGBE_FLAGS_X550_INIT (IXGBE_FLAGS_82599_INIT | \ + IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE) u32 flags2; -#define IXGBE_FLAG2_RSC_CAPABLE BIT(0) -#define IXGBE_FLAG2_RSC_ENABLED BIT(1) -#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE BIT(2) -#define IXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(3) -#define IXGBE_FLAG2_SEARCH_FOR_SFP BIT(4) -#define IXGBE_FLAG2_SFP_NEEDS_RESET BIT(5) -#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(7) -#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8) -#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9) -#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10) -#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) -#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12) -#define IXGBE_FLAG2_VLAN_PROMISC BIT(13) -#define IXGBE_FLAG2_EEE_CAPABLE BIT(14) -#define 
IXGBE_FLAG2_EEE_ENABLED BIT(15) -#define IXGBE_FLAG2_RX_LEGACY BIT(16) +#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0) +#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) +#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 3) +#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 4) +#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 5) +#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 6) +#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 8) +#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 9) +#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 10) +#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11) +#define IXGBE_FLAG2_EEE_CAPABLE (u32)(1 << 14) +#define IXGBE_FLAG2_EEE_ENABLED (u32)(1 << 15) +#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED (u32)(1 << 16) +#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 17) +#define IXGBE_FLAG2_VLAN_PROMISC (u32)(1 << 18) +#define IXGBE_FLAG2_RX_LEGACY (u32)(1 << 19) /* Tx fast path data */ int num_tx_queues; u16 tx_itr_setting; u16 tx_work_limit; +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) + __be16 vxlan_port; +#endif /* HAVE_UDP_ENC_RX_OFFLAD || HAVE_VXLAN_RX_OFFLOAD */ +#ifdef HAVE_UDP_ENC_RX_OFFLOAD + __be16 geneve_port; +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ + /* Rx fast path data */ int num_rx_queues; u16 rx_itr_setting; - - /* Port number used to identify VXLAN traffic */ - __be16 vxlan_port; - __be16 geneve_port; + u16 rx_work_limit; /* TX */ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; @@ -688,8 +864,8 @@ struct ixgbe_adapter { /* RX */ struct ixgbe_ring *rx_ring[MAX_RX_QUEUES]; - int num_rx_pools; /* == num_rx_queues in 82598 */ - int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */ + int num_rx_pools; /* does not include pools assigned to VFs */ + int num_rx_queues_per_pool; u64 hw_csum_rx_error; u64 hw_rx_no_dma_resources; u64 rsc_total_count; @@ -698,37 +874,57 @@ struct ixgbe_adapter { u32 alloc_rx_page_failed; u32 alloc_rx_buff_failed; - struct ixgbe_q_vector 
*q_vector[MAX_Q_VECTORS]; + struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; - /* DCB parameters */ +#ifdef HAVE_DCBNL_IEEE struct ieee_pfc *ixgbe_ieee_pfc; struct ieee_ets *ixgbe_ieee_ets; +#endif struct ixgbe_dcb_config dcb_cfg; struct ixgbe_dcb_config temp_dcb_cfg; u8 dcb_set_bitmap; u8 dcbx_cap; +#ifndef HAVE_MQPRIO + u8 dcb_tc; +#endif enum ixgbe_fc_mode last_lfc_mode; int num_q_vectors; /* current number of q_vectors for device */ - int max_q_vectors; /* true count of q_vectors for device */ + int max_q_vectors; /* upper limit of q_vectors for device */ struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; struct msix_entry *msix_entries; +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats net_stats; +#endif + +#ifdef ETHTOOL_TEST u32 test_icr; struct ixgbe_ring test_tx_ring; struct ixgbe_ring test_rx_ring; +#endif /* structs defined in ixgbe_hw.h */ struct ixgbe_hw hw; u16 msg_enable; struct ixgbe_hw_stats stats; - +#ifndef IXGBE_NO_LLI + u32 lli_port; + u32 lli_size; + u32 lli_etype; + u32 lli_vlan_pri; +#endif /* IXGBE_NO_LLI */ + + u32 *config_space; u64 tx_busy; unsigned int tx_ring_count; unsigned int rx_ring_count; u32 link_speed; bool link_up; + + bool cloud_mode; + unsigned long sfp_poll_time; unsigned long link_check_timeout; @@ -743,21 +939,27 @@ struct ixgbe_adapter { u32 atr_sample_rate; spinlock_t fdir_perfect_lock; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) struct ixgbe_fcoe fcoe; -#endif /* IXGBE_FCOE */ - u8 __iomem *io_addr; /* Mainly for iounmap use */ +#endif /* CONFIG_FCOE */ + u8 __iomem *io_addr; /* Mainly for iounmap use */ u32 wol; + u16 bd_number; + +#ifdef HAVE_BRIDGE_ATTRIBS u16 bridge_mode; +#endif - u16 eeprom_verh; - u16 eeprom_verl; + char eeprom_id[32]; u16 eeprom_cap; - + bool netdev_registered; u32 interrupt_event; +#ifdef HAVE_ETHTOOL_SET_PHYS_ID u32 led_reg; +#endif +#ifdef HAVE_PTP_1588_CLOCK struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_caps; struct work_struct ptp_tx_work; @@ -766,40 
+968,46 @@ struct ixgbe_adapter { unsigned long ptp_tx_start; unsigned long last_overflow_check; unsigned long last_rx_ptp_check; - unsigned long last_rx_timestamp; spinlock_t tmreg_lock; struct cyclecounter hw_cc; struct timecounter hw_tc; u32 base_incval; u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; u32 rx_hwtstamp_cleared; - void (*ptp_setup_sdp)(struct ixgbe_adapter *); + void (*ptp_setup_sdp) (struct ixgbe_adapter *); +#endif /* HAVE_PTP_1588_CLOCK */ - /* SR-IOV */ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); unsigned int num_vfs; + unsigned int max_vfs; struct vf_data_storage *vfinfo; int vf_rate_link_speed; struct vf_macvlans vf_mvs; struct vf_macvlans *mv_list; - +#ifdef CONFIG_PCI_IOV u32 timer_event_accumulator; u32 vferr_refcount; +#endif struct ixgbe_mac_addr *mac_table; - struct kobject *info_kobj; -#ifdef CONFIG_IXGBE_HWMON - struct hwmon_buff *ixgbe_hwmon_buff; -#endif /* CONFIG_IXGBE_HWMON */ -#ifdef CONFIG_DEBUG_FS +#ifdef IXGBE_SYSFS +#ifdef IXGBE_HWMON + struct hwmon_buff ixgbe_hwmon_buff; +#endif /* IXGBE_HWMON */ +#else /* IXGBE_SYSFS */ +#ifdef IXGBE_PROCFS + struct proc_dir_entry *eth_dir; + struct proc_dir_entry *info_dir; + u64 old_lsc; + struct proc_dir_entry *therm_dir[IXGBE_MAX_SENSORS]; + struct ixgbe_therm_proc_data therm_data[IXGBE_MAX_SENSORS]; +#endif /* IXGBE_PROCFS */ +#endif /* IXGBE_SYSFS */ + +#ifdef HAVE_IXGBE_DEBUG_FS struct dentry *ixgbe_dbg_adapter; -#endif /*CONFIG_DEBUG_FS*/ - +#endif /*HAVE_IXGBE_DEBUG_FS*/ u8 default_up; - unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ - -#define IXGBE_MAX_LINK_HANDLE 10 - struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE]; - unsigned long tables; /* maximum number of RETA entries among all devices supported by ixgbe * driver: currently it's x550 device in non-SRIOV mode @@ -808,7 +1016,14 @@ struct ixgbe_adapter { u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES]; #define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ - u32 
rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)]; + u32 *rss_key; + +#ifdef HAVE_TX_MQ +#ifndef HAVE_NETDEV_SELECT_QUEUE + unsigned int indices; +#endif +#endif + bool need_crosstalk_fix; }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) @@ -818,17 +1033,20 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) case ixgbe_mac_82599EB: case ixgbe_mac_X540: return IXGBE_MAX_RSS_INDICES; + break; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: return IXGBE_MAX_RSS_INDICES_X550; + break; default: return 0; + break; } } struct ixgbe_fdir_filter { - struct hlist_node fdir_node; + struct hlist_node fdir_node; union ixgbe_atr_input filter; u16 sw_idx; u64 action; @@ -839,57 +1057,66 @@ enum ixgbe_state_t { __IXGBE_RESETTING, __IXGBE_DOWN, __IXGBE_DISABLED, - __IXGBE_REMOVING, + __IXGBE_REMOVE, __IXGBE_SERVICE_SCHED, __IXGBE_SERVICE_INITED, __IXGBE_IN_SFP_INIT, +#ifdef HAVE_PTP_1588_CLOCK __IXGBE_PTP_RUNNING, __IXGBE_PTP_TX_IN_PROGRESS, +#endif __IXGBE_RESET_REQUESTED, }; struct ixgbe_cb { +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT union { /* Union defining head/tail partner */ struct sk_buff *head; struct sk_buff *tail; }; +#endif dma_addr_t dma; - u16 append_cnt; - bool page_released; +#ifdef HAVE_VLAN_RX_REGISTER + u16 vid; /* VLAN tag */ +#endif + u16 append_cnt; /* number of skb's appended */ +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + bool page_released; +#endif }; #define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb) -enum ixgbe_boards { - board_82598, - board_82599, - board_X540, - board_X550, - board_X550EM_x, - board_x550em_x_fw, - board_x550em_a, - board_x550em_a_fw, -}; - -extern const struct ixgbe_info ixgbe_82598_info; -extern const struct ixgbe_info ixgbe_82599_info; -extern const struct ixgbe_info ixgbe_X540_info; -extern const struct ixgbe_info ixgbe_X550_info; -extern const struct ixgbe_info ixgbe_X550EM_x_info; -extern const struct ixgbe_info ixgbe_x550em_x_fw_info; 
-extern const struct ixgbe_info ixgbe_x550em_a_info; -extern const struct ixgbe_info ixgbe_x550em_a_fw_info; -#ifdef CONFIG_IXGBE_DCB -extern const struct dcbnl_rtnl_ops dcbnl_ops; -#endif +/* ESX ixgbe CIM IOCTL definition */ +#ifdef IXGBE_SYSFS +void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter); +int ixgbe_sysfs_init(struct ixgbe_adapter *adapter); +#endif /* IXGBE_SYSFS */ +#ifdef IXGBE_PROCFS +void ixgbe_procfs_exit(struct ixgbe_adapter *adapter); +int ixgbe_procfs_init(struct ixgbe_adapter *adapter); +int ixgbe_procfs_topdir_init(void); +void ixgbe_procfs_topdir_exit(void); +#endif /* IXGBE_PROCFS */ + +extern struct dcbnl_rtnl_ops ixgbe_dcbnl_ops; +int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max); + +u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index); + +/* needed by ixgbe_main.c */ +int ixgbe_validate_mac_addr(u8 *mc_addr); +void ixgbe_check_options(struct ixgbe_adapter *adapter); +void ixgbe_assign_netdev_ops(struct net_device *netdev); + +/* needed by ixgbe_ethtool.c */ +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME extern char ixgbe_driver_name[]; +#else +extern const char ixgbe_driver_name[]; +#endif extern const char ixgbe_driver_version[]; -#ifdef IXGBE_FCOE -extern char ixgbe_default_device_descr[]; -#endif /* IXGBE_FCOE */ -int ixgbe_open(struct net_device *netdev); -int ixgbe_close(struct net_device *netdev); void ixgbe_up(struct ixgbe_adapter *adapter); void ixgbe_down(struct ixgbe_adapter *adapter); void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); @@ -899,108 +1126,130 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *); int ixgbe_setup_tx_resources(struct ixgbe_ring *); void ixgbe_free_rx_resources(struct ixgbe_ring *); void ixgbe_free_tx_resources(struct ixgbe_ring *); -void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); -void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); -void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *); +void 
ixgbe_configure_rx_ring(struct ixgbe_adapter *, + struct ixgbe_ring *); +void ixgbe_configure_tx_ring(struct ixgbe_adapter *, + struct ixgbe_ring *); void ixgbe_update_stats(struct ixgbe_adapter *adapter); int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); -bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, - u16 subdevice_id); -#ifdef CONFIG_PCI_IOV -void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); -#endif -int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, - const u8 *addr, u16 queue); -int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, - const u8 *addr, u16 queue); -void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid); +void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter); +void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter); void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); -netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *, - struct ixgbe_ring *); +bool ixgbe_is_ixgbe(struct pci_dev *pcidev); +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, + struct ixgbe_adapter *, + struct ixgbe_ring *); void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, - struct ixgbe_tx_buffer *); + struct ixgbe_tx_buffer *); void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); -void ixgbe_write_eitr(struct ixgbe_q_vector *); -int ixgbe_poll(struct napi_struct *napi, int budget); -int ethtool_ioctl(struct ifreq *ifr); -s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); -s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); -s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl); -s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_hash_dword input, - union ixgbe_atr_hash_dword common, - u8 queue); -s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input_mask); -s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, - union 
ixgbe_atr_input *input, - u16 soft_id, u8 queue); -s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input, - u16 soft_id); -void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, - union ixgbe_atr_input *mask); -int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, - struct ixgbe_fdir_filter *input, - u16 sw_idx); -void ixgbe_set_rx_mode(struct net_device *netdev); -#ifdef CONFIG_IXGBE_DCB -void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); +void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *); +void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *); +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) +void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *, u32); #endif +void ixgbe_set_rx_mode(struct net_device *netdev); +int ixgbe_write_mc_addr_list(struct net_device *netdev); int ixgbe_setup_tc(struct net_device *dev, u8 tc); void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); void ixgbe_do_reset(struct net_device *netdev); -#ifdef CONFIG_IXGBE_HWMON -void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter); -int ixgbe_sysfs_init(struct ixgbe_adapter *adapter); -#endif /* CONFIG_IXGBE_HWMON */ -#ifdef IXGBE_FCOE +void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector); +int ixgbe_poll(struct napi_struct *napi, int budget); +void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, + struct ixgbe_ring *); +void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter); +void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter); +#ifdef ETHTOOL_OPS_COMPAT +int ethtool_ioctl(struct ifreq *ifr); +#endif + +#if IS_ENABLED(CONFIG_FCOE) void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); -int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, - u8 *hdr_len); +int ixgbe_fso(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + u8 *hdr_len); int ixgbe_fcoe_ddp(struct ixgbe_adapter 
*adapter, - union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb); + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb); int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc); + struct scatterlist *sgl, unsigned int sgc); +#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc); + struct scatterlist *sgl, unsigned int sgc); +#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter); void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter); +#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE int ixgbe_fcoe_enable(struct net_device *netdev); int ixgbe_fcoe_disable(struct net_device *netdev); -#ifdef CONFIG_IXGBE_DCB -u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter); +#else +int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter); +void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter); +#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ +#if IS_ENABLED(CONFIG_DCB) +#ifdef HAVE_DCBNL_OPS_GETAPP +u8 ixgbe_fcoe_getapp(struct net_device *netdev); +#endif /* HAVE_DCBNL_OPS_GETAPP */ u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); -#endif /* CONFIG_IXGBE_DCB */ -int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); -int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, - struct netdev_fcoe_hbainfo *info); +#endif /* CONFIG_DCB */ u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter); -#endif /* IXGBE_FCOE */ -#ifdef CONFIG_DEBUG_FS +#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN +int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); +#endif +#endif /* CONFIG_FCOE */ + +#ifdef HAVE_IXGBE_DEBUG_FS void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter); void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter); void ixgbe_dbg_init(void); void ixgbe_dbg_exit(void); -#else -static inline void 
ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {} -static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {} -static inline void ixgbe_dbg_init(void) {} -static inline void ixgbe_dbg_exit(void) {} -#endif /* CONFIG_DEBUG_FS */ +#endif /* HAVE_IXGBE_DEBUG_FS */ + +#if IS_ENABLED(CONFIG_BQL) || defined(HAVE_SKB_XMIT_MORE) static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) { return netdev_get_tx_queue(ring->netdev, ring->queue_index); } +#endif + +#if IS_ENABLED(CONFIG_DCB) +#ifdef HAVE_DCBNL_IEEE +s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame); +#endif /* HAVE_DCBNL_IEEE */ +#endif /* CONFIG_DCB */ + +bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + u16 subdevice_id); +void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring); +int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn); +void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); +int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, + const u8 *addr, u16 queue); +int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, + const u8 *addr, u16 queue); +int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool); +void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid); +#ifndef HAVE_VLAN_RX_REGISTER +void ixgbe_vlan_mode(struct net_device *, u32); +#else +#ifdef CONFIG_PCI_IOV +int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan); +#endif +#endif +#ifdef HAVE_PTP_1588_CLOCK void ixgbe_ptp_init(struct ixgbe_adapter *adapter); -void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter); void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); +void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter); void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); -void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *); -void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb); +void 
ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter); +void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb); +void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb); static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -1021,20 +1270,18 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, rx_ring->last_rx_timestamp = jiffies; } -int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); +int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter); +#endif /* HAVE_PTP_1588_CLOCK */ #ifdef CONFIG_PCI_IOV void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); #endif - -netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, - struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring); u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); +void ixgbe_store_key(struct ixgbe_adapter *adapter); void ixgbe_store_reta(struct ixgbe_adapter *adapter); -s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, - u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); + +void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); #endif /* _IXGBE_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.c index 8a32eb7d47b9..8b7fc593427e 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.c +++ 
b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -26,11 +22,10 @@ *******************************************************************************/ -#include -#include -#include - -#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_82598.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" #include "ixgbe_phy.h" #define IXGBE_82598_MAX_TX_QUEUES 32 @@ -38,14 +33,30 @@ #define IXGBE_82598_RAR_ENTRIES 16 #define IXGBE_82598_MC_TBL_SIZE 128 #define IXGBE_82598_VFT_TBL_SIZE 128 -#define IXGBE_82598_RX_PB_SIZE 512 +#define IXGBE_82598_RX_PB_SIZE 512 -static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, +STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); +STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); +STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete); +STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *link_up, + bool link_up_wait_to_complete); +STATIC s32 ixgbe_setup_mac_link_82598(struct 
ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); -static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, - u8 *eeprom_data); - +STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); +STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw); +STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, + u32 headroom, int strategy); +STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); /** * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout * @hw: pointer to the HW structure @@ -56,14 +67,11 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, * increase the value to either 10ms to 250ms for capability version 1 config, * or 16ms to 55ms for version 2. **/ -static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) +void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) { u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); u16 pcie_devctl2; - if (ixgbe_removed(hw->hw_addr)) - return; - /* only take action if timeout value is defaulted to 0 */ if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) goto out; @@ -82,31 +90,82 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) * directly in order to set the completion timeout value for * 16ms to 55ms */ - pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); + pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; - ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); + IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); out: /* disable completion timeout resend */ gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); } -static s32 
ixgbe_get_invariants_82598(struct ixgbe_hw *hw) +/** + * ixgbe_init_ops_82598 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for 82598. + * Does not touch the hardware. + **/ +s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; - /* Call PHY identify routine to get the phy type */ - ixgbe_identify_phy_generic(hw); - - mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; - mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; - mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; - mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE; - mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; - mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; - mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); - - return 0; + DEBUGFUNC("ixgbe_init_ops_82598"); + + ret_val = ixgbe_init_phy_ops_generic(hw); + ret_val = ixgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.init = ixgbe_init_phy_ops_82598; + + /* MAC */ + mac->ops.start_hw = ixgbe_start_hw_82598; + mac->ops.reset_hw = ixgbe_reset_hw_82598; + mac->ops.get_media_type = ixgbe_get_media_type_82598; + mac->ops.get_supported_physical_layer = + ixgbe_get_supported_physical_layer_82598; + mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598; + mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598; + mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598; + mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598; + + /* RAR, Multicast, VLAN */ + mac->ops.set_vmdq = ixgbe_set_vmdq_82598; + mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598; + mac->ops.set_vfta = ixgbe_set_vfta_82598; + mac->ops.set_vlvf = NULL; + mac->ops.clear_vfta = ixgbe_clear_vfta_82598; + + /* Flow Control */ + mac->ops.fc_enable = ixgbe_fc_enable_82598; + + mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; + mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; + mac->rx_pb_size = 
IXGBE_82598_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + /* SFP+ Module */ + phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598; + phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598; + + /* Link */ + mac->ops.check_link = ixgbe_check_mac_link_82598; + mac->ops.setup_link = ixgbe_setup_mac_link_82598; + mac->ops.flap_tx_laser = NULL; + mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598; + mac->ops.setup_rxpba = ixgbe_set_rxpba_82598; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = NULL; + + mac->ops.get_rtrup2tc = NULL; + + return ret_val; } /** @@ -114,54 +173,63 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * * Initialize any function pointers that were not able to be - * set during get_invariants because the PHY/SFP type was + * set during init_shared_code because the PHY/SFP type was * not known. Perform the SFP init if necessary. 
* **/ -static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) +s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; - s32 ret_val; + s32 ret_val = IXGBE_SUCCESS; u16 list_offset, data_offset; + DEBUGFUNC("ixgbe_init_phy_ops_82598"); + /* Identify the PHY */ phy->ops.identify(hw); /* Overwrite the link function pointers if copper PHY */ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { - mac->ops.setup_link = &ixgbe_setup_copper_link_82598; + mac->ops.setup_link = ixgbe_setup_copper_link_82598; mac->ops.get_link_capabilities = - &ixgbe_get_copper_link_capabilities_generic; + ixgbe_get_copper_link_capabilities_generic; } switch (hw->phy.type) { case ixgbe_phy_tn: - phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; - phy->ops.check_link = &ixgbe_check_phy_link_tnx; + phy->ops.setup_link = ixgbe_setup_phy_link_tnx; + phy->ops.check_link = ixgbe_check_phy_link_tnx; + phy->ops.get_firmware_version = + ixgbe_get_phy_firmware_version_tnx; break; case ixgbe_phy_nl: - phy->ops.reset = &ixgbe_reset_phy_nl; + phy->ops.reset = ixgbe_reset_phy_nl; /* Call SFP+ identify routine to get the SFP+ module type */ ret_val = phy->ops.identify_sfp(hw); - if (ret_val) - return ret_val; - if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) - return IXGBE_ERR_SFP_NOT_SUPPORTED; + if (ret_val != IXGBE_SUCCESS) + goto out; + else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) { + ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } /* Check to see if SFP+ module is supported */ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, - &list_offset, - &data_offset); - if (ret_val) - return IXGBE_ERR_SFP_NOT_SUPPORTED; + &list_offset, + &data_offset); + if (ret_val != IXGBE_SUCCESS) { + ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } break; default: break; } - return 0; +out: + return ret_val; } /** @@ -169,22 +237,41 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) * @hw: pointer to hardware 
structure * * Starts the hardware using the generic start_hw function. - * Disables relaxed ordering for archs other than SPARC - * Then set pcie completion timeout + * Disables relaxed ordering Then set pcie completion timeout * **/ -static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) +s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) { - s32 ret_val; + u32 regval; + u32 i; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_hw_82598"); ret_val = ixgbe_start_hw_generic(hw); if (ret_val) return ret_val; + /* Disable relaxed ordering */ + for (i = 0; ((i < hw->mac.max_tx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); + } + + for (i = 0; ((i < hw->mac.max_rx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } + /* set the completion timeout for interface */ ixgbe_set_pcie_completion_timeout(hw); - return 0; + return ret_val; } /** @@ -195,12 +282,15 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) * * Determines the link capabilities by reading the AUTOC register. **/ -static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, +STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { + s32 status = IXGBE_SUCCESS; u32 autoc = 0; + DEBUGFUNC("ixgbe_get_link_capabilities_82598"); + /* * Determine link capabilities based on the stored value of AUTOC, * which represents EEPROM defaults. 
If AUTOC value has not been @@ -238,10 +328,11 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, break; default: - return IXGBE_ERR_LINK_SETUP; + status = IXGBE_ERR_LINK_SETUP; + break; } - return 0; + return status; } /** @@ -250,14 +341,18 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, * * Returns the media type (fiber, copper, backplane) **/ -static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) +STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) { + enum ixgbe_media_type media_type; + + DEBUGFUNC("ixgbe_get_media_type_82598"); + /* Detect if there is a copper PHY attached. */ switch (hw->phy.type) { case ixgbe_phy_cu_unknown: case ixgbe_phy_tn: - return ixgbe_media_type_copper; - + media_type = ixgbe_media_type_copper; + goto out; default: break; } @@ -267,27 +362,30 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82598: case IXGBE_DEV_ID_82598_BX: /* Default device ID is mezzanine card KX/KX4 */ - return ixgbe_media_type_backplane; - + media_type = ixgbe_media_type_backplane; + break; case IXGBE_DEV_ID_82598AF_DUAL_PORT: case IXGBE_DEV_ID_82598AF_SINGLE_PORT: case IXGBE_DEV_ID_82598_DA_DUAL_PORT: case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: case IXGBE_DEV_ID_82598EB_XF_LR: case IXGBE_DEV_ID_82598EB_SFP_LOM: - return ixgbe_media_type_fiber; - + media_type = ixgbe_media_type_fiber; + break; case IXGBE_DEV_ID_82598EB_CX4: case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: - return ixgbe_media_type_cx4; - + media_type = ixgbe_media_type_cx4; + break; case IXGBE_DEV_ID_82598AT: case IXGBE_DEV_ID_82598AT2: - return ixgbe_media_type_copper; - + media_type = ixgbe_media_type_copper; + break; default: - return ixgbe_media_type_unknown; + media_type = ixgbe_media_type_unknown; + break; } +out: + return media_type; } /** @@ -296,8 +394,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) * * Enable flow control according 
to the current settings. **/ -static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) +s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) { + s32 ret_val = IXGBE_SUCCESS; u32 fctrl_reg; u32 rmcs_reg; u32 reg; @@ -306,18 +405,23 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) int i; bool link_up; + DEBUGFUNC("ixgbe_fc_enable_82598"); + /* Validate the water mark configuration */ - if (!hw->fc.pause_time) - return IXGBE_ERR_INVALID_LINK_SETTINGS; + if (!hw->fc.pause_time) { + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } /* Low water mark of zero causes XOFF floods */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && hw->fc.high_water[i]) { if (!hw->fc.low_water[i] || hw->fc.low_water[i] >= hw->fc.high_water[i]) { - hw_dbg(hw, "Invalid water mark configuration\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; + DEBUGOUT("Invalid water mark configuration\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; } } } @@ -343,7 +447,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) } /* Negotiate the fc mode to use */ - hw->mac.ops.fc_autoneg(hw); + ixgbe_fc_autoneg(hw); /* Disable any previous flow control settings */ fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); @@ -393,8 +497,10 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; break; default: - hw_dbg(hw, "Flow control param set incorrectly\n"); - return IXGBE_ERR_CONFIG; + DEBUGOUT("Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; } /* Set 802.3x based flow control settings. */ @@ -403,7 +509,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); /* Set up and enable Rx high/low water mark thresholds, enable XON. 
*/ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && hw->fc.high_water[i]) { fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; @@ -419,13 +525,14 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) /* Configure pause time (2 TCs per register) */ reg = hw->fc.pause_time * 0x00010001; - for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); /* Configure flow control refresh threshold value */ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - return 0; +out: + return ret_val; } /** @@ -435,13 +542,15 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) * Configures link settings based on values in the ixgbe_hw struct. * Restarts the link. Performs autonegotiation if needed. **/ -static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, +STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, bool autoneg_wait_to_complete) { u32 autoc_reg; u32 links_reg; u32 i; - s32 status = 0; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_mac_link_82598"); /* Restart link */ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); @@ -459,17 +568,17 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_reg & IXGBE_LINKS_KX_AN_COMP) break; - msleep(100); + msec_delay(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; - hw_dbg(hw, "Autonegotiation did not complete.\n"); + DEBUGOUT("Autonegotiation did not complete.\n"); } } } /* Add delay to filter out noises during initial link setup */ - msleep(50); + msec_delay(50); return status; } @@ -481,31 +590,32 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, * Function indicates success when phy link is available. If phy is not ready * within 5 seconds of MAC indicating link, the function returns error. 
**/ -static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) +STATIC s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) { u32 timeout; u16 an_reg; if (hw->device_id != IXGBE_DEV_ID_82598AT2) - return 0; + return IXGBE_SUCCESS; for (timeout = 0; timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { - hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg); + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg); - if ((an_reg & MDIO_AN_STAT1_COMPLETE) && - (an_reg & MDIO_STAT1_LSTATUS)) + if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) && + (an_reg & IXGBE_MII_AUTONEG_LINK_UP)) break; - msleep(100); + msec_delay(100); } if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { - hw_dbg(hw, "Link was indicated but link is down\n"); + DEBUGOUT("Link was indicated but link is down\n"); return IXGBE_ERR_LINK_SETUP; } - return 0; + return IXGBE_SUCCESS; } /** @@ -517,7 +627,7 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) * * Reads the links register to determine if link is up and the current speed **/ -static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, +STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete) { @@ -525,19 +635,21 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 i; u16 link_reg, adapt_comp_reg; + DEBUGFUNC("ixgbe_check_mac_link_82598"); + /* - * SERDES PHY requires us to read link status from register 0xC79F. - * Bit 0 set indicates link is up/ready; clear indicates link down. - * 0xC00C is read to check that the XAUI lanes are active. Bit 0 - * clear indicates active; set indicates inactive. + * SERDES PHY requires us to read link status from undocumented + * register 0xC79F. Bit 0 set indicates link is up/ready; clear + * indicates link down. OxC00C is read to check that the XAUI lanes + * are active. Bit 0 clear indicates active; set indicates inactive. 
*/ if (hw->phy.type == ixgbe_phy_nl) { - hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); - hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); - hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD, + hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); + hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV, &adapt_comp_reg); if (link_up_wait_to_complete) { - for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + for (i = 0; i < hw->mac.max_link_up_time; i++) { if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) { *link_up = true; @@ -545,12 +657,12 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, } else { *link_up = false; } - msleep(100); + msec_delay(100); hw->phy.ops.read_reg(hw, 0xC79F, - MDIO_MMD_PMAPMD, + IXGBE_TWINAX_DEV, &link_reg); hw->phy.ops.read_reg(hw, 0xC00C, - MDIO_MMD_PMAPMD, + IXGBE_TWINAX_DEV, &adapt_comp_reg); } } else { @@ -560,20 +672,20 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, *link_up = false; } - if (!*link_up) - return 0; + if (*link_up == false) + goto out; } links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (link_up_wait_to_complete) { - for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + for (i = 0; i < hw->mac.max_link_up_time; i++) { if (links_reg & IXGBE_LINKS_UP) { *link_up = true; break; } else { *link_up = false; } - msleep(100); + msec_delay(100); links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); } } else { @@ -588,11 +700,12 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, else *speed = IXGBE_LINK_SPEED_1GB_FULL; - if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up && - (ixgbe_validate_link_ready(hw) != 0)) + if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) && + (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS)) *link_up = false; - return 0; +out: + return IXGBE_SUCCESS; } /** @@ -603,22 +716,25 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, * * Set the link speed in the AUTOC 
register and restarts link. **/ -static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, +STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { - bool autoneg = false; + bool autoneg = false; + s32 status = IXGBE_SUCCESS; ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; - u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - u32 autoc = curr_autoc; - u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; + u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc = curr_autoc; + u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; + + DEBUGFUNC("ixgbe_setup_mac_link_82598"); /* Check to see if speed passed in is supported. */ - ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg); + ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); speed &= link_capabilities; if (speed == IXGBE_LINK_SPEED_UNKNOWN) - return IXGBE_ERR_LINK_SETUP; + status = IXGBE_ERR_LINK_SETUP; /* Set KX4/KX support according to speed requested */ else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || @@ -632,11 +748,17 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); } - /* Setup and restart the link based on the new values in - * ixgbe_hw This will write the AUTOC register based on the new - * stored values - */ - return ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); + if (status == IXGBE_SUCCESS) { + /* + * Setup and restart the link based on the new values in + * ixgbe_hw This will write the AUTOC register based on the new + * stored values + */ + status = ixgbe_start_mac_link_82598(hw, + autoneg_wait_to_complete); + } + + return status; } @@ -648,12 +770,14 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, * * Sets the link speed in the AUTOC register in the MAC and restarts link. 
**/ -static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) +STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { s32 status; + DEBUGFUNC("ixgbe_setup_copper_link_82598"); + /* Setup the PHY according to input speed */ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); @@ -671,20 +795,22 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, * clears all interrupts, performing a PHY reset, and performing a link (MAC) * reset. **/ -static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) +STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) { - s32 status; - s32 phy_status = 0; + s32 status = IXGBE_SUCCESS; + s32 phy_status = IXGBE_SUCCESS; u32 ctrl; u32 gheccr; u32 i; u32 autoc; u8 analog_val; + DEBUGFUNC("ixgbe_reset_hw_82598"); + /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); - if (status) - return status; + if (status != IXGBE_SUCCESS) + goto reset_hw_out; /* * Power up the Atlas Tx lanes if they are currently powered down. 
@@ -726,7 +852,7 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) /* Init PHY and function pointers, perform SFP setup */ phy_status = hw->phy.ops.init(hw); if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) - return phy_status; + goto reset_hw_out; if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) goto mac_reset_top; @@ -741,21 +867,20 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST; IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); - usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { + usec_delay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST)) break; - udelay(1); } if (ctrl & IXGBE_CTRL_RST) { status = IXGBE_ERR_RESET_FAILED; - hw_dbg(hw, "Reset polling failed to complete.\n"); + DEBUGOUT("Reset polling failed to complete.\n"); } - msleep(50); + msec_delay(50); /* * Double resets are required for recovery from certain error @@ -768,7 +893,7 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) } gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); - gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6)); + gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); /* @@ -793,7 +918,8 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) */ hw->mac.ops.init_rx_addrs(hw); - if (phy_status) +reset_hw_out: + if (phy_status != IXGBE_SUCCESS) status = phy_status; return status; @@ -805,14 +931,16 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) * @rar: receive address register index to associate with a VMDq index * @vmdq: VMDq set index **/ -static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; + DEBUGFUNC("ixgbe_set_vmdq_82598"); + /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { - hw_dbg(hw, "RAR 
index %d is out of range.\n", rar); + DEBUGOUT1("RAR index %d is out of range.\n", rar); return IXGBE_ERR_INVALID_ARGUMENT; } @@ -820,7 +948,7 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) rar_high &= ~IXGBE_RAH_VIND_MASK; rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); - return 0; + return IXGBE_SUCCESS; } /** @@ -829,15 +957,16 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) * @rar: receive address register index to associate with a VMDq index * @vmdq: VMDq clear index (not used in 82598, but elsewhere) **/ -static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; + UNREFERENCED_1PARAMETER(vmdq); /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { - hw_dbg(hw, "RAR index %d is out of range.\n", rar); + DEBUGOUT1("RAR index %d is out of range.\n", rar); return IXGBE_ERR_INVALID_ARGUMENT; } @@ -847,7 +976,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); } - return 0; + return IXGBE_SUCCESS; } /** @@ -860,14 +989,18 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) * * Turn on/off specified VLAN in the VLAN filter table. 
**/ -static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on, bool vlvf_bypass) +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) { u32 regindex; u32 bitindex; u32 bits; u32 vftabyte; + UNREFERENCED_1PARAMETER(vlvf_bypass); + + DEBUGFUNC("ixgbe_set_vfta_82598"); + if (vlan > 4095) return IXGBE_ERR_PARAM; @@ -890,13 +1023,13 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); if (vlan_on) /* Turn on this VLAN id */ - bits |= BIT(bitindex); + bits |= (1 << bitindex); else /* Turn off this VLAN id */ - bits &= ~BIT(bitindex); + bits &= ~(1 << bitindex); IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); - return 0; + return IXGBE_SUCCESS; } /** @@ -905,11 +1038,13 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, * * Clears the VLAN filer table, and the VMDq index associated with the filter **/ -static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) +STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) { u32 offset; u32 vlanbyte; + DEBUGFUNC("ixgbe_clear_vfta_82598"); + for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); @@ -918,7 +1053,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), 0); - return 0; + return IXGBE_SUCCESS; } /** @@ -929,18 +1064,20 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) * * Performs read operation to Atlas analog register specified. 
**/ -static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) +s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) { u32 atlas_ctl; + DEBUGFUNC("ixgbe_read_analog_reg8_82598"); + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); IXGBE_WRITE_FLUSH(hw); - udelay(10); + usec_delay(10); atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); *val = (u8)atlas_ctl; - return 0; + return IXGBE_SUCCESS; } /** @@ -951,16 +1088,18 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) * * Performs write operation to Atlas analog register specified. **/ -static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) +s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) { u32 atlas_ctl; + DEBUGFUNC("ixgbe_write_analog_reg8_82598"); + atlas_ctl = (reg << 8) | val; IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); IXGBE_WRITE_FLUSH(hw); - udelay(10); + usec_delay(10); - return 0; + return IXGBE_SUCCESS; } /** @@ -970,60 +1109,62 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) * @byte_offset: byte offset to read from dev_addr * @eeprom_data: value read * - * Performs 8 byte read operation to SFP module's data over I2C interface. + * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. 
**/ -static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, +STATIC s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, u8 byte_offset, u8 *eeprom_data) { - s32 status = 0; + s32 status = IXGBE_SUCCESS; u16 sfp_addr = 0; u16 sfp_data = 0; u16 sfp_stat = 0; u16 gssr; u32 i; + DEBUGFUNC("ixgbe_read_i2c_phy_82598"); + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) gssr = IXGBE_GSSR_PHY1_SM; else gssr = IXGBE_GSSR_PHY0_SM; - if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS) return IXGBE_ERR_SWFW_SYNC; if (hw->phy.type == ixgbe_phy_nl) { /* - * phy SDA/SCL registers are at addresses 0xC30A to - * 0xC30D. These registers are used to talk to the SFP+ + * NetLogic phy SDA/SCL registers are at addresses 0xC30A to + * 0xC30D. These registers are used to talk to the SFP+ * module's EEPROM through the SDA/SCL (I2C) interface. */ sfp_addr = (dev_addr << 8) + byte_offset; sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); hw->phy.ops.write_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, - MDIO_MMD_PMAPMD, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, sfp_addr); /* Poll status */ for (i = 0; i < 100; i++) { hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, - MDIO_MMD_PMAPMD, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_stat); sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) break; - usleep_range(10000, 20000); + msec_delay(10); } if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { - hw_dbg(hw, "EEPROM read did not pass.\n"); + DEBUGOUT("EEPROM read did not pass.\n"); status = IXGBE_ERR_SFP_NOT_PRESENT; goto out; } /* Read data */ hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, - MDIO_MMD_PMAPMD, &sfp_data); + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data); *eeprom_data = (u8)(sfp_data >> 8); } else { @@ -1043,8 +1184,8 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, * * Performs 8 byte read operation to SFP 
module's EEPROM over I2C interface. **/ -static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, - u8 *eeprom_data) +s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) { return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR, byte_offset, eeprom_data); @@ -1058,13 +1199,115 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, * * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C **/ -static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, - u8 *sff8472_data) +STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) { return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2, byte_offset, sff8472_data); } +/** + * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. + **/ +u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) +{ + u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_supported_physical_layer_82598"); + + hw->phy.ops.identify(hw); + + /* Copper PHY must be checked before AUTOC LMS to determine correct + * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ + switch (hw->phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_cu_unknown: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= 
IXGBE_PHYSICAL_LAYER_100BASE_TX; + goto out; + default: + break; + } + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_AN: + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + if (pma_pmd_1g == IXGBE_AUTOC_1G_KX) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; + else + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; + break; + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; + else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + else /* XAUI */ + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + break; + case IXGBE_AUTOC_LMS_KX4_AN: + case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: + if (autoc & IXGBE_AUTOC_KX_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + break; + default: + break; + } + + if (hw->phy.type == ixgbe_phy_nl) { + hw->phy.ops.identify_sfp(hw); + + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_da_cu: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case ixgbe_sfp_type_sr: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + break; + case ixgbe_sfp_type_lr: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + break; + } + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + break; + case IXGBE_DEV_ID_82598EB_XF_LR: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + break; + } + +out: + return physical_layer; +} + /** * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple * port devices. 
@@ -1073,12 +1316,14 @@ static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, * Calls common function and corrects issue with some single port devices * that enable LAN1 but not LAN0. **/ -static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) +void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) { struct ixgbe_bus_info *bus = &hw->bus; u16 pci_gen = 0; u16 pci_ctrl2 = 0; + DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598"); + ixgbe_set_lan_id_multi_port_pcie(hw); /* check if LAN0 is disabled */ @@ -1104,11 +1349,12 @@ static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) * @headroom: reserve n KB of headroom * @strategy: packet buffer allocation strategy **/ -static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, +STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom, int strategy) { u32 rxpktsize = IXGBE_RXPBSIZE_64KB; - u8 i = 0; + u8 i = 0; + UNREFERENCED_1PARAMETER(headroom); if (!num_pb) return; @@ -1136,85 +1382,18 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); } -static const struct ixgbe_mac_operations mac_ops_82598 = { - .init_hw = &ixgbe_init_hw_generic, - .reset_hw = &ixgbe_reset_hw_82598, - .start_hw = &ixgbe_start_hw_82598, - .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, - .get_media_type = &ixgbe_get_media_type_82598, - .enable_rx_dma = &ixgbe_enable_rx_dma_generic, - .get_mac_addr = &ixgbe_get_mac_addr_generic, - .stop_adapter = &ixgbe_stop_adapter_generic, - .get_bus_info = &ixgbe_get_bus_info_generic, - .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598, - .read_analog_reg8 = &ixgbe_read_analog_reg8_82598, - .write_analog_reg8 = &ixgbe_write_analog_reg8_82598, - .setup_link = &ixgbe_setup_mac_link_82598, - .set_rxpba = &ixgbe_set_rxpba_82598, - .check_link = &ixgbe_check_mac_link_82598, - .get_link_capabilities = &ixgbe_get_link_capabilities_82598, - 
.led_on = &ixgbe_led_on_generic, - .led_off = &ixgbe_led_off_generic, - .init_led_link_act = ixgbe_init_led_link_act_generic, - .blink_led_start = &ixgbe_blink_led_start_generic, - .blink_led_stop = &ixgbe_blink_led_stop_generic, - .set_rar = &ixgbe_set_rar_generic, - .clear_rar = &ixgbe_clear_rar_generic, - .set_vmdq = &ixgbe_set_vmdq_82598, - .clear_vmdq = &ixgbe_clear_vmdq_82598, - .init_rx_addrs = &ixgbe_init_rx_addrs_generic, - .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, - .enable_mc = &ixgbe_enable_mc_generic, - .disable_mc = &ixgbe_disable_mc_generic, - .clear_vfta = &ixgbe_clear_vfta_82598, - .set_vfta = &ixgbe_set_vfta_82598, - .fc_enable = &ixgbe_fc_enable_82598, - .setup_fc = ixgbe_setup_fc_generic, - .fc_autoneg = ixgbe_fc_autoneg, - .set_fw_drv_ver = NULL, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, - .release_swfw_sync = &ixgbe_release_swfw_sync, - .init_swfw_sync = NULL, - .get_thermal_sensor_data = NULL, - .init_thermal_sensor_thresh = NULL, - .prot_autoc_read = &prot_autoc_read_generic, - .prot_autoc_write = &prot_autoc_write_generic, - .enable_rx = &ixgbe_enable_rx_generic, - .disable_rx = &ixgbe_disable_rx_generic, -}; - -static const struct ixgbe_eeprom_operations eeprom_ops_82598 = { - .init_params = &ixgbe_init_eeprom_params_generic, - .read = &ixgbe_read_eerd_generic, - .write = &ixgbe_write_eeprom_generic, - .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic, - .read_buffer = &ixgbe_read_eerd_buffer_generic, - .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, - .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, - .update_checksum = &ixgbe_update_eeprom_checksum_generic, -}; - -static const struct ixgbe_phy_operations phy_ops_82598 = { - .identify = &ixgbe_identify_phy_generic, - .identify_sfp = &ixgbe_identify_module_generic, - .init = &ixgbe_init_phy_ops_82598, - .reset = &ixgbe_reset_phy_generic, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, - 
.read_reg_mdi = &ixgbe_read_phy_reg_mdi, - .write_reg_mdi = &ixgbe_write_phy_reg_mdi, - .setup_link = &ixgbe_setup_phy_link_generic, - .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, - .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598, - .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, - .check_overtemp = &ixgbe_tn_check_overtemp, -}; - -const struct ixgbe_info ixgbe_82598_info = { - .mac = ixgbe_mac_82598EB, - .get_invariants = &ixgbe_get_invariants_82598, - .mac_ops = &mac_ops_82598, - .eeprom_ops = &eeprom_ops_82598, - .phy_ops = &phy_ops_82598, - .mvals = ixgbe_mvals_8259X, -}; +/** + * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit + **/ +s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval) +{ + DEBUGFUNC("ixgbe_enable_rx_dma_82598"); + + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); + + return IXGBE_SUCCESS; +} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.h new file mode 100644 index 000000000000..1e0c15a2776f --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.h @@ -0,0 +1,43 @@ +/******************************************************************************* + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_82598_H_ +#define _IXGBE_82598_H_ + +u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw); +s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw); +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, + bool vlvf_bypass); +s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); +u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw); +s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw); +void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw); +void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval); +#endif /* _IXGBE_82598_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.c index d602637ccc40..0164233b7b10 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - 
Copyright(c) 1999 - 2016 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -26,13 +22,11 @@ *******************************************************************************/ -#include -#include -#include - -#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_82599.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" #include "ixgbe_phy.h" -#include "ixgbe_mbx.h" #define IXGBE_82599_MAX_TX_QUEUES 128 #define IXGBE_82599_MAX_RX_QUEUES 128 @@ -41,64 +35,37 @@ #define IXGBE_82599_VFT_TBL_SIZE 128 #define IXGBE_82599_RX_PB_SIZE 512 -static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); -static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); -static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); -static void -ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed); -static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); -static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, - bool autoneg_wait_to_complete); -static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, +STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 
ixgbe_link_speed speed, bool autoneg_wait_to_complete); -static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); -static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data); -static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data); -static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); -static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); - -bool ixgbe_mng_enabled(struct ixgbe_hw *hw) -{ - u32 fwsm, manc, factps; - - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); - if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) - return false; - - manc = IXGBE_READ_REG(hw, IXGBE_MANC); - if (!(manc & IXGBE_MANC_RCV_TCO_EN)) - return false; - - factps = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); - if (factps & IXGBE_FACTPS_MNGCG) - return false; - - return true; -} - -static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) +STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); +STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, + u16 offset, u16 *data); +STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); + +void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; - /* enable the laser control functions for SFP+ fiber + DEBUGFUNC("ixgbe_init_mac_link_ops_82599"); + + /* + * enable the laser control functions for SFP+ fiber * and MNG not enabled */ if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && !ixgbe_mng_enabled(hw)) { mac->ops.disable_tx_laser = - &ixgbe_disable_tx_laser_multispeed_fiber; + ixgbe_disable_tx_laser_multispeed_fiber; mac->ops.enable_tx_laser = - &ixgbe_enable_tx_laser_multispeed_fiber; - mac->ops.flap_tx_laser = 
&ixgbe_flap_tx_laser_multispeed_fiber; + ixgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber; + } else { mac->ops.disable_tx_laser = NULL; mac->ops.enable_tx_laser = NULL; @@ -107,27 +74,96 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) if (hw->phy.multispeed_fiber) { /* Set up dual speed SFP+ support */ - mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599; mac->ops.set_rate_select_speed = ixgbe_set_hard_rate_select_speed; } else { - if ((mac->ops.get_media_type(hw) == - ixgbe_media_type_backplane) && - (hw->phy.smart_speed == ixgbe_smart_speed_auto || - hw->phy.smart_speed == ixgbe_smart_speed_on) && - !ixgbe_verify_lesm_fw_enabled_82599(hw)) - mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; - else - mac->ops.setup_link = &ixgbe_setup_mac_link_82599; + if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) && + (hw->phy.smart_speed == ixgbe_smart_speed_auto || + hw->phy.smart_speed == ixgbe_smart_speed_on) && + !ixgbe_verify_lesm_fw_enabled_82599(hw)) { + mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed; + } else { + mac->ops.setup_link = ixgbe_setup_mac_link_82599; + } } } -static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) +/** + * ixgbe_init_phy_ops_82599 - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. 
+ * + **/ +s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) { - s32 ret_val; + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val = IXGBE_SUCCESS; + u32 esdp; + + DEBUGFUNC("ixgbe_init_phy_ops_82599"); + + if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { + /* Store flag indicating I2C bus access control unit. */ + hw->phy.qsfp_shared_i2c_bus = TRUE; + + /* Initialize access to QSFP+ I2C bus */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0_DIR; + esdp &= ~IXGBE_ESDP_SDP1_DIR; + esdp &= ~IXGBE_ESDP_SDP0; + esdp &= ~IXGBE_ESDP_SDP0_NATIVE; + esdp &= ~IXGBE_ESDP_SDP1_NATIVE; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599; + phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599; + } + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto init_phy_ops_out; + + /* Setup function pointers based on detected SFP module and speeds */ + ixgbe_init_mac_link_ops_82599(hw); + if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) + hw->phy.ops.reset = NULL; + + /* If copper media, overwrite with copper function pointers */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = ixgbe_setup_copper_link_82599; + mac->ops.get_link_capabilities = + ixgbe_get_copper_link_capabilities_generic; + } + + /* Set necessary function pointers based on PHY type */ + switch (hw->phy.type) { + case ixgbe_phy_tn: + phy->ops.setup_link = ixgbe_setup_phy_link_tnx; + phy->ops.check_link = ixgbe_check_phy_link_tnx; + phy->ops.get_firmware_version = + ixgbe_get_phy_firmware_version_tnx; + break; + default: + break; + } +init_phy_ops_out: + return ret_val; +} + +s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; u16 list_offset, data_offset, data_value; + DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); + if (hw->phy.sfp_type != 
ixgbe_sfp_type_unknown) { ixgbe_init_mac_link_ops_82599(hw); @@ -135,14 +171,16 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, &data_offset); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto setup_sfp_out; /* PHY config will finish before releasing the semaphore */ ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); - if (ret_val) - return IXGBE_ERR_SWFW_SYNC; + if (ret_val != IXGBE_SUCCESS) { + ret_val = IXGBE_ERR_SWFW_SYNC; + goto setup_sfp_out; + } if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) goto setup_sfp_err; @@ -155,12 +193,10 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) /* Release the semaphore */ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); - /* - * Delay obtaining semaphore again to allow FW access, - * semaphore_delay is in ms usleep_range needs us. + /* Delay obtaining semaphore again to allow FW access + * prot_autoc_write uses the semaphore too. */ - usleep_range(hw->eeprom.semaphore_delay * 1000, - hw->eeprom.semaphore_delay * 2000); + msec_delay(hw->eeprom.semaphore_delay); /* Restart DSP and set SFI mode */ ret_val = hw->mac.ops.prot_autoc_write(hw, @@ -168,23 +204,24 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) false); if (ret_val) { - hw_dbg(hw, " sfp module setup not complete\n"); - return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; + DEBUGOUT("sfp module setup not complete\n"); + ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; + goto setup_sfp_out; } + } - return 0; +setup_sfp_out: + return ret_val; setup_sfp_err: /* Release the semaphore */ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); - /* Delay obtaining semaphore again to allow FW access, - * semaphore_delay is in ms usleep_range needs us. 
- */ - usleep_range(hw->eeprom.semaphore_delay * 1000, - hw->eeprom.semaphore_delay * 2000); - hw_err(hw, "eeprom read at offset %d failed\n", data_offset); - return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; + /* Delay obtaining semaphore again to allow FW access */ + msec_delay(hw->eeprom.semaphore_delay); + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", data_offset); + return IXGBE_ERR_PHY; } /** @@ -195,27 +232,25 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) * * For this part (82599) we need to wrap read-modify-writes with a possible * FW/SW lock. It is assumed this lock will be freed with the next - * prot_autoc_write_82599(). Note, that locked can only be true in cases - * where this function doesn't return an error. - **/ -static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, - u32 *reg_val) + * prot_autoc_write_82599(). + */ +s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) { s32 ret_val; *locked = false; - /* If LESM is on then we need to hold the SW/FW semaphore. */ + /* If LESM is on then we need to hold the SW/FW semaphore. */ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); - if (ret_val) + if (ret_val != IXGBE_SUCCESS) return IXGBE_ERR_SWFW_SYNC; *locked = true; } *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); - return 0; + return IXGBE_SUCCESS; } /** @@ -223,14 +258,14 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, * @hw: pointer to hardware structure * @reg_val: value to write to AUTOC * @locked: bool to indicate whether the SW/FW lock was already taken by - * previous proc_autoc_read_82599. + * previous proc_autoc_read_82599. * - * This part (82599) may need to hold a the SW/FW lock around all writes to + * This part (82599) may need to hold the SW/FW lock around all writes to * AUTOC. Likewise after a write we need to do a pipeline reset. 
- **/ -static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) + */ +s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) { - s32 ret_val = 0; + s32 ret_val = IXGBE_SUCCESS; /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) @@ -243,7 +278,7 @@ static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) { ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); - if (ret_val) + if (ret_val != IXGBE_SUCCESS) return IXGBE_ERR_SWFW_SYNC; locked = true; @@ -262,79 +297,95 @@ static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) return ret_val; } -static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - - ixgbe_init_mac_link_ops_82599(hw); - - mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; - mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; - mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; - mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; - mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; - mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; - mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); - - return 0; -} - /** - * ixgbe_init_phy_ops_82599 - PHY/SFP specific init + * ixgbe_init_ops_82599 - Inits func ptrs and MAC type * @hw: pointer to hardware structure * - * Initialize any function pointers that were not able to be - * set during get_invariants because the PHY/SFP type was - * not known. Perform the SFP init if necessary. - * + * Initialize the function pointers and assign the MAC type for 82599. + * Does not touch the hardware. 
**/ -static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) + +s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; s32 ret_val; - u32 esdp; - if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { - /* Store flag indicating I2C bus access control unit. */ - hw->phy.qsfp_shared_i2c_bus = true; + DEBUGFUNC("ixgbe_init_ops_82599"); + + ixgbe_init_phy_ops_generic(hw); + ret_val = ixgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.identify = ixgbe_identify_phy_82599; + phy->ops.init = ixgbe_init_phy_ops_82599; + + /* MAC */ + mac->ops.reset_hw = ixgbe_reset_hw_82599; + mac->ops.get_media_type = ixgbe_get_media_type_82599; + mac->ops.get_supported_physical_layer = + ixgbe_get_supported_physical_layer_82599; + mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; + mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; + mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599; + mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599; + mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599; + mac->ops.start_hw = ixgbe_start_hw_82599; + mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; + mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; + mac->ops.get_device_caps = ixgbe_get_device_caps_generic; + mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; + mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; + mac->ops.prot_autoc_read = prot_autoc_read_82599; + mac->ops.prot_autoc_write = prot_autoc_write_82599; + + /* RAR, Multicast, VLAN */ + mac->ops.set_vmdq = ixgbe_set_vmdq_generic; + mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic; + mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; + mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; + mac->rar_highwater = 1; + mac->ops.set_vfta = ixgbe_set_vfta_generic; + mac->ops.set_vlvf = ixgbe_set_vlvf_generic; + mac->ops.clear_vfta 
= ixgbe_clear_vfta_generic; + mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; + mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599; + mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; + + /* Link */ + mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599; + mac->ops.check_link = ixgbe_check_mac_link_generic; + mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; + ixgbe_init_mac_link_ops_82599(hw); - /* Initialize access to QSFP+ I2C bus */ - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - esdp |= IXGBE_ESDP_SDP0_DIR; - esdp &= ~IXGBE_ESDP_SDP1_DIR; - esdp &= ~IXGBE_ESDP_SDP0; - esdp &= ~IXGBE_ESDP_SDP0_NATIVE; - esdp &= ~IXGBE_ESDP_SDP1_NATIVE; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_FLUSH(hw); + mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; + mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); - phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599; - phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599; - } + mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)) + & IXGBE_FWSM_MODE_MASK); - /* Identify the PHY or SFP module */ - ret_val = phy->ops.identify(hw); + hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; - /* Setup function pointers based on detected SFP module and speeds */ - ixgbe_init_mac_link_ops_82599(hw); + /* EEPROM */ + eeprom->ops.read = ixgbe_read_eeprom_82599; + eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599; - /* If copper media, overwrite with copper function pointers */ - if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { - mac->ops.setup_link = &ixgbe_setup_copper_link_82599; - mac->ops.get_link_capabilities = - &ixgbe_get_copper_link_capabilities_generic; - 
} + /* Manageability interface */ + mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; - /* Set necessary function pointers based on phy type */ - switch (hw->phy.type) { - case ixgbe_phy_tn: - phy->ops.check_link = &ixgbe_check_phy_link_tnx; - phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; - break; - default: - break; - } + mac->ops.get_thermal_sensor_data = + ixgbe_get_thermal_sensor_data_generic; + mac->ops.init_thermal_sensor_thresh = + ixgbe_init_thermal_sensor_thresh_generic; + + mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; return ret_val; } @@ -347,13 +398,17 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) * * Determines the link capabilities by reading the AUTOC register. **/ -static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) +s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) { + s32 status = IXGBE_SUCCESS; u32 autoc = 0; - /* Determine 1G link capabilities off of SFP+ type */ + DEBUGFUNC("ixgbe_get_link_capabilities_82599"); + + + /* Check if 1G SFP module. */ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || @@ -362,13 +417,13 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { *speed = IXGBE_LINK_SPEED_1GB_FULL; *autoneg = true; - return 0; + goto out; } /* * Determine link capabilities based on the stored value of AUTOC, - * which represents EEPROM defaults. If AUTOC value has not been - * stored, use the current register value. + * which represents EEPROM defaults. If AUTOC value has not + * been stored, use the current register values. 
*/ if (hw->mac.orig_link_settings_stored) autoc = hw->mac.orig_autoc; @@ -425,21 +480,26 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, break; default: - return IXGBE_ERR_LINK_SETUP; + status = IXGBE_ERR_LINK_SETUP; + goto out; + break; } if (hw->phy.multispeed_fiber) { *speed |= IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; - /* QSFP must not enable auto-negotiation */ + /* QSFP must not enable full auto-negotiation + * Limited autoneg is enabled at 1G + */ if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp) *autoneg = false; else *autoneg = true; } - return 0; +out: + return status; } /** @@ -448,14 +508,18 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, * * Returns the media type (fiber, copper, backplane) **/ -static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) +enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) { + enum ixgbe_media_type media_type; + + DEBUGFUNC("ixgbe_get_media_type_82599"); + /* Detect if there is a copper PHY attached. 
*/ switch (hw->phy.type) { case ixgbe_phy_cu_unknown: case ixgbe_phy_tn: - return ixgbe_media_type_copper; - + media_type = ixgbe_media_type_copper; + goto out; default: break; } @@ -468,46 +532,50 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: case IXGBE_DEV_ID_82599_XAUI_LOM: /* Default device ID is mezzanine card KX/KX4 */ - return ixgbe_media_type_backplane; - + media_type = ixgbe_media_type_backplane; + break; case IXGBE_DEV_ID_82599_SFP: case IXGBE_DEV_ID_82599_SFP_FCOE: case IXGBE_DEV_ID_82599_SFP_EM: case IXGBE_DEV_ID_82599_SFP_SF2: case IXGBE_DEV_ID_82599_SFP_SF_QP: case IXGBE_DEV_ID_82599EN_SFP: - return ixgbe_media_type_fiber; - + media_type = ixgbe_media_type_fiber; + break; case IXGBE_DEV_ID_82599_CX4: - return ixgbe_media_type_cx4; - + media_type = ixgbe_media_type_cx4; + break; case IXGBE_DEV_ID_82599_T3_LOM: - return ixgbe_media_type_copper; - + media_type = ixgbe_media_type_copper; + break; case IXGBE_DEV_ID_82599_LS: - return ixgbe_media_type_fiber_lco; - + media_type = ixgbe_media_type_fiber_lco; + break; case IXGBE_DEV_ID_82599_QSFP_SF_QP: - return ixgbe_media_type_fiber_qsfp; - + media_type = ixgbe_media_type_fiber_qsfp; + break; default: - return ixgbe_media_type_unknown; + media_type = ixgbe_media_type_unknown; + break; } +out: + return media_type; } /** - * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3 - * @hw: pointer to hardware structure + * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3 + * @hw: pointer to hardware structure * - * Disables link, should be called during D3 power down sequence. + * Disables link during D3 power down sequence. 
* **/ -static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) +void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) { u32 autoc2_reg; u16 ee_ctrl_2 = 0; - hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); + DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599"); + ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); if (!ixgbe_mng_present(hw) && !hw->wol_enabled && ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { @@ -525,20 +593,25 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) * Configures link settings based on values in the ixgbe_hw struct. * Restarts the link. Performs autonegotiation if needed. **/ -static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, +s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, bool autoneg_wait_to_complete) { u32 autoc_reg; u32 links_reg; u32 i; - s32 status = 0; + s32 status = IXGBE_SUCCESS; bool got_lock = false; + DEBUGFUNC("ixgbe_start_mac_link_82599"); + + /* reset_pipeline requires us to hold this lock as it writes to + * AUTOC. + */ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { status = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (status) - return status; + IXGBE_GSSR_MAC_CSR_SM); + if (status != IXGBE_SUCCESS) + goto out; got_lock = true; } @@ -563,18 +636,19 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_reg & IXGBE_LINKS_KX_AN_COMP) break; - msleep(100); + msec_delay(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; - hw_dbg(hw, "Autoneg did not complete.\n"); + DEBUGOUT("Autoneg did not complete.\n"); } } } /* Add delay to filter out noises during initial link setup */ - msleep(50); + msec_delay(50); +out: return status; } @@ -586,7 +660,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, * PHY states. This includes selectively shutting down the Tx * laser on the PHY, effectively halting physical link. 
**/ -static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); @@ -594,11 +668,11 @@ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) if (ixgbe_check_reset_blocked(hw)) return; - /* Disable tx laser; allow 100us to go dark per spec */ + /* Disable Tx laser; allow 100us to go dark per spec */ esdp_reg |= IXGBE_ESDP_SDP3; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); - udelay(100); + usec_delay(100); } /** @@ -609,15 +683,15 @@ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) * PHY states. This includes selectively turning on the Tx * laser on the PHY, effectively starting physical link. **/ -static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); - /* Enable tx laser; allow 100ms to light up */ + /* Enable Tx laser; allow 100ms to light up */ esdp_reg &= ~IXGBE_ESDP_SDP3; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); - msleep(100); + msec_delay(100); } /** @@ -627,13 +701,15 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) * When the driver changes the link speeds that it can support, * it sets autotry_restart to true to indicate that we need to * initiate a new autotry session with the link partner. To do - * so, we set the speed then disable and re-enable the tx laser, to + * so, we set the speed then disable and re-enable the Tx laser, to * alert the link partner that it also needs to restart autotry on its * end. This is consistent with true clause 37 autoneg, which also * involves a loss of signal. 
**/ -static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { + DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber"); + /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) return; @@ -646,14 +722,14 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) } /** - * ixgbe_set_hard_rate_select_speed - Set module link speed - * @hw: pointer to hardware structure - * @speed: link speed to set + * ixgbe_set_hard_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set * - * Set module link speed via RS0/RS1 rate select pins. + * Set module link speed via RS0/RS1 rate select pins. */ -static void -ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) +void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); @@ -666,7 +742,7 @@ ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) esdp_reg |= IXGBE_ESDP_SDP5_DIR; break; default: - hw_dbg(hw, "Invalid fixed module speed\n"); + DEBUGOUT("Invalid fixed module speed\n"); return; } @@ -682,16 +758,18 @@ ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) * * Implements the Intel SmartSpeed algorithm. 
**/ -static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) +s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { - s32 status = 0; + s32 status = IXGBE_SUCCESS; ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; s32 i, j; bool link_up = false; u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + DEBUGFUNC("ixgbe_setup_mac_link_smartspeed"); + /* Set autoneg_advertised value based on input link speed */ hw->phy.autoneg_advertised = 0; @@ -716,7 +794,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { status = ixgbe_setup_mac_link_82599(hw, speed, autoneg_wait_to_complete); - if (status != 0) + if (status != IXGBE_SUCCESS) goto out; /* @@ -726,12 +804,12 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, * Table 9 in the AN MAS. */ for (i = 0; i < 5; i++) { - mdelay(100); + msec_delay(100); /* If we have link, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, - &link_up, false); - if (status != 0) + status = ixgbe_check_link(hw, &link_speed, &link_up, + false); + if (status != IXGBE_SUCCESS) goto out; if (link_up) @@ -751,7 +829,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, hw->phy.smart_speed_active = true; status = ixgbe_setup_mac_link_82599(hw, speed, autoneg_wait_to_complete); - if (status != 0) + if (status != IXGBE_SUCCESS) goto out; /* @@ -761,12 +839,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, * connect attempts as defined in the AN MAS table 73-7. 
*/ for (i = 0; i < 6; i++) { - mdelay(100); + msec_delay(100); /* If we have link, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, - &link_up, false); - if (status != 0) + status = ixgbe_check_link(hw, &link_speed, &link_up, false); + if (status != IXGBE_SUCCESS) goto out; if (link_up) @@ -780,7 +857,8 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, out: if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) - hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n"); + DEBUGOUT("Smartspeed has downgraded the link speed " + "from the maximum advertised\n"); return status; } @@ -792,34 +870,35 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, * * Set the link speed in the AUTOC register and restarts link. **/ -static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) +s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { bool autoneg = false; - s32 status; - u32 pma_pmd_1g, link_mode, links_reg, i; + s32 status = IXGBE_SUCCESS; + u32 pma_pmd_1g, link_mode; + u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */ + u32 orig_autoc = 0; /* holds the cached value of AUTOC register */ + u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; + u32 links_reg; + u32 i; ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; - /* holds the value of AUTOC register at this current point in time */ - u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - /* holds the cached value of AUTOC register */ - u32 orig_autoc = 0; - /* temporary variable used for comparison purposes */ - u32 autoc = current_autoc; + DEBUGFUNC("ixgbe_setup_mac_link_82599"); /* Check to 
see if speed passed in is supported. */ - status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, - &autoneg); + status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); if (status) - return status; + goto out; speed &= link_capabilities; - if (speed == IXGBE_LINK_SPEED_UNKNOWN) - return IXGBE_ERR_LINK_SETUP; + if (speed == IXGBE_LINK_SPEED_UNKNOWN) { + status = IXGBE_ERR_LINK_SETUP; + goto out; + } /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ if (hw->mac.orig_link_settings_stored) @@ -859,7 +938,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { autoc &= ~IXGBE_AUTOC_LMS_MASK; - if (autoneg) + if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel) autoc |= IXGBE_AUTOC_LMS_1G_AN; else autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; @@ -869,8 +948,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, if (autoc != current_autoc) { /* Restart link */ status = hw->mac.ops.prot_autoc_write(hw, autoc, false); - if (status) - return status; + if (status != IXGBE_SUCCESS) + goto out; /* Only poll for autoneg to complete if specified to do so */ if (autoneg_wait_to_complete) { @@ -883,20 +962,21 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_reg & IXGBE_LINKS_KX_AN_COMP) break; - msleep(100); + msec_delay(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; - hw_dbg(hw, "Autoneg did not complete.\n"); + DEBUGOUT("Autoneg did not complete.\n"); } } } /* Add delay to filter out noises during initial link setup */ - msleep(50); + msec_delay(50); } +out: return status; } @@ -908,12 +988,14 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, * * Restarts link on PHY and MAC based on settings passed in. 
**/ -static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, +STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { s32 status; + DEBUGFUNC("ixgbe_setup_copper_link_82599"); + /* Setup the PHY according to input speed */ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); @@ -931,18 +1013,21 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, * and clears all interrupts, perform a PHY reset, and perform a link (MAC) * reset. **/ -static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) +s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) { ixgbe_link_speed link_speed; s32 status; - u32 ctrl, i, autoc, autoc2; + u32 ctrl = 0; + u32 i, autoc, autoc2; u32 curr_lms; bool link_up = false; + DEBUGFUNC("ixgbe_reset_hw_82599"); + /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); - if (status) - return status; + if (status != IXGBE_SUCCESS) + goto reset_hw_out; /* flush pending Tx transactions */ ixgbe_clear_tx_pending(hw); @@ -953,7 +1038,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) status = hw->phy.ops.init(hw); if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) - return status; + goto reset_hw_out; /* Setup SFP module if there is one present. */ if (hw->phy.sfp_setup_needed) { @@ -962,7 +1047,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) } if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) - return status; + goto reset_hw_out; /* Reset PHY */ if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) @@ -973,7 +1058,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) mac_reset_top: /* - * Issue global reset to the MAC. Needs to be SW reset if link is up. + * Issue global reset to the MAC. Needs to be SW reset if link is up. * If link reset is used when link is up, it might reset the PHY when * mng is using it. If link is down or the flag to force full link * reset is set, then perform link reset. 
@@ -988,27 +1073,26 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); - usleep_range(1000, 1200); - /* Poll for reset bit to self-clear indicating reset is complete */ + /* Poll for reset bit to self-clear meaning reset is complete */ for (i = 0; i < 10; i++) { + usec_delay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; - udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { status = IXGBE_ERR_RESET_FAILED; - hw_dbg(hw, "Reset polling failed to complete.\n"); + DEBUGOUT("Reset polling failed to complete.\n"); } - msleep(50); + msec_delay(50); /* * Double resets are required for recovery from certain error - * conditions. Between resets, it is necessary to stall to allow time - * for any pending HW events to complete. + * conditions. Between resets, it is necessary to stall to + * allow time for any pending HW events to complete. */ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; @@ -1040,7 +1124,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) * doesn't autoneg with out driver support we need to * leave LMS in the state it was before we MAC reset. * Likewise if we support WoL we don't want change the - * LMS state either. + * LMS state. 
*/ if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) || hw->wol_enabled) @@ -1052,8 +1136,8 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) status = hw->mac.ops.prot_autoc_write(hw, hw->mac.orig_autoc, false); - if (status) - return status; + if (status != IXGBE_SUCCESS) + goto reset_hw_out; } if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != @@ -1080,7 +1164,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); /* Add the SAN MAC address to the RAR only if it's a valid address */ - if (is_valid_ether_addr(hw->mac.san_addr)) { + if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; @@ -1097,8 +1181,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) /* Store the alternative WWNN/WWPN prefix */ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, - &hw->mac.wwpn_prefix); + &hw->mac.wwpn_prefix); +reset_hw_out: return status; } @@ -1107,15 +1192,15 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * @fdircmd: current value of FDIRCMD register */ -static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) +STATIC s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) { int i; for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK)) - return 0; - udelay(10); + return IXGBE_SUCCESS; + usec_delay(10); } return IXGBE_ERR_FDIR_CMD_INCOMPLETE; @@ -1127,20 +1212,21 @@ static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) **/ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) { + s32 err; int i; u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); u32 fdircmd; - s32 err; - fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; + DEBUGFUNC("ixgbe_reinit_fdir_tables_82599"); + /* * Before starting reinitialization process, * FDIRCMD.CMD must be zero. 
*/ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); if (err) { - hw_dbg(hw, "Flow Director previous command did not complete, aborting table re-initialization.\n"); + DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n"); return err; } @@ -1176,10 +1262,10 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & IXGBE_FDIRCTRL_INIT_DONE) break; - usleep_range(1000, 2000); + msec_delay(1); } if (i >= IXGBE_FDIR_INIT_DONE_POLL) { - hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); + DEBUGOUT("Flow Director Signature poll time exceeded!\n"); return IXGBE_ERR_FDIR_REINIT_FAILED; } @@ -1190,7 +1276,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) IXGBE_READ_REG(hw, IXGBE_FDIRMISS); IXGBE_READ_REG(hw, IXGBE_FDIRLEN); - return 0; + return IXGBE_SUCCESS; } /** @@ -1198,10 +1284,12 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * @fdirctrl: value to write to flow director control register **/ -static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) +STATIC void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) { int i; + DEBUGFUNC("ixgbe_fdir_enable_82599"); + /* Prime the keys for hashing */ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); @@ -1225,21 +1313,23 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & IXGBE_FDIRCTRL_INIT_DONE) break; - usleep_range(1000, 2000); + msec_delay(1); } if (i >= IXGBE_FDIR_INIT_DONE_POLL) - hw_dbg(hw, "Flow Director poll time exceeded!\n"); + DEBUGOUT("Flow Director poll time exceeded!\n"); } /** * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters * @hw: pointer to hardware structure * @fdirctrl: value to write to flow director control register, initially - * contains just the value of 
the Rx packet buffer allocation + * contains just the value of the Rx packet buffer allocation **/ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) { + DEBUGFUNC("ixgbe_init_fdir_signature_82599"); + /* * Continue setup of fdirctrl register bits: * Move the flexible bytes to use the ethertype - shift 6 words @@ -1253,35 +1343,79 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) /* write hashes and fdirctrl register, poll for completion */ ixgbe_fdir_enable_82599(hw, fdirctrl); - return 0; + return IXGBE_SUCCESS; } /** * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters * @hw: pointer to hardware structure * @fdirctrl: value to write to flow director control register, initially - * contains just the value of the Rx packet buffer allocation + * contains just the value of the Rx packet buffer allocation + * @cloud_mode: true - cloud mode, false - other mode **/ -s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl, + bool cloud_mode) { + DEBUGFUNC("ixgbe_init_fdir_perfect_82599"); + /* * Continue setup of fdirctrl register bits: * Turn perfect match filtering on - * Initialize the drop queue + * Report hash in RSS field of Rx wb descriptor + * Initialize the drop queue to queue 127 * Move the flexible bytes to use the ethertype - shift 6 words * Set the maximum length per hash bucket to 0xA filters * Send interrupt when 64 (0x4 * 16) filters are left */ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | + IXGBE_FDIRCTRL_REPORT_STATUS | (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + if (cloud_mode) + fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD << + IXGBE_FDIRCTRL_FILTERMODE_SHIFT); + /* write hashes and fdirctrl register, poll for completion */ ixgbe_fdir_enable_82599(hw, fdirctrl); - return 0; 
+ return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue + * @hw: pointer to hardware structure + * @dropqueue: Rx queue index used for the dropped packets + **/ +void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue) +{ + u32 fdirctrl; + + DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599"); + /* Clear init done bit and drop queue field */ + fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE); + + /* Set drop queue */ + fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT); + if ((hw->mac.type == ixgbe_mac_X550) || + (hw->mac.type == ixgbe_mac_X550EM_x) || + (hw->mac.type == ixgbe_mac_X550EM_a)) + fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | + IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & + ~IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + + /* write hashes and fdirctrl register, poll for completion */ + ixgbe_fdir_enable_82599(hw, fdirctrl); } /* @@ -1294,17 +1428,17 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ do { \ u32 n = (_n); \ - if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \ + if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ common_hash ^= lo_hash_dword >> n; \ - else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ bucket_hash ^= lo_hash_dword >> n; \ - else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ sig_hash ^= lo_hash_dword << (16 - n); \ - if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \ + if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ common_hash ^= hi_hash_dword >> n; \ - else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ bucket_hash ^= 
hi_hash_dword >> n; \ - else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ sig_hash ^= hi_hash_dword << (16 - n); \ } while (0) @@ -1313,22 +1447,22 @@ do { \ * @stream: input bitstream to compute the hash on * * This function is almost identical to the function above but contains - * several optomizations such as unwinding all of the loops, letting the + * several optimizations such as unwinding all of the loops, letting the * compiler work out all of the conditional ifs since the keys are static * defines, and computing two keys at once since the hashed dword stream * will be the same for both keys. **/ -static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, - union ixgbe_atr_hash_dword common) +u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common) { u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; /* record the flow_vm_vlan bits as they are a key part to the hash */ - flow_vm_vlan = ntohl(input.dword); + flow_vm_vlan = IXGBE_NTOHL(input.dword); /* generate common hash dword */ - hi_hash_dword = ntohl(common.dword); + hi_hash_dword = IXGBE_NTOHL(common.dword); /* low dword is word swapped version of common */ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); @@ -1342,7 +1476,7 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, /* * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to * delay this because bit 0 of the stream should not be processed - * so we do not add the vlan until after bit 0 was processed + * so we do not add the VLAN until after bit 0 was processed */ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); @@ -1384,19 +1518,22 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, * Note that the tunnel bit in input must not be set when the hardware * tunneling support does 
not exist. **/ -s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_hash_dword input, - union ixgbe_atr_hash_dword common, - u8 queue) +void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common, + u8 queue) { u64 fdirhashcmd; u8 flow_type; bool tunnel; u32 fdircmd; + DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599"); + /* * Get the flow_type in order to program FDIRCMD properly * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 + * fifth is FDIRCMD.TUNNEL_FILTER */ tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK); flow_type = input.formatted.flow_type & @@ -1410,8 +1547,8 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, case IXGBE_ATR_FLOW_TYPE_SCTPV6: break; default: - hw_dbg(hw, " Error on flow type input\n"); - return IXGBE_ERR_CONFIG; + DEBUGOUT(" Error on flow type input\n"); + return; } /* configure FDIRCMD register */ @@ -1430,17 +1567,17 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); - hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); + DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); - return 0; + return; } #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ do { \ u32 n = (_n); \ - if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \ + if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ bucket_hash ^= lo_hash_dword >> n; \ - if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \ + if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ bucket_hash ^= hi_hash_dword >> n; \ } while (0) @@ -1460,20 +1597,21 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, { u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; - u32 bucket_hash = 0, hi_dword = 0; - int i; + u32 bucket_hash = 0; + u32 hi_dword = 0; + u32 i = 0; /* Apply masks to input data */ - for (i = 
0; i <= 10; i++) - input->dword_stream[i] &= input_mask->dword_stream[i]; + for (i = 0; i < 14; i++) + input->dword_stream[i] &= input_mask->dword_stream[i]; /* record the flow_vm_vlan bits as they are a key part to the hash */ - flow_vm_vlan = ntohl(input->dword_stream[0]); + flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]); /* generate common hash dword */ - for (i = 1; i <= 10; i++) + for (i = 1; i <= 13; i++) hi_dword ^= input->dword_stream[i]; - hi_hash_dword = ntohl(hi_dword); + hi_hash_dword = IXGBE_NTOHL(hi_dword); /* low dword is word swapped version of common */ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); @@ -1487,7 +1625,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, /* * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to * delay this because bit 0 of the stream should not be processed - * so we do not add the vlan until after bit 0 was processed + * so we do not add the VLAN until after bit 0 was processed */ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); @@ -1503,7 +1641,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, } /** - * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks + * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks * @input_mask: mask to be bit swapped * * The source and destination port masks for flow director are bit swapped @@ -1511,12 +1649,11 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, * generate a correctly swapped value we need to bit swap the mask and that * is what is accomplished by this function. 
**/ -static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) +STATIC u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) { - u32 mask = ntohs(input_mask->formatted.dst_port); - + u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port); mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; - mask |= ntohs(input_mask->formatted.src_port); + mask |= IXGBE_NTOHS(input_mask->formatted.src_port); mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); @@ -1535,17 +1672,19 @@ static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) #define IXGBE_WRITE_REG_BE32(a, reg, value) \ - IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value))) + IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value))) #define IXGBE_STORE_AS_BE16(_value) \ - ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8)) + IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8)) s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input_mask) + union ixgbe_atr_input *input_mask, bool cloud_mode) { /* mask IPv6 since it is currently not supported */ u32 fdirm = IXGBE_FDIRM_DIPv6; u32 fdirtcpm; + u32 fdirip6m; + DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599"); /* * Program the relevant mask registers. 
If src/dst_port or src/dst_addr @@ -1559,7 +1698,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, /* verify bucket hash is cleared on hash generation */ if (input_mask->formatted.bkt_hash) - hw_dbg(hw, " bucket hash should always be 0 in mask\n"); + DEBUGOUT(" bucket hash should always be 0 in mask\n"); /* Program FDIRM and verify partial masks */ switch (input_mask->formatted.vm_pool & 0x7F) { @@ -1568,7 +1707,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, case 0x7F: break; default: - hw_dbg(hw, " Error on vm pool mask\n"); + DEBUGOUT(" Error on vm pool mask\n"); return IXGBE_ERR_CONFIG; } @@ -1577,17 +1716,17 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, fdirm |= IXGBE_FDIRM_L4P; if (input_mask->formatted.dst_port || input_mask->formatted.src_port) { - hw_dbg(hw, " Error on src/dst port mask\n"); + DEBUGOUT(" Error on src/dst port mask\n"); return IXGBE_ERR_CONFIG; } case IXGBE_ATR_L4TYPE_MASK: break; default: - hw_dbg(hw, " Error on flow type mask\n"); + DEBUGOUT(" Error on flow type mask\n"); return IXGBE_ERR_CONFIG; } - switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) { + switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) { case 0x0000: /* mask VLAN ID */ fdirm |= IXGBE_FDIRM_VLANID; @@ -1604,7 +1743,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, /* no VLAN fields masked */ break; default: - hw_dbg(hw, " Error on VLAN mask\n"); + DEBUGOUT(" Error on VLAN mask\n"); return IXGBE_ERR_CONFIG; } @@ -1616,73 +1755,160 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, case 0xFFFF: break; default: - hw_dbg(hw, " Error on flexible byte mask\n"); + DEBUGOUT(" Error on flexible byte mask\n"); return IXGBE_ERR_CONFIG; } - /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); + if (cloud_mode) { + fdirm |= IXGBE_FDIRM_L3P; + fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT); + fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK; - /* store the TCP/UDP 
port masks, bit reversed from port layout */ - fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); + switch (input_mask->formatted.inner_mac[0] & 0xFF) { + case 0x00: + /* Mask inner MAC, fall through */ + fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC; + case 0xFF: + break; + default: + DEBUGOUT(" Error on inner_mac byte mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) { + case 0x0: + /* Mask vxlan id */ + fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI; + break; + case 0x00FFFFFF: + fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24; + break; + case 0xFFFFFFFF: + break; + default: + DEBUGOUT(" Error on TNI/VNI byte mask\n"); + return IXGBE_ERR_CONFIG; + } - /* write both the same so that UDP and TCP use the same mask */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); - IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); + switch (input_mask->formatted.tunnel_type & 0xFFFF) { + case 0x0: + /* Mask turnnel type, fall through */ + fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE; + case 0xFFFF: + break; + default: + DEBUGOUT(" Error on tunnel type byte mask\n"); + return IXGBE_ERR_CONFIG; + } + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m); - /* also use it for SCTP */ - switch (hw->mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); - break; - default: - break; + /* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM, + * FDIRSIP4M and FDIRDIP4M in cloud mode to allow + * L3/L3 packets to tunnel. 
+ */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF); + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF); + break; + default: + break; + } } - /* store source and destination IP masks (big-enian) */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, - ~input_mask->formatted.src_ip[0]); - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, - ~input_mask->formatted.dst_ip[0]); + /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); + + if (!cloud_mode) { + /* store the TCP/UDP port masks, bit reversed from port + * layout */ + fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); + + /* write both the same so that UDP and TCP use the same mask */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); + /* also use it for SCTP */ + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); + break; + default: + break; + } - return 0; + /* store source and destination IP masks (big-enian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, + ~input_mask->formatted.src_ip[0]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, + ~input_mask->formatted.dst_ip[0]); + } + return IXGBE_SUCCESS; } s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, - u16 soft_id, u8 queue) + u16 soft_id, u8 queue, bool cloud_mode) { u32 fdirport, fdirvlan, fdirhash, fdircmd; + u32 addr_low, addr_high; + u32 cloud_type = 0; s32 err; - /* currently IPv6 is not supported, must be programmed with 0 */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), - input->formatted.src_ip[0]); - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), - 
input->formatted.src_ip[1]); - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), - input->formatted.src_ip[2]); - - /* record the source address (big-endian) */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); - - /* record the first 32 bits of the destination address (big-endian) */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); - - /* record source and destination port (little-endian)*/ - fdirport = ntohs(input->formatted.dst_port); - fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; - fdirport |= ntohs(input->formatted.src_port); - IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); + DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599"); + if (!cloud_mode) { + /* currently IPv6 is not supported, must be programmed with 0 */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), + input->formatted.src_ip[0]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), + input->formatted.src_ip[1]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), + input->formatted.src_ip[2]); + + /* record the source address (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, + input->formatted.src_ip[0]); + + /* record the first 32 bits of the destination address + * (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, + input->formatted.dst_ip[0]); + + /* record source and destination port (little-endian)*/ + fdirport = IXGBE_NTOHS(input->formatted.dst_port); + fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; + fdirport |= IXGBE_NTOHS(input->formatted.src_port); + IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); + } - /* record vlan (little-endian) and flex_bytes(big-endian) */ + /* record VLAN (little-endian) and flex_bytes(big-endian) */ fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; - fdirvlan |= ntohs(input->formatted.vlan_id); + fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id); IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); + if (cloud_mode) { + if (input->formatted.tunnel_type != 0) + cloud_type = 
0x80000000; + + addr_low = ((u32)input->formatted.inner_mac[0] | + ((u32)input->formatted.inner_mac[1] << 8) | + ((u32)input->formatted.inner_mac[2] << 16) | + ((u32)input->formatted.inner_mac[3] << 24)); + addr_high = ((u32)input->formatted.inner_mac[4] | + ((u32)input->formatted.inner_mac[5] << 8)); + cloud_type |= addr_high; + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni); + } + /* configure FDIRHASH register */ fdirhash = input->formatted.bkt_hash; fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; @@ -1699,6 +1925,8 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; if (queue == IXGBE_FDIR_DROP_QUEUE) fdircmd |= IXGBE_FDIRCMD_DROP; + if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK) + fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; @@ -1706,11 +1934,11 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); if (err) { - hw_dbg(hw, "Flow Director command did not complete!\n"); + DEBUGOUT("Flow Director command did not complete!\n"); return err; } - return 0; + return IXGBE_SUCCESS; } s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, @@ -1734,7 +1962,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); if (err) { - hw_dbg(hw, "Flow Director command did not complete!\n"); + DEBUGOUT("Flow Director command did not complete!\n"); return err; } @@ -1746,7 +1974,72 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, IXGBE_FDIRCMD_CMD_REMOVE_FLOW); } - return 0; + 
return IXGBE_SUCCESS; +} + +/** + * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter + * @hw: pointer to hardware structure + * @input: input bitstream + * @input_mask: mask for the input bitstream + * @soft_id: software index for the filters + * @queue: queue index to direct traffic to + * + * Note that the caller to this function must lock before calling, since the + * hardware writes must be protected from one another. + **/ +s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + union ixgbe_atr_input *input_mask, + u16 soft_id, u8 queue, bool cloud_mode) +{ + s32 err = IXGBE_ERR_CONFIG; + + DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599"); + + /* + * Check flow_type formatting, and bail out before we touch the hardware + * if there's a configuration issue + */ + switch (input->formatted.flow_type) { + case IXGBE_ATR_FLOW_TYPE_IPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4: + input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK; + if (input->formatted.dst_port || input->formatted.src_port) { + DEBUGOUT(" Error on src/dst port\n"); + return IXGBE_ERR_CONFIG; + } + break; + case IXGBE_ATR_FLOW_TYPE_SCTPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4: + if (input->formatted.dst_port || input->formatted.src_port) { + DEBUGOUT(" Error on src/dst port\n"); + return IXGBE_ERR_CONFIG; + } + /* fall through */ + case IXGBE_ATR_FLOW_TYPE_TCPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4: + case IXGBE_ATR_FLOW_TYPE_UDPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4: + input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + break; + default: + DEBUGOUT(" Error on flow type input\n"); + return err; + } + + /* program input mask into the HW */ + err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode); + if (err) + return err; + + /* apply mask and compute/store hash */ + ixgbe_atr_compute_perfect_hash_82599(input, input_mask); + + /* program filters to filter memory */ + return 
ixgbe_fdir_write_perfect_filter_82599(hw, input, + soft_id, queue, cloud_mode); } /** @@ -1757,18 +2050,20 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, * * Performs read operation to Omer analog register specified. **/ -static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) +s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) { u32 core_ctl; + DEBUGFUNC("ixgbe_read_analog_reg8_82599"); + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | (reg << 8)); IXGBE_WRITE_FLUSH(hw); - udelay(10); + usec_delay(10); core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); *val = (u8)core_ctl; - return 0; + return IXGBE_SUCCESS; } /** @@ -1779,16 +2074,18 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) * * Performs write operation to Omer analog register specified. **/ -static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) +s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) { u32 core_ctl; + DEBUGFUNC("ixgbe_write_analog_reg8_82599"); + core_ctl = (reg << 8) | val; IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); IXGBE_WRITE_FLUSH(hw); - udelay(10); + usec_delay(10); - return 0; + return IXGBE_SUCCESS; } /** @@ -1799,22 +2096,27 @@ static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) * and the generation start_hw function. * Then performs revision-specific operations, if any. 
**/ -static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) +s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) { - s32 ret_val = 0; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_hw_82599"); ret_val = ixgbe_start_hw_generic(hw); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; ret_val = ixgbe_start_hw_gen2(hw); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; /* We need to run link autotry after the driver loads */ hw->mac.autotry_restart = true; - return ixgbe_verify_fw_version_82599(hw); + if (ret_val == IXGBE_SUCCESS) + ret_val = ixgbe_verify_fw_version_82599(hw); +out: + return ret_val; } /** @@ -1825,23 +2127,26 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) * If PHY already detected, maintains current PHY type in hw struct, * otherwise executes the PHY detection routine. **/ -static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) +s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) { s32 status; + DEBUGFUNC("ixgbe_identify_phy_82599"); + /* Detect PHY if not unknown - returns success if already detected. */ status = ixgbe_identify_phy_generic(hw); - if (status) { + if (status != IXGBE_SUCCESS) { /* 82599 10GBASE-T requires an external PHY */ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) return status; - status = ixgbe_identify_module_generic(hw); + else + status = ixgbe_identify_module_generic(hw); } /* Set PHY type none if no PHY detected */ if (hw->phy.type == ixgbe_phy_unknown) { hw->phy.type = ixgbe_phy_none; - status = 0; + return IXGBE_SUCCESS; } /* Return error if SFP module has been detected but is not supported */ @@ -1851,6 +2156,93 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) return status; } +/** + * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. 
+ **/ +u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) +{ + u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; + u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_support_physical_layer_82599"); + + hw->phy.ops.identify(hw); + + switch (hw->phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_cu_unknown: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + goto out; + default: + break; + } + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_AN: + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | + IXGBE_PHYSICAL_LAYER_1000BASE_BX; + goto out; + } else + /* SFI mode so read SFP module */ + goto sfp_check; + break; + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; + else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; + goto out; + break; + case IXGBE_AUTOC_LMS_10G_SERIAL: + if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; + goto out; + } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) + goto 
sfp_check; + break; + case IXGBE_AUTOC_LMS_KX4_KX_KR: + case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: + if (autoc & IXGBE_AUTOC_KX_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + if (autoc & IXGBE_AUTOC_KR_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; + goto out; + break; + default: + goto out; + break; + } + +sfp_check: + /* SFP check must be done last since DA modules are sometimes used to + * test KR mode - we need to id KR mode correctly before SFP module. + * Call identify_sfp because the pluggable module may have changed */ + physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw); +out: + return physical_layer; +} + /** * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 * @hw: pointer to hardware structure @@ -1858,28 +2250,32 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) * * Enables the Rx DMA unit for 82599 **/ -static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) +s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) { + + DEBUGFUNC("ixgbe_enable_rx_dma_82599"); + /* * Workaround for 82599 silicon errata when enabling the Rx datapath. * If traffic is incoming before we enable the Rx unit, it could hang * the Rx DMA unit. Therefore, make sure the security engine is * completely disabled prior to enabling the Rx unit. 
*/ - hw->mac.ops.disable_rx_buff(hw); + + hw->mac.ops.disable_sec_rx_path(hw); if (regval & IXGBE_RXCTRL_RXEN) - hw->mac.ops.enable_rx(hw); + ixgbe_enable_rx(hw); else - hw->mac.ops.disable_rx(hw); + ixgbe_disable_rx(hw); - hw->mac.ops.enable_rx_buff(hw); + hw->mac.ops.enable_sec_rx_path(hw); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_verify_fw_version_82599 - verify fw version for 82599 + * ixgbe_verify_fw_version_82599 - verify FW version for 82599 * @hw: pointer to hardware structure * * Verifies that installed the firmware version is 0.6 or higher @@ -1888,46 +2284,58 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or * if the FW version is not supported. **/ -static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) +STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_EEPROM_VERSION; u16 fw_offset, fw_ptp_cfg_offset; - u16 offset; - u16 fw_version = 0; + u16 fw_version; + + DEBUGFUNC("ixgbe_verify_fw_version_82599"); /* firmware check is only necessary for SFI devices */ - if (hw->phy.media_type != ixgbe_media_type_fiber) - return 0; + if (hw->phy.media_type != ixgbe_media_type_fiber) { + status = IXGBE_SUCCESS; + goto fw_version_out; + } /* get the offset to the Firmware Module block */ - offset = IXGBE_FW_PTR; - if (hw->eeprom.ops.read(hw, offset, &fw_offset)) - goto fw_version_err; - - if (fw_offset == 0 || fw_offset == 0xFFFF) + if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", IXGBE_FW_PTR); return IXGBE_ERR_EEPROM_VERSION; + } - /* get the offset to the Pass Through Patch Configuration block */ - offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR; - if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset)) - goto fw_version_err; + if ((fw_offset == 0) || (fw_offset == 0xFFFF)) + goto fw_version_out; - if (fw_ptp_cfg_offset == 0 || 
fw_ptp_cfg_offset == 0xFFFF) + /* get the offset to the Pass Through Patch Configuration block */ + if (hw->eeprom.ops.read(hw, (fw_offset + + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), + &fw_ptp_cfg_offset)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + fw_offset + + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR); return IXGBE_ERR_EEPROM_VERSION; + } + + if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) + goto fw_version_out; /* get the firmware version */ - offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4; - if (hw->eeprom.ops.read(hw, offset, &fw_version)) - goto fw_version_err; + if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + + IXGBE_FW_PATCH_VERSION_4), &fw_version)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4); + return IXGBE_ERR_EEPROM_VERSION; + } if (fw_version > 0x5) - status = 0; + status = IXGBE_SUCCESS; +fw_version_out: return status; - -fw_version_err: - hw_err(hw, "eeprom read at offset %d failed\n", offset); - return IXGBE_ERR_EEPROM_VERSION; } /** @@ -1937,35 +2345,41 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) * Returns true if the LESM FW module is present and enabled. Otherwise * returns false. Smart Speed must be disabled if LESM FW module is enabled. 
**/ -static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) +bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) { + bool lesm_enabled = false; u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; s32 status; + DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599"); + /* get the offset to the Firmware Module block */ status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); - if (status || fw_offset == 0 || fw_offset == 0xFFFF) - return false; + if ((status != IXGBE_SUCCESS) || + (fw_offset == 0) || (fw_offset == 0xFFFF)) + goto out; /* get the offset to the LESM Parameters block */ status = hw->eeprom.ops.read(hw, (fw_offset + IXGBE_FW_LESM_PARAMETERS_PTR), &fw_lesm_param_offset); - if (status || - fw_lesm_param_offset == 0 || fw_lesm_param_offset == 0xFFFF) - return false; + if ((status != IXGBE_SUCCESS) || + (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) + goto out; - /* get the lesm state word */ + /* get the LESM state word */ status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + IXGBE_FW_LESM_STATE_1), &fw_lesm_state); - if (!status && (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) - return true; + if ((status == IXGBE_SUCCESS) && + (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) + lesm_enabled = true; - return false; +out: + return lesm_enabled; } /** @@ -1979,20 +2393,28 @@ static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) * * Retrieves 16 bit word(s) read from EEPROM **/ -static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, +STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val = IXGBE_ERR_CONFIG; - /* If EEPROM is detected and can be addressed using 14 bits, + DEBUGFUNC("ixgbe_read_eeprom_buffer_82599"); + + /* + * If EEPROM is detected and can be addressed using 14 bits, * use EERD otherwise use bit bang */ - if (eeprom->type == ixgbe_eeprom_spi && - offset + (words - 1) <= 
IXGBE_EERD_MAX_ADDR) - return ixgbe_read_eerd_buffer_generic(hw, offset, words, data); - - return ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, words, + if ((eeprom->type == ixgbe_eeprom_spi) && + (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)) + ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words, data); + else + ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, + words, + data); + + return ret_val; } /** @@ -2005,31 +2427,36 @@ static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, * * Reads a 16 bit word from the EEPROM **/ -static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, +STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, u16 offset, u16 *data) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val = IXGBE_ERR_CONFIG; + + DEBUGFUNC("ixgbe_read_eeprom_82599"); /* * If EEPROM is detected and can be addressed using 14 bits, * use EERD otherwise use bit bang */ - if (eeprom->type == ixgbe_eeprom_spi && offset <= IXGBE_EERD_MAX_ADDR) - return ixgbe_read_eerd_generic(hw, offset, data); + if ((eeprom->type == ixgbe_eeprom_spi) && + (offset <= IXGBE_EERD_MAX_ADDR)) + ret_val = ixgbe_read_eerd_generic(hw, offset, data); + else + ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); - return ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); + return ret_val; } /** * ixgbe_reset_pipeline_82599 - perform pipeline reset * - * @hw: pointer to hardware structure + * @hw: pointer to hardware structure * * Reset pipeline by asserting Restart_AN together with LMS change to ensure - * full pipeline reset. Note - We must hold the SW/FW semaphore before writing - * to AUTOC, so this function assumes the semaphore is held. + * full pipeline reset. This function assumes the SW/FW lock is held. 
**/ -static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) +s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) { s32 ret_val; u32 anlp1_reg = 0; @@ -2045,26 +2472,24 @@ static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); autoc_reg |= IXGBE_AUTOC_AN_RESTART; - /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); - /* Wait for AN to leave state 0 */ for (i = 0; i < 10; i++) { - usleep_range(4000, 8000); + msec_delay(4); anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) break; } if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) { - hw_dbg(hw, "auto negotiation not completed\n"); + DEBUGOUT("auto negotiation not completed\n"); ret_val = IXGBE_ERR_RESET_FAILED; goto reset_pipeline_out; } - ret_val = 0; + ret_val = IXGBE_SUCCESS; reset_pipeline_out: /* Write AUTOC register with original LMS field and Restart_AN */ @@ -2083,14 +2508,16 @@ static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. **/ -static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data) +STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) { u32 esdp; s32 status; s32 timeout = 200; - if (hw->phy.qsfp_shared_i2c_bus == true) { + DEBUGFUNC("ixgbe_read_i2c_byte_82599"); + + if (hw->phy.qsfp_shared_i2c_bus == TRUE) { /* Acquire I2C bus ownership. 
*/ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp |= IXGBE_ESDP_SDP0; @@ -2102,12 +2529,13 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, if (esdp & IXGBE_ESDP_SDP1) break; - usleep_range(5000, 10000); + msec_delay(5); timeout--; } if (!timeout) { - hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); + DEBUGOUT("Driver can't access resource," + " acquiring I2C bus timeout.\n"); status = IXGBE_ERR_I2C; goto release_i2c_access; } @@ -2116,7 +2544,8 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data); release_i2c_access: - if (hw->phy.qsfp_shared_i2c_bus == true) { + + if (hw->phy.qsfp_shared_i2c_bus == TRUE) { /* Release I2C bus ownership. */ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp &= ~IXGBE_ESDP_SDP0; @@ -2136,14 +2565,16 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. **/ -static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data) +STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) { u32 esdp; s32 status; s32 timeout = 200; - if (hw->phy.qsfp_shared_i2c_bus == true) { + DEBUGFUNC("ixgbe_write_i2c_byte_82599"); + + if (hw->phy.qsfp_shared_i2c_bus == TRUE) { /* Acquire I2C bus ownership. 
*/ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp |= IXGBE_ESDP_SDP0; @@ -2155,12 +2586,13 @@ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, if (esdp & IXGBE_ESDP_SDP1) break; - usleep_range(5000, 10000); + msec_delay(5); timeout--; } if (!timeout) { - hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); + DEBUGOUT("Driver can't access resource," + " acquiring I2C bus timeout.\n"); status = IXGBE_ERR_I2C; goto release_i2c_access; } @@ -2169,7 +2601,8 @@ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data); release_i2c_access: - if (hw->phy.qsfp_shared_i2c_bus == true) { + + if (hw->phy.qsfp_shared_i2c_bus == TRUE) { /* Release I2C bus ownership. */ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp &= ~IXGBE_ESDP_SDP0; @@ -2179,99 +2612,3 @@ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, return status; } - -static const struct ixgbe_mac_operations mac_ops_82599 = { - .init_hw = &ixgbe_init_hw_generic, - .reset_hw = &ixgbe_reset_hw_82599, - .start_hw = &ixgbe_start_hw_82599, - .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, - .get_media_type = &ixgbe_get_media_type_82599, - .enable_rx_dma = &ixgbe_enable_rx_dma_82599, - .disable_rx_buff = &ixgbe_disable_rx_buff_generic, - .enable_rx_buff = &ixgbe_enable_rx_buff_generic, - .get_mac_addr = &ixgbe_get_mac_addr_generic, - .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, - .get_device_caps = &ixgbe_get_device_caps_generic, - .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, - .stop_adapter = &ixgbe_stop_adapter_generic, - .get_bus_info = &ixgbe_get_bus_info_generic, - .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, - .read_analog_reg8 = &ixgbe_read_analog_reg8_82599, - .write_analog_reg8 = &ixgbe_write_analog_reg8_82599, - .stop_link_on_d3 = &ixgbe_stop_mac_link_on_d3_82599, - .setup_link = &ixgbe_setup_mac_link_82599, - .set_rxpba = &ixgbe_set_rxpba_generic, 
- .check_link = &ixgbe_check_mac_link_generic, - .get_link_capabilities = &ixgbe_get_link_capabilities_82599, - .led_on = &ixgbe_led_on_generic, - .led_off = &ixgbe_led_off_generic, - .init_led_link_act = ixgbe_init_led_link_act_generic, - .blink_led_start = &ixgbe_blink_led_start_generic, - .blink_led_stop = &ixgbe_blink_led_stop_generic, - .set_rar = &ixgbe_set_rar_generic, - .clear_rar = &ixgbe_clear_rar_generic, - .set_vmdq = &ixgbe_set_vmdq_generic, - .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, - .clear_vmdq = &ixgbe_clear_vmdq_generic, - .init_rx_addrs = &ixgbe_init_rx_addrs_generic, - .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, - .enable_mc = &ixgbe_enable_mc_generic, - .disable_mc = &ixgbe_disable_mc_generic, - .clear_vfta = &ixgbe_clear_vfta_generic, - .set_vfta = &ixgbe_set_vfta_generic, - .fc_enable = &ixgbe_fc_enable_generic, - .setup_fc = ixgbe_setup_fc_generic, - .fc_autoneg = ixgbe_fc_autoneg, - .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, - .init_uta_tables = &ixgbe_init_uta_tables_generic, - .setup_sfp = &ixgbe_setup_sfp_modules_82599, - .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, - .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, - .release_swfw_sync = &ixgbe_release_swfw_sync, - .init_swfw_sync = NULL, - .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic, - .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, - .prot_autoc_read = &prot_autoc_read_82599, - .prot_autoc_write = &prot_autoc_write_82599, - .enable_rx = &ixgbe_enable_rx_generic, - .disable_rx = &ixgbe_disable_rx_generic, -}; - -static const struct ixgbe_eeprom_operations eeprom_ops_82599 = { - .init_params = &ixgbe_init_eeprom_params_generic, - .read = &ixgbe_read_eeprom_82599, - .read_buffer = &ixgbe_read_eeprom_buffer_82599, - .write = &ixgbe_write_eeprom_generic, - .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic, - .calc_checksum = 
&ixgbe_calc_eeprom_checksum_generic, - .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, - .update_checksum = &ixgbe_update_eeprom_checksum_generic, -}; - -static const struct ixgbe_phy_operations phy_ops_82599 = { - .identify = &ixgbe_identify_phy_82599, - .identify_sfp = &ixgbe_identify_module_generic, - .init = &ixgbe_init_phy_ops_82599, - .reset = &ixgbe_reset_phy_generic, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, - .setup_link = &ixgbe_setup_phy_link_generic, - .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, - .read_i2c_byte = &ixgbe_read_i2c_byte_generic, - .write_i2c_byte = &ixgbe_write_i2c_byte_generic, - .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, - .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, - .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, - .check_overtemp = &ixgbe_tn_check_overtemp, -}; - -const struct ixgbe_info ixgbe_82599_info = { - .mac = ixgbe_mac_82599EB, - .get_invariants = &ixgbe_get_invariants_82599, - .mac_ops = &mac_ops_82599, - .eeprom_ops = &eeprom_ops_82599, - .phy_ops = &phy_ops_82599, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_8259X, -}; diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.h new file mode 100644 index 000000000000..7d928b87bdca --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.h @@ -0,0 +1,55 @@ +/******************************************************************************* + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_82599_H_ +#define _IXGBE_82599_H_ + +s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *autoneg); +enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw); +void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed); +s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw); +void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw); +s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw); +s32 ixgbe_identify_phy_82599(struct 
ixgbe_hw *hw); +s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw); +u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval); +s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val); +s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 reg_val, bool locked); +#endif /* _IXGBE_82599_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.c new file mode 100644 index 000000000000..3251a7125723 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.c @@ -0,0 +1,1624 @@ +/******************************************************************************* + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe_api.h" +#include "ixgbe_common.h" + +#define IXGBE_EMPTY_PARAM + +static const u32 ixgbe_mvals_base[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(IXGBE_EMPTY_PARAM) +}; + +static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X540) +}; + +static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X550) +}; + +static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X550EM_x) +}; + +static const u32 ixgbe_mvals_X550EM_a[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X550EM_a) +}; + +/** + * ixgbe_dcb_get_rtrup2tc - read rtrup2tc reg + * @hw: pointer to hardware structure + * @map: pointer to u8 arr for returning map + * + * Read the rtrup2tc HW register and resolve its content into map + **/ +void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map) +{ + if (hw->mac.ops.get_rtrup2tc) + hw->mac.ops.get_rtrup2tc(hw, map); +} + +/** + * ixgbe_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This will assign function pointers and assign the MAC type and PHY code. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The ixgbe_hw structure should be + * memset to 0 prior to calling this function. 
The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +s32 ixgbe_init_shared_code(struct ixgbe_hw *hw) +{ + s32 status; + + DEBUGFUNC("ixgbe_init_shared_code"); + + /* + * Set the mac type + */ + ixgbe_set_mac_type(hw); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + status = ixgbe_init_ops_82598(hw); + break; + case ixgbe_mac_82599EB: + status = ixgbe_init_ops_82599(hw); + break; + case ixgbe_mac_X540: + status = ixgbe_init_ops_X540(hw); + break; + case ixgbe_mac_X550: + status = ixgbe_init_ops_X550(hw); + break; + case ixgbe_mac_X550EM_x: + status = ixgbe_init_ops_X550EM_x(hw); + break; + case ixgbe_mac_X550EM_a: + status = ixgbe_init_ops_X550EM_a(hw); + break; + default: + status = IXGBE_ERR_DEVICE_NOT_SUPPORTED; + break; + } + hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME; + + return status; +} + +/** + * ixgbe_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * vendor ID and device ID stored in the hw structure. 
+ **/ +s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_set_mac_type\n"); + + if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID) { + ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, + "Unsupported vendor id: %x", hw->vendor_id); + return IXGBE_ERR_DEVICE_NOT_SUPPORTED; + } + + hw->mvals = ixgbe_mvals_base; + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82598_BX: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AT: + case IXGBE_DEV_ID_82598AT2: + case IXGBE_DEV_ID_82598EB_CX4: + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + case IXGBE_DEV_ID_82598EB_XF_LR: + case IXGBE_DEV_ID_82598EB_SFP_LOM: + hw->mac.type = ixgbe_mac_82598EB; + break; + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_82599_XAUI_LOM: + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_82599_SFP: + case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: + case IXGBE_DEV_ID_82599_SFP_FCOE: + case IXGBE_DEV_ID_82599_SFP_EM: + case IXGBE_DEV_ID_82599_SFP_SF2: + case IXGBE_DEV_ID_82599_SFP_SF_QP: + case IXGBE_DEV_ID_82599_QSFP_SF_QP: + case IXGBE_DEV_ID_82599EN_SFP: + case IXGBE_DEV_ID_82599_CX4: + case IXGBE_DEV_ID_82599_LS: + case IXGBE_DEV_ID_82599_T3_LOM: + hw->mac.type = ixgbe_mac_82599EB; + break; + case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: + hw->mac.type = ixgbe_mac_X540; + hw->mvals = ixgbe_mvals_X540; + break; + case IXGBE_DEV_ID_X550T: + case IXGBE_DEV_ID_X550T1: + hw->mac.type = ixgbe_mac_X550; + hw->mvals = ixgbe_mvals_X550; + break; + case IXGBE_DEV_ID_X550EM_X_KX4: + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_SFP: + case IXGBE_DEV_ID_X550EM_X_XFI: + hw->mac.type = ixgbe_mac_X550EM_x; + hw->mvals = ixgbe_mvals_X550EM_x; + break; + case 
IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + case IXGBE_DEV_ID_X550EM_A_SFP_N: + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_QSFP: + case IXGBE_DEV_ID_X550EM_A_QSFP_N: + case IXGBE_DEV_ID_X550EM_A_SFP: + hw->mac.type = ixgbe_mac_X550EM_a; + hw->mvals = ixgbe_mvals_X550EM_a; + break; + default: + ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED; + ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, + "Unsupported device id: %x", + hw->device_id); + break; + } + + DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n", + hw->mac.type, ret_val); + return ret_val; +} + +/** + * ixgbe_init_hw - Initialize the hardware + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting and then starting the hardware + **/ +s32 ixgbe_init_hw(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_reset_hw - Performs a hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks and + * clears all interrupts, performs a PHY reset, and performs a MAC reset + **/ +s32 ixgbe_reset_hw(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_start_hw - Prepares hardware for Rx/Tx + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, + * clears all on chip counters, initializes receive address registers, + * multicast table, VLAN filter table, calls routine to setup link and + * flow control settings, and leaves transmit and receive units disabled + * and uninitialized. 
+ **/ +s32 ixgbe_start_hw(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_clear_hw_cntrs - Clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. + **/ +s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_media_type - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw), + ixgbe_media_type_unknown); +} + +/** + * ixgbe_get_mac_addr - Get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from the first Receive Address Register + * (RAR0) A reset of the adapter must have been performed prior to calling + * this function in order for the MAC address to have been loaded from the + * EEPROM into RAR0 + **/ +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr, + (hw, mac_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_san_mac_addr - Get SAN MAC address + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Reads the SAN MAC address from the EEPROM, if it's available. This is + * per-port, so set_lan_id() must be called before reading the addresses. 
+ **/ +s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr, + (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_san_mac_addr - Write a SAN MAC address + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Writes A SAN MAC address to the EEPROM. + **/ +s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr, + (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_device_caps - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word for device capabilities + * + * Reads the extra device capabilities from the EEPROM + **/ +s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_device_caps, + (hw, device_caps), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM + * @hw: pointer to hardware structure + * @wwnn_prefix: the alternative WWNN prefix + * @wwpn_prefix: the alternative WWPN prefix + * + * This function will read the EEPROM from the alternative SAN MAC address + * block to check the support for the alternative WWNN/WWPN prefix support. 
+ **/ +s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix, + (hw, wwnn_prefix, wwpn_prefix), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_fcoe_boot_status - Get FCOE boot status from EEPROM + * @hw: pointer to hardware structure + * @bs: the fcoe boot status + * + * This function will read the FCOE boot status from the iSCSI FCOE block + **/ +s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status, + (hw, bs), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_bus_info - Set PCI bus info + * @hw: pointer to hardware structure + * + * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure + **/ +s32 ixgbe_get_bus_info(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_num_of_tx_queues - Get Tx queues + * @hw: pointer to hardware structure + * + * Returns the number of transmit queues for the given adapter. + **/ +u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw) +{ + return hw->mac.max_tx_queues; +} + +/** + * ixgbe_get_num_of_rx_queues - Get Rx queues + * @hw: pointer to hardware structure + * + * Returns the number of receive queues for the given adapter. + **/ +u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw) +{ + return hw->mac.max_rx_queues; +} + +/** + * ixgbe_stop_adapter - Disable Rx/Tx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. 
+ **/ +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_pba_string - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + * Reads the part number string from the EEPROM. + **/ +s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size) +{ + return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size); +} + +/** + * ixgbe_identify_phy - Get PHY type + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. + **/ +s32 ixgbe_identify_phy(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + + if (hw->phy.type == ixgbe_phy_unknown) { + status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw), + IXGBE_NOT_IMPLEMENTED); + } + + return status; +} + +/** + * ixgbe_reset_phy - Perform a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + + if (hw->phy.type == ixgbe_phy_unknown) { + if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS) + status = IXGBE_ERR_PHY; + } + + if (status == IXGBE_SUCCESS) { + status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw), + IXGBE_NOT_IMPLEMENTED); + } + return status; +} + +/** + * ixgbe_get_phy_firmware_version - + * @hw: pointer to hardware structure + * @firmware_version: pointer to firmware version + **/ +s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version) +{ + s32 status = IXGBE_SUCCESS; + + status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version, + (hw, firmware_version), + IXGBE_NOT_IMPLEMENTED); + return status; +} + +/** + * ixgbe_read_phy_reg - Read PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register 
+ * + * Reads a value from a specified PHY register + **/ +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data) +{ + if (hw->phy.id == 0) + ixgbe_identify_phy(hw); + + return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_phy_reg - Write PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @phy_data: Data to write to the PHY register + * + * Writes a value to specified PHY register + **/ +s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data) +{ + if (hw->phy.id == 0) + ixgbe_identify_phy(hw); + + return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_phy_link - Restart PHY autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. + **/ +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_internal_phy - Configure integrated PHY + * @hw: pointer to hardware structure + * + * Reconfigure the integrated PHY in order to enable talk to the external PHY. + * Returns success if not implemented, since nothing needs to be done in this + * case. + */ +s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.setup_internal_link, (hw), + IXGBE_SUCCESS); +} + +/** + * ixgbe_check_phy_link - Determine link and speed status + * @hw: pointer to hardware structure + * + * Reads a PHY register to determine if link is up and the current speed for + * the PHY. 
+ **/ +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed, + link_up), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_phy_link_speed - Set auto advertise + * @hw: pointer to hardware structure + * @speed: new link speed + * + * Sets the auto advertised capabilities + **/ +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed, + autoneg_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_phy_power - Control the phy power state + * @hw: pointer to hardware structure + * @on: true for on, false for off + */ +s32 ixgbe_set_phy_power(struct ixgbe_hw *hw, bool on) +{ + return ixgbe_call_func(hw, hw->phy.ops.set_phy_power, (hw, on), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_check_link - Get link and speed status + * @hw: pointer to hardware structure + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed, + link_up, link_up_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_tx_laser - Disable Tx laser + * @hw: pointer to hardware structure + * + * If the driver needs to disable the laser on SFI optics. + **/ +void ixgbe_disable_tx_laser(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.disable_tx_laser) + hw->mac.ops.disable_tx_laser(hw); +} + +/** + * ixgbe_enable_tx_laser - Enable Tx laser + * @hw: pointer to hardware structure + * + * If the driver needs to enable the laser on SFI optics. 
+ **/ +void ixgbe_enable_tx_laser(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_tx_laser) + hw->mac.ops.enable_tx_laser(hw); +} + +/** + * ixgbe_flap_tx_laser - flap Tx laser to start autotry process + * @hw: pointer to hardware structure + * + * When the driver changes the link speeds that it can support then + * flap the tx laser to alert the link partner to start autotry + * process on its end. + **/ +void ixgbe_flap_tx_laser(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.flap_tx_laser) + hw->mac.ops.flap_tx_laser(hw); +} + +/** + * ixgbe_setup_link - Set link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * + * Configures link settings. Restarts the link. + * Performs autonegotiation if needed. + **/ +s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed, + autoneg_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_mac_link - Set link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * + * Configures link settings. Restarts the link. + * Performs autonegotiation if needed. + **/ +s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->mac.ops.setup_mac_link, (hw, speed, + autoneg_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_link_capabilities - Returns link capabilities + * @hw: pointer to hardware structure + * + * Determines the link capabilities of the current configuration. + **/ +s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *autoneg) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw, + speed, autoneg), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_led_on - Turn on LEDs + * @hw: pointer to hardware structure + * @index: led number to turn on + * + * Turns on the software controllable LEDs. 
+ **/ +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_led_off - Turn off LEDs + * @hw: pointer to hardware structure + * @index: led number to turn off + * + * Turns off the software controllable LEDs. + **/ +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_blink_led_start - Blink LEDs + * @hw: pointer to hardware structure + * @index: led number to blink + * + * Blink LED based on index. + **/ +s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_blink_led_stop - Stop blinking LEDs + * @hw: pointer to hardware structure + * + * Stop blinking LED based on index. + **/ +s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_eeprom_params - Initialize EEPROM parameters + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw), + IXGBE_NOT_IMPLEMENTED); +} + + +/** + * ixgbe_write_eeprom - Write word to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word to be written to the EEPROM + * + * Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not + * called after this function, the EEPROM will most likely contain an + * invalid checksum. 
+ **/ +s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_eeprom_buffer - Write word(s) to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word(s) to be written to the EEPROM + * @words: number of words + * + * Writes 16 bit word(s) to EEPROM. If ixgbe_eeprom_update_checksum is not + * called after this function, the EEPROM will most likely contain an + * invalid checksum. + **/ +s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words, + u16 *data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer, + (hw, offset, words, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_eeprom - Read word from EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit value from EEPROM + * + * Reads 16 bit value from EEPROM + **/ +s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_eeprom_buffer - Read word(s) from EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit word(s) from EEPROM + * @words: number of words + * + * Reads 16 bit word(s) from EEPROM + **/ +s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer, + (hw, offset, words, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum + **/ +s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) +{ + return 
ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum, + (hw, checksum_val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_update_eeprom_checksum - Updates the EEPROM checksum + * @hw: pointer to hardware structure + **/ +s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_insert_mac_addr - Find a RAR for this mac address + * @hw: pointer to hardware structure + * @addr: Address to put into receive address register + * @vmdq: VMDq pool to assign + * + * Puts an ethernet address into a receive address register, or + * finds the rar that it is already in; adds to the pool list + **/ +s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr, + (hw, addr, vmdq), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_rar - Set Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. + **/ +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq, + enable_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_clear_rar - Clear Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to clear + * + * Clears the ethernet address from a receive address register.
+ **/ +s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_vmdq - Associate a VMDq index with a receive address + * @hw: pointer to hardware structure + * @rar: receive address register index to associate with VMDq index + * @vmdq: VMDq set or pool index + **/ +s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq), + IXGBE_NOT_IMPLEMENTED); + +} + +/** + * ixgbe_set_vmdq_san_mac - Associate VMDq index 127 with a receive address + * @hw: pointer to hardware structure + * @vmdq: VMDq default pool index + **/ +s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vmdq_san_mac, + (hw, vmdq), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address + * @hw: pointer to hardware structure + * @rar: receive address register index to disassociate with VMDq index + * @vmdq: VMDq set or pool index + **/ +s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_rx_addrs - Initializes receive address filters. + * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_num_rx_addrs - Returns the number of RAR entries. 
+ * @hw: pointer to hardware structure + **/ +u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw) +{ + return hw->mac.num_rar_entries; +} + +/** + * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new multicast addresses + * @addr_count: number of addresses + * @func: iterator function to walk the multicast address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + **/ +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func) +{ + return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw, + addr_list, addr_count, func), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses + * @hw: pointer to hardware structure + * @mc_addr_list: the list of new multicast addresses + * @mc_addr_count: number of addresses + * @func: iterator function to walk the multicast address list + * + * The given list replaces any existing list. Clears the MC addrs from receive + * address registers and the multicast table. Uses unused receive address + * registers for the first multicast addresses, and hashes the rest into the + * multicast table. + **/ +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr func, + bool clear) +{ + return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw, + mc_addr_list, mc_addr_count, func, clear), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_enable_mc - Enable multicast address in RAR + * @hw: pointer to hardware structure + * + * Enables multicast address in RAR and the use of the multicast hash table. 
+ **/ +s32 ixgbe_enable_mc(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_mc - Disable multicast address in RAR + * @hw: pointer to hardware structure + * + * Disables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_disable_mc(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_clear_vfta - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_vfta - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VLVFB + * @vlan_on: boolean flag to turn on/off VLAN + * @vlvf_bypass: boolean flag indicating updating the default pool is okay + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, + bool vlvf_bypass) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind, + vlan_on, vlvf_bypass), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_vlvf - Set VLAN Pool Filter + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VLVFB + * @vlan_on: boolean flag to turn on/off VLAN in VLVF + * @vfta_delta: pointer to the difference between the current value of VFTA + * and the desired value + * @vfta: the desired value of the VFTA + * @vlvf_bypass: boolean flag indicating updating the default pool is okay + * + * Turn on/off specified bit in VLVF table. 
+ **/ +s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, + u32 *vfta_delta, u32 vfta, bool vlvf_bypass) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind, + vlan_on, vfta_delta, vfta, vlvf_bypass), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_fc_enable - Enable flow control + * @hw: pointer to hardware structure + * + * Configures the flow control settings based on SW configuration. + **/ +s32 ixgbe_fc_enable(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_fc - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +s32 ixgbe_setup_fc(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_fw_drv_ver - Try to send the driver version number FW + * @hw: pointer to hardware structure + * @maj: driver major number to be sent to firmware + * @min: driver minor number to be sent to firmware + * @build: driver build number to be sent to firmware + * @ver: driver version number to be sent to firmware + * @len: length of driver_ver string + * @driver_ver: driver string + **/ +s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, + u8 ver, u16 len, char *driver_ver) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min, + build, ver, len, driver_ver), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the temperatures in mac.thermal_sensor_data + **/ +s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_thermal_sensor_data, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Inits the thermal sensor thresholds 
according to the NVM map + **/ +s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_thermal_sensor_thresh, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_dmac_config - Configure DMA Coalescing registers. + * @hw: pointer to hardware structure + * + * Configure DMA coalescing. If enabling dmac, dmac is activated. + * When disabling dmac, dmac enable dmac bit is cleared. + **/ +s32 ixgbe_dmac_config(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.dmac_config, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_dmac_update_tcs - Configure DMA Coalescing registers. + * @hw: pointer to hardware structure + * + * Disables dmac, updates per TC settings, and then enable dmac. + **/ +s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.dmac_update_tcs, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_dmac_config_tcs - Configure DMA Coalescing registers. + * @hw: pointer to hardware structure + * + * Configure DMA coalescing threshold per TC and set high priority bit for + * FCOE TC. The dmac enable bit must be cleared before configuring. + **/ +s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.dmac_config_tcs, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_eee - Enable/disable EEE support + * @hw: pointer to the HW structure + * @enable_eee: boolean flag to enable EEE + * + * Enable/disable EEE based on enable_eee flag. + * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C + * are modified.
+ * + **/ +s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee) +{ + return ixgbe_call_func(hw, hw->mac.ops.setup_eee, (hw, enable_eee), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_source_address_pruning - Enable/Disable source address pruning + * @hw: pointer to hardware structure + * @enable: enable or disable source address pruning + * @pool: Rx pool - Rx pool to toggle source address pruning + **/ +void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable, + unsigned int pool) +{ + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, enable, pool); +} + +/** + * ixgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for Ethertype anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing + * + **/ +void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) +{ + if (hw->mac.ops.set_ethertype_anti_spoofing) + hw->mac.ops.set_ethertype_anti_spoofing(hw, enable, vf); +} + +/** + * ixgbe_read_iosf_sb_reg - Read 32 bit PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @device_type: type of device you want to communicate with + * @phy_data: Pointer to read data from PHY register + * + * Reads a value from a specified PHY register + **/ +s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *phy_data) +{ + return ixgbe_call_func(hw, hw->mac.ops.read_iosf_sb_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_iosf_sb_reg - Write 32 bit register through IOSF Sideband + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: type of device you want to communicate with + * @phy_data: Data to write to the PHY register + * + * Writes a value to specified PHY register + **/ +s32
ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 phy_data) +{ + return ixgbe_call_func(hw, hw->mac.ops.write_iosf_sb_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_mdd - Disable malicious driver detection + * @hw: pointer to hardware structure + * + **/ +void ixgbe_disable_mdd(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.disable_mdd) + hw->mac.ops.disable_mdd(hw); +} + +/** + * ixgbe_enable_mdd - Enable malicious driver detection + * @hw: pointer to hardware structure + * + **/ +void ixgbe_enable_mdd(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_mdd) + hw->mac.ops.enable_mdd(hw); +} + +/** + * ixgbe_mdd_event - Handle malicious driver detection event + * @hw: pointer to hardware structure + * @vf_bitmap: vf bitmap of malicious vfs + * + **/ +void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap) +{ + if (hw->mac.ops.mdd_event) + hw->mac.ops.mdd_event(hw, vf_bitmap); +} + +/** + * ixgbe_restore_mdd_vf - Restore VF that was disabled during malicious driver + * detection event + * @hw: pointer to hardware structure + * @vf: vf index + * + **/ +void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf) +{ + if (hw->mac.ops.restore_mdd_vf) + hw->mac.ops.restore_mdd_vf(hw, vf); +} + +/** + * ixgbe_enter_lplu - Transition to low power states + * @hw: pointer to hardware structure + * + * Configures Low Power Link Up on transition to low power states + * (from D0 to non-D0). + **/ +s32 ixgbe_enter_lplu(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.enter_lplu, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_handle_lasi - Handle external Base T PHY interrupt + * @hw: pointer to hardware structure + * + * Handle external Base T PHY interrupt. 
If high temperature + * failure alarm then return error, else if link status change + * then setup internal/external PHY link + * + * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature + * failure alarm, else return PHY access status. + */ +s32 ixgbe_handle_lasi(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.handle_lasi, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_analog_reg8 - Reads 8 bit analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs write operation to analog register specified. + **/ +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg, + val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_analog_reg8 - Writes 8 bit analog register + * @hw: pointer to hardware structure + * @reg: analog register to write + * @val: value to write + * + * Performs write operation to Atlas analog register specified. + **/ +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg, + val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_uta_tables - Initializes Unicast Table Arrays. + * @hw: pointer to hardware structure + * + * Initializes the Unicast Table Arrays to zero on device load. This + * is part of the Rx init addr execution path. + **/ +s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: I2C bus address to read from + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. 
+ **/ +s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data) +{ + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset, + dev_addr, data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_i2c_byte_unlocked - Reads 8 bit word via I2C from device address + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: I2C bus address to read from + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte_unlocked, + (hw, byte_offset, dev_addr, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_link - Perform read operation on link device + * @hw: pointer to the hardware structure + * @addr: bus address to read from + * @reg: device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + */ +s32 ixgbe_read_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) +{ + return ixgbe_call_func(hw, hw->link.ops.read_link, (hw, addr, + reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_link_unlocked - Perform read operation on link device + * @hw: pointer to the hardware structure + * @addr: bus address to read from + * @reg: device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. 
+ **/ +s32 ixgbe_read_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) +{ + return ixgbe_call_func(hw, hw->link.ops.read_link_unlocked, + (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: I2C bus address to write to + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface + * at a specified device address. + **/ +s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset, + dev_addr, data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_i2c_byte_unlocked - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: I2C bus address to write to + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface + * at a specified device address. + **/ +s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte_unlocked, + (hw, byte_offset, dev_addr, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_link - Perform write operation on link device + * @hw: pointer to the hardware structure + * @addr: bus address to write to + * @reg: device register to write to + * @val: value to write + * + * Returns an error code on error. 
+ */ +s32 ixgbe_write_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) +{ + return ixgbe_call_func(hw, hw->link.ops.write_link, + (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_link_unlocked - Perform write operation on link device + * @hw: pointer to the hardware structure + * @addr: bus address to write to + * @reg: device register to write to + * @val: value to write + * + * Returns an error code on error. + **/ +s32 ixgbe_write_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) +{ + return ixgbe_call_func(hw, hw->link.ops.write_link_unlocked, + (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to write + * @eeprom_data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, + u8 byte_offset, u8 eeprom_data) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom, + (hw, byte_offset, eeprom_data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) +{ + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom, + (hw, byte_offset, eeprom_data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_supported_physical_layer - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. 
+ **/ +u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer, + (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN); +} + +/** + * ixgbe_enable_rx_dma - Enables Rx DMA unit, dependent on device specifics + * @hw: pointer to hardware structure + * @regval: bitfield to write to the Rx DMA register + * + * Enables the Rx DMA unit of the device. + **/ +s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval) +{ + return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma, + (hw, regval), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_sec_rx_path - Stops the receive data path + * @hw: pointer to hardware structure + * + * Stops the receive data path. + **/ +s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path, + (hw), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_enable_sec_rx_path - Enables the receive data path + * @hw: pointer to hardware structure + * + * Enables the receive data path. 
+ **/ +s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path, + (hw), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through SW_FW_SYNC register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) +{ + return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync, + (hw, mask), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_release_swfw_semaphore - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through SW_FW_SYNC register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) +{ + if (hw->mac.ops.release_swfw_sync) + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * ixgbe_init_swfw_semaphore - Clean up SWFW semaphore + * @hw: pointer to hardware structure + * + * Attempts to acquire the SWFW semaphore through SW_FW_SYNC register. + * Regardless of whether is succeeds or not it then release the semaphore. + * This is function is called to recover from catastrophic failures that + * may have left the semaphore locked. + **/ +void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.init_swfw_sync) + hw->mac.ops.init_swfw_sync(hw); +} + +void ixgbe_disable_rx(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.disable_rx) + hw->mac.ops.disable_rx(hw); +} + +void ixgbe_enable_rx(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_rx) + hw->mac.ops.enable_rx(hw); +} + +/** + * ixgbe_set_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * Set module link speed via the rate select. 
+ */ +void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) +{ + if (hw->mac.ops.set_rate_select_speed) + hw->mac.ops.set_rate_select_speed(hw, speed); +} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.h new file mode 100644 index 000000000000..8016a49f2974 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.h @@ -0,0 +1,213 @@ +/******************************************************************************* + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_API_H_ +#define _IXGBE_API_H_ + +#include "ixgbe_type.h" + +void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map); + +s32 ixgbe_init_shared_code(struct ixgbe_hw *hw); + +extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw); + +s32 ixgbe_set_mac_type(struct ixgbe_hw *hw); +s32 ixgbe_init_hw(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw(struct ixgbe_hw *hw); +s32 ixgbe_start_hw(struct ixgbe_hw *hw); +s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw); +enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw); +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr); +s32 ixgbe_get_bus_info(struct ixgbe_hw *hw); +u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw); +u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw); +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); +s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size); + +s32 ixgbe_identify_phy(struct ixgbe_hw *hw); +s32 ixgbe_reset_phy(struct ixgbe_hw *hw); +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data); +s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data); + +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); +s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw); +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up); +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_set_phy_power(struct ixgbe_hw *, bool on); +void 
ixgbe_disable_tx_laser(struct ixgbe_hw *hw); +void ixgbe_enable_tx_laser(struct ixgbe_hw *hw); +void ixgbe_flap_tx_laser(struct ixgbe_hw *hw); +s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); +s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *autoneg); +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw); +s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); + +s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val); +s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw); + +s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); +s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq); +s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw); +u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw); +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func); +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, 
ixgbe_mc_addr_itr func, + bool clear); +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq); +s32 ixgbe_enable_mc(struct ixgbe_hw *hw); +s32 ixgbe_disable_mc(struct ixgbe_hw *hw); +s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on, bool vlvf_bypass); +s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, u32 *vfta_delta, u32 vfta, + bool vlvf_bypass); +s32 ixgbe_fc_enable(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc(struct ixgbe_hw *hw); +s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, + u8 ver, u16 len, char *driver_ver); +s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw); +s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw); +void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr); +s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, + u16 *firmware_version); +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); +u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval); +s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw); +s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw); +s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw); +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl, + bool cloud_mode); +void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common, + u8 queue); +s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input_mask, bool cloud_mode); +s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw 
*hw, + union ixgbe_atr_input *input, + u16 soft_id, u8 queue, bool cloud_mode); +s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id); +s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + union ixgbe_atr_input *mask, + u16 soft_id, + u8 queue, + bool cloud_mode); +void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + union ixgbe_atr_input *mask); +u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common); +bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data); +s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixgbe_read_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val); +s32 ixgbe_read_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val); +s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data); +void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue); +s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 ixgbe_write_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val); +s32 ixgbe_write_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val); +s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); +s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps); +s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask); +void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw); +s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); +s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, 
u16 *bs); +s32 ixgbe_dmac_config(struct ixgbe_hw *hw); +s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw); +s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw); +s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee); +void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable, + unsigned int vf); +void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, + int vf); +s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *phy_data); +s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 phy_data); +void ixgbe_disable_mdd(struct ixgbe_hw *hw); +void ixgbe_enable_mdd(struct ixgbe_hw *hw); +void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap); +void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf); +s32 ixgbe_enter_lplu(struct ixgbe_hw *hw); +s32 ixgbe_handle_lasi(struct ixgbe_hw *hw); +void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed); +void ixgbe_disable_rx(struct ixgbe_hw *hw); +void ixgbe_enable_rx(struct ixgbe_hw *hw); +s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); + +#endif /* _IXGBE_API_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.c new file mode 100644 index 000000000000..5f516296ac8e --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.c @@ -0,0 +1,168 @@ +/******************************************************************************* + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +#include "ixgbe.h" +#include "ixgbe_cna.h" +#include "ixgbe_vmdq.h" + +static int ixgbe_cna_open(struct net_device *cnadev) +{ + struct ixgbe_adapter *adapter = netdev_priv(cnadev); + strcpy(cnadev->name, adapter->netdev->name); + DPRINTK(PROBE, INFO, "CNA pseudo device opened %s\n", cnadev->name); + return 0; +} + +static int ixgbe_cna_close(struct net_device *cnadev) +{ + struct ixgbe_adapter *adapter = netdev_priv(cnadev); + + DPRINTK(PROBE, INFO, "CNA pseudo device closed %s\n", cnadev->name); + return 0; +} + +static int ixgbe_cna_change_mtu(struct net_device *cnadev, int new_mtu) +{ + struct ixgbe_adapter *adapter = netdev_priv(cnadev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) + return -EINVAL; + + DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", + cnadev->mtu, new_mtu); + /* must set new MTU before calling down or up */ + cnadev->mtu = new_mtu; + + return 0; +} + +int ixgbe_cna_enable(struct ixgbe_adapter *adapter) +{ + struct net_device *cnadev; + struct net_device *netdev; + int err; + u64 wwpn; + u64 wwnn; + + netdev = adapter->netdev; + /* + * 
Oppositely to regular net device, CNA device doesn't have + * a private allocated region as we don't want to duplicate + * ixgbe_adapter information. Though, the CNA device still need + * to access the ixgbe_adapter while allocating queues or such. Thereby, + * cnadev->priv needs to point to netdev->priv. + */ + cnadev = alloc_etherdev_mq(0, MAX_TX_QUEUES); + if (!cnadev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + adapter->cnadev = cnadev; + SET_MODULE_OWNER(cnadev); + + cnadev->priv = adapter; + + cnadev->open = &ixgbe_cna_open; + cnadev->stop = &ixgbe_cna_close; + cnadev->change_mtu = &ixgbe_cna_change_mtu; + cnadev->do_ioctl = netdev->do_ioctl; + cnadev->hard_start_xmit = netdev->hard_start_xmit; +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + cnadev->vlan_rx_register = netdev->vlan_rx_register; + cnadev->vlan_rx_add_vid = netdev->vlan_rx_add_vid; + cnadev->vlan_rx_kill_vid = netdev->vlan_rx_kill_vid; +#endif + ixgbe_set_ethtool_ops(cnadev); + +#if IS_ENABLED(CONFIG_DCB) + cnadev->dcbnl_ops = netdev->dcbnl_ops; +#endif /* CONFIG_DCB */ + + cnadev->mtu = netdev->mtu; + cnadev->pdev = netdev->pdev; + cnadev->gso_max_size = GSO_MAX_SIZE; + cnadev->features = netdev->features | NETIF_F_CNA | NETIF_F_HW_VLAN_FILTER; + + /* set the MAC address to SAN mac address */ + if (ixgbe_validate_mac_addr(adapter->hw.mac.san_addr) == 0) + memcpy(cnadev->dev_addr, + adapter->hw.mac.san_addr, + cnadev->addr_len); + + cnadev->features |= NETIF_F_FCOE_CRC | + NETIF_F_FCOE_MTU | + NETIF_F_FSO; + + cnadev->ndo_fcoe_ddp_setup = &ixgbe_fcoe_ddp_get; + cnadev->ndo_fcoe_ddp_done = &ixgbe_fcoe_ddp_put; + cnadev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + + netif_carrier_off(cnadev); + netif_tx_stop_all_queues(cnadev); + + VMKNETDDI_REGISTER_QUEUEOPS(cnadev, ixgbe_netqueue_ops); + + err = register_netdev(cnadev); + if (err) + goto err_register; + + DPRINTK(PROBE, INFO, "CNA pseudo device registered %s\n", netdev->name); + + return err; + +err_register: + 
DPRINTK(PROBE, INFO, "CNA pseudo device cannot be registered %s\n", + netdev->name); + free_netdev(cnadev); +err_alloc_etherdev: + DPRINTK(PROBE, INFO, "CNA cannot be enabled on %s\n", netdev->name); + adapter->flags2 &= ~IXGBE_FLAG2_CNA_ENABLED; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + adapter->ring_feature[RING_F_FCOE].indices = 0; + return err; +} + +void ixgbe_cna_disable(struct ixgbe_adapter *adapter) +{ + if (!(adapter->flags2 & IXGBE_FLAG2_CNA_ENABLED)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_CNA_ENABLED; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + adapter->ring_feature[RING_F_FCOE].indices = 0; + + if (adapter->cnadev) { + unregister_netdev(adapter->cnadev); + DPRINTK(PROBE, INFO, "CNA pseudo device unregistered %s\n", + adapter->cnadev->name); + + free_netdev(adapter->cnadev); + adapter->cnadev = NULL; + } +} + +/* ixgbe_cna.c */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.h new file mode 100644 index 000000000000..ee40480b1f44 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.h @@ -0,0 +1,31 @@ +/******************************************************************************* + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_CNA_H_ +#define _IXGBE_CNA_H_ + +int ixgbe_cna_enable(struct ixgbe_adapter *adapter); +void ixgbe_cna_disable(struct ixgbe_adapter *adapter); + +#endif /* _IXGBE_CNA_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.c index fd055cc93cc6..04369ba69f20 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
@@ -26,50 +22,132 @@ *******************************************************************************/ -#include -#include -#include -#include - -#include "ixgbe.h" #include "ixgbe_common.h" #include "ixgbe_phy.h" - -static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); -static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); -static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); -static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); -static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); -static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82599.h" +#include "ixgbe_api.h" + +STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); +STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); +STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); +STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); +STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw); +STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, u16 count); -static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); -static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); -static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); -static void ixgbe_release_eeprom(struct ixgbe_hw *hw); - -static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); -static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); -static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data); -static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, +STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); +STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw); + +STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); +STATIC s32 ixgbe_get_san_mac_addr_offset(struct 
ixgbe_hw *hw, + u16 *san_mac_offset); +STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); -static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, +STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, u16 offset); -static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); -/* Base table for registers values that change by MAC */ -const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(8259X) -}; +/** + * ixgbe_init_ops_generic - Inits function ptrs + * @hw: pointer to the hardware structure + * + * Initialize the function pointers. + **/ +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + struct ixgbe_mac_info *mac = &hw->mac; + u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + DEBUGFUNC("ixgbe_init_ops_generic"); + + /* EEPROM */ + eeprom->ops.init_params = ixgbe_init_eeprom_params_generic; + /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */ + if (eec & IXGBE_EEC_PRES) { + eeprom->ops.read = ixgbe_read_eerd_generic; + eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic; + } else { + eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic; + eeprom->ops.read_buffer = + ixgbe_read_eeprom_buffer_bit_bang_generic; + } + eeprom->ops.write = ixgbe_write_eeprom_generic; + eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic; + eeprom->ops.validate_checksum = + ixgbe_validate_eeprom_checksum_generic; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic; + + /* MAC */ + mac->ops.init_hw = ixgbe_init_hw_generic; + mac->ops.reset_hw = NULL; + mac->ops.start_hw = ixgbe_start_hw_generic; + mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic; + mac->ops.get_media_type = NULL; + 
mac->ops.get_supported_physical_layer = NULL; + mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic; + mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic; + mac->ops.stop_adapter = ixgbe_stop_adapter_generic; + mac->ops.get_bus_info = ixgbe_get_bus_info_generic; + mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie; + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync; + mac->ops.prot_autoc_read = prot_autoc_read_generic; + mac->ops.prot_autoc_write = prot_autoc_write_generic; + + /* LEDs */ + mac->ops.led_on = ixgbe_led_on_generic; + mac->ops.led_off = ixgbe_led_off_generic; + mac->ops.blink_led_start = ixgbe_blink_led_start_generic; + mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic; + mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic; + + /* RAR, Multicast, VLAN */ + mac->ops.set_rar = ixgbe_set_rar_generic; + mac->ops.clear_rar = ixgbe_clear_rar_generic; + mac->ops.insert_mac_addr = NULL; + mac->ops.set_vmdq = NULL; + mac->ops.clear_vmdq = NULL; + mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic; + mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic; + mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic; + mac->ops.enable_mc = ixgbe_enable_mc_generic; + mac->ops.disable_mc = ixgbe_disable_mc_generic; + mac->ops.clear_vfta = NULL; + mac->ops.set_vfta = NULL; + mac->ops.set_vlvf = NULL; + mac->ops.init_uta_tables = NULL; + mac->ops.enable_rx = ixgbe_enable_rx_generic; + mac->ops.disable_rx = ixgbe_disable_rx_generic; + + /* Flow Control */ + mac->ops.fc_enable = ixgbe_fc_enable_generic; + mac->ops.setup_fc = ixgbe_setup_fc_generic; + mac->ops.fc_autoneg = ixgbe_fc_autoneg; + + /* Link */ + mac->ops.get_link_capabilities = NULL; + mac->ops.setup_link = NULL; + mac->ops.check_link = NULL; + mac->ops.dmac_config = NULL; + mac->ops.dmac_update_tcs = NULL; + mac->ops.dmac_config_tcs = NULL; + + return IXGBE_SUCCESS; +} /** - * ixgbe_device_supports_autoneg_fc - 
Check if phy supports autoneg flow - * control - * @hw: pointer to hardware structure + * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation + * of flow control + * @hw: pointer to hardware structure + * + * This function returns true if the device supports flow control + * autonegotiation, and false if it does not. * - * There are several phys that do not support autoneg flow control. This - * function check the device id to see if the associated phy supports - * autoneg flow control. **/ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) { @@ -77,12 +155,17 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) ixgbe_link_speed speed; bool link_up; + DEBUGFUNC("ixgbe_device_supports_autoneg_fc"); + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber_qsfp: case ixgbe_media_type_fiber: /* flow control autoneg black list */ switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_SFP: case IXGBE_DEV_ID_X550EM_A_SFP_N: + case IXGBE_DEV_ID_X550EM_A_QSFP: + case IXGBE_DEV_ID_X550EM_A_QSFP_N: supported = false; break; default: @@ -117,16 +200,16 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) supported = true; break; default: - break; + supported = false; } default: break; } if (!supported) - hw_dbg(hw, "Device %x does not support flow control autoneg\n", - hw->device_id); - + ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, + "Device %x does not support flow control autoneg", + hw->device_id); return supported; } @@ -138,18 +221,19 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) **/ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) { - s32 ret_val = 0; + s32 ret_val = IXGBE_SUCCESS; u32 reg = 0, reg_bp = 0; u16 reg_cu = 0; bool locked = false; - /* - * Validate the requested mode. Strict IEEE mode does not allow - * ixgbe_fc_rx_pause because it will cause us to fail at UNH. 
- */ + DEBUGFUNC("ixgbe_setup_fc_generic"); + + /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { - hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; } /* @@ -168,17 +252,18 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) case ixgbe_media_type_backplane: /* some MAC's need RMW protection on AUTOC */ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; /* fall through - only backplane uses autoc */ + case ixgbe_media_type_fiber_qsfp: case ixgbe_media_type_fiber: reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); break; case ixgbe_media_type_copper: - hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, ®_cu); + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu); break; default: break; @@ -239,11 +324,14 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE; break; default: - hw_dbg(hw, "Flow control param set incorrectly\n"); - return IXGBE_ERR_CONFIG; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; } - if (hw->mac.type != ixgbe_mac_X540) { + if (hw->mac.type < ixgbe_mac_X540) { /* * Enable auto-negotiation between the MAC & PHY; * the MAC will advertise clause 37 flow control. 
@@ -256,7 +344,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); - hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); + DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); } /* @@ -265,21 +353,18 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) * */ if (hw->phy.media_type == ixgbe_media_type_backplane) { - /* Need the SW/FW semaphore around AUTOC writes if 82599 and - * LESM is on, likewise reset_pipeline requries the lock as - * it also writes AUTOC. - */ + reg_bp |= IXGBE_AUTOC_AN_RESTART; ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked); if (ret_val) - return ret_val; - + goto out; } else if ((hw->phy.media_type == ixgbe_media_type_copper) && - ixgbe_device_supports_autoneg_fc(hw)) { - hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, reg_cu); + (ixgbe_device_supports_autoneg_fc(hw))) { + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu); } - hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); + DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); +out: return ret_val; } @@ -297,16 +382,15 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) s32 ret_val; u32 ctrl_ext; u16 device_caps; -#if 1 //by hilbert s32 rc; u16 regVal=0; -#endif + + DEBUGFUNC("ixgbe_start_hw_generic"); /* Set the media type */ hw->phy.media_type = hw->mac.ops.get_media_type(hw); - /* Identify the PHY */ - hw->phy.ops.identify(hw); + /* PHY ops initialization must be done in reset_hw() */ /* Clear the VLAN filter table */ hw->mac.ops.clear_vfta(hw); @@ -320,18 +404,18 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); IXGBE_WRITE_FLUSH(hw); - /* Setup flow control if method for doing so */ - if (hw->mac.ops.setup_fc) { - ret_val = hw->mac.ops.setup_fc(hw); - if (ret_val) - return ret_val; + /* Setup flow control */ + ret_val = ixgbe_setup_fc(hw); + if (ret_val != IXGBE_SUCCESS && ret_val != 
IXGBE_NOT_IMPLEMENTED) { + DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val); + return ret_val; } - /* Cashe bit indicating need for crosstalk fix */ + /* Cache bit indicating need for crosstalk fix */ switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: hw->mac.ops.get_device_caps(hw, &device_caps); if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) hw->need_crosstalk_fix = false; @@ -347,14 +431,14 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) hw->adapter_stopped = false; #if 1 /* To modify speed LED polarity and configure led on only for speed 1G in M88E1512 - * for Porsche2 platform. By hilbert + * for Porsche2 platform. * From 88E1512 datasheet: * Page register: 0x16 * LED functon control register: 0x10 in page 3 * LED polarity control register: 0x11 in page 3 */ - if (hw->mac.type == ixgbe_mac_x550em_a && + if (hw->mac.type == ixgbe_mac_X550EM_a && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { /* For M88E1512, to select page 3 in register 0x16 */ regVal = 0x03; @@ -407,7 +491,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) } } #endif - return 0; + return IXGBE_SUCCESS; } /** @@ -423,6 +507,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) { u32 i; + u32 regval; /* Clear the rate limiters */ for (i = 0; i < hw->mac.max_tx_queues; i++) { @@ -431,7 +516,21 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) } IXGBE_WRITE_FLUSH(hw); - return 0; + /* Disable relaxed ordering */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); + regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); + } + + for (i = 0; i < hw->mac.max_rx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), 
regval); + } + + return IXGBE_SUCCESS; } /** @@ -448,10 +547,12 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) { s32 status; + DEBUGFUNC("ixgbe_init_hw_generic"); + /* Reset the hardware */ status = hw->mac.ops.reset_hw(hw); - if (status == 0) { + if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) { /* Start the HW */ status = hw->mac.ops.start_hw(hw); } @@ -460,6 +561,9 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) if (hw->mac.ops.init_led_link_act) hw->mac.ops.init_led_link_act(hw); + if (status != IXGBE_SUCCESS) + DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status); + return status; } @@ -474,6 +578,8 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) { u16 i = 0; + DEBUGFUNC("ixgbe_clear_hw_cntrs_generic"); + IXGBE_READ_REG(hw, IXGBE_CRCERRS); IXGBE_READ_REG(hw, IXGBE_ILLERRC); IXGBE_READ_REG(hw, IXGBE_ERRBC); @@ -561,14 +667,18 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { if (hw->phy.id == 0) - hw->phy.ops.identify(hw); - hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i); - hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i); - hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i); - hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i); + ixgbe_identify_phy(hw); + hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, + IXGBE_MDIO_PCS_DEV_TYPE, &i); + hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, + IXGBE_MDIO_PCS_DEV_TYPE, &i); + hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, + IXGBE_MDIO_PCS_DEV_TYPE, &i); + hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, + IXGBE_MDIO_PCS_DEV_TYPE, &i); } - return 0; + return IXGBE_SUCCESS; } /** @@ -588,20 +698,22 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, u16 offset; u16 length; + DEBUGFUNC("ixgbe_read_pba_string_generic"); + if (pba_num == NULL) { - hw_dbg(hw, "PBA string buffer was null\n"); + DEBUGOUT("PBA string buffer was null\n"); return IXGBE_ERR_INVALID_ARGUMENT; } ret_val = 
hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); + DEBUGOUT("NVM Read Error\n"); return ret_val; } ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); + DEBUGOUT("NVM Read Error\n"); return ret_val; } @@ -611,11 +723,11 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, * and we can decode it into an ascii string */ if (data != IXGBE_PBANUM_PTR_GUARD) { - hw_dbg(hw, "NVM PBA number is not stored as string\n"); + DEBUGOUT("NVM PBA number is not stored as string\n"); /* we will need 11 characters to store the PBA */ if (pba_num_size < 11) { - hw_dbg(hw, "PBA string buffer too small\n"); + DEBUGOUT("PBA string buffer too small\n"); return IXGBE_ERR_NO_SPACE; } @@ -642,23 +754,23 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, pba_num[offset] += 'A' - 0xA; } - return 0; + return IXGBE_SUCCESS; } ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); + DEBUGOUT("NVM Read Error\n"); return ret_val; } if (length == 0xFFFF || length == 0) { - hw_dbg(hw, "NVM PBA number section invalid length\n"); + DEBUGOUT("NVM PBA number section invalid length\n"); return IXGBE_ERR_PBA_SECTION; } /* check if pba_num buffer is big enough */ if (pba_num_size < (((u32)length * 2) - 1)) { - hw_dbg(hw, "PBA string buffer too small\n"); + DEBUGOUT("PBA string buffer too small\n"); return IXGBE_ERR_NO_SPACE; } @@ -669,7 +781,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, for (offset = 0; offset < length; offset++) { ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); + DEBUGOUT("NVM Read Error\n"); return ret_val; } pba_num[offset * 2] = (u8)(data >> 8); @@ -677,7 +789,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, } pba_num[offset * 2] = '\0'; - return 0; + return IXGBE_SUCCESS; } /** @@ 
-695,6 +807,8 @@ s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) u32 rar_low; u16 i; + DEBUGFUNC("ixgbe_get_mac_addr_generic"); + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); @@ -704,81 +818,102 @@ s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) for (i = 0; i < 2; i++) mac_addr[i+4] = (u8)(rar_high >> (i*8)); - return 0; + return IXGBE_SUCCESS; } -enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status) +/** + * ixgbe_set_pci_config_data_generic - Generic store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status returned by the PCI config space + * + * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure + **/ +void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status) { + struct ixgbe_mac_info *mac = &hw->mac; + + if (hw->bus.type == ixgbe_bus_type_unknown) + hw->bus.type = ixgbe_bus_type_pci_express; + switch (link_status & IXGBE_PCI_LINK_WIDTH) { case IXGBE_PCI_LINK_WIDTH_1: - return ixgbe_bus_width_pcie_x1; + hw->bus.width = ixgbe_bus_width_pcie_x1; + break; case IXGBE_PCI_LINK_WIDTH_2: - return ixgbe_bus_width_pcie_x2; + hw->bus.width = ixgbe_bus_width_pcie_x2; + break; case IXGBE_PCI_LINK_WIDTH_4: - return ixgbe_bus_width_pcie_x4; + hw->bus.width = ixgbe_bus_width_pcie_x4; + break; case IXGBE_PCI_LINK_WIDTH_8: - return ixgbe_bus_width_pcie_x8; + hw->bus.width = ixgbe_bus_width_pcie_x8; + break; default: - return ixgbe_bus_width_unknown; + hw->bus.width = ixgbe_bus_width_unknown; + break; } -} -enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status) -{ switch (link_status & IXGBE_PCI_LINK_SPEED) { case IXGBE_PCI_LINK_SPEED_2500: - return ixgbe_bus_speed_2500; + hw->bus.speed = ixgbe_bus_speed_2500; + break; case IXGBE_PCI_LINK_SPEED_5000: - return ixgbe_bus_speed_5000; + hw->bus.speed = ixgbe_bus_speed_5000; + break; case IXGBE_PCI_LINK_SPEED_8000: - return ixgbe_bus_speed_8000; + hw->bus.speed = 
ixgbe_bus_speed_8000; + break; default: - return ixgbe_bus_speed_unknown; + hw->bus.speed = ixgbe_bus_speed_unknown; + break; } + + mac->ops.set_lan_id(hw); } /** * ixgbe_get_bus_info_generic - Generic set PCI bus info * @hw: pointer to hardware structure * - * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure + * Gets the PCI bus info (speed, width, type) then calls helper function to + * store this data within the ixgbe_hw structure. **/ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) { u16 link_status; - hw->bus.type = ixgbe_bus_type_pci_express; + DEBUGFUNC("ixgbe_get_bus_info_generic"); /* Get the negotiated link width and speed from PCI config space */ - link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS); - - hw->bus.width = ixgbe_convert_bus_width(link_status); - hw->bus.speed = ixgbe_convert_bus_speed(link_status); + link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); - hw->mac.ops.set_lan_id(hw); + ixgbe_set_pci_config_data_generic(hw, link_status); - return 0; + return IXGBE_SUCCESS; } /** * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices * @hw: pointer to the HW structure * - * Determines the LAN function id by reading memory-mapped registers - * and swaps the port value if requested. + * Determines the LAN function id by reading memory-mapped registers and swaps + * the port value if requested, and set MAC instance for devices that share + * CS4227. 
**/ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) { struct ixgbe_bus_info *bus = &hw->bus; - u16 ee_ctrl_4; u32 reg; + u16 ee_ctrl_4; + + DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie"); reg = IXGBE_READ_REG(hw, IXGBE_STATUS); bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; - bus->lan_id = bus->func; + bus->lan_id = (u8)bus->func; /* check for a port swap */ - reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); + reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); if (reg & IXGBE_FACTPS_LFS) bus->func ^= 0x1; @@ -804,6 +939,8 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) u32 reg_val; u16 i; + DEBUGFUNC("ixgbe_stop_adapter_generic"); + /* * Set the adapter_stopped flag so other driver functions stop touching * the hardware @@ -811,7 +948,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) hw->adapter_stopped = true; /* Disable the receive unit */ - hw->mac.ops.disable_rx(hw); + ixgbe_disable_rx(hw); /* Clear interrupt mask to stop interrupts from being generated */ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); @@ -833,10 +970,10 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) /* flush all queues disables */ IXGBE_WRITE_FLUSH(hw); - usleep_range(1000, 2000); + msec_delay(2); /* - * Prevent the PCI-E bus from from hanging by disabling PCI-E master + * Prevent the PCI-E bus from hanging by disabling PCI-E master * access and verify no pending requests */ return ixgbe_disable_pcie_master(hw); @@ -853,7 +990,7 @@ s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; u32 led_reg, led_mode; - u16 i; + u8 i; led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); @@ -862,27 +999,25 @@ s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i); if ((led_mode & IXGBE_LED_MODE_MASK_BASE) == - IXGBE_LED_LINK_ACTIVE) { + IXGBE_LED_LINK_ACTIVE) { mac->led_link_act = i; - return 0; + return IXGBE_SUCCESS; } } - /* If LEDCTL register does not have the 
LED link active set, then use + /* + * If LEDCTL register does not have the LED link active set, then use * known MAC defaults. */ switch (hw->mac.type) { - case ixgbe_mac_x550em_a: - mac->led_link_act = 0; - break; + case ixgbe_mac_X550EM_a: case ixgbe_mac_X550EM_x: mac->led_link_act = 1; break; default: mac->led_link_act = 2; } - - return 0; + return IXGBE_SUCCESS; } /** @@ -892,14 +1027,12 @@ s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) **/ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) { - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - s32 rc; + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + s32 rc; u16 regVal; - /* following led behavior was modified by hilbert, - * to force led on through C22 MDI command. - */ - if (hw->mac.type == ixgbe_mac_x550em_a) { + DEBUGFUNC("ixgbe_led_on_generic"); + if (hw->mac.type == ixgbe_mac_X550EM_a) { /* For M88E1512, to select page 3 in register 22 */ regVal = 0x03; rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); @@ -927,18 +1060,20 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) if (rc) { hw_err(hw, "page register write failed, rc:%x\n", rc); } - } else { - if (index > 3) - return IXGBE_ERR_PARAM; - - /* To turn on the LED, set mode to ON. */ - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); } + else + { + if (index > 3) + return IXGBE_ERR_PARAM; + + /* To turn on the LED, set mode to ON. 
*/ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + } - return 0; + return IXGBE_SUCCESS; } /** @@ -949,13 +1084,12 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) { u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - s32 rc; + s32 rc; u16 regVal; - /* following led behavior was modified by hilbert, - * to force led on through C22 MDI command. - */ - if (hw->mac.type == ixgbe_mac_x550em_a) { + DEBUGFUNC("ixgbe_led_off_generic"); + + if (hw->mac.type == ixgbe_mac_X550EM_a) { /* For M88E1512, to select page 3 in register 22 */ regVal = 0x03; rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); @@ -983,18 +1117,19 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) if (rc) { hw_err(hw, "page register write failed, rc:%x\n", rc); } - } else { - if (index > 3) - return IXGBE_ERR_PARAM; - - /* To turn off the LED, set mode to OFF. */ - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - } - - return 0; + } + else + { + if (index > 3) + return IXGBE_ERR_PARAM; + + /* To turn off the LED, set mode to OFF. */ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + } + return IXGBE_SUCCESS; } /** @@ -1010,6 +1145,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) u32 eec; u16 eeprom_size; + DEBUGFUNC("ixgbe_init_eeprom_params_generic"); + if (eeprom->type == ixgbe_eeprom_uninitialized) { eeprom->type = ixgbe_eeprom_none; /* Set default semaphore delay to 10ms which is a well @@ -1022,7 +1159,7 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) * Check for EEPROM present first. 
* If not present leave as none */ - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); if (eec & IXGBE_EEC_PRES) { eeprom->type = ixgbe_eeprom_spi; @@ -1032,26 +1169,27 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) */ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = BIT(eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); } if (eec & IXGBE_EEC_ADDR_SIZE) eeprom->address_bits = 16; else eeprom->address_bits = 8; - hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n", - eeprom->type, eeprom->word_size, eeprom->address_bits); + DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: " + "%d\n", eeprom->type, eeprom->word_size, + eeprom->address_bits); } - return 0; + return IXGBE_SUCCESS; } /** * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang * @hw: pointer to hardware structure * @offset: offset within the EEPROM to write - * @words: number of words + * @words: number of word(s) * @data: 16 bit word(s) to write to EEPROM * * Reads 16 bit word(s) from EEPROM through bit-bang method @@ -1059,16 +1197,22 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { - s32 status; + s32 status = IXGBE_SUCCESS; u16 i, count; + DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic"); + hw->eeprom.ops.init_params(hw); - if (words == 0) - return IXGBE_ERR_INVALID_ARGUMENT; + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + goto out; + } - if (offset + words > hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + if (offset + words > hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } /* * The EEPROM page size cannot be queried from the chip. 
We do lazy @@ -1085,14 +1229,15 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, */ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? - IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, count, &data[i]); - if (status != 0) + if (status != IXGBE_SUCCESS) break; } +out: return status; } @@ -1106,7 +1251,7 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, * If ixgbe_eeprom_update_checksum is not called after this function, the * EEPROM will most likely contain an invalid checksum. **/ -static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, +STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status; @@ -1115,63 +1260,68 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 i; u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; + DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang"); + /* Prepare the EEPROM for writing */ status = ixgbe_acquire_eeprom(hw); - if (status) - return status; - if (ixgbe_ready_eeprom(hw) != 0) { - ixgbe_release_eeprom(hw); - return IXGBE_ERR_EEPROM; + if (status == IXGBE_SUCCESS) { + if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { + ixgbe_release_eeprom(hw); + status = IXGBE_ERR_EEPROM; + } } - for (i = 0; i < words; i++) { - ixgbe_standby_eeprom(hw); - - /* Send the WRITE ENABLE command (8 bit opcode) */ - ixgbe_shift_out_eeprom_bits(hw, - IXGBE_EEPROM_WREN_OPCODE_SPI, - IXGBE_EEPROM_OPCODE_BITS); - - ixgbe_standby_eeprom(hw); - - /* Some SPI eeproms use the 8th address bit embedded - * in the opcode - */ - if ((hw->eeprom.address_bits == 8) && - ((offset + i) >= 128)) - write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; - - /* Send the Write command (8-bit opcode + addr) */ - 
ixgbe_shift_out_eeprom_bits(hw, write_opcode, - IXGBE_EEPROM_OPCODE_BITS); - ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), - hw->eeprom.address_bits); - - page_size = hw->eeprom.word_page_size; - - /* Send the data in burst via SPI */ - do { - word = data[i]; - word = (word >> 8) | (word << 8); - ixgbe_shift_out_eeprom_bits(hw, word, 16); + if (status == IXGBE_SUCCESS) { + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); - if (page_size == 0) - break; + /* Send the WRITE ENABLE command (8 bit opcode ) */ + ixgbe_shift_out_eeprom_bits(hw, + IXGBE_EEPROM_WREN_OPCODE_SPI, + IXGBE_EEPROM_OPCODE_BITS); - /* do not wrap around page */ - if (((offset + i) & (page_size - 1)) == - (page_size - 1)) - break; - } while (++i < words); + ixgbe_standby_eeprom(hw); - ixgbe_standby_eeprom(hw); - usleep_range(10000, 20000); + /* + * Some SPI eeproms use the 8th address bit embedded + * in the opcode + */ + if ((hw->eeprom.address_bits == 8) && + ((offset + i) >= 128)) + write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + ixgbe_shift_out_eeprom_bits(hw, write_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), + hw->eeprom.address_bits); + + page_size = hw->eeprom.word_page_size; + + /* Send the data in burst via SPI*/ + do { + word = data[i]; + word = (word >> 8) | (word << 8); + ixgbe_shift_out_eeprom_bits(hw, word, 16); + + if (page_size == 0) + break; + + /* do not wrap around page */ + if (((offset + i) & (page_size - 1)) == + (page_size - 1)) + break; + } while (++i < words); + + ixgbe_standby_eeprom(hw); + msec_delay(10); + } + /* Done with writing - release the EEPROM */ + ixgbe_release_eeprom(hw); } - /* Done with writing - release the EEPROM */ - ixgbe_release_eeprom(hw); - return 0; + return status; } /** @@ -1185,36 +1335,51 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, **/ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, 
u16 offset, u16 data) { + s32 status; + + DEBUGFUNC("ixgbe_write_eeprom_generic"); + hw->eeprom.ops.init_params(hw); - if (offset >= hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); - return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); +out: + return status; } /** * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang * @hw: pointer to hardware structure * @offset: offset within the EEPROM to be read - * @words: number of word(s) * @data: read 16 bit words(s) from EEPROM + * @words: number of word(s) * * Reads 16 bit word(s) from EEPROM through bit-bang method **/ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { - s32 status; + s32 status = IXGBE_SUCCESS; u16 i, count; + DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic"); + hw->eeprom.ops.init_params(hw); - if (words == 0) - return IXGBE_ERR_INVALID_ARGUMENT; + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + goto out; + } - if (offset + words > hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + if (offset + words > hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } /* * We cannot hold synchronization semaphores for too long @@ -1223,16 +1388,17 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, */ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 
- IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, count, &data[i]); - if (status) - return status; + if (status != IXGBE_SUCCESS) + break; } - return 0; +out: + return status; } /** @@ -1244,7 +1410,7 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, * * Reads 16 bit word(s) from EEPROM through bit-bang method **/ -static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, +STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status; @@ -1252,40 +1418,45 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; u16 i; + DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang"); + /* Prepare the EEPROM for reading */ status = ixgbe_acquire_eeprom(hw); - if (status) - return status; - if (ixgbe_ready_eeprom(hw) != 0) { - ixgbe_release_eeprom(hw); - return IXGBE_ERR_EEPROM; + if (status == IXGBE_SUCCESS) { + if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { + ixgbe_release_eeprom(hw); + status = IXGBE_ERR_EEPROM; + } } - for (i = 0; i < words; i++) { - ixgbe_standby_eeprom(hw); - /* Some SPI eeproms use the 8th address bit embedded - * in the opcode - */ - if ((hw->eeprom.address_bits == 8) && - ((offset + i) >= 128)) - read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; - - /* Send the READ command (opcode + addr) */ - ixgbe_shift_out_eeprom_bits(hw, read_opcode, - IXGBE_EEPROM_OPCODE_BITS); - ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), - hw->eeprom.address_bits); + if (status == IXGBE_SUCCESS) { + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + /* + * Some SPI eeproms use the 8th address bit embedded + * in the opcode + */ + if ((hw->eeprom.address_bits == 8) && + ((offset + i) >= 128)) + read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + 
ixgbe_shift_out_eeprom_bits(hw, read_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), + hw->eeprom.address_bits); + + /* Read the data. */ + word_in = ixgbe_shift_in_eeprom_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } - /* Read the data. */ - word_in = ixgbe_shift_in_eeprom_bits(hw, 16); - data[i] = (word_in >> 8) | (word_in << 8); + /* End this read operation */ + ixgbe_release_eeprom(hw); } - /* End this read operation */ - ixgbe_release_eeprom(hw); - - return 0; + return status; } /** @@ -1299,12 +1470,21 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) { + s32 status; + + DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic"); + hw->eeprom.ops.init_params(hw); - if (offset >= hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); - return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); +out: + return status; } /** @@ -1320,16 +1500,24 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { u32 eerd; - s32 status; + s32 status = IXGBE_SUCCESS; u32 i; + DEBUGFUNC("ixgbe_read_eerd_buffer_generic"); + hw->eeprom.ops.init_params(hw); - if (words == 0) - return IXGBE_ERR_INVALID_ARGUMENT; + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); + goto out; + } - if (offset >= hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); + goto out; + } for (i = 0; i < words; i++) { eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | @@ -1338,16 +1526,16 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, 
IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); - if (status == 0) { + if (status == IXGBE_SUCCESS) { data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> IXGBE_EEPROM_RW_REG_DATA); } else { - hw_dbg(hw, "Eeprom read timed out\n"); - return status; + DEBUGOUT("Eeprom read timed out\n"); + goto out; } } - - return 0; +out: + return status; } /** @@ -1359,13 +1547,15 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, * This function is called only when we are writing a new large buffer * at given offset so the data would be overwritten anyway. **/ -static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, +STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, u16 offset) { u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; - s32 status; + s32 status = IXGBE_SUCCESS; u16 i; + DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic"); + for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) data[i] = i; @@ -1373,12 +1563,12 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, IXGBE_EEPROM_PAGE_SIZE_MAX, data); hw->eeprom.word_page_size = 0; - if (status) - return status; + if (status != IXGBE_SUCCESS) + goto out; status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); - if (status) - return status; + if (status != IXGBE_SUCCESS) + goto out; /* * When writing in burst more than the actual page size @@ -1386,9 +1576,10 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, */ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; - hw_dbg(hw, "Detected EEPROM page size = %d words.\n", - hw->eeprom.word_page_size); - return 0; + DEBUGOUT1("Detected EEPROM page size = %d words.", + hw->eeprom.word_page_size); +out: + return status; } /** @@ -1408,7 +1599,7 @@ s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR 
* @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to write - * @words: number of words + * @words: number of word(s) * @data: word(s) write to the EEPROM * * Write a 16 bit word(s) to the EEPROM using the EEWR register. @@ -1417,38 +1608,47 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { u32 eewr; - s32 status; + s32 status = IXGBE_SUCCESS; u16 i; + DEBUGFUNC("ixgbe_write_eewr_generic"); + hw->eeprom.ops.init_params(hw); - if (words == 0) - return IXGBE_ERR_INVALID_ARGUMENT; + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); + goto out; + } - if (offset >= hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); + goto out; + } for (i = 0; i < words; i++) { eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | - (data[i] << IXGBE_EEPROM_RW_REG_DATA) | - IXGBE_EEPROM_RW_REG_START; + (data[i] << IXGBE_EEPROM_RW_REG_DATA) | + IXGBE_EEPROM_RW_REG_START; status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); - if (status) { - hw_dbg(hw, "Eeprom write EEWR timed out\n"); - return status; + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Eeprom write EEWR timed out\n"); + goto out; } IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); - if (status) { - hw_dbg(hw, "Eeprom write EEWR timed out\n"); - return status; + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Eeprom write EEWR timed out\n"); + goto out; } } - return 0; +out: + return status; } /** @@ -1472,10 +1672,13 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) * Polls the status bit (bit 1) of the EERD or EEWR to determine when the * read or write is done respectively. 
**/ -static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) +s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) { u32 i; u32 reg; + s32 status = IXGBE_ERR_EEPROM; + + DEBUGFUNC("ixgbe_poll_eerd_eewr_done"); for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { if (ee_reg == IXGBE_NVM_POLL_READ) @@ -1484,11 +1687,17 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) reg = IXGBE_READ_REG(hw, IXGBE_EEWR); if (reg & IXGBE_EEPROM_RW_REG_DONE) { - return 0; + status = IXGBE_SUCCESS; + break; } - udelay(5); + usec_delay(5); } - return IXGBE_ERR_EEPROM; + + if (i == IXGBE_EERD_EEWR_ATTEMPTS) + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "EEPROM read/write done polling timed out"); + + return status; } /** @@ -1498,44 +1707,52 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) * Prepares EEPROM for access using bit-bang method. This function should * be called before issuing a command to the EEPROM. **/ -static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) +STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) { + s32 status = IXGBE_SUCCESS; u32 eec; u32 i; - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) - return IXGBE_ERR_SWFW_SYNC; + DEBUGFUNC("ixgbe_acquire_eeprom"); - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) + != IXGBE_SUCCESS) + status = IXGBE_ERR_SWFW_SYNC; - /* Request EEPROM Access */ - eec |= IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); + if (status == IXGBE_SUCCESS) { + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); - for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); - if (eec & IXGBE_EEC_GNT) - break; - udelay(5); - } + /* Request EEPROM Access */ + eec |= IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); - /* Release if grant not acquired */ - if (!(eec & IXGBE_EEC_GNT)) { - eec &= ~IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); - hw_dbg(hw, 
"Could not acquire EEPROM grant\n"); + for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + if (eec & IXGBE_EEC_GNT) + break; + usec_delay(5); + } - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - return IXGBE_ERR_EEPROM; - } + /* Release if grant not acquired */ + if (!(eec & IXGBE_EEC_GNT)) { + eec &= ~IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + DEBUGOUT("Could not acquire EEPROM grant\n"); - /* Setup EEPROM for Read/Write */ - /* Clear CS and SK */ - eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); - IXGBE_WRITE_FLUSH(hw); - udelay(1); - return 0; + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + status = IXGBE_ERR_EEPROM; + } + + /* Setup EEPROM for Read/Write */ + if (status == IXGBE_SUCCESS) { + /* Clear CS and SK */ + eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); + } + } + return status; } /** @@ -1544,73 +1761,88 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) * * Sets the hardware semaphores so EEPROM access can occur for bit-bang method **/ -static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) +STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) { + s32 status = IXGBE_ERR_EEPROM; u32 timeout = 2000; u32 i; u32 swsm; + DEBUGFUNC("ixgbe_get_eeprom_semaphore"); + /* Get SMBI software semaphore between device drivers first */ for (i = 0; i < timeout; i++) { /* * If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); - if (!(swsm & IXGBE_SWSM_SMBI)) + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (!(swsm & IXGBE_SWSM_SMBI)) { + status = IXGBE_SUCCESS; break; - usleep_range(50, 100); + } + usec_delay(50); } if (i == timeout) { - hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n"); - /* this release is particularly 
important because our attempts + DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore " + "not granted.\n"); + /* + * this release is particularly important because our attempts * above to get the semaphore may have succeeded, and if there * was a timeout, we should unconditionally clear the semaphore * bits to free the driver to make progress */ ixgbe_release_eeprom_semaphore(hw); - usleep_range(50, 100); - /* one last try + usec_delay(50); + /* + * one last try * If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); - if (swsm & IXGBE_SWSM_SMBI) { - hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n"); - return IXGBE_ERR_EEPROM; - } + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (!(swsm & IXGBE_SWSM_SMBI)) + status = IXGBE_SUCCESS; } /* Now get the semaphore between SW/FW through the SWESMBI bit */ - for (i = 0; i < timeout; i++) { - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); + if (status == IXGBE_SUCCESS) { + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); - /* Set the SW EEPROM semaphore bit to request access */ - swsm |= IXGBE_SWSM_SWESMBI; - IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); + /* Set the SW EEPROM semaphore bit to request access */ + swsm |= IXGBE_SWSM_SWESMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); - /* If we set the bit successfully then we got the - * semaphore. - */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); - if (swsm & IXGBE_SWSM_SWESMBI) - break; + /* + * If we set the bit successfully then we got the + * semaphore. 
+ */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (swsm & IXGBE_SWSM_SWESMBI) + break; - usleep_range(50, 100); + usec_delay(50); + } + + /* + * Release semaphores and return error if SW EEPROM semaphore + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "SWESMBI Software EEPROM semaphore not granted.\n"); + ixgbe_release_eeprom_semaphore(hw); + status = IXGBE_ERR_EEPROM; + } + } else { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Software semaphore SMBI between device drivers " + "not granted.\n"); } - /* Release semaphores and return error if SW EEPROM semaphore - * was not granted because we don't have access to the EEPROM - */ - if (i >= timeout) { - hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n"); - ixgbe_release_eeprom_semaphore(hw); - return IXGBE_ERR_EEPROM; - } - - return 0; + return status; } /** @@ -1619,15 +1851,17 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) * * This function clears hardware semaphore bits. 
**/ -static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) +STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) { u32 swsm; - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); + DEBUGFUNC("ixgbe_release_eeprom_semaphore"); + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); - IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); IXGBE_WRITE_FLUSH(hw); } @@ -1635,11 +1869,14 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) * ixgbe_ready_eeprom - Polls for EEPROM ready * @hw: pointer to hardware structure **/ -static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) +STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) { + s32 status = IXGBE_SUCCESS; u16 i; u8 spi_stat_reg; + DEBUGFUNC("ixgbe_ready_eeprom"); + /* * Read "Status Register" repeatedly until the LSB is cleared. The * EEPROM will signal that the command has been completed by clearing @@ -1653,41 +1890,43 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) break; - udelay(5); + usec_delay(5); ixgbe_standby_eeprom(hw); - } + }; /* * On some parts, SPI write time could vary from 0-20mSec on 3.3V * devices (and only 0-5mSec on 5V devices) */ if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { - hw_dbg(hw, "SPI EEPROM Status error\n"); - return IXGBE_ERR_EEPROM; + DEBUGOUT("SPI EEPROM Status error\n"); + status = IXGBE_ERR_EEPROM; } - return 0; + return status; } /** * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state * @hw: pointer to hardware structure **/ -static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) +STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw) { u32 eec; - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + DEBUGFUNC("ixgbe_standby_eeprom"); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); /* Toggle CS to flush commands */ eec |= IXGBE_EEC_CS; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), 
eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); IXGBE_WRITE_FLUSH(hw); - udelay(1); + usec_delay(1); eec &= ~IXGBE_EEC_CS; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); IXGBE_WRITE_FLUSH(hw); - udelay(1); + usec_delay(1); } /** @@ -1696,20 +1935,22 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) * @data: data to send to the EEPROM * @count: number of bits to shift out **/ -static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, +STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, u16 count) { u32 eec; u32 mask; u32 i; - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + DEBUGFUNC("ixgbe_shift_out_eeprom_bits"); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); /* * Mask is used to shift "count" bits of "data" out to the EEPROM * one bit at a time. Determine the starting bit based on count */ - mask = BIT(count - 1); + mask = 0x01 << (count - 1); for (i = 0; i < count; i++) { /* @@ -1724,10 +1965,10 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, else eec &= ~IXGBE_EEC_DI; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); IXGBE_WRITE_FLUSH(hw); - udelay(1); + usec_delay(1); ixgbe_raise_eeprom_clk(hw, &eec); ixgbe_lower_eeprom_clk(hw, &eec); @@ -1737,11 +1978,11 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, * EEPROM */ mask = mask >> 1; - } + }; /* We leave the "DI" bit set to "0" when we leave this routine. 
*/ eec &= ~IXGBE_EEC_DI; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); IXGBE_WRITE_FLUSH(hw); } @@ -1749,12 +1990,14 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM * @hw: pointer to hardware structure **/ -static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) +STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) { u32 eec; u32 i; u16 data = 0; + DEBUGFUNC("ixgbe_shift_in_eeprom_bits"); + /* * In order to read a register from the EEPROM, we need to shift * 'count' bits in from the EEPROM. Bits are "shifted in" by raising @@ -1762,7 +2005,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) * the value of the "DO" bit. During this "shifting in" process the * "DI" bit should always be clear. */ - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); @@ -1770,7 +2013,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) data = data << 1; ixgbe_raise_eeprom_clk(hw, &eec); - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); eec &= ~(IXGBE_EEC_DI); if (eec & IXGBE_EEC_DO) @@ -1787,16 +2030,18 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) * @hw: pointer to hardware structure * @eec: EEC register's current value **/ -static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) { + DEBUGFUNC("ixgbe_raise_eeprom_clk"); + /* * Raise the clock input to the EEPROM * (setting the SK bit), then delay */ *eec = *eec | IXGBE_EEC_SK; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec); IXGBE_WRITE_FLUSH(hw); - udelay(1); + usec_delay(1); } /** @@ -1804,53 +2049,55 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw 
*hw, u32 *eec) * @hw: pointer to hardware structure * @eecd: EECD's current value **/ -static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) { + DEBUGFUNC("ixgbe_lower_eeprom_clk"); + /* * Lower the clock input to the EEPROM (clearing the SK bit), then * delay */ *eec = *eec & ~IXGBE_EEC_SK; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec); IXGBE_WRITE_FLUSH(hw); - udelay(1); + usec_delay(1); } /** * ixgbe_release_eeprom - Release EEPROM, release semaphores * @hw: pointer to hardware structure **/ -static void ixgbe_release_eeprom(struct ixgbe_hw *hw) +STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw) { u32 eec; - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + DEBUGFUNC("ixgbe_release_eeprom"); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); eec |= IXGBE_EEC_CS; /* Pull CS high */ eec &= ~IXGBE_EEC_SK; /* Lower SCK */ - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); IXGBE_WRITE_FLUSH(hw); - udelay(1); + usec_delay(1); /* Stop requesting EEPROM access */ eec &= ~IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - /* - * Delay before attempt to obtain semaphore again to allow FW - * access. 
semaphore_delay is in ms we need us for usleep_range - */ - usleep_range(hw->eeprom.semaphore_delay * 1000, - hw->eeprom.semaphore_delay * 2000); + /* Delay before attempt to obtain semaphore again to allow FW access */ + msec_delay(hw->eeprom.semaphore_delay); } /** * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum **/ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) { @@ -1861,11 +2108,13 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) u16 pointer = 0; u16 word = 0; + DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic"); + /* Include 0x0-0x3F in the checksum */ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { if (hw->eeprom.ops.read(hw, i, &word)) { - hw_dbg(hw, "EEPROM read failed\n"); - break; + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; } checksum += word; } @@ -1873,7 +2122,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) /* Include all data from pointers except for the fw pointer */ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { if (hw->eeprom.ops.read(hw, i, &pointer)) { - hw_dbg(hw, "EEPROM read failed\n"); + DEBUGOUT("EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } @@ -1882,7 +2131,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) continue; if (hw->eeprom.ops.read(hw, pointer, &length)) { - hw_dbg(hw, "EEPROM read failed\n"); + DEBUGOUT("EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } @@ -1891,7 +2140,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) for (j = pointer + 1; j <= pointer + length; j++) { if (hw->eeprom.ops.read(hw, j, &word)) { - hw_dbg(hw, "EEPROM read failed\n"); + DEBUGOUT("EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } checksum += word; @@ -1918,14 +2167,15 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, u16 checksum; u16 read_checksum = 0; - /* - * Read the first word from the EEPROM. 
If this times out or fails, do + DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic"); + + /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); if (status) { - hw_dbg(hw, "EEPROM read failed\n"); + DEBUGOUT("EEPROM read failed\n"); return status; } @@ -1937,7 +2187,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); if (status) { - hw_dbg(hw, "EEPROM read failed\n"); + DEBUGOUT("EEPROM read failed\n"); return status; } @@ -1963,14 +2213,15 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) s32 status; u16 checksum; - /* - * Read the first word from the EEPROM. If this times out or fails, do + DEBUGFUNC("ixgbe_update_eeprom_checksum_generic"); + + /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); if (status) { - hw_dbg(hw, "EEPROM read failed\n"); + DEBUGOUT("EEPROM read failed\n"); return status; } @@ -1985,6 +2236,32 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) return status; } +/** + * ixgbe_validate_mac_addr - Validate MAC address + * @mac_addr: pointer to MAC address. + * + * Tests a MAC address to ensure it is a valid Individual Address. 
+ **/ +s32 ixgbe_validate_mac_addr(u8 *mac_addr) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_validate_mac_addr"); + + /* Make sure it is not a multicast address */ + if (IXGBE_IS_MULTICAST(mac_addr)) { + status = IXGBE_ERR_INVALID_MAC_ADDR; + /* Not a broadcast address */ + } else if (IXGBE_IS_BROADCAST(mac_addr)) { + status = IXGBE_ERR_INVALID_MAC_ADDR; + /* Reject the zero address */ + } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { + status = IXGBE_ERR_INVALID_MAC_ADDR; + } + return status; +} + /** * ixgbe_set_rar_generic - Set Rx address register * @hw: pointer to hardware structure @@ -2001,9 +2278,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 rar_low, rar_high; u32 rar_entries = hw->mac.num_rar_entries; + DEBUGFUNC("ixgbe_set_rar_generic"); + /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { - hw_dbg(hw, "RAR index %d is out of range.\n", index); + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); return IXGBE_ERR_INVALID_ARGUMENT; } @@ -2033,7 +2313,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); - return 0; + return IXGBE_SUCCESS; } /** @@ -2048,9 +2328,12 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; + DEBUGFUNC("ixgbe_clear_rar_generic"); + /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { - hw_dbg(hw, "RAR index %d is out of range.\n", index); + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); return IXGBE_ERR_INVALID_ARGUMENT; } @@ -2068,7 +2351,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) /* clear VMDq pool/queue selection for this RAR */ hw->mac.ops.clear_vmdq(hw, index, 
IXGBE_CLEAR_VMDQ_ALL); - return 0; + return IXGBE_SUCCESS; } /** @@ -2084,25 +2367,36 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) u32 i; u32 rar_entries = hw->mac.num_rar_entries; + DEBUGFUNC("ixgbe_init_rx_addrs_generic"); + /* * If the current mac address is valid, assume it is a software override * to the permanent address. * Otherwise, use the permanent address from the eeprom. */ - if (!is_valid_ether_addr(hw->mac.addr)) { + if (ixgbe_validate_mac_addr(hw->mac.addr) == + IXGBE_ERR_INVALID_MAC_ADDR) { /* Get the MAC address from the RAR0 for later reference */ hw->mac.ops.get_mac_addr(hw, hw->mac.addr); - hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr); + DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); + DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); } else { /* Setup the receive address. */ - hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); - hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); + DEBUGOUT("Overriding MAC Address in RAR[0]\n"); + DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); + DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); } - /* clear VMDq pool/queue selection for RAR 0 */ + /* clear VMDq pool/queue selection for RAR 0 */ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); hw->addr_ctrl.overflow_promisc = 0; @@ -2110,7 +2404,7 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) hw->addr_ctrl.rar_used_count = 1; /* Zero out the other receive addresses. 
*/ - hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1); + DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1); for (i = 1; i < rar_entries; i++) { IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); @@ -2120,14 +2414,116 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) hw->addr_ctrl.mta_in_use = 0; IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); - hw_dbg(hw, " Clearing MTA\n"); + DEBUGOUT(" Clearing MTA\n"); for (i = 0; i < hw->mac.mcft_size; i++) IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); - if (hw->mac.ops.init_uta_tables) - hw->mac.ops.init_uta_tables(hw); + ixgbe_init_uta_tables(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_add_uc_addr - Adds a secondary unicast address. + * @hw: pointer to hardware structure + * @addr: new address + * + * Adds it to unused receive address register or goes into promiscuous mode. + **/ +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) +{ + u32 rar_entries = hw->mac.num_rar_entries; + u32 rar; + + DEBUGFUNC("ixgbe_add_uc_addr"); + + DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + /* + * Place this address in the RAR if there is room, + * else put the controller into promiscuous mode + */ + if (hw->addr_ctrl.rar_used_count < rar_entries) { + rar = hw->addr_ctrl.rar_used_count; + hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); + DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); + hw->addr_ctrl.rar_used_count++; + } else { + hw->addr_ctrl.overflow_promisc++; + } + + DEBUGOUT("ixgbe_add_uc_addr Complete\n"); +} + +/** + * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new addresses + * @addr_count: number of addresses + * @next: iterator function to walk the address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. 
Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + * + * Drivers using secondary unicast addresses must set user_set_promisc when + * manually putting the device into promiscuous mode. + **/ +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr next) +{ + u8 *addr; + u32 i; + u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; + u32 uc_addr_in_use; + u32 fctrl; + u32 vmdq; + + DEBUGFUNC("ixgbe_update_uc_addr_list_generic"); + + /* + * Clear accounting of old secondary address list, + * don't count RAR[0] + */ + uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; + hw->addr_ctrl.rar_used_count -= uc_addr_in_use; + hw->addr_ctrl.overflow_promisc = 0; + + /* Zero out the other receive addresses */ + DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1); + for (i = 0; i < uc_addr_in_use; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0); + } + + /* Add the new addresses */ + for (i = 0; i < addr_count; i++) { + DEBUGOUT(" Adding the secondary addresses:\n"); + addr = next(hw, &addr_list, &vmdq); + ixgbe_add_uc_addr(hw, addr, vmdq); + } + + if (hw->addr_ctrl.overflow_promisc) { + /* enable promisc if not already in overflow or set by user */ + if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Entering address overflow promisc mode\n"); + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_UPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + } else { + /* only disable if set by overflow, not by user */ + if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Leaving address overflow promisc mode\n"); + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl &= ~IXGBE_FCTRL_UPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + } - return 0; + DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n"); + return IXGBE_SUCCESS; } /** @@ -2142,10 +2538,12 
@@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) * by the MO field of the MCSTCTRL. The MO field is set during initialization * to mc_filter_type. **/ -static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) +STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) { u32 vector = 0; + DEBUGFUNC("ixgbe_mta_vector"); + switch (hw->mac.mc_filter_type) { case 0: /* use bits [47:36] of the address */ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); @@ -2160,7 +2558,8 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); break; default: /* Invalid mc_filter_type */ - hw_dbg(hw, "MC filter type param set incorrectly\n"); + DEBUGOUT("MC filter type param set incorrectly\n"); + ASSERT(0); break; } @@ -2176,16 +2575,18 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) * * Sets the bit-vector in the multicast table. **/ -static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) +void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) { u32 vector; u32 vector_bit; u32 vector_reg; + DEBUGFUNC("ixgbe_set_mta"); + hw->addr_ctrl.mta_in_use++; vector = ixgbe_mta_vector(hw, mc_addr); - hw_dbg(hw, " bit-vector = 0x%03X\n", vector); + DEBUGOUT1(" bit-vector = 0x%03X\n", vector); /* * The MTA is a register array of 128 32-bit registers. 
It is treated @@ -2198,40 +2599,46 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) */ vector_reg = (vector >> 5) & 0x7F; vector_bit = vector & 0x1F; - hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit); + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); } /** * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses * @hw: pointer to hardware structure - * @netdev: pointer to net device structure + * @mc_addr_list: the list of new multicast addresses + * @mc_addr_count: number of addresses + * @next: iterator function to walk the multicast address list + * @clear: flag, when set clears the table beforehand * - * The given list replaces any existing list. Clears the MC addrs from receive - * address registers and the multicast table. Uses unused receive address - * registers for the first multicast addresses, and hashes the rest into the - * multicast table. + * When the clear flag is set, the given list replaces any existing list. + * Hashes the given addresses into the multicast table. **/ -s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, - struct net_device *netdev) +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr next, + bool clear) { - struct netdev_hw_addr *ha; u32 i; + u32 vmdq; + + DEBUGFUNC("ixgbe_update_mc_addr_list_generic"); /* * Set the new number of MC addresses that we are being requested to * use. 
*/ - hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); + hw->addr_ctrl.num_mc_addrs = mc_addr_count; hw->addr_ctrl.mta_in_use = 0; /* Clear mta_shadow */ - hw_dbg(hw, " Clearing MTA\n"); - memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + if (clear) { + DEBUGOUT(" Clearing MTA\n"); + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + } - /* Update mta shadow */ - netdev_for_each_mc_addr(ha, netdev) { - hw_dbg(hw, " Adding the multicast addresses:\n"); - ixgbe_set_mta(hw, ha->addr); + /* Update mta_shadow */ + for (i = 0; i < mc_addr_count; i++) { + DEBUGOUT(" Adding the multicast addresses:\n"); + ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); } /* Enable mta */ @@ -2243,8 +2650,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); - hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); - return 0; + DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n"); + return IXGBE_SUCCESS; } /** @@ -2257,11 +2664,13 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) { struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + DEBUGFUNC("ixgbe_enable_mc_generic"); + if (a->mta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); - return 0; + return IXGBE_SUCCESS; } /** @@ -2274,10 +2683,12 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) { struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + DEBUGFUNC("ixgbe_disable_mc_generic"); + if (a->mta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); - return 0; + return IXGBE_SUCCESS; } /** @@ -2288,23 +2699,29 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) **/ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) { + s32 ret_val = IXGBE_SUCCESS; u32 mflcn_reg, fccfg_reg; u32 reg; u32 fcrtl, fcrth; int i; - /* Validate the water mark configuration. 
*/ - if (!hw->fc.pause_time) - return IXGBE_ERR_INVALID_LINK_SETTINGS; + DEBUGFUNC("ixgbe_fc_enable_generic"); + + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) { + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } /* Low water mark of zero causes XOFF floods */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && hw->fc.high_water[i]) { if (!hw->fc.low_water[i] || hw->fc.low_water[i] >= hw->fc.high_water[i]) { - hw_dbg(hw, "Invalid water mark configuration\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; + DEBUGOUT("Invalid water mark configuration\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; } } } @@ -2360,8 +2777,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; break; default: - hw_dbg(hw, "Flow control param set incorrectly\n"); - return IXGBE_ERR_CONFIG; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; } /* Set 802.3x based flow control settings. */ @@ -2369,8 +2789,9 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); + /* Set up and enable Rx high/low water mark thresholds, enable XON. 
*/ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && hw->fc.high_water[i]) { fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; @@ -2393,12 +2814,14 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) /* Configure pause time (2 TCs per register) */ reg = hw->fc.pause_time * 0x00010001; - for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + /* Configure flow control refresh threshold value */ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - return 0; +out: + return ret_val; } /** @@ -2417,8 +2840,13 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) { - if ((!(adv_reg)) || (!(lp_reg))) + if ((!(adv_reg)) || (!(lp_reg))) { + ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED, + "Local or link partner's advertised flow control " + "settings are NULL. 
Local: %x, link partner: %x\n", + adv_reg, lp_reg); return IXGBE_ERR_FC_NOT_NEGOTIATED; + } if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { /* @@ -2430,24 +2858,24 @@ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, */ if (hw->fc.requested_mode == ixgbe_fc_full) { hw->fc.current_mode = ixgbe_fc_full; - hw_dbg(hw, "Flow Control = FULL.\n"); + DEBUGOUT("Flow Control = FULL.\n"); } else { hw->fc.current_mode = ixgbe_fc_rx_pause; - hw_dbg(hw, "Flow Control=RX PAUSE frames only\n"); + DEBUGOUT("Flow Control=RX PAUSE frames only\n"); } } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && (lp_reg & lp_sym) && (lp_reg & lp_asm)) { hw->fc.current_mode = ixgbe_fc_tx_pause; - hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); + DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { hw->fc.current_mode = ixgbe_fc_rx_pause; - hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); + DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); } else { hw->fc.current_mode = ixgbe_fc_none; - hw_dbg(hw, "Flow Control = NONE.\n"); + DEBUGOUT("Flow Control = NONE.\n"); } - return 0; + return IXGBE_SUCCESS; } /** @@ -2456,10 +2884,10 @@ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, * * Enable flow control according on 1 gig fiber. 
**/ -static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) +STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) { u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; - s32 ret_val; + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; /* * On multispeed fiber at 1g, bail out if @@ -2469,18 +2897,21 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || - (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) - return IXGBE_ERR_FC_NOT_NEGOTIATED; + (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { + DEBUGOUT("Auto-Negotiation did not complete or timed out\n"); + goto out; + } pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, - pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, - IXGBE_PCS1GANA_ASM_PAUSE, - IXGBE_PCS1GANA_SYM_PAUSE, - IXGBE_PCS1GANA_ASM_PAUSE); + pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, + IXGBE_PCS1GANA_ASM_PAUSE, + IXGBE_PCS1GANA_SYM_PAUSE, + IXGBE_PCS1GANA_ASM_PAUSE); +out: return ret_val; } @@ -2490,10 +2921,10 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) * * Enable flow control according to IEEE clause 37. 
**/ -static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) +STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) { u32 links2, anlp1_reg, autoc_reg, links; - s32 ret_val; + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; /* * On backplane, bail out if @@ -2501,13 +2932,17 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) * - we are 82599 and link partner is not AN enabled */ links = IXGBE_READ_REG(hw, IXGBE_LINKS); - if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) - return IXGBE_ERR_FC_NOT_NEGOTIATED; + if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + goto out; + } if (hw->mac.type == ixgbe_mac_82599EB) { links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); - if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) - return IXGBE_ERR_FC_NOT_NEGOTIATED; + if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { + DEBUGOUT("Link partner is not AN enabled\n"); + goto out; + } } /* * Read the 10g AN autoc and LP ability registers and resolve @@ -2520,6 +2955,7 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); +out: return ret_val; } @@ -2529,16 +2965,16 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) * * Enable flow control according to IEEE clause 37. 
**/ -static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) +STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) { u16 technology_ability_reg = 0; u16 lp_technology_ability_reg = 0; - hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &technology_ability_reg); - hw->phy.ops.read_reg(hw, MDIO_AN_LPA, - MDIO_MMD_AN, + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &lp_technology_ability_reg); return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, @@ -2560,24 +2996,29 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) ixgbe_link_speed speed; bool link_up; + DEBUGFUNC("ixgbe_fc_autoneg"); + /* * AN should have completed when the cable was plugged in. * Look for reasons to bail out. Bail out if: * - FC autoneg is disabled, or if * - link is not up. - * - * Since we're being called from an LSC, link is already known to be up. - * So use link_up_wait_to_complete=false. 
*/ - if (hw->fc.disable_fc_autoneg) + if (hw->fc.disable_fc_autoneg) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "Flow control autoneg is disabled"); goto out; + } hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (!link_up) + if (!link_up) { + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); goto out; + } switch (hw->phy.media_type) { /* Autoneg flow control on fiber adapters */ + case ixgbe_media_type_fiber_qsfp: case ixgbe_media_type_fiber: if (speed == IXGBE_LINK_SPEED_1GB_FULL) ret_val = ixgbe_fc_autoneg_fiber(hw); @@ -2599,7 +3040,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) } out: - if (ret_val == 0) { + if (ret_val == IXGBE_SUCCESS) { hw->fc.fc_was_autonegged = true; } else { hw->fc.fc_was_autonegged = false; @@ -2607,46 +3048,46 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) } } -/** +/* * ixgbe_pcie_timeout_poll - Return number of times to poll for completion * @hw: pointer to hardware structure * * System-wide timeout range is encoded in PCIe Device Control2 register. * - * Add 10% to specified maximum and return the number of times to poll for - * completion timeout, in units of 100 microsec. Never return less than - * 800 = 80 millisec. - **/ -static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) + * Add 10% to specified maximum and return the number of times to poll for + * completion timeout, in units of 100 microsec. Never return less than + * 800 = 80 millisec. 
+ */ +STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) { s16 devctl2; u32 pollcnt; - devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); + devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; switch (devctl2) { case IXGBE_PCIDEVCTRL2_65_130ms: - pollcnt = 1300; /* 130 millisec */ + pollcnt = 1300; /* 130 millisec */ break; case IXGBE_PCIDEVCTRL2_260_520ms: - pollcnt = 5200; /* 520 millisec */ + pollcnt = 5200; /* 520 millisec */ break; case IXGBE_PCIDEVCTRL2_1_2s: - pollcnt = 20000; /* 2 sec */ + pollcnt = 20000; /* 2 sec */ break; case IXGBE_PCIDEVCTRL2_4_8s: - pollcnt = 80000; /* 8 sec */ + pollcnt = 80000; /* 8 sec */ break; case IXGBE_PCIDEVCTRL2_17_34s: - pollcnt = 34000; /* 34 sec */ + pollcnt = 34000; /* 34 sec */ break; - case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ - case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ - case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ - case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ + case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ + case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ + case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ + case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ default: - pollcnt = 800; /* 80 millisec minimum */ + pollcnt = 800; /* 80 millisec minimum */ break; } @@ -2660,38 +3101,30 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) * * Disables PCI-Express master access and verifies there are no pending * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable - * bit hasn't caused the master requests to be disabled, else 0 + * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS * is returned signifying master requests disabled. 
**/ -static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) { + s32 status = IXGBE_SUCCESS; u32 i, poll; u16 value; + DEBUGFUNC("ixgbe_disable_pcie_master"); + /* Always set this bit to ensure any future transactions are blocked */ IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); - /* Poll for bit to read as set */ - for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { - if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS) - break; - usleep_range(100, 120); - } - if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) { - hw_dbg(hw, "GIO disable did not set - requesting resets\n"); - goto gio_disable_fail; - } - /* Exit if master requests are blocked */ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || - ixgbe_removed(hw->hw_addr)) - return 0; + IXGBE_REMOVED(hw->hw_addr)) + goto out; /* Poll for master request bit to clear */ for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { - udelay(100); + usec_delay(100); if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) - return 0; + goto out; } /* @@ -2702,12 +3135,11 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) * remaining completions from the PCIe bus to trickle in, and then reset * again to clear out any effects they may have had on our device. 
*/ - hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); -gio_disable_fail: + DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n"); hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; if (hw->mac.type >= ixgbe_mac_X550) - return 0; + goto out; /* * Before proceeding, make sure that the PCIe block does not have @@ -2715,16 +3147,20 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) */ poll = ixgbe_pcie_timeout_poll(hw); for (i = 0; i < poll; i++) { - udelay(100); - value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS); - if (ixgbe_removed(hw->hw_addr)) - return 0; + usec_delay(100); + value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); + if (IXGBE_REMOVED(hw->hw_addr)) + goto out; if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) - return 0; + goto out; } - hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n"); - return IXGBE_ERR_MASTER_REQUESTS_PENDING; + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "PCIe transaction pending bit also did not clear.\n"); + status = IXGBE_ERR_MASTER_REQUESTS_PENDING; + +out: + return status; } /** @@ -2743,6 +3179,8 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) u32 timeout = 200; u32 i; + DEBUGFUNC("ixgbe_acquire_swfw_sync"); + for (i = 0; i < timeout; i++) { /* * SW NVM semaphore bit is used for access to all @@ -2756,11 +3194,11 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) gssr |= swmask; IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); ixgbe_release_eeprom_semaphore(hw); - return 0; + return IXGBE_SUCCESS; } else { /* Resource is currently in use by FW or SW */ ixgbe_release_eeprom_semaphore(hw); - usleep_range(5000, 10000); + msec_delay(5); } } @@ -2768,7 +3206,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) if (gssr & (fwmask | swmask)) ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); - usleep_range(5000, 10000); + msec_delay(5); return IXGBE_ERR_SWFW_SYNC; } @@ -2785,6 +3223,8 @@ void 
ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) u32 gssr; u32 swmask = mask; + DEBUGFUNC("ixgbe_release_swfw_sync"); + ixgbe_get_eeprom_semaphore(hw); gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); @@ -2795,47 +3235,21 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) } /** - * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read - * @hw: pointer to hardware structure - * @reg_val: Value we read from AUTOC - * @locked: bool to indicate whether the SW/FW lock should be taken. Never - * true in this the generic case. - * - * The default case requires no protection so just to the register read. - **/ -s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) -{ - *locked = false; - *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); - return 0; -} - -/** - * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write - * @hw: pointer to hardware structure - * @reg_val: value to write to AUTOC - * @locked: bool to indicate whether the SW/FW lock was already taken by - * previous read. - **/ -s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) -{ - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); - return 0; -} - -/** - * ixgbe_disable_rx_buff_generic - Stops the receive data path + * ixgbe_disable_sec_rx_path_generic - Stops the receive data path * @hw: pointer to hardware structure * - * Stops the receive data path and waits for the HW to internally - * empty the Rx security block. 
+ * Stops the receive data path and waits for the HW to internally empty + * the Rx security block **/ -s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) +s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw) { #define IXGBE_MAX_SECRX_POLL 40 + int i; int secrxreg; + DEBUGFUNC("ixgbe_disable_sec_rx_path_generic"); + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); secrxreg |= IXGBE_SECRXCTRL_RX_DIS; IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); @@ -2845,33 +3259,66 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) break; else /* Use interrupt-safe sleep just in case */ - udelay(1000); + usec_delay(1000); } /* For informational purposes only */ if (i >= IXGBE_MAX_SECRX_POLL) - hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n"); + DEBUGOUT("Rx unit being enabled before security " + "path fully disabled. Continuing with init.\n"); + + return IXGBE_SUCCESS; +} + +/** + * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read + * @hw: pointer to hardware structure + * @reg_val: Value we read from AUTOC + * + * The default case requires no protection so just to the register read. + */ +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) +{ + *locked = false; + *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); + return IXGBE_SUCCESS; +} - return 0; +/** + * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write + * @hw: pointer to hardware structure + * @reg_val: value to write to AUTOC + * @locked: bool to indicate whether the SW/FW lock was already taken by + * previous read. + * + * The default case requires no protection so just to the register write. 
+ */ +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) +{ + UNREFERENCED_1PARAMETER(locked); + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); + return IXGBE_SUCCESS; } /** - * ixgbe_enable_rx_buff - Enables the receive data path + * ixgbe_enable_sec_rx_path_generic - Enables the receive data path * @hw: pointer to hardware structure * - * Enables the receive data path + * Enables the receive data path. **/ -s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) +s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw) { u32 secrxreg; + DEBUGFUNC("ixgbe_enable_sec_rx_path_generic"); + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); IXGBE_WRITE_FLUSH(hw); - return 0; + return IXGBE_SUCCESS; } /** @@ -2883,12 +3330,14 @@ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) **/ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) { + DEBUGFUNC("ixgbe_enable_rx_dma_generic"); + if (regval & IXGBE_RXCTRL_RXEN) - hw->mac.ops.enable_rx(hw); + ixgbe_enable_rx(hw); else - hw->mac.ops.disable_rx(hw); + ixgbe_disable_rx(hw); - return 0; + return IXGBE_SUCCESS; } /** @@ -2899,11 +3348,13 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) { ixgbe_link_speed speed = 0; - bool link_up = false; - u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + bool link_up = 0; + u32 autoc_reg = 0; u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + s32 ret_val = IXGBE_SUCCESS; bool locked = false; - s32 ret_val; + + DEBUGFUNC("ixgbe_blink_led_start_generic"); if (index > 3) return IXGBE_ERR_PARAM; @@ -2916,19 +3367,18 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) if (!link_up) { ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; autoc_reg |= IXGBE_AUTOC_AN_RESTART; autoc_reg |= 
IXGBE_AUTOC_FLU; ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; IXGBE_WRITE_FLUSH(hw); - - usleep_range(10000, 20000); + msec_delay(10); } led_reg &= ~IXGBE_LED_MODE_MASK(index); @@ -2936,7 +3386,8 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); - return 0; +out: + return ret_val; } /** @@ -2948,22 +3399,24 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) { u32 autoc_reg = 0; u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + s32 ret_val = IXGBE_SUCCESS; bool locked = false; - s32 ret_val; + + DEBUGFUNC("ixgbe_blink_led_stop_generic"); if (index > 3) return IXGBE_ERR_PARAM; ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; autoc_reg &= ~IXGBE_AUTOC_FLU; autoc_reg |= IXGBE_AUTOC_AN_RESTART; ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg &= ~IXGBE_LED_BLINK(index); @@ -2971,7 +3424,8 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); - return 0; +out: + return ret_val; } /** @@ -2983,20 +3437,24 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) * pointer, and returns the value at that location. This is used in both * get and set mac_addr routines. **/ -static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, - u16 *san_mac_offset) +STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, + u16 *san_mac_offset) { s32 ret_val; + DEBUGFUNC("ixgbe_get_san_mac_addr_offset"); + /* * First read the EEPROM pointer to see if the MAC addresses are * available. 
*/ ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset); - if (ret_val) - hw_err(hw, "eeprom read at offset %d failed\n", - IXGBE_SAN_MAC_ADDR_PTR); + if (ret_val) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom at offset %d failed", + IXGBE_SAN_MAC_ADDR_PTR); + } return ret_val; } @@ -3017,14 +3475,15 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) u8 i; s32 ret_val; + DEBUGFUNC("ixgbe_get_san_mac_addr_generic"); + /* * First read the EEPROM pointer to see if the MAC addresses are * available. If they're not, no point in calling set_lan_id() here. */ ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) - - goto san_mac_addr_clr; + goto san_mac_addr_out; /* make sure we know which port we need to program */ hw->mac.ops.set_lan_id(hw); @@ -3035,23 +3494,61 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) ret_val = hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); if (ret_val) { - hw_err(hw, "eeprom read at offset %d failed\n", - san_mac_offset); - goto san_mac_addr_clr; + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + san_mac_offset); + goto san_mac_addr_out; } san_mac_addr[i * 2] = (u8)(san_mac_data); san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); san_mac_offset++; } - return 0; + return IXGBE_SUCCESS; -san_mac_addr_clr: - /* No addresses available in this EEPROM. It's not necessarily an +san_mac_addr_out: + /* + * No addresses available in this EEPROM. It's not an * error though, so just wipe the local address and return. */ for (i = 0; i < 6; i++) san_mac_addr[i] = 0xFF; - return ret_val; + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Write a SAN MAC address to the EEPROM. 
+ **/ +s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + s32 ret_val; + u16 san_mac_data, san_mac_offset; + u8 i; + + DEBUGFUNC("ixgbe_set_san_mac_addr_generic"); + + /* Look for SAN mac address pointer. If not defined, return */ + ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) + return IXGBE_ERR_NO_SAN_ADDR_PTR; + + /* Make sure we know which port we need to write */ + hw->mac.ops.set_lan_id(hw); + /* Apply the port offset to the address offset */ + (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); + + for (i = 0; i < 3; i++) { + san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); + san_mac_data |= (u16)(san_mac_addr[i * 2]); + hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data); + san_mac_offset++; + } + + return IXGBE_SUCCESS; } /** @@ -3063,7 +3560,7 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) **/ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) { - u16 msix_count; + u16 msix_count = 1; u16 max_msix_count; u16 pcie_offset; @@ -3076,16 +3573,17 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; break; default: - return 1; + return msix_count; } - msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset); - if (ixgbe_removed(hw->hw_addr)) + DEBUGFUNC("ixgbe_get_pcie_msix_count_generic"); + msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset); + if (IXGBE_REMOVED(hw->hw_addr)) msix_count = 0; msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; @@ -3098,6 +3596,75 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) return msix_count; } +/** + * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address + * @hw: 
pointer to hardware structure + * @addr: Address to put into receive address register + * @vmdq: VMDq pool to assign + * + * Puts an ethernet address into a receive address register, or + * finds the rar that it is aleady in; adds to the pool list + **/ +s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) +{ + static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; + u32 first_empty_rar = NO_EMPTY_RAR_FOUND; + u32 rar; + u32 rar_low, rar_high; + u32 addr_low, addr_high; + + DEBUGFUNC("ixgbe_insert_mac_addr_generic"); + + /* swap bytes for HW little endian */ + addr_low = addr[0] | (addr[1] << 8) + | (addr[2] << 16) + | (addr[3] << 24); + addr_high = addr[4] | (addr[5] << 8); + + /* + * Either find the mac_id in rar or find the first empty space. + * rar_highwater points to just after the highest currently used + * rar in order to shorten the search. It grows when we add a new + * rar to the top. + */ + for (rar = 0; rar < hw->mac.rar_highwater; rar++) { + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + + if (((IXGBE_RAH_AV & rar_high) == 0) + && first_empty_rar == NO_EMPTY_RAR_FOUND) { + first_empty_rar = rar; + } else if ((rar_high & 0xFFFF) == addr_high) { + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar)); + if (rar_low == addr_low) + break; /* found it already in the rars */ + } + } + + if (rar < hw->mac.rar_highwater) { + /* already there so just add to the pool bits */ + ixgbe_set_vmdq(hw, rar, vmdq); + } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { + /* stick it into first empty RAR slot we found */ + rar = first_empty_rar; + ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); + } else if (rar == hw->mac.rar_highwater) { + /* add it to the top of the list and inc the highwater mark */ + ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); + hw->mac.rar_highwater++; + } else if (rar >= hw->mac.num_rar_entries) { + return IXGBE_ERR_INVALID_MAC_ADDR; + } + + /* + * If we found rar[0], make sure the default pool bit (we use pool 0) + * remains 
cleared to be sure default pool packets will get delivered + */ + if (rar == 0) + ixgbe_clear_vmdq(hw, rar, 0); + + return rar; +} + /** * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address * @hw: pointer to hardware struct @@ -3109,20 +3676,23 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) u32 mpsar_lo, mpsar_hi; u32 rar_entries = hw->mac.num_rar_entries; + DEBUGFUNC("ixgbe_clear_vmdq_generic"); + /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { - hw_dbg(hw, "RAR index %d is out of range.\n", rar); + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", rar); return IXGBE_ERR_INVALID_ARGUMENT; } mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); - if (ixgbe_removed(hw->hw_addr)) - return 0; + if (IXGBE_REMOVED(hw->hw_addr)) + goto done; if (!mpsar_lo && !mpsar_hi) - return 0; + goto done; if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { if (mpsar_lo) { @@ -3134,10 +3704,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) mpsar_hi = 0; } } else if (vmdq < 32) { - mpsar_lo &= ~BIT(vmdq); + mpsar_lo &= ~(1 << vmdq); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); } else { - mpsar_hi &= ~BIT(vmdq - 32); + mpsar_hi &= ~(1 << (vmdq - 32)); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); } @@ -3145,8 +3715,8 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0 && rar != hw->mac.san_mac_rar_index) hw->mac.ops.clear_rar(hw, rar); - - return 0; +done: + return IXGBE_SUCCESS; } /** @@ -3160,22 +3730,25 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) u32 mpsar; u32 rar_entries = hw->mac.num_rar_entries; + DEBUGFUNC("ixgbe_set_vmdq_generic"); + /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { - hw_dbg(hw, "RAR index %d is out of range.\n", rar); + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "RAR 
index %d is out of range.\n", rar); return IXGBE_ERR_INVALID_ARGUMENT; } if (vmdq < 32) { mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); - mpsar |= BIT(vmdq); + mpsar |= 1 << vmdq; IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); } else { mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); - mpsar |= BIT(vmdq - 32); + mpsar |= 1 << (vmdq - 32); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); } - return 0; + return IXGBE_SUCCESS; } /** @@ -3192,15 +3765,17 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) { u32 rar = hw->mac.san_mac_rar_index; + DEBUGFUNC("ixgbe_set_vmdq_san_mac"); + if (vmdq < 32) { - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq)); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); } else { IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32)); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); } - return 0; + return IXGBE_SUCCESS; } /** @@ -3211,10 +3786,13 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) { int i; + DEBUGFUNC("ixgbe_init_uta_tables_generic"); + DEBUGOUT(" Clearing UTA\n"); + for (i = 0; i < 128; i++) IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); - return 0; + return IXGBE_SUCCESS; } /** @@ -3225,7 +3803,7 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) * return the VLVF index where this VLAN id should be placed * **/ -static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) +s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) { s32 regindex, first_empty_slot; u32 bits; @@ -3260,17 +3838,17 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) * slot we found during our search, else error. */ if (!first_empty_slot) - hw_dbg(hw, "No space in VLVF.\n"); + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n"); - return first_empty_slot ? : IXGBE_ERR_NO_SPACE; + return first_empty_slot ? 
first_empty_slot : IXGBE_ERR_NO_SPACE; } /** * ixgbe_set_vfta_generic - Set VLAN filter table * @hw: pointer to hardware structure * @vlan: VLAN id to write to VLAN filter - * @vind: VMDq output index that maps queue to VLAN id in VFVFB - * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vind: VMDq output index that maps queue to VLAN id in VLVFB + * @vlan_on: boolean flag to turn on/off VLAN * @vlvf_bypass: boolean flag indicating updating default pool is okay * * Turn on/off specified VLAN in the VLAN filter table. @@ -3278,10 +3856,12 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, bool vlvf_bypass) { - u32 regidx, vfta_delta, vfta, bits; - s32 vlvf_index; + u32 regidx, vfta_delta, vfta; + s32 ret_val; + + DEBUGFUNC("ixgbe_set_vfta_generic"); - if ((vlan > 4095) || (vind > 63)) + if (vlan > 4095 || vind > 63) return IXGBE_ERR_PARAM; /* @@ -3297,18 +3877,62 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * bits[4-0]: which bit in the register */ regidx = vlan / 32; - vfta_delta = BIT(vlan % 32); + vfta_delta = 1 << (vlan % 32); vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); - /* vfta_delta represents the difference between the current value + /* + * vfta_delta represents the difference between the current value * of vfta and the value we want in the register. Since the diff - * is an XOR mask we can just update vfta using an XOR. + * is an XOR mask we can just update the vfta using an XOR */ vfta_delta &= vlan_on ? 
~vfta : vfta; vfta ^= vfta_delta; /* Part 2 - * If VT Mode is set + * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF + */ + ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta, + vfta, vlvf_bypass); + if (ret_val != IXGBE_SUCCESS) { + if (vlvf_bypass) + goto vfta_update; + return ret_val; + } + +vfta_update: + /* Update VFTA now that we are ready for traffic */ + if (vfta_delta) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_vlvf_generic - Set VLAN Pool Filter + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VLVFB + * @vlan_on: boolean flag to turn on/off VLAN in VLVF + * @vfta_delta: pointer to the difference between the current value of VFTA + * and the desired value + * @vfta: the desired value of the VFTA + * @vlvf_bypass: boolean flag indicating updating default pool is okay + * + * Turn on/off specified bit in VLVF table. 
+ **/ +s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, u32 *vfta_delta, u32 vfta, + bool vlvf_bypass) +{ + u32 bits; + s32 vlvf_index; + + DEBUGFUNC("ixgbe_set_vlvf_generic"); + + if (vlan > 4095 || vind > 63) + return IXGBE_ERR_PARAM; + + /* If VT Mode is set * Either vlan_on * make sure the vlan is in VLVF * set the vind bit in the matching VLVFB @@ -3316,24 +3940,21 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * clear the pool bit and possibly the vind */ if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) - goto vfta_update; + return IXGBE_SUCCESS; vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); - if (vlvf_index < 0) { - if (vlvf_bypass) - goto vfta_update; + if (vlvf_index < 0) return vlvf_index; - } bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); /* set the pool bit */ - bits |= BIT(vind % 32); + bits |= 1 << (vind % 32); if (vlan_on) goto vlvf_update; /* clear the pool bit */ - bits ^= BIT(vind % 32); + bits ^= 1 << (vind % 32); if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { @@ -3341,14 +3962,14 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * we run the risk of stray packets leaking into * the PF via the default pool */ - if (vfta_delta) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); + if (*vfta_delta) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta); /* disable VLVF and clear remaining bit from pool */ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); - return 0; + return IXGBE_SUCCESS; } /* If there are still bits set in the VLVFB registers @@ -3365,19 +3986,14 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * been cleared. This will be indicated by "bits" being * zero. 
*/ - vfta_delta = 0; + *vfta_delta = 0; vlvf_update: /* record pool change and enable VLAN ID if not already enabled */ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); -vfta_update: - /* Update VFTA now that we are ready for traffic */ - if (vfta_delta) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); - - return 0; + return IXGBE_SUCCESS; } /** @@ -3390,6 +4006,8 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) { u32 offset; + DEBUGFUNC("ixgbe_clear_vfta_generic"); + for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); @@ -3399,7 +4017,7 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0); } - return 0; + return IXGBE_SUCCESS; } /** @@ -3411,6 +4029,7 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) **/ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) { + /* Does FW say we need the fix */ if (!hw->need_crosstalk_fix) return false; @@ -3442,6 +4061,8 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, u32 links_reg, links_orig; u32 i; + DEBUGFUNC("ixgbe_check_mac_link_generic"); + /* If Crosstalk fix enabled do the sanity check of making sure * the SFP+ cage is full. 
*/ @@ -3454,7 +4075,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, IXGBE_ESDP_SDP2; break; case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & IXGBE_ESDP_SDP0; break; @@ -3467,7 +4088,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, if (!sfp_cage_full) { *link_up = false; *speed = IXGBE_LINK_SPEED_UNKNOWN; - return 0; + return IXGBE_SUCCESS; } } @@ -3477,19 +4098,19 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_orig != links_reg) { - hw_dbg(hw, "LINKS changed from %08X to %08X\n", - links_orig, links_reg); + DEBUGOUT2("LINKS changed from %08X to %08X\n", + links_orig, links_reg); } if (link_up_wait_to_complete) { - for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + for (i = 0; i < hw->mac.max_link_up_time; i++) { if (links_reg & IXGBE_LINKS_UP) { *link_up = true; break; } else { *link_up = false; } - msleep(100); + msec_delay(100); links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); } } else { @@ -3501,21 +4122,21 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, switch (links_reg & IXGBE_LINKS_SPEED_82599) { case IXGBE_LINKS_SPEED_10G_82599: - if ((hw->mac.type >= ixgbe_mac_X550) && - (links_reg & IXGBE_LINKS_SPEED_NON_STD)) - *speed = IXGBE_LINK_SPEED_2_5GB_FULL; - else - *speed = IXGBE_LINK_SPEED_10GB_FULL; + *speed = IXGBE_LINK_SPEED_10GB_FULL; + if (hw->mac.type >= ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } break; case IXGBE_LINKS_SPEED_1G_82599: *speed = IXGBE_LINK_SPEED_1GB_FULL; break; case IXGBE_LINKS_SPEED_100_82599: - if ((hw->mac.type >= ixgbe_mac_X550) && - (links_reg & IXGBE_LINKS_SPEED_NON_STD)) - *speed = IXGBE_LINK_SPEED_5GB_FULL; - else - *speed = IXGBE_LINK_SPEED_100_FULL; + *speed = IXGBE_LINK_SPEED_100_FULL; + if (hw->mac.type 
== ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } break; case IXGBE_LINKS_SPEED_10_X550EM_A: *speed = IXGBE_LINK_SPEED_UNKNOWN; @@ -3528,7 +4149,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, *speed = IXGBE_LINK_SPEED_UNKNOWN; } - return 0; + return IXGBE_SUCCESS; } /** @@ -3542,11 +4163,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, * block to check the support for the alternative WWNN/WWPN prefix support. **/ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, - u16 *wwpn_prefix) + u16 *wwpn_prefix) { u16 offset, caps; u16 alt_san_mac_blk_offset; + DEBUGFUNC("ixgbe_get_wwn_prefix_generic"); + /* clear output first */ *wwnn_prefix = 0xFFFF; *wwpn_prefix = 0xFFFF; @@ -3558,29 +4181,82 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, if ((alt_san_mac_blk_offset == 0) || (alt_san_mac_blk_offset == 0xFFFF)) - return 0; + goto wwn_prefix_out; /* check capability in alternative san mac address block */ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; if (hw->eeprom.ops.read(hw, offset, &caps)) goto wwn_prefix_err; if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) - return 0; + goto wwn_prefix_out; /* get the corresponding prefix for WWNN/WWPN */ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; - if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) - hw_err(hw, "eeprom read at offset %d failed\n", offset); + if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", offset); + } offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) goto wwn_prefix_err; - return 0; +wwn_prefix_out: + return IXGBE_SUCCESS; wwn_prefix_err: - hw_err(hw, "eeprom read at offset %d failed\n", offset); - return 0; + 
ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", offset); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM + * @hw: pointer to hardware structure + * @bs: the fcoe boot status + * + * This function will read the FCOE boot status from the iSCSI FCOE block + **/ +s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs) +{ + u16 offset, caps, flags; + s32 status; + + DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic"); + + /* clear output first */ + *bs = ixgbe_fcoe_bootstatus_unavailable; + + /* check if FCOE IBA block is present */ + offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR; + status = hw->eeprom.ops.read(hw, offset, &caps); + if (status != IXGBE_SUCCESS) + goto out; + + if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE)) + goto out; + + /* check if iSCSI FCOE block is populated */ + status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset); + if (status != IXGBE_SUCCESS) + goto out; + + if ((offset == 0) || (offset == 0xFFFF)) + goto out; + + /* read fcoe flags in iSCSI FCOE block */ + offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET; + status = hw->eeprom.ops.read(hw, offset, &flags); + if (status != IXGBE_SUCCESS) + goto out; + + if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE) + *bs = ixgbe_fcoe_bootstatus_enabled; + else + *bs = ixgbe_fcoe_bootstatus_disabled; + +out: + return status; } /** @@ -3601,9 +4277,9 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) - pfvfspoof |= BIT(vf_target_shift); + pfvfspoof |= (1 << vf_target_shift); else - pfvfspoof &= ~BIT(vf_target_shift); + pfvfspoof &= ~(1 << vf_target_shift); IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } @@ -3611,7 +4287,7 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing * @hw: pointer to hardware structure * 
@enable: enable or disable switch for VLAN anti-spoofing - * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing * **/ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) @@ -3625,9 +4301,9 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) - pfvfspoof |= BIT(vf_target_shift); + pfvfspoof |= (1 << vf_target_shift); else - pfvfspoof &= ~BIT(vf_target_shift); + pfvfspoof &= ~(1 << vf_target_shift); IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } @@ -3641,82 +4317,17 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) **/ s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) { - hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); - - return 0; -} + DEBUGFUNC("ixgbe_get_device_caps_generic"); -/** - * ixgbe_set_rxpba_generic - Initialize RX packet buffer - * @hw: pointer to hardware structure - * @num_pb: number of packet buffers to allocate - * @headroom: reserve n KB of headroom - * @strategy: packet buffer allocation strategy - **/ -void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, - int num_pb, - u32 headroom, - int strategy) -{ - u32 pbsize = hw->mac.rx_pb_size; - int i = 0; - u32 rxpktsize, txpktsize, txpbthresh; - - /* Reserve headroom */ - pbsize -= headroom; - - if (!num_pb) - num_pb = 1; - - /* Divide remaining packet buffer space amongst the number - * of packet buffers requested using supplied strategy. - */ - switch (strategy) { - case (PBA_STRATEGY_WEIGHTED): - /* pba_80_48 strategy weight first half of packet buffer with - * 5/8 of the packet buffer space. 
- */ - rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8)); - pbsize -= rxpktsize * (num_pb / 2); - rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; - for (; i < (num_pb / 2); i++) - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); - /* fall through - configure remaining packet buffers */ - case (PBA_STRATEGY_EQUAL): - /* Divide the remaining Rx packet buffer evenly among the TCs */ - rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; - for (; i < num_pb; i++) - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); - break; - default: - break; - } - - /* - * Setup Tx packet buffer and threshold equally for all TCs - * TXPBTHRESH register is set in K so divide by 1024 and subtract - * 10 since the largest packet we support is just over 9K. - */ - txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; - txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; - for (i = 0; i < num_pb; i++) { - IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); - IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); - } + hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); - /* Clear unused TCs, if any, to zero buffer size*/ - for (; i < IXGBE_MAX_PB; i++) { - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); - } + return IXGBE_SUCCESS; } /** * ixgbe_calculate_checksum - Calculate checksum for buffer * @buffer: pointer to EEPROM * @length: size of EEPROM to calculate a checksum for - * * Calculates the checksum for some buffer on a specified length. The * checksum calculated is returned. **/ @@ -3725,6 +4336,8 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) u32 i; u8 sum = 0; + DEBUGFUNC("ixgbe_calculate_checksum"); + if (!buffer) return 0; @@ -3741,7 +4354,7 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) * @length: length of buffer, must be multiple of 4 bytes * @timeout: time in ms to wait for command completion * - * Communicates with the manageability block. 
On success return 0 + * Communicates with the manageability block. On success return IXGBE_SUCCESS * else returns semaphore error when encountering an error acquiring * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. * @@ -3754,8 +4367,10 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, u32 hicr, i, fwsts; u16 dword_len; + DEBUGFUNC("ixgbe_hic_unlocked"); + if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { - hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); + DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } @@ -3766,13 +4381,13 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, /* Check that the host interface is enabled. */ hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if (!(hicr & IXGBE_HICR_EN)) { - hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); + DEBUGOUT("IXGBE_HOST_EN bit disabled.\n"); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } /* Calculate length in DWORDs. We must be DWORD aligned */ if (length % sizeof(u32)) { - hw_dbg(hw, "Buffer length failure, not aligned to dword"); + DEBUGOUT("Buffer length failure, not aligned to dword"); return IXGBE_ERR_INVALID_ARGUMENT; } @@ -3783,7 +4398,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, */ for (i = 0; i < dword_len; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, - i, cpu_to_le32(buffer[i])); + i, IXGBE_CPU_TO_LE32(buffer[i])); /* Setting this bit tells the ARC that a new command is pending. */ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); @@ -3792,51 +4407,54 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if (!(hicr & IXGBE_HICR_C)) break; - usleep_range(1000, 2000); + msec_delay(1); } - /* Check command successful completion. 
*/ + /* Check command completion */ if ((timeout && i == timeout) || - !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) + !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { + ERROR_REPORT1(IXGBE_ERROR_CAUTION, + "Command has failed with no status valid.\n"); return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } - return 0; + return IXGBE_SUCCESS; } /** * ixgbe_host_interface_command - Issue command to manageability block * @hw: pointer to the HW structure * @buffer: contains the command to write and where the return status will - * be placed + * be placed * @length: length of buffer, must be multiple of 4 bytes * @timeout: time in ms to wait for command completion * @return_data: read and return data from the buffer (true) or not (false) - * Needed because FW structures are big endian and decoding of - * these fields can be 8 bit or 16 bit based on command. Decoding - * is not easily understood without making a table of commands. - * So we will leave this up to the caller to read back the data - * in these cases. + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. * - * Communicates with the manageability block. On success return 0 - * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. + * Communicates with the manageability block. On success return IXGBE_SUCCESS + * else returns semaphore error when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
**/ -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, - u32 length, u32 timeout, - bool return_data) +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data) { u32 hdr_size = sizeof(struct ixgbe_hic_hdr); - union { - struct ixgbe_hic_hdr hdr; - u32 u32arr[1]; - } *bp = buffer; - u16 buf_len, dword_len; + u16 dword_len; + u16 buf_len; s32 status; u32 bi; - if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { - hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); + DEBUGFUNC("ixgbe_host_interface_command"); + + if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + /* Take management host interface semaphore */ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); if (status) @@ -3854,17 +4472,17 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, /* first pull in the header so we know the buffer length */ for (bi = 0; bi < dword_len; bi++) { - bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); - le32_to_cpus(&bp->u32arr[bi]); + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); + IXGBE_LE32_TO_CPUS(&buffer[bi]); } /* If there is any thing in data position pull it in */ - buf_len = bp->hdr.buf_len; + buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len; if (!buf_len) goto rel_out; - if (length < round_up(buf_len, 4) + hdr_size) { - hw_dbg(hw, "Buffer not large enough for reply message.\n"); + if (length < buf_len + hdr_size) { + DEBUGOUT("Buffer not large enough for reply message.\n"); status = IXGBE_ERR_HOST_INTERFACE_COMMAND; goto rel_out; } @@ -3874,8 +4492,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, /* Pull in the rest of the buffer (bi is where we left off) */ for (; bi <= dword_len; bi++) { - bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); - 
le32_to_cpus(&bp->u32arr[bi]); + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); + IXGBE_LE32_TO_CPUS(&buffer[bi]); } rel_out: @@ -3891,47 +4509,48 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, * @min: driver version minor number * @build: driver version build number * @sub: driver version sub build number - * @len: length of driver_ver string - * @driver_ver: driver string * * Sends driver version number to firmware through the manageability - * block. On success return 0 + * block. On success return IXGBE_SUCCESS * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. **/ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 sub, __always_unused u16 len, - __always_unused const char *driver_ver) + u8 build, u8 sub, u16 len, + const char *driver_ver) { struct ixgbe_hic_drv_info fw_cmd; int i; - s32 ret_val; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_set_fw_drv_ver_generic"); + UNREFERENCED_2PARAMETER(len, driver_ver); fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; - fw_cmd.port_num = hw->bus.func; + fw_cmd.port_num = (u8)hw->bus.func; fw_cmd.ver_maj = maj; fw_cmd.ver_min = min; fw_cmd.ver_build = build; fw_cmd.ver_sub = sub; fw_cmd.hdr.checksum = 0; - fw_cmd.pad = 0; - fw_cmd.pad2 = 0; fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + fw_cmd.pad = 0; + fw_cmd.pad2 = 0; for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { - ret_val = ixgbe_host_interface_command(hw, &fw_cmd, + ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, sizeof(fw_cmd), IXGBE_HI_COMMAND_TIMEOUT, true); - if (ret_val != 0) + if (ret_val != IXGBE_SUCCESS) continue; if (fw_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) - ret_val = 0; + ret_val = IXGBE_SUCCESS; else ret_val = 
IXGBE_ERR_HOST_INTERFACE_COMMAND; @@ -3941,6 +4560,65 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, return ret_val; } +/** + * ixgbe_set_rxpba_generic - Initialize Rx packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, + int strategy) +{ + u32 pbsize = hw->mac.rx_pb_size; + int i = 0; + u32 rxpktsize, txpktsize, txpbthresh; + + /* Reserve headroom */ + pbsize -= headroom; + + if (!num_pb) + num_pb = 1; + + /* Divide remaining packet buffer space amongst the number of packet + * buffers requested using supplied strategy. + */ + switch (strategy) { + case PBA_STRATEGY_WEIGHTED: + /* ixgbe_dcb_pba_80_48 strategy weight first half of packet + * buffer with 5/8 of the packet buffer space. + */ + rxpktsize = (pbsize * 5) / (num_pb * 4); + pbsize -= rxpktsize * (num_pb / 2); + rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; + for (; i < (num_pb / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + /* fall through - configure remaining packet buffers */ + case PBA_STRATEGY_EQUAL: + rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; + for (; i < num_pb; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + break; + default: + break; + } + + /* Only support an equally distributed Tx packet buffer strategy. 
*/ + txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; + txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; + for (i = 0; i < num_pb; i++) { + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); + } + + /* Clear unused TCs, if any, to zero buffer size*/ + for (; i < IXGBE_MAX_PB; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); + } +} + /** * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo * @hw: pointer to the hardware structure @@ -3969,23 +4647,25 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); - /* wait for a last completion before clearing buffers */ + /* Wait for a last completion before clearing buffers */ IXGBE_WRITE_FLUSH(hw); - usleep_range(3000, 6000); + msec_delay(3); - /* Before proceeding, make sure that the PCIe block does not have + /* + * Before proceeding, make sure that the PCIe block does not have * transactions pending. 
*/ poll = ixgbe_pcie_timeout_poll(hw); for (i = 0; i < poll; i++) { - usleep_range(100, 200); - value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS); - if (ixgbe_removed(hw->hw_addr)) - break; + usec_delay(100); + value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); + if (IXGBE_REMOVED(hw->hw_addr)) + goto out; if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) - break; + goto out; } +out: /* initiate cleaning flow for buffers in the PCIe transaction layer */ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, @@ -3993,92 +4673,82 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) /* Flush all writes and allow 20usec for all transactions to clear */ IXGBE_WRITE_FLUSH(hw); - udelay(20); + usec_delay(20); /* restore previous register values */ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); } -static const u8 ixgbe_emc_temp_data[4] = { +STATIC const u8 ixgbe_emc_temp_data[4] = { IXGBE_EMC_INTERNAL_DATA, IXGBE_EMC_DIODE1_DATA, IXGBE_EMC_DIODE2_DATA, IXGBE_EMC_DIODE3_DATA }; -static const u8 ixgbe_emc_therm_limit[4] = { +STATIC const u8 ixgbe_emc_therm_limit[4] = { IXGBE_EMC_INTERNAL_THERM_LIMIT, IXGBE_EMC_DIODE1_THERM_LIMIT, IXGBE_EMC_DIODE2_THERM_LIMIT, IXGBE_EMC_DIODE3_THERM_LIMIT }; -/** - * ixgbe_get_ets_data - Extracts the ETS bit data - * @hw: pointer to hardware structure - * @ets_cfg: extected ETS data - * @ets_offset: offset of ETS data - * - * Returns error code. 
- **/ -static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg, - u16 *ets_offset) -{ - s32 status; - - status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset); - if (status) - return status; - - if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) - return IXGBE_NOT_IMPLEMENTED; - - status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg); - if (status) - return status; - - if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) - return IXGBE_NOT_IMPLEMENTED; - - return 0; -} - /** * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data * @hw: pointer to hardware structure + * @data: pointer to the thermal sensor data structure * * Returns the thermal sensor data structure **/ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) { - s32 status; + s32 status = IXGBE_SUCCESS; u16 ets_offset; u16 ets_cfg; u16 ets_sensor; u8 num_sensors; + u8 sensor_index; + u8 sensor_location; u8 i; struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; - /* Only support thermal sensors attached to physical port 0 */ - if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) - return IXGBE_NOT_IMPLEMENTED; + DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic"); - status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); + /* Only support thermal sensors attached to 82599 physical port 0 */ + if ((hw->mac.type != ixgbe_mac_82599EB) || + (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + + status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset); if (status) - return status; + goto out; + + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + + status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg); + if (status) + goto out; + + if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT) + != IXGBE_ETS_TYPE_EMC) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } num_sensors = (ets_cfg & 
IXGBE_ETS_NUM_SENSORS_MASK); if (num_sensors > IXGBE_MAX_SENSORS) num_sensors = IXGBE_MAX_SENSORS; for (i = 0; i < num_sensors; i++) { - u8 sensor_index; - u8 sensor_location; - status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor); if (status) - return status; + goto out; sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> IXGBE_ETS_DATA_INDEX_SHIFT); @@ -4091,55 +4761,67 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) IXGBE_I2C_THERMAL_SENSOR_ADDR, &data->sensor[i].temp); if (status) - return status; + goto out; } } - - return 0; +out: + return status; } /** - * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds - * @hw: pointer to hardware structure + * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds + * @hw: pointer to hardware structure * - * Inits the thermal sensor thresholds according to the NVM map - * and save off the threshold and location values into mac.thermal_sensor_data + * Inits the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data **/ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) { - s32 status; + s32 status = IXGBE_SUCCESS; + u16 offset; u16 ets_offset; u16 ets_cfg; u16 ets_sensor; u8 low_thresh_delta; u8 num_sensors; + u8 sensor_index; + u8 sensor_location; u8 therm_limit; u8 i; struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic"); + memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data)); - /* Only support thermal sensors attached to physical port 0 */ - if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) + /* Only support thermal sensors attached to 82599 physical port 0 */ + if ((hw->mac.type != ixgbe_mac_82599EB) || + (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) return IXGBE_NOT_IMPLEMENTED; - status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); - 
if (status) - return status; + offset = IXGBE_ETS_CFG; + if (hw->eeprom.ops.read(hw, offset, &ets_offset)) + goto eeprom_err; + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return IXGBE_NOT_IMPLEMENTED; + + offset = ets_offset; + if (hw->eeprom.ops.read(hw, offset, &ets_cfg)) + goto eeprom_err; + if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT) + != IXGBE_ETS_TYPE_EMC) + return IXGBE_NOT_IMPLEMENTED; low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >> IXGBE_ETS_LTHRES_DELTA_SHIFT); num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); - if (num_sensors > IXGBE_MAX_SENSORS) - num_sensors = IXGBE_MAX_SENSORS; for (i = 0; i < num_sensors; i++) { - u8 sensor_index; - u8 sensor_location; - - if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) { - hw_err(hw, "eeprom read at offset %d failed\n", - ets_offset + 1 + i); + offset = ets_offset + 1 + i; + if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + offset); continue; } sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> @@ -4152,26 +4834,158 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) ixgbe_emc_therm_limit[sensor_index], IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit); - if (sensor_location == 0) - continue; + if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) { + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - + low_thresh_delta; + } + } + return status; - data->sensor[i].location = sensor_location; - data->sensor[i].caution_thresh = therm_limit; - data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta; +eeprom_err: + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", offset); + return IXGBE_NOT_IMPLEMENTED; +} + +/** + * ixgbe_get_orom_version - Return option ROM from EEPROM + * + * @hw: pointer to hardware structure + * @nvm_ver: 
pointer to output structure + * + * if valid option ROM version, nvm_ver->or_valid set to true + * else nvm_ver->or_valid is false. + **/ +void ixgbe_get_orom_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver) +{ + u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl; + + nvm_ver->or_valid = false; + /* Option Rom may or may not be present. Start with pointer */ + hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset); + + /* make sure offset is valid */ + if ((offset == 0x0) || (offset == NVM_INVALID_PTR)) + return; + + hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh); + hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl); + + /* option rom exists and is valid */ + if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 || + eeprom_cfg_blkl == NVM_VER_INVALID || + eeprom_cfg_blkh == NVM_VER_INVALID) + return; + + nvm_ver->or_valid = true; + nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT; + nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) | + (eeprom_cfg_blkh >> NVM_OROM_SHIFT); + nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK; +} + +/** + * ixgbe_get_oem_prod_version - Return OEM Product version + * + * @hw: pointer to hardware structure + * @nvm_ver: pointer to output structure + * + * if valid OEM product version, nvm_ver->oem_valid set to true + * else nvm_ver->oem_valid is false. 
+ **/ +void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver) +{ + u16 rel_num, prod_ver, mod_len, cap, offset; + + nvm_ver->oem_valid = false; + hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset); + + /* Return is offset to OEM Product Version block is invalid */ + if (offset == 0x0 && offset == NVM_INVALID_PTR) + return; + + /* Read product version block */ + hw->eeprom.ops.read(hw, offset, &mod_len); + hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap); + + /* Return if OEM product version block is invalid */ + if (mod_len != NVM_OEM_PROD_VER_MOD_LEN || + (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0) + return; + + hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver); + hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num); + + /* Return if version is invalid */ + if ((rel_num | prod_ver) == 0x0 || + rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID) + return; + + nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT; + nvm_ver->oem_minor = prod_ver & NVM_VER_MASK; + nvm_ver->oem_release = rel_num; + nvm_ver->oem_valid = true; +} + +/** + * ixgbe_get_etk_id - Return Etrack ID from EEPROM + * + * @hw: pointer to hardware structure + * @nvm_ver: pointer to output structure + * + * word read errors will return 0xFFFF + **/ +void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver) +{ + u16 etk_id_l, etk_id_h; + + if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l)) + etk_id_l = NVM_VER_INVALID; + if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h)) + etk_id_h = NVM_VER_INVALID; + + /* The word order for the version format is determined by high order + * word bit 15. 
+ */ + if ((etk_id_h & NVM_ETK_VALID) == 0) { + nvm_ver->etk_id = etk_id_h; + nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT); + } else { + nvm_ver->etk_id = etk_id_l; + nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT); } +} - return 0; +/** + * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg + * @hw: pointer to hardware structure + * @map: pointer to u8 arr for returning map + * + * Read the rtrup2tc HW register and resolve its content into map + **/ +void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map) +{ + u32 reg, i; + + reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) + map[i] = IXGBE_RTRUP2TC_UP_MASK & + (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); + return; } void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) { + u32 pfdtxgswc; u32 rxctrl; rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); if (rxctrl & IXGBE_RXCTRL_RXEN) { if (hw->mac.type != ixgbe_mac_82598EB) { - u32 pfdtxgswc; - pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; @@ -4188,6 +5002,7 @@ void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) { + u32 pfdtxgswc; u32 rxctrl; rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); @@ -4195,8 +5010,6 @@ void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) if (hw->mac.type != ixgbe_mac_82598EB) { if (hw->mac.set_lben) { - u32 pfdtxgswc; - pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); @@ -4205,9 +5018,10 @@ void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) } } -/** ixgbe_mng_present - returns true when management capability is present +/** + * ixgbe_mng_present - returns true when management capability is present * @hw: pointer to hardware structure - **/ + */ bool ixgbe_mng_present(struct ixgbe_hw *hw) { u32 fwsm; @@ -4215,11 +5029,38 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw) if (hw->mac.type < 
ixgbe_mac_82599EB) return false; - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); fwsm &= IXGBE_FWSM_MODE_MASK; return fwsm == IXGBE_FWSM_FW_MODE_PT; } +/** + * ixgbe_mng_enabled - Is the manageability engine enabled? + * @hw: pointer to hardware structure + * + * Returns true if the manageability engine is enabled. + **/ +bool ixgbe_mng_enabled(struct ixgbe_hw *hw) +{ + u32 fwsm, manc, factps; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); + if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) + return false; + + manc = IXGBE_READ_REG(hw, IXGBE_MANC); + if (!(manc & IXGBE_MANC_RCV_TCO_EN)) + return false; + + if (hw->mac.type <= ixgbe_mac_X540) { + factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); + if (factps & IXGBE_FACTPS_MNGCG) + return false; + } + + return true; +} + /** * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed * @hw: pointer to hardware structure @@ -4227,21 +5068,23 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw) * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the MAC and/or PHY register and restarts link. 
- */ + **/ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; - s32 status = 0; + s32 status = IXGBE_SUCCESS; u32 speedcnt = 0; u32 i = 0; bool autoneg, link_up = false; + DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber"); + /* Mask off requested but non-supported speeds */ - status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg); - if (status) + status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg); + if (status != IXGBE_SUCCESS) return status; speed &= link_speed; @@ -4256,29 +5099,28 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, /* Set the module link speed */ switch (hw->phy.media_type) { case ixgbe_media_type_fiber: - hw->mac.ops.set_rate_select_speed(hw, + ixgbe_set_rate_select_speed(hw, IXGBE_LINK_SPEED_10GB_FULL); break; case ixgbe_media_type_fiber_qsfp: /* QSFP module automatically detects MAC link speed */ break; default: - hw_dbg(hw, "Unexpected media type\n"); + DEBUGOUT("Unexpected media type.\n"); break; } /* Allow module to change analog characteristics (1G->10G) */ - msleep(40); + msec_delay(40); - status = hw->mac.ops.setup_mac_link(hw, - IXGBE_LINK_SPEED_10GB_FULL, - autoneg_wait_to_complete); - if (status) + status = ixgbe_setup_mac_link(hw, + IXGBE_LINK_SPEED_10GB_FULL, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) return status; /* Flap the Tx laser if it has not already been done */ - if (hw->mac.ops.flap_tx_laser) - hw->mac.ops.flap_tx_laser(hw); + ixgbe_flap_tx_laser(hw); /* Wait for the controller to acquire link. 
Per IEEE 802.3ap, * Section 73.10.2, we may have to wait up to 500ms if KR is @@ -4286,12 +5128,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, */ for (i = 0; i < 5; i++) { /* Wait for the link partner to also set speed */ - msleep(100); + msec_delay(100); /* If we have link, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, - &link_up, false); - if (status) + status = ixgbe_check_link(hw, &link_speed, + &link_up, false); + if (status != IXGBE_SUCCESS) return status; if (link_up) @@ -4307,37 +5149,35 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, /* Set the module link speed */ switch (hw->phy.media_type) { case ixgbe_media_type_fiber: - hw->mac.ops.set_rate_select_speed(hw, - IXGBE_LINK_SPEED_1GB_FULL); + ixgbe_set_rate_select_speed(hw, + IXGBE_LINK_SPEED_1GB_FULL); break; case ixgbe_media_type_fiber_qsfp: /* QSFP module automatically detects link speed */ break; default: - hw_dbg(hw, "Unexpected media type\n"); + DEBUGOUT("Unexpected media type.\n"); break; } /* Allow module to change analog characteristics (10G->1G) */ - msleep(40); + msec_delay(40); - status = hw->mac.ops.setup_mac_link(hw, - IXGBE_LINK_SPEED_1GB_FULL, - autoneg_wait_to_complete); - if (status) + status = ixgbe_setup_mac_link(hw, + IXGBE_LINK_SPEED_1GB_FULL, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) return status; /* Flap the Tx laser if it has not already been done */ - if (hw->mac.ops.flap_tx_laser) - hw->mac.ops.flap_tx_laser(hw); + ixgbe_flap_tx_laser(hw); /* Wait for the link partner to also set speed */ - msleep(100); + msec_delay(100); /* If we have link, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, &link_up, - false); - if (status) + status = ixgbe_check_link(hw, &link_speed, &link_up, false); + if (status != IXGBE_SUCCESS) return status; if (link_up) @@ -4374,7 +5214,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, * Set module link speed via the soft rate select. 
*/ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, - ixgbe_link_speed speed) + ixgbe_link_speed speed) { s32 status; u8 rs, eeprom_data; @@ -4388,7 +5228,7 @@ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, rs = IXGBE_SFF_SOFT_RS_SELECT_1G; break; default: - hw_dbg(hw, "Invalid fixed module speed\n"); + DEBUGOUT("Invalid fixed module speed\n"); return; } @@ -4397,8 +5237,8 @@ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, IXGBE_I2C_EEPROM_DEV_ADDR2, &eeprom_data); if (status) { - hw_dbg(hw, "Failed to read Rx Rate Select RS0\n"); - return; + DEBUGOUT("Failed to read Rx Rate Select RS0\n"); + goto out; } eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; @@ -4407,8 +5247,8 @@ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, IXGBE_I2C_EEPROM_DEV_ADDR2, eeprom_data); if (status) { - hw_dbg(hw, "Failed to write Rx Rate Select RS0\n"); - return; + DEBUGOUT("Failed to write Rx Rate Select RS0\n"); + goto out; } /* Set RS1 */ @@ -4416,8 +5256,8 @@ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, IXGBE_I2C_EEPROM_DEV_ADDR2, &eeprom_data); if (status) { - hw_dbg(hw, "Failed to read Rx Rate Select RS1\n"); - return; + DEBUGOUT("Failed to read Rx Rate Select RS1\n"); + goto out; } eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; @@ -4426,7 +5266,9 @@ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, IXGBE_I2C_EEPROM_DEV_ADDR2, eeprom_data); if (status) { - hw_dbg(hw, "Failed to write Rx Rate Select RS1\n"); - return; + DEBUGOUT("Failed to write Rx Rate Select RS1\n"); + goto out; } +out: + return; } diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.h index e083732adf64..4b657305974d 100644 --- 
a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.h +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.h @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
@@ -30,7 +26,8 @@ #define _IXGBE_COMMON_H_ #include "ixgbe_type.h" -#include "ixgbe.h" + +void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map); u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); @@ -41,9 +38,8 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size); s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); -enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status); -enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status); s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); +void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status); void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); @@ -69,64 +65,84 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, u16 *checksum_val); s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 enable_addr); s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); -s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, - struct net_device *netdev); +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, + ixgbe_mc_addr_itr func, bool clear); +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func); s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); -s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); -s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 
+s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw); +s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw); + s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); -s32 ixgbe_setup_fc_generic(struct ixgbe_hw *); bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); void ixgbe_fc_autoneg(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw); +s32 ixgbe_validate_mac_addr(u8 *mac_addr); s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask); void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask); +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); + +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); + +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); + s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); + s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, - u32 vind, bool vlan_on, bool vlvf_bypass); + u32 vind, bool vlan_on, bool vlvf_bypass); +s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, u32 *vfta_delta, u32 vfta, + bool vlvf_bypass); s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); +s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass); + s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up, bool link_up_wait_to_complete); + ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); + s32 
ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, u16 *wwpn_prefix); -s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); -s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); - -s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs); void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, + int strategy); s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 ver, u16 len, const char *str); u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, - u32 timeout, bool return_data); -s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout); -s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data); +s32 ixgbe_hic_unlocked(struct ixgbe_hw *, u32 *buffer, u32 length, u32 timeout); +s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *); +s32 ixgbe_fw_phy_activity(struct ixgbe_hw *, u16 activity, u32 (*data)[FW_PHY_ACT_DATA_COUNT]); void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); + +extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); +extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); bool ixgbe_mng_present(struct ixgbe_hw *hw); bool ixgbe_mng_enabled(struct ixgbe_hw *hw); -void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, - u32 headroom, int strategy); - -extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT]; - #define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 
#define IXGBE_EMC_INTERNAL_DATA 0x00 #define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 @@ -139,6 +155,12 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT]; s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); + +void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver); +void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver); +void ixgbe_get_orom_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver); void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, @@ -146,79 +168,4 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, bool autoneg_wait_to_complete); void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed); - -#define IXGBE_FAILED_READ_REG 0xffffffffU -#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU -#define IXGBE_FAILED_READ_CFG_WORD 0xffffU - -u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg); -void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value); - -static inline bool ixgbe_removed(void __iomem *addr) -{ - return unlikely(!addr); -} - -static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) -{ - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); - - if (ixgbe_removed(reg_addr)) - return; - writel(value, reg_addr + reg); -} -#define IXGBE_WRITE_REG(a, reg, value) ixgbe_write_reg((a), (reg), (value)) - -#ifndef writeq -#define writeq writeq -static inline void writeq(u64 val, void __iomem *addr) -{ - writel((u32)val, addr); - writel((u32)(val >> 32), addr + 4); -} -#endif - -static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value) -{ - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); - - if (ixgbe_removed(reg_addr)) - return; - writeq(value, reg_addr + reg); -} -#define IXGBE_WRITE_REG64(a, reg, 
value) ixgbe_write_reg64((a), (reg), (value)) - -u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg); -#define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg)) - -#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \ - ixgbe_write_reg((a), (reg) + ((offset) << 2), (value)) - -#define IXGBE_READ_REG_ARRAY(a, reg, offset) \ - ixgbe_read_reg((a), (reg) + ((offset) << 2)) - -#define IXGBE_WRITE_FLUSH(a) ixgbe_read_reg((a), IXGBE_STATUS) - -#define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev) - -#define hw_dbg(hw, format, arg...) \ - netdev_dbg(ixgbe_hw_to_netdev(hw), format, ## arg) -#define hw_err(hw, format, arg...) \ - netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg) -#define e_dev_info(format, arg...) \ - dev_info(&adapter->pdev->dev, format, ## arg) -#define e_dev_warn(format, arg...) \ - dev_warn(&adapter->pdev->dev, format, ## arg) -#define e_dev_err(format, arg...) \ - dev_err(&adapter->pdev->dev, format, ## arg) -#define e_dev_notice(format, arg...) \ - dev_notice(&adapter->pdev->dev, format, ## arg) -#define e_info(msglvl, format, arg...) \ - netif_info(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_err(msglvl, format, arg...) \ - netif_err(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_warn(msglvl, format, arg...) \ - netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_crit(msglvl, format, arg...) 
\ - netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) #endif /* IXGBE_COMMON */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.c index 072ef3b5fc61..3eee95ce93c3 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -27,30 +23,29 @@ *******************************************************************************/ -#include "ixgbe.h" #include "ixgbe_type.h" #include "ixgbe_dcb.h" #include "ixgbe_dcb_82598.h" #include "ixgbe_dcb_82599.h" /** - * ixgbe_ieee_credits - This calculates the ieee traffic class + * ixgbe_dcb_calculate_tc_credits - This calculates the ieee traffic class * credits from the configured bandwidth percentages. Credits * are the smallest unit programmable into the underlying * hardware. 
The IEEE 802.1Qaz specification do not use bandwidth * groups so this is much simplified from the CEE case. */ -static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, - __u16 *max, int max_frame) +s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max, + int max_frame_size) { int min_percent = 100; int min_credit, multiplier; int i; - min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / - DCB_CREDIT_QUANTUM; + min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) / + IXGBE_DCB_CREDIT_QUANTUM; - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { if (bw[i] < min_percent && bw[i]) min_percent = bw[i]; } @@ -58,50 +53,54 @@ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, multiplier = (min_credit / min_percent) + 1; /* Find out the hw credits for each TC */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL); + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + int val = min(bw[i] * multiplier, IXGBE_DCB_MAX_CREDIT_REFILL); if (val < min_credit) val = min_credit; - refill[i] = val; + refill[i] = (u16)val; - max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit; + max[i] = bw[i] ? (bw[i]*IXGBE_DCB_MAX_CREDIT)/100 : min_credit; } + return 0; } /** - * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits + * ixgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits * @ixgbe_dcb_config: Struct containing DCB settings. * @direction: Configuring either Tx or Rx. * * This function calculates the credits allocated to each traffic class. * It should be called only after the rules are checked by - * ixgbe_dcb_check_config(). + * ixgbe_dcb_check_config_cee(). 
*/ -s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, +s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config, - int max_frame, u8 direction) + u32 max_frame_size, u8 direction) { - struct tc_bw_alloc *p; - int min_credit; - int min_multiplier; - int min_percent = 100; + struct ixgbe_dcb_tc_path *p; + u32 min_multiplier = 0; + u16 min_percent = 100; + s32 ret_val = IXGBE_SUCCESS; /* Initialization values default for Tx settings */ - u32 credit_refill = 0; - u32 credit_max = 0; - u16 link_percentage = 0; - u8 bw_percent = 0; + u32 min_credit = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u16 link_percentage = 0; + u8 bw_percent = 0; u8 i; - if (!dcb_config) - return DCB_ERR_CONFIG; + if (dcb_config == NULL) { + ret_val = IXGBE_ERR_CONFIG; + goto out; + } - min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / - DCB_CREDIT_QUANTUM; + min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) / + IXGBE_DCB_CREDIT_QUANTUM; /* Find smallest link percentage */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { p = &dcb_config->tc_config[i].path[direction]; bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; link_percentage = p->bwg_percent; @@ -123,7 +122,7 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, min_multiplier = (min_credit / min_percent) + 1; /* Find out the link percentage for each TC first */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { p = &dcb_config->tc_config[i].path[direction]; bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; @@ -138,7 +137,7 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, /* Calculate credit refill ratio using multiplier */ credit_refill = min(link_percentage * min_multiplier, - MAX_CREDIT_REFILL); + (u32)IXGBE_DCB_MAX_CREDIT_REFILL); /* Refill at least minimum credit */ if (credit_refill < min_credit) @@ -147,7 +146,7 @@ s32 
ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, p->data_credits_refill = (u16)credit_refill; /* Calculate maximum credit for the TC */ - credit_max = (link_percentage * MAX_CREDIT) / 100; + credit_max = (link_percentage * IXGBE_DCB_MAX_CREDIT) / 100; /* * Adjustment based on rule checking, if the percentage @@ -157,87 +156,103 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, if (credit_max < min_credit) credit_max = min_credit; - if (direction == DCB_TX_CONFIG) { + if (direction == IXGBE_DCB_TX_CONFIG) { /* * Adjustment based on rule checking, if the * percentage of a TC is too small, the maximum * credit may not be enough to send out a TSO * packet in descriptor plane arbitration. */ - if ((hw->mac.type == ixgbe_mac_82598EB) && - credit_max && - (credit_max < MINIMUM_CREDIT_FOR_TSO)) - credit_max = MINIMUM_CREDIT_FOR_TSO; + if (credit_max && (credit_max < + IXGBE_DCB_MIN_TSO_CREDIT) + && (hw->mac.type == ixgbe_mac_82598EB)) + credit_max = IXGBE_DCB_MIN_TSO_CREDIT; dcb_config->tc_config[i].desc_credits_max = - (u16)credit_max; + (u16)credit_max; } p->data_credits_max = (u16)credit_max; } - return 0; +out: + return ret_val; } -void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) +/** + * ixgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info + * @cfg: dcb configuration to unpack into hardware consumable fields + * @map: user priority to traffic class map + * @pfc_up: u8 to store user priority PFC bitmask + * + * This unpacks the dcb configuration PFC info which is stored per + * traffic class into a 8bit user priority bitmask that can be + * consumed by hardware routines. The priority to tc map must be + * updated before calling this routine to use current up-to maps. 
+ */ +void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *cfg, u8 *map, u8 *pfc_up) { - struct tc_configuration *tc_config = &cfg->tc_config[0]; - int tc; + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int up; - for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) { - if (tc_config[tc].dcb_pfc != pfc_disabled) - *pfc_en |= BIT(tc); + /* + * If the TC for this user priority has PFC enabled then set the + * matching bit in 'pfc_up' to reflect that PFC is enabled. + */ + for (*pfc_up = 0, up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) { + if (tc_config[map[up]].pfc != ixgbe_dcb_pfc_disabled) + *pfc_up |= 1 << up; } } -void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction, +void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *cfg, int direction, u16 *refill) { - struct tc_configuration *tc_config = &cfg->tc_config[0]; + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; int tc; - for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) refill[tc] = tc_config[tc].path[direction].data_credits_refill; } -void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max) +void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *cfg, u16 *max) { - struct tc_configuration *tc_config = &cfg->tc_config[0]; + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; int tc; - for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) max[tc] = tc_config[tc].desc_credits_max; } -void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction, +void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *cfg, int direction, u8 *bwgid) { - struct tc_configuration *tc_config = &cfg->tc_config[0]; + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; int tc; - for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) bwgid[tc] = tc_config[tc].path[direction].bwg_id; } -void 
ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, - u8 *ptype) +void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *cfg, int direction, + u8 *tsa) { - struct tc_configuration *tc_config = &cfg->tc_config[0]; + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; int tc; - for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) - ptype[tc] = tc_config[tc].path[direction].prio_type; + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + tsa[tc] = tc_config[tc].path[direction].tsa; } u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) { - struct tc_configuration *tc_config = &cfg->tc_config[0]; - u8 prio_mask = BIT(up); + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + u8 prio_mask = 1 << up; u8 tc = cfg->num_tcs.pg_tcs; /* If tc is 0 then DCB is likely not enabled or supported */ if (!tc) - return 0; + goto out; /* * Test from maximum TC to 1 and report the first match we find. If @@ -248,163 +263,456 @@ u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) break; } - +out: return tc; } -void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map) +void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction, + u8 *map) { u8 up; - for (up = 0; up < MAX_USER_PRIORITY; up++) + for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up); } /** - * ixgbe_dcb_hw_config - Config and enable DCB + * ixgbe_dcb_config - Struct containing DCB settings. + * @dcb_config: Pointer to DCB config structure + * + * This function checks DCB rules for DCB settings. + * The following rules are checked: + * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%. + * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth + * Group must total 100. + * 3. 
A Traffic Class should not be set to both Link Strict Priority + * and Group Strict Priority. + * 4. Link strict Bandwidth Groups can only have link strict traffic classes + * with zero bandwidth. + */ +s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *dcb_config) +{ + struct ixgbe_dcb_tc_path *p; + s32 ret_val = IXGBE_SUCCESS; + u8 i, j, bw = 0, bw_id; + u8 bw_sum[2][IXGBE_DCB_MAX_BW_GROUP]; + bool link_strict[2][IXGBE_DCB_MAX_BW_GROUP]; + + memset(bw_sum, 0, sizeof(bw_sum)); + memset(link_strict, 0, sizeof(link_strict)); + + /* First Tx, then Rx */ + for (i = 0; i < 2; i++) { + /* Check each traffic class for rule violation */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + p = &dcb_config->tc_config[j].path[i]; + + bw = p->bwg_percent; + bw_id = p->bwg_id; + + if (bw_id >= IXGBE_DCB_MAX_BW_GROUP) { + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + if (p->tsa == ixgbe_dcb_tsa_strict) { + link_strict[i][bw_id] = true; + /* Link strict should have zero bandwidth */ + if (bw) { + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + } else if (!bw) { + /* + * Traffic classes without link strict + * should have non-zero bandwidth. + */ + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + bw_sum[i][bw_id] += bw; + } + + bw = 0; + + /* Check each bandwidth group for rule violation */ + for (j = 0; j < IXGBE_DCB_MAX_BW_GROUP; j++) { + bw += dcb_config->bw_percentage[i][j]; + /* + * Sum of bandwidth percentages of all traffic classes + * within a Bandwidth Group must total 100 except for + * link strict group (zero bandwidth). + */ + if (link_strict[i][j]) { + if (bw_sum[i][j]) { + /* + * Link strict group should have zero + * bandwidth. 
+ */ + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + } else if (bw_sum[i][j] != IXGBE_DCB_BW_PERCENT && + bw_sum[i][j] != 0) { + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + } + + if (bw != IXGBE_DCB_BW_PERCENT) { + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + } + +err_config: + + return ret_val; +} + +/** + * ixgbe_dcb_get_tc_stats - Returns status of each traffic class + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the status data for each of the Traffic Classes in use. + */ +s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the CBFC status data for each of the Traffic Classes. 
+ */ +s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_rx_arbiter_cee - Config Rx arbiter * @hw: pointer to hardware structure * @dcb_config: pointer to ixgbe_dcb_config structure * - * Configure dcb settings and enable dcb mode. + * Configure Rx Data Arbiter and credits for each traffic class. */ -s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) +s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) { - u8 pfc_en; - u8 ptype[MAX_TRAFFIC_CLASS]; - u8 bwgid[MAX_TRAFFIC_CLASS]; - u8 prio_tc[MAX_TRAFFIC_CLASS]; - u16 refill[MAX_TRAFFIC_CLASS]; - u16 max[MAX_TRAFFIC_CLASS]; + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; + u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; + u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; + u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; + + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); - /* Unpack CEE standard containers */ - ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en); - ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill); - ixgbe_dcb_unpack_max(dcb_config, max); - ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid); - 
ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype); - ixgbe_dcb_unpack_map(dcb_config, DCB_TX_CONFIG, prio_tc); + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid, + tsa, map); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_tx_desc_arbiter_cee - Config Tx Desc arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); switch (hw->mac.type) { case ixgbe_mac_82598EB: - return ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max, - bwgid, ptype); + ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, + bwgid, tsa); + break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, - bwgid, ptype, prio_tc); + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, + bwgid, tsa); + break; default: break; } - return 0; + return ret; } -/* Helper routines to abstract HW specifics from DCB netlink ops */ -s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw 
*hw, u8 pfc_en, u8 *prio_tc) +/** + * ixgbe_dcb_config_tx_data_arbiter_cee - Config Tx data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Data Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) { + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); + switch (hw->mac.type) { case ixgbe_mac_82598EB: - return ixgbe_dcb_config_pfc_82598(hw, pfc_en); + ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, + bwgid, tsa); + break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, + bwgid, tsa, + map); + break; default: break; } - return -EINVAL; + return ret; } -s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame) +/** + * ixgbe_dcb_config_pfc_cee - Config priority flow control + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Priority Flow Control for each traffic class. 
+ */ +s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) { - __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; - __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; - int i; + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 pfc_en; + u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; - /* naively give each TC a bwg to map onto CEE hardware */ - __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); + ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); - /* Map TSA onto CEE prio type */ - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - switch (ets->tc_tsa[i]) { - case IEEE_8021QAZ_TSA_STRICT: - prio_type[i] = 2; - break; - case IEEE_8021QAZ_TSA_ETS: - prio_type[i] = 0; - break; - default: - /* Hardware only supports priority strict or - * ETS transmission selection algorithms if - * we receive some other value from dcbnl - * throw an error - */ - return -EINVAL; - } + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map); + break; + default: + break; } + return ret; +} - ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame); - return ixgbe_dcb_hw_ets_config(hw, refill, max, - bwg_id, prio_type, ets->prio_tc); +/** + * ixgbe_dcb_config_tc_stats - Config traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. 
+ */ +s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_tc_stats_82598(hw); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL); + break; + default: + break; + } + return ret; } -s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, - u16 *refill, u16 *max, u8 *bwg_id, - u8 *prio_type, u8 *prio_tc) +/** + * ixgbe_dcb_hw_config_cee - Config and enable DCB + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure dcb settings and enable dcb mode. + */ +s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) { + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 pfc_en; + u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + + /* Unpack CEE standard containers */ + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); + switch (hw->mac.type) { case ixgbe_mac_82598EB: - ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, - prio_type); - ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, - bwg_id, prio_type); - ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, - bwg_id, prio_type); + ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->link_speed, + refill, max, bwgid, tsa); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - ixgbe_dcb_config_rx_arbiter_82599(hw, 
refill, max, - bwg_id, prio_type, prio_tc); - ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, - bwg_id, prio_type); - ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, - prio_type, prio_tc); + case ixgbe_mac_X550EM_a: + ixgbe_dcb_config_82599(hw, dcb_config); + ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed, + refill, max, bwgid, + tsa, map); + + ixgbe_dcb_config_tc_stats_82599(hw, dcb_config); break; default: break; } - return 0; + + if (!ret && dcb_config->pfc_mode_enable) { + ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); + ret = ixgbe_dcb_config_pfc(hw, pfc_en, map); + } + + return ret; } -static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map) +/* Helper routines to abstract HW specifics from DCB netlink ops */ +s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) { - u32 reg, i; + int ret = IXGBE_ERR_PARAM; - reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); - for (i = 0; i < MAX_USER_PRIORITY; i++) - map[i] = IXGBE_RTRUP2TC_UP_MASK & - (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map); + break; + default: + break; + } + return ret; } -void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map) +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, + u8 *bwg_id, u8 *tsa, u8 *map) { switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, + tsa); + break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - ixgbe_dcb_read_rtrup2tc_82599(hw, map); + case 
ixgbe_mac_X550EM_a: + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, + tsa, map); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, + tsa, map); break; default: break; } + return 0; } diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.h index fc0a2dd52499..d19c3c277095 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.h +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.h @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
@@ -26,146 +22,144 @@ *******************************************************************************/ -#ifndef _DCB_CONFIG_H_ -#define _DCB_CONFIG_H_ +#ifndef _IXGBE_DCB_H_ +#define _IXGBE_DCB_H_ -#include #include "ixgbe_type.h" -/* DCB data structures */ - -#define IXGBE_MAX_PACKET_BUFFERS 8 -#define MAX_USER_PRIORITY 8 -#define MAX_BW_GROUP 8 -#define BW_PERCENT 100 - -#define DCB_TX_CONFIG 0 -#define DCB_RX_CONFIG 1 - -/* DCB error Codes */ -#define DCB_SUCCESS 0 -#define DCB_ERR_CONFIG -1 -#define DCB_ERR_PARAM -2 - -/* Transmit and receive Errors */ -/* Error in bandwidth group allocation */ -#define DCB_ERR_BW_GROUP -3 -/* Error in traffic class bandwidth allocation */ -#define DCB_ERR_TC_BW -4 -/* Traffic class has both link strict and group strict enabled */ -#define DCB_ERR_LS_GS -5 -/* Link strict traffic class has non zero bandwidth */ -#define DCB_ERR_LS_BW_NONZERO -6 -/* Link strict bandwidth group has non zero bandwidth */ -#define DCB_ERR_LS_BWG_NONZERO -7 -/* Traffic class has zero bandwidth */ -#define DCB_ERR_TC_BW_ZERO -8 - -#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF - -struct dcb_pfc_tc_debug { - u8 tc; - u8 pause_status; - u64 pause_quanta; -}; +/* DCB defines */ +/* DCB credit calculation defines */ +#define IXGBE_DCB_CREDIT_QUANTUM 64 +#define IXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */ +#define IXGBE_DCB_MAX_TSO_SIZE (32 * 1024) /* Max TSO pkt size in DCB*/ +#define IXGBE_DCB_MAX_CREDIT (2 * IXGBE_DCB_MAX_CREDIT_REFILL) -enum strict_prio_type { - prio_none = 0, - prio_group, - prio_link -}; +/* 513 for 32KB TSO packet */ +#define IXGBE_DCB_MIN_TSO_CREDIT \ + ((IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM) + 1) -/* DCB capability definitions */ -#define IXGBE_DCB_PG_SUPPORT 0x00000001 -#define IXGBE_DCB_PFC_SUPPORT 0x00000002 -#define IXGBE_DCB_BCN_SUPPORT 0x00000004 -#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008 -#define IXGBE_DCB_GSP_SUPPORT 0x00000010 +/* DCB configuration defines */ +#define IXGBE_DCB_MAX_USER_PRIORITY 8 
+#define IXGBE_DCB_MAX_BW_GROUP 8 +#define IXGBE_DCB_BW_PERCENT 100 -#define IXGBE_DCB_8_TC_SUPPORT 0x80 +#define IXGBE_DCB_TX_CONFIG 0 +#define IXGBE_DCB_RX_CONFIG 1 -struct dcb_support { - /* DCB capabilities */ - u32 capabilities; +/* DCB capability defines */ +#define IXGBE_DCB_PG_SUPPORT 0x00000001 +#define IXGBE_DCB_PFC_SUPPORT 0x00000002 +#define IXGBE_DCB_BCN_SUPPORT 0x00000004 +#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008 +#define IXGBE_DCB_GSP_SUPPORT 0x00000010 + +struct ixgbe_dcb_support { + u32 capabilities; /* DCB capabilities */ /* Each bit represents a number of TCs configurable in the hw. - * If 8 traffic classes can be configured, the value is 0x80. - */ - u8 traffic_classes; - u8 pfc_traffic_classes; + * If 8 traffic classes can be configured, the value is 0x80. */ + u8 traffic_classes; + u8 pfc_traffic_classes; +}; + +enum ixgbe_dcb_tsa { + ixgbe_dcb_tsa_ets = 0, + ixgbe_dcb_tsa_group_strict_cee, + ixgbe_dcb_tsa_strict }; /* Traffic class bandwidth allocation per direction */ -struct tc_bw_alloc { - u8 bwg_id; /* Bandwidth Group (BWG) ID */ - u8 bwg_percent; /* % of BWG's bandwidth */ - u8 link_percent; /* % of link bandwidth */ - u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ - u16 data_credits_refill; /* Credit refill amount in 64B granularity */ - u16 data_credits_max; /* Max credits for a configured packet buffer - * in 64B granularity.*/ - enum strict_prio_type prio_type; /* Link or Group Strict Priority */ +struct ixgbe_dcb_tc_path { + u8 bwg_id; /* Bandwidth Group (BWG) ID */ + u8 bwg_percent; /* % of BWG's bandwidth */ + u8 link_percent; /* % of link bandwidth */ + u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ + u16 data_credits_refill; /* Credit refill amount in 64B granularity */ + u16 data_credits_max; /* Max credits for a configured packet buffer + * in 64B granularity.*/ + enum ixgbe_dcb_tsa tsa; /* Link or Group Strict Priority */ }; -enum dcb_pfc_type { - pfc_disabled = 0, - pfc_enabled_full, - 
pfc_enabled_tx, - pfc_enabled_rx +enum ixgbe_dcb_pfc { + ixgbe_dcb_pfc_disabled = 0, + ixgbe_dcb_pfc_enabled, + ixgbe_dcb_pfc_enabled_txonly, + ixgbe_dcb_pfc_enabled_rxonly }; /* Traffic class configuration */ -struct tc_configuration { - struct tc_bw_alloc path[2]; /* One each for Tx/Rx */ - enum dcb_pfc_type dcb_pfc; /* Class based flow control setting */ +struct ixgbe_dcb_tc_config { + struct ixgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */ + enum ixgbe_dcb_pfc pfc; /* Class based flow control setting */ u16 desc_credits_max; /* For Tx Descriptor arbitration */ u8 tc; /* Traffic class (TC) */ }; -struct dcb_num_tcs { +enum ixgbe_dcb_pba { + /* PBA[0-7] each use 64KB FIFO */ + ixgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL, + /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */ + ixgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED +}; + +struct ixgbe_dcb_num_tcs { u8 pg_tcs; u8 pfc_tcs; }; struct ixgbe_dcb_config { - struct dcb_support support; - struct dcb_num_tcs num_tcs; - struct tc_configuration tc_config[MAX_TRAFFIC_CLASS]; - u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */ - bool pfc_mode_enable; - - u32 dcb_cfg_version; /* Not used...OS-specific? */ - u32 link_speed; /* For bandwidth allocation validation purpose */ + struct ixgbe_dcb_tc_config tc_config[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + struct ixgbe_dcb_support support; + struct ixgbe_dcb_num_tcs num_tcs; + u8 bw_percentage[2][IXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */ + bool pfc_mode_enable; + bool round_robin_enable; + + enum ixgbe_dcb_pba rx_pba_cfg; + + u32 dcb_cfg_version; /* Not used...OS-specific? 
*/ + u32 link_speed; /* For bandwidth allocation validation purpose */ + bool vt_mode; }; /* DCB driver APIs */ -void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en); -void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *); -void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *); -void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *); -void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *); -void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *); -u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8); + +/* DCB rule checking */ +s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *); /* DCB credits calculation */ -s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, - struct ixgbe_dcb_config *, int, u8); - -/* DCB hw initialization */ -s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max); -s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, - u8 *bwg_id, u8 *prio_type, u8 *tc_prio); -s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio); -s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); - -void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map); - -/* DCB definitions for credit calculation */ -#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */ -#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */ -#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */ -#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */ -#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1204 / 64B */ - -#endif /* _DCB_CONFIG_H */ +s32 ixgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int); +s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *, + struct ixgbe_dcb_config *, u32, u8); + +/* DCB PFC */ +s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, u8, u8 *); +s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *); + +/* DCB stats */ +s32 
ixgbe_dcb_config_tc_stats(struct ixgbe_hw *); +s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8); +s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8); + +/* DCB config arbiters */ +s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *, + struct ixgbe_dcb_config *); +s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *, + struct ixgbe_dcb_config *); +s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *, + struct ixgbe_dcb_config *); + +/* DCB unpack routines */ +void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *, u8 *, u8 *); +void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *, int, u16 *); +void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *); +void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *); +void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *); +void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *); +u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8); + +/* DCB initialization */ +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *); +s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *); +#endif /* _IXGBE_DCB_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.c index b79e93a5b699..b1d8df9f10b1 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. 
+ Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -26,11 +22,82 @@ *******************************************************************************/ -#include "ixgbe.h" + #include "ixgbe_type.h" #include "ixgbe_dcb.h" #include "ixgbe_dcb_82598.h" +/** + * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the status data for each of the Traffic Classes in use. + */ +s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + int tc; + + DEBUGFUNC("dcb_get_tc_stats"); + + if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return IXGBE_ERR_PARAM; + + /* Statistics pertaining to each traffic class */ + for (tc = 0; tc < tc_count; tc++) { + /* Transmitted Packets */ + stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); + /* Transmitted Bytes */ + stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc)); + /* Received Packets */ + stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); + /* Received Bytes */ + stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc)); + +#if 0 + /* Can we get rid of these?? Consequently, getting rid + * of the tc_stats structure. 
+ */ + tc_stats_array[up]->in_overflow_discards = 0; + tc_stats_array[up]->out_overflow_discards = 0; +#endif + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the CBFC status data for each of the Traffic Classes. + */ +s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + int tc; + + DEBUGFUNC("dcb_get_pfc_stats"); + + if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return IXGBE_ERR_PARAM; + + for (tc = 0; tc < tc_count; tc++) { + /* Priority XOFF Transmitted */ + stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); + /* Priority XOFF Received */ + stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc)); + } + + return IXGBE_SUCCESS; +} + /** * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter * @hw: pointer to hardware structure @@ -38,15 +105,13 @@ * * Configure Rx Data Arbiter and credits for each traffic class. 
*/ -s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *prio_type) +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *tsa) { - u32 reg = 0; - u32 credit_refill = 0; - u32 credit_max = 0; - u8 i = 0; + u32 reg = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u8 i = 0; reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA; IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg); @@ -62,13 +127,13 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { credit_refill = refill[i]; - credit_max = max[i]; + credit_max = max[i]; reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); - if (prio_type[i] == prio_link) + if (tsa[i] == ixgbe_dcb_tsa_strict) reg |= IXGBE_RT2CR_LSP; IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); @@ -85,7 +150,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, reg &= ~IXGBE_RXCTRL_DMBYPS; IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg); - return 0; + return IXGBE_SUCCESS; } /** @@ -96,13 +161,11 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, * Configure Tx Descriptor Arbiter and credits for each traffic class. 
*/ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type) + u16 *refill, u16 *max, u8 *bwg_id, + u8 *tsa) { - u32 reg, max_credits; - u8 i; + u32 reg, max_credits; + u8 i; reg = IXGBE_READ_REG(hw, IXGBE_DPMCS); @@ -116,22 +179,22 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg); /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { max_credits = max[i]; reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; reg |= refill[i]; reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT; - if (prio_type[i] == prio_group) + if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) reg |= IXGBE_TDTQ2TCCR_GSP; - if (prio_type[i] == prio_link) + if (tsa[i] == ixgbe_dcb_tsa_strict) reg |= IXGBE_TDTQ2TCCR_LSP; IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); } - return 0; + return IXGBE_SUCCESS; } /** @@ -142,10 +205,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, * Configure Tx Data Arbiter and credits for each traffic class. 
*/ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type) + u16 *refill, u16 *max, u8 *bwg_id, + u8 *tsa) { u32 reg; u8 i; @@ -159,15 +220,15 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg); /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { reg = refill[i]; reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT; reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT; - if (prio_type[i] == prio_group) + if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) reg |= IXGBE_TDPT2TCCR_GSP; - if (prio_type[i] == prio_link) + if (tsa[i] == ixgbe_dcb_tsa_strict) reg |= IXGBE_TDPT2TCCR_LSP; IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); @@ -178,7 +239,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, reg |= IXGBE_DTXCTL_ENDBUBD; IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg); - return 0; + return IXGBE_SUCCESS; } /** @@ -191,7 +252,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) { u32 fcrtl, reg; - u8 i; + u8 i; /* Enable Transmit Priority Flow Control */ reg = IXGBE_READ_REG(hw, IXGBE_RMCS); @@ -209,8 +270,8 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); /* Configure PFC Tx thresholds per TC */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - if (!(pfc_en & BIT(i))) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if (!(pfc_en & (1 << i))) { IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); continue; @@ -223,15 +284,14 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) } /* Configure pause time */ - reg = hw->fc.pause_time * 0x00010001; - for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + reg = hw->fc.pause_time | (hw->fc.pause_time << 16); + for (i = 0; i < 
(IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); /* Configure flow control refresh threshold value */ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - - return 0; + return IXGBE_SUCCESS; } /** @@ -241,11 +301,11 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) * Configure queue statistics registers, all queues belonging to same traffic * class uses a single set of queue statistics counters. */ -static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) +s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) { u32 reg = 0; - u8 i = 0; - u8 j = 0; + u8 i = 0; + u8 j = 0; /* Receive Queues stats setting - 8 queues per statistics reg */ for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) { @@ -256,14 +316,14 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) reg |= ((0x1010101) * j); IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg); } - /* Transmit Queues stats setting - 4 queues per statistics reg */ + /* Transmit Queues stats setting - 4 queues per statistics reg*/ for (i = 0; i < 8; i++) { reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i)); reg |= ((0x1010101) * i); IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg); } - return 0; + return IXGBE_SUCCESS; } /** @@ -273,16 +333,18 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) * * Configure dcb settings and enable dcb mode. 
*/ -s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, - u16 *max, u8 *bwg_id, u8 *prio_type) +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, int link_speed, + u16 *refill, u16 *max, u8 *bwg_id, + u8 *tsa) { - ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type); - ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, - bwg_id, prio_type); - ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, - bwg_id, prio_type); - ixgbe_dcb_config_pfc_82598(hw, pfc_en); + UNREFERENCED_1PARAMETER(link_speed); + + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, + tsa); ixgbe_dcb_config_tc_stats_82598(hw); - return 0; + return IXGBE_SUCCESS; } diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.h index 3164f5453b8f..d340a691d7d2 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.h +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.h @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
- You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -26,72 +22,69 @@ *******************************************************************************/ -#ifndef _DCB_82598_CONFIG_H_ -#define _DCB_82598_CONFIG_H_ +#ifndef _IXGBE_DCB_82598_H_ +#define _IXGBE_DCB_82598_H_ /* DCB register definitions */ -#define IXGBE_DPMCS_MTSOS_SHIFT 16 -#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */ -#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */ -#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */ -#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */ - -#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */ - -#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ -#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */ - -#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet buffers enable */ -#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores (RSS) enable */ - -#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12 -#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9 -#define IXGBE_TDTQ2TCCR_GSP 0x40000000 -#define IXGBE_TDTQ2TCCR_LSP 0x80000000 - -#define IXGBE_TDPT2TCCR_MCL_SHIFT 12 -#define IXGBE_TDPT2TCCR_BWG_SHIFT 9 -#define IXGBE_TDPT2TCCR_GSP 0x40000000 -#define IXGBE_TDPT2TCCR_LSP 0x80000000 - -#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */ -#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */ -#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */ - -#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */ - -#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ -#define IXGBE_RXPBSIZE_48KB 
0x0000C000 /* 48KB Packet Buffer */ -#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ -#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ - -#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 - -/* DCB hardware-specific driver APIs */ - -/* DCB PFC functions */ -s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en); - -/* DCB hw initialization */ -s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *prio_type); - -s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type); - -s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type); - -s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, - u16 *max, u8 *bwg_id, u8 *prio_type); - -#endif /* _DCB_82598_CONFIG_H */ +#define IXGBE_DPMCS_MTSOS_SHIFT 16 +#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, + * 1 DFP - Deficit Fixed Priority */ +#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */ +#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */ + +#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */ + +#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */ + +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet + * buffers enable */ +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores + * (RSS) enable */ + +#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12 +#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9 +#define IXGBE_TDTQ2TCCR_GSP 0x40000000 +#define IXGBE_TDTQ2TCCR_LSP 0x80000000 + +#define IXGBE_TDPT2TCCR_MCL_SHIFT 12 +#define IXGBE_TDPT2TCCR_BWG_SHIFT 9 +#define IXGBE_TDPT2TCCR_GSP 0x40000000 +#define IXGBE_TDPT2TCCR_LSP 0x80000000 + +#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, + * 1 DFP 
- Deficit Fixed Priority */ +#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */ +#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */ + +#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */ + +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ + +/* DCB driver APIs */ + +/* DCB PFC */ +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8); + +/* DCB stats */ +s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *); +s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *, + struct ixgbe_hw_stats *, u8); +s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *, + struct ixgbe_hw_stats *, u8); + +/* DCB config arbiters */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, + u8 *, u8 *); +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, + u8 *, u8 *); +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, u8 *); + +/* DCB initialization */ +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, u8 *); +#endif /* _IXGBE_DCB_82958_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.c index 1011d644978f..b0c5e523093b 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. 
+ Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -26,32 +22,96 @@ *******************************************************************************/ -#include "ixgbe.h" + #include "ixgbe_type.h" #include "ixgbe_dcb.h" #include "ixgbe_dcb_82599.h" +/** + * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the status data for each of the Traffic Classes in use. 
+ */ +s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + int tc; + + DEBUGFUNC("dcb_get_tc_stats"); + + if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return IXGBE_ERR_PARAM; + + /* Statistics pertaining to each traffic class */ + for (tc = 0; tc < tc_count; tc++) { + /* Transmitted Packets */ + stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); + /* Transmitted Bytes (read low first to prevent missed carry) */ + stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc)); + stats->qbtc[tc] += + (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32); + /* Received Packets */ + stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); + /* Received Bytes (read low first to prevent missed carry) */ + stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc)); + stats->qbrc[tc] += + (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32); + + /* Received Dropped Packet */ + stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc)); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the CBFC status data for each of the Traffic Classes. 
+ */ +s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + int tc; + + DEBUGFUNC("dcb_get_pfc_stats"); + + if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return IXGBE_ERR_PARAM; + + for (tc = 0; tc < tc_count; tc++) { + /* Priority XOFF Transmitted */ + stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); + /* Priority XOFF Received */ + stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc)); + } + + return IXGBE_SUCCESS; +} + /** * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter * @hw: pointer to hardware structure - * @refill: refill credits index by traffic class - * @max: max credits index by traffic class - * @bwg_id: bandwidth grouping indexed by traffic class - * @prio_type: priority type indexed by traffic class + * @dcb_config: pointer to ixgbe_dcb_config structure * * Configure Rx Packet Arbiter and credits for each traffic class. */ -s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type, - u8 *prio_tc) +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa, + u8 *map) { - u32 reg = 0; - u32 credit_refill = 0; - u32 credit_max = 0; - u8 i = 0; + u32 reg = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u8 i = 0; /* * Disable the arbiter before changing parameters @@ -60,21 +120,27 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); - /* Map all traffic classes to their UP */ + /* + * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding + * bits sets for the UPs that needs to be mappped to that TC. + * e.g if priorities 6 and 7 are to be mapped to a TC then the + * up_to_tc_bitmap value for that TC will be 11000000 in binary. 
+ */ reg = 0; - for (i = 0; i < MAX_USER_PRIORITY; i++) - reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) + reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); + IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { credit_refill = refill[i]; - credit_max = max[i]; + credit_max = max[i]; reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT; - if (prio_type[i] == prio_link) + if (tsa[i] == ixgbe_dcb_tsa_strict) reg |= IXGBE_RTRPT4C_LSP; IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); @@ -87,27 +153,21 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); - return 0; + return IXGBE_SUCCESS; } /** * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter * @hw: pointer to hardware structure - * @refill: refill credits index by traffic class - * @max: max credits index by traffic class - * @bwg_id: bandwidth grouping indexed by traffic class - * @prio_type: priority type indexed by traffic class + * @dcb_config: pointer to ixgbe_dcb_config structure * * Configure Tx Descriptor Arbiter and credits for each traffic class. 
*/ -s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type) +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa) { - u32 reg, max_credits; - u8 i; + u32 reg, max_credits; + u8 i; /* Clear the per-Tx queue credits; we use per-TC instead */ for (i = 0; i < 128; i++) { @@ -116,16 +176,16 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, } /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { max_credits = max[i]; reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; reg |= refill[i]; reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT; - if (prio_type[i] == prio_group) + if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) reg |= IXGBE_RTTDT2C_GSP; - if (prio_type[i] == prio_link) + if (tsa[i] == ixgbe_dcb_tsa_strict) reg |= IXGBE_RTTDT2C_LSP; IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); @@ -138,25 +198,19 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM; IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); - return 0; + return IXGBE_SUCCESS; } /** * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter * @hw: pointer to hardware structure - * @refill: refill credits index by traffic class - * @max: max credits index by traffic class - * @bwg_id: bandwidth grouping indexed by traffic class - * @prio_type: priority type indexed by traffic class + * @dcb_config: pointer to ixgbe_dcb_config structure * * Configure Tx Packet Arbiter and credits for each traffic class. 
*/ -s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type, - u8 *prio_tc) +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa, + u8 *map) { u32 reg; u8 i; @@ -170,22 +224,28 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, IXGBE_RTTPCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); - /* Map all traffic classes to their UP */ + /* + * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding + * bits sets for the UPs that needs to be mappped to that TC. + * e.g if priorities 6 and 7 are to be mapped to a TC then the + * up_to_tc_bitmap value for that TC will be 11000000 in binary. + */ reg = 0; - for (i = 0; i < MAX_USER_PRIORITY; i++) - reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) + reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); + IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { reg = refill[i]; reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT; reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT; - if (prio_type[i] == prio_group) + if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) reg |= IXGBE_RTTPT2C_GSP; - if (prio_type[i] == prio_link) + if (tsa[i] == ixgbe_dcb_tsa_strict) reg |= IXGBE_RTTPT2C_LSP; IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); @@ -199,18 +259,18 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT); IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); - return 0; + return IXGBE_SUCCESS; } /** * ixgbe_dcb_config_pfc_82599 - Configure priority flow control * @hw: pointer to hardware structure * @pfc_en: enabled pfc bitmask - * @prio_tc: priority to tc assignments indexed by priority + * @map: priority to tc assignments indexed by priority * * Configure 
Priority Flow Control (PFC) for each traffic class. */ -s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) { u32 i, j, fcrtl, reg; u8 max_tc = 0; @@ -223,8 +283,8 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) reg |= IXGBE_MFLCN_DPF; /* - * X540 & X550 supports per TC Rx priority flow control. - * So clear all TCs and only enable those that should be + * X540 supports per TC Rx priority flow control. So + * clear all TCs and only enable those that should be * enabled. */ reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); @@ -237,9 +297,9 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); - for (i = 0; i < MAX_USER_PRIORITY; i++) { - if (prio_tc[i] > max_tc) - max_tc = prio_tc[i]; + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) { + if (map[i] > max_tc) + max_tc = map[i]; } @@ -247,8 +307,8 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) for (i = 0; i <= max_tc; i++) { int enabled = 0; - for (j = 0; j < MAX_USER_PRIORITY; j++) { - if ((prio_tc[j] == i) && (pfc_en & BIT(j))) { + for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) { + if ((map[j] == i) && (pfc_en & (1 << j))) { enabled = 1; break; } @@ -259,7 +319,8 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); } else { - /* In order to prevent Tx hangs when the internal Tx + /* + * In order to prevent Tx hangs when the internal Tx * switch is enabled we must set the high water mark * to the Rx packet buffer size - 24KB. 
This allows * the Tx switch to function even under heavy Rx @@ -272,20 +333,20 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); } - for (; i < MAX_TRAFFIC_CLASS; i++) { + for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0); } /* Configure pause time (2 TCs per register) */ - reg = hw->fc.pause_time * 0x00010001; - for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + reg = hw->fc.pause_time | (hw->fc.pause_time << 16); + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); /* Configure flow control refresh threshold value */ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - return 0; + return IXGBE_SUCCESS; } /** @@ -295,75 +356,229 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) * Configure queue statistics registers, all queues belonging to same traffic * class uses a single set of queue statistics counters. */ -static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) +s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) { u32 reg = 0; u8 i = 0; + u8 tc_count = 8; + bool vt_mode = false; - /* - * Receive Queues stats setting - * 32 RQSMR registers, each configuring 4 queues. - * Set all 16 queues of each TC to the same stat - * with TC 'n' going to stat 'n'. - */ - for (i = 0; i < 32; i++) { - reg = 0x01010101 * (i / 4); - IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + if (dcb_config != NULL) { + tc_count = dcb_config->num_tcs.pg_tcs; + vt_mode = dcb_config->vt_mode; } - /* - * Transmit Queues stats setting - * 32 TQSM registers, each controlling 4 queues. - * Set all queues of each TC to the same stat - * with TC 'n' going to stat 'n'. - * Tx queues are allocated non-uniformly to TCs: - * 32, 32, 16, 16, 8, 8, 8, 8. 
- */ - for (i = 0; i < 32; i++) { - if (i < 8) - reg = 0x00000000; - else if (i < 16) - reg = 0x01010101; - else if (i < 20) - reg = 0x02020202; - else if (i < 24) - reg = 0x03030303; - else if (i < 26) - reg = 0x04040404; - else if (i < 28) - reg = 0x05050505; - else if (i < 30) - reg = 0x06060606; + + if (!((tc_count == 8 && vt_mode == false) || tc_count == 4)) + return IXGBE_ERR_PARAM; + + if (tc_count == 8 && vt_mode == false) { + /* + * Receive Queues stats setting + * 32 RQSMR registers, each configuring 4 queues. + * + * Set all 16 queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) { + reg = 0x01010101 * (i / 4); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + } + /* + * Transmit Queues stats setting + * 32 TQSM registers, each controlling 4 queues. + * + * Set all queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + * Tx queues are allocated non-uniformly to TCs: + * 32, 32, 16, 16, 8, 8, 8, 8. + */ + for (i = 0; i < 32; i++) { + if (i < 8) + reg = 0x00000000; + else if (i < 16) + reg = 0x01010101; + else if (i < 20) + reg = 0x02020202; + else if (i < 24) + reg = 0x03030303; + else if (i < 26) + reg = 0x04040404; + else if (i < 28) + reg = 0x05050505; + else if (i < 30) + reg = 0x06060606; + else + reg = 0x07070707; + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); + } + } else if (tc_count == 4 && vt_mode == false) { + /* + * Receive Queues stats setting + * 32 RQSMR registers, each configuring 4 queues. + * + * Set all 16 queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) { + if (i % 8 > 3) + /* In 4 TC mode, odd 16-queue ranges are + * not used. + */ + continue; + reg = 0x01010101 * (i / 8); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + } + /* + * Transmit Queues stats setting + * 32 TQSM registers, each controlling 4 queues. + * + * Set all queues of each TC to the same stat + * with TC 'n' going to stat 'n'. 
+ * Tx queues are allocated non-uniformly to TCs: + * 64, 32, 16, 16. + */ + for (i = 0; i < 32; i++) { + if (i < 16) + reg = 0x00000000; + else if (i < 24) + reg = 0x01010101; + else if (i < 28) + reg = 0x02020202; + else + reg = 0x03030303; + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); + } + } else if (tc_count == 4 && vt_mode == true) { + /* + * Receive Queues stats setting + * 32 RQSMR registers, each configuring 4 queues. + * + * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each + * pool. Set all 32 queues of each TC across pools to the same + * stat with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100); + /* + * Transmit Queues stats setting + * 32 TQSM registers, each controlling 4 queues. + * + * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each + * pool. Set all 32 queues of each TC across pools to the same + * stat with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_82599 - Configure general DCB parameters + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure general DCB parameters. 
+ */ +s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + u32 reg; + u32 q; + + /* Disable the Tx desc arbiter so that MTQC can be changed */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_MRQC); + if (dcb_config->num_tcs.pg_tcs == 8) { + /* Enable DCB for Rx with 8 TCs */ + switch (reg & IXGBE_MRQC_MRQE_MASK) { + case 0: + case IXGBE_MRQC_RT4TCEN: + /* RSS disabled cases */ + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RT8TCEN; + break; + case IXGBE_MRQC_RSSEN: + case IXGBE_MRQC_RTRSS4TCEN: + /* RSS enabled cases */ + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RTRSS8TCEN; + break; + default: + /* + * Unsupported value, assume stale data, + * overwrite no RSS + */ + ASSERT(0); + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RT8TCEN; + } + } + if (dcb_config->num_tcs.pg_tcs == 4) { + /* We support both VT-on and VT-off with 4 TCs. */ + if (dcb_config->vt_mode) + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_VMDQRT4TCEN; else - reg = 0x07070707; - IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RTRSS4TCEN; + } + IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg); + + /* Enable DCB for Tx with 8 TCs */ + if (dcb_config->num_tcs.pg_tcs == 8) + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + else { + /* We support both VT-on and VT-off with 4 TCs. 
*/ + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + if (dcb_config->vt_mode) + reg |= IXGBE_MTQC_VT_ENA; } + IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); - return 0; + /* Disable drop for all queues */ + for (q = 0; q < 128; q++) + IXGBE_WRITE_REG(hw, IXGBE_QDE, + (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT))); + + /* Enable the Tx desc arbiter */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + /* Enable Security TX Buffer IFG for DCB */ + reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + reg |= IXGBE_SECTX_DCB; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); + + return IXGBE_SUCCESS; } /** * ixgbe_dcb_hw_config_82599 - Configure and enable DCB * @hw: pointer to hardware structure - * @refill: refill credits index by traffic class - * @max: max credits index by traffic class - * @bwg_id: bandwidth grouping indexed by traffic class - * @prio_type: priority type indexed by traffic class - * @pfc_en: enabled pfc bitmask + * @dcb_config: pointer to ixgbe_dcb_config structure * * Configure dcb settings and enable dcb mode. 
*/ -s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, - u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed, + u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa, + u8 *map) { - ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, - prio_type, prio_tc); - ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, - bwg_id, prio_type); - ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, - bwg_id, prio_type, prio_tc); - ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); - ixgbe_dcb_config_tc_stats_82599(hw); - - return 0; + UNREFERENCED_1PARAMETER(link_speed); + + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa, + map); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, + tsa, map); + + return IXGBE_SUCCESS; } diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.h index 90c370230e20..24be9065d90d 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.h +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.h @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
- You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -26,100 +22,97 @@ *******************************************************************************/ -#ifndef _DCB_82599_CONFIG_H_ -#define _DCB_82599_CONFIG_H_ +#ifndef _IXGBE_DCB_82599_H_ +#define _IXGBE_DCB_82599_H_ /* DCB register definitions */ -#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin, +#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin, * 1 WSP - Weighted Strict Priority */ -#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin, +#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin, * 1 WRR - Weighted Round Robin */ -#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */ -#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ -#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */ -#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must +#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */ +#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */ +#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must * clear! 
*/ -#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */ +#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */ /* Receive UP2TC mapping */ -#define IXGBE_RTRUP2TC_UP_SHIFT 3 +#define IXGBE_RTRUP2TC_UP_SHIFT 3 #define IXGBE_RTRUP2TC_UP_MASK 7 /* Transmit UP2TC mapping */ -#define IXGBE_RTTUP2TC_UP_SHIFT 3 +#define IXGBE_RTTUP2TC_UP_SHIFT 3 -#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ -#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */ -#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */ -#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */ +#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */ +#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */ +#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */ -#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet * buffers enable */ -#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores * (RSS) enable */ /* RTRPCS Bit Masks */ -#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */ +#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */ /* Receive Arbitration Control: 0 Round Robin, 1 DFP */ -#define IXGBE_RTRPCS_RAC 0x00000004 -#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */ +#define IXGBE_RTRPCS_RAC 0x00000004 +#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */ /* RTTDT2C Bit Masks */ -#define IXGBE_RTTDT2C_MCL_SHIFT 12 -#define IXGBE_RTTDT2C_BWG_SHIFT 9 -#define IXGBE_RTTDT2C_GSP 0x40000000 -#define IXGBE_RTTDT2C_LSP 0x80000000 +#define IXGBE_RTTDT2C_MCL_SHIFT 12 +#define IXGBE_RTTDT2C_BWG_SHIFT 9 +#define IXGBE_RTTDT2C_GSP 0x40000000 +#define IXGBE_RTTDT2C_LSP 0x80000000 -#define IXGBE_RTTPT2C_MCL_SHIFT 12 
-#define IXGBE_RTTPT2C_BWG_SHIFT 9 -#define IXGBE_RTTPT2C_GSP 0x40000000 -#define IXGBE_RTTPT2C_LSP 0x80000000 +#define IXGBE_RTTPT2C_MCL_SHIFT 12 +#define IXGBE_RTTPT2C_BWG_SHIFT 9 +#define IXGBE_RTTPT2C_GSP 0x40000000 +#define IXGBE_RTTPT2C_LSP 0x80000000 /* RTTPCS Bit Masks */ -#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin, +#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin, * 1 SP - Strict Priority */ -#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ -#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */ -#define IXGBE_RTTPCS_ARBD_SHIFT 22 -#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */ - -/* SECTXMINIFG DCB */ -#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */ - - -/* DCB hardware-specific driver APIs */ +#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ +#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */ +#define IXGBE_RTTPCS_ARBD_SHIFT 22 +#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */ -/* DCB PFC functions */ -s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc); +#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */ -/* DCB hw initialization */ -s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type, - u8 *prio_tc); - -s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type); - -s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type, - u8 *prio_tc); - -s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, - u16 *max, u8 *bwg_id, u8 *prio_type, - u8 *prio_tc); - -#endif /* _DCB_82599_CONFIG_H */ +/* SECTXMINIFG DCB */ +#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer SEC IFG */ + +/* DCB driver APIs */ + +/* DCB PFC */ +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *, u8, u8 *); + 
+/* DCB stats */ +s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *, + struct ixgbe_dcb_config *); +s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *, + struct ixgbe_hw_stats *, u8); +s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *, + struct ixgbe_hw_stats *, u8); + +/* DCB config arbiters */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, + u8 *, u8 *); +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, + u8 *, u8 *, u8 *); +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, u8 *, + u8 *, u8 *); + +/* DCB initialization */ +s32 ixgbe_dcb_config_82599(struct ixgbe_hw *, + struct ixgbe_dcb_config *); + +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, + u8 *, u8 *); +#endif /* _IXGBE_DCB_82959_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_nl.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_nl.c index b8fc3cfec831..20d9d05c76ea 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_nl.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_nl.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
- You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -27,10 +23,11 @@ *******************************************************************************/ #include "ixgbe.h" + +#if IS_ENABLED(CONFIG_DCB) #include #include "ixgbe_dcb_82598.h" #include "ixgbe_dcb_82599.h" -#include "ixgbe_sriov.h" /* Callbacks for DCB netlink in the kernel */ #define BIT_DCB_MODE 0x01 @@ -38,40 +35,36 @@ #define BIT_PG_RX 0x04 #define BIT_PG_TX 0x08 #define BIT_APP_UPCHG 0x10 -#define BIT_LINKSPEED 0x80 +#define BIT_RESETLINK 0x40 +#define BIT_LINKSPEED 0x80 /* Responses for the DCB_C_SET_ALL command */ -#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ -#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ -#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ +#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ +#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ +#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ -static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) +int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) { struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg; struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg; - struct tc_configuration *src = NULL; - struct tc_configuration *dst = NULL; + struct ixgbe_dcb_tc_config *src = NULL; + struct ixgbe_dcb_tc_config *dst = NULL; int i, j; - int tx = DCB_TX_CONFIG; - int rx = DCB_RX_CONFIG; + int tx = IXGBE_DCB_TX_CONFIG; + int rx = IXGBE_DCB_RX_CONFIG; int changes = 0; -#ifdef IXGBE_FCOE - struct dcb_app app = { - .selector = DCB_APP_IDTYPE_ETHTYPE, - .protocol = ETH_P_FCOE, - }; - u8 up = dcb_getapp(adapter->netdev, &app); - if (up && !(up & BIT(adapter->fcoe.up))) 
+#if IS_ENABLED(CONFIG_FCOE) + if (adapter->fcoe.up_set != adapter->fcoe.up) changes |= BIT_APP_UPCHG; -#endif +#endif /* CONFIG_FCOE */ for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0]; dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0]; - if (dst->path[tx].prio_type != src->path[tx].prio_type) { - dst->path[tx].prio_type = src->path[tx].prio_type; + if (dst->path[tx].tsa != src->path[tx].tsa) { + dst->path[tx].tsa = src->path[tx].tsa; changes |= BIT_PG_TX; } @@ -86,14 +79,14 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) } if (dst->path[tx].up_to_tc_bitmap != - src->path[tx].up_to_tc_bitmap) { + src->path[tx].up_to_tc_bitmap) { dst->path[tx].up_to_tc_bitmap = src->path[tx].up_to_tc_bitmap; changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG); } - if (dst->path[rx].prio_type != src->path[rx].prio_type) { - dst->path[rx].prio_type = src->path[rx].prio_type; + if (dst->path[rx].tsa != src->path[rx].tsa) { + dst->path[rx].tsa = src->path[rx].tsa; changes |= BIT_PG_RX; } @@ -108,7 +101,7 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) } if (dst->path[rx].up_to_tc_bitmap != - src->path[rx].up_to_tc_bitmap) { + src->path[rx].up_to_tc_bitmap) { dst->path[rx].up_to_tc_bitmap = src->path[rx].up_to_tc_bitmap; changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG); @@ -117,6 +110,7 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { j = i - DCB_PG_ATTR_BW_ID_0; + if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) { dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j]; changes |= BIT_PG_TX; @@ -129,8 +123,8 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { j = i - DCB_PFC_UP_ATTR_0; - if (dcfg->tc_config[j].dcb_pfc != scfg->tc_config[j].dcb_pfc) { - dcfg->tc_config[j].dcb_pfc = 
scfg->tc_config[j].dcb_pfc; + if (dcfg->tc_config[j].pfc != scfg->tc_config[j].pfc) { + dcfg->tc_config[j].pfc = scfg->tc_config[j].pfc; changes |= BIT_PFC; } } @@ -153,6 +147,7 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + int err = 0; /* Fail command if not in CEE mode */ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) @@ -160,10 +155,12 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) /* verify there is something to do, if not then exit */ if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - return 0; + goto out; - return !!ixgbe_setup_tc(netdev, - state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0); + err = ixgbe_setup_tc(netdev, + state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0); +out: + return !!err; } static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, @@ -196,7 +193,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, struct ixgbe_adapter *adapter = netdev_priv(netdev); if (prio != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; + adapter->temp_dcb_cfg.tc_config[tc].path[0].tsa = prio; if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) @@ -222,7 +219,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, struct ixgbe_adapter *adapter = netdev_priv(netdev); if (prio != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; + adapter->temp_dcb_cfg.tc_config[tc].path[1].tsa = prio; if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) @@ -247,7 +244,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, { struct ixgbe_adapter *adapter = netdev_priv(netdev); 
- *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type; + *prio = adapter->dcb_cfg.tc_config[tc].path[0].tsa; *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; @@ -267,7 +264,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, { struct ixgbe_adapter *adapter = netdev_priv(netdev); - *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type; + *prio = adapter->dcb_cfg.tc_config[tc].path[1].tsa; *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; @@ -281,23 +278,22 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; } -static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, - u8 setting) +static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int up, u8 pfc) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + u8 tc = ixgbe_dcb_get_tc_from_up(&adapter->temp_dcb_cfg, 0, up); - adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; - if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != - adapter->dcb_cfg.tc_config[priority].dcb_pfc) + adapter->temp_dcb_cfg.tc_config[tc].pfc = pfc; + if (adapter->temp_dcb_cfg.tc_config[tc].pfc != + adapter->dcb_cfg.tc_config[tc].pfc) adapter->temp_dcb_cfg.pfc_mode_enable = true; } -static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, - u8 *setting) +static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int up, u8 *pfc) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - - *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; + u8 tc = ixgbe_dcb_get_tc_from_up(&adapter->dcb_cfg, 0, up); + *pfc = adapter->dcb_cfg.tc_config[tc].pfc; } static void ixgbe_dcbnl_devreset(struct 
net_device *dev) @@ -308,13 +304,21 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev) usleep_range(1000, 2000); if (netif_running(dev)) +#ifdef HAVE_NET_DEVICE_OPS dev->netdev_ops->ndo_stop(dev); +#else + dev->stop(dev); +#endif ixgbe_clear_interrupt_scheme(adapter); ixgbe_init_interrupt_scheme(adapter); if (netif_running(dev)) +#ifdef HAVE_NET_DEVICE_OPS dev->netdev_ops->ndo_open(dev); +#else + dev->open(dev); +#endif clear_bit(__IXGBE_RESETTING, &adapter->state); } @@ -325,83 +329,80 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; struct ixgbe_hw *hw = &adapter->hw; int ret = DCB_NO_HW_CHG; - int i; + u8 prio_tc[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; /* Fail command if not in CEE mode */ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) - return DCB_NO_HW_CHG; + return ret; adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter, - MAX_TRAFFIC_CLASS); + IXGBE_DCB_MAX_TRAFFIC_CLASS); if (!adapter->dcb_set_bitmap) - return DCB_NO_HW_CHG; + return ret; + + ixgbe_dcb_unpack_map_cee(dcb_cfg, IXGBE_DCB_TX_CONFIG, prio_tc); - if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { - u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; - u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; + if (adapter->dcb_set_bitmap & (BIT_PG_TX | BIT_PG_RX)) { /* Priority to TC mapping in CEE case default to 1:1 */ - u8 prio_tc[MAX_USER_PRIORITY]; int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; +#ifdef HAVE_MQPRIO + int i; +#endif -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) if (adapter->netdev->features & NETIF_F_FCOE_MTU) max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); #endif - ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, - DCB_TX_CONFIG); - ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, - DCB_RX_CONFIG); + ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_cfg, max_frame, + IXGBE_DCB_TX_CONFIG); - ixgbe_dcb_unpack_refill(dcb_cfg, DCB_TX_CONFIG, refill); - 
ixgbe_dcb_unpack_max(dcb_cfg, max); - ixgbe_dcb_unpack_bwgid(dcb_cfg, DCB_TX_CONFIG, bwg_id); - ixgbe_dcb_unpack_prio(dcb_cfg, DCB_TX_CONFIG, prio_type); - ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc); + ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_cfg, max_frame, + IXGBE_DCB_RX_CONFIG); - ixgbe_dcb_hw_ets_config(hw, refill, max, bwg_id, - prio_type, prio_tc); + ixgbe_dcb_hw_config_cee(hw, dcb_cfg); +#ifdef HAVE_MQPRIO for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) netdev_set_prio_tc_map(netdev, i, prio_tc[i]); - +#endif /* HAVE_MQPRIO */ ret = DCB_HW_CHG_RST; } if (adapter->dcb_set_bitmap & BIT_PFC) { if (dcb_cfg->pfc_mode_enable) { u8 pfc_en; - u8 prio_tc[MAX_USER_PRIORITY]; - - ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc); - ixgbe_dcb_unpack_pfc(dcb_cfg, &pfc_en); - ixgbe_dcb_hw_pfc_config(hw, pfc_en, prio_tc); + ixgbe_dcb_unpack_pfc_cee(dcb_cfg, prio_tc, &pfc_en); + ixgbe_dcb_config_pfc(hw, pfc_en, prio_tc); } else { hw->mac.ops.fc_enable(hw); } + /* This is known driver so disable MDD before updating SRRCTL */ + if ((adapter->num_vfs) && (hw->mac.ops.disable_mdd) && + (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) + hw->mac.ops.disable_mdd(hw); ixgbe_set_rx_drop_en(adapter); - ret = DCB_HW_CHG; + if ((adapter->num_vfs) && (hw->mac.ops.enable_mdd) && + (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) + hw->mac.ops.enable_mdd(hw); + + if (ret != DCB_HW_CHG_RST) + ret = DCB_HW_CHG; } -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* Reprogam FCoE hardware offloads when the traffic class * FCoE is using changes. This happens if the APP info * changes or the up2tc mapping is updated. 
*/ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { - struct dcb_app app = { - .selector = DCB_APP_IDTYPE_ETHTYPE, - .protocol = ETH_P_FCOE, - }; - u8 up = dcb_getapp(netdev, &app); - - adapter->fcoe.up = ffs(up) - 1; + adapter->fcoe.up_set = adapter->fcoe.up; ixgbe_dcbnl_devreset(netdev); ret = DCB_HW_CHG_RST; } -#endif +#endif /* CONFIG_FCOE */ adapter->dcb_set_bitmap = 0x00; return ret; @@ -409,7 +410,9 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) { +#ifdef HAVE_DCBNL_IEEE struct ixgbe_adapter *adapter = netdev_priv(netdev); +#endif switch (capid) { case DCB_CAP_ATTR_PG: @@ -433,9 +436,11 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) case DCB_CAP_ATTR_BCN: *cap = false; break; +#ifdef HAVE_DCBNL_IEEE case DCB_CAP_ATTR_DCBX: *cap = adapter->dcbx_cap; break; +#endif default: *cap = false; break; @@ -444,9 +449,14 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) return 0; } +#ifdef NUMTCS_RETURNS_U8 +static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +#else static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +#endif { struct ixgbe_adapter *adapter = netdev_priv(netdev); + u8 rval = 0; if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { switch (tcid) { @@ -457,18 +467,42 @@ static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) *num = adapter->dcb_cfg.num_tcs.pfc_tcs; break; default: - return -EINVAL; + rval = -EINVAL; + break; } } else { - return -EINVAL; + rval = -EINVAL; } - return 0; + return rval; } +#ifdef NUMTCS_RETURNS_U8 +static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +#else static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +#endif { - return -EINVAL; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u8 rval = 0; + + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + switch 
(tcid) { + case DCB_NUMTCS_ATTR_PG: + adapter->dcb_cfg.num_tcs.pg_tcs = num; + break; + case DCB_NUMTCS_ATTR_PFC: + adapter->dcb_cfg.num_tcs.pfc_tcs = num; + break; + default: + rval = -EINVAL; + break; + } + } else { + rval = -EINVAL; + } + + return rval; } static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) @@ -483,8 +517,10 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.pfc_mode_enable = state; + return; } +#ifdef HAVE_DCBNL_OPS_GETAPP /** * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority * @netdev : the corresponding netdev @@ -492,35 +528,99 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) * @id: id is either ether type or TCP/UDP port number * * Returns : on success, returns a non-zero 802.1p user priority bitmap - * otherwise returns -EINVAL as the invalid user priority bitmap to indicate an + * otherwise returns 0 as the invalid user priority bitmap to indicate an * error. 
*/ +#ifdef HAVE_DCBNL_OPS_SETAPP_RETURN_INT static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +#else +static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +#endif { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + u8 rval = 0; +#ifdef HAVE_DCBNL_IEEE struct dcb_app app = { - .selector = idtype, - .protocol = id, - }; + .selector = idtype, + .protocol = id, + }; - if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) - return -EINVAL; + rval = dcb_getapp(netdev, &app); +#endif + + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: +#if IS_ENABLED(CONFIG_FCOE) + if (id == ETH_P_FCOE) + rval = ixgbe_fcoe_getapp(netdev); +#endif + break; + case DCB_APP_IDTYPE_PORTNUM: + break; + default: + break; + } + + return rval; +} + +/** + * ixgbe_dcbnl_setapp - set the DCBX application user priority + * @netdev : the corresponding netdev + * @idtype : identifies the id as ether type or TCP/UDP port number + * @id: id is either ether type or TCP/UDP port number + * @up: the 802.1p user priority bitmap + * + * Returns : 0 on success or 1 on error + */ +#ifdef HAVE_DCBNL_OPS_SETAPP_RETURN_INT +static int ixgbe_dcbnl_setapp(struct net_device *netdev, +#else +static u8 ixgbe_dcbnl_setapp(struct net_device *netdev, +#endif + u8 idtype, u16 id, u8 up) +{ + int err = 0; +#ifdef HAVE_DCBNL_IEEE + struct dcb_app app; + + app.selector = idtype; + app.protocol = id; + app.priority = up; + err = dcb_setapp(netdev, &app); +#endif + + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: +#if IS_ENABLED(CONFIG_FCOE) + if (id == ETH_P_FCOE) { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->fcoe.up = up ? 
ffs(up) - 1 : IXGBE_FCOE_DEFUP; + } +#endif + break; + case DCB_APP_IDTYPE_PORTNUM: + break; + default: + break; + } - return dcb_getapp(netdev, &app); + return err; } +#endif /* HAVE_DCBNL_OPS_GETAPP */ +#ifdef HAVE_DCBNL_IEEE static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets; - ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs; - /* No IEEE PFC settings available */ if (!my_ets) - return 0; + return -EINVAL; + ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs; ets->cbs = my_ets->cbs; memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); @@ -534,7 +634,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; - int i, err; + int i, err = 0; __u8 max_tc = 0; __u8 map_chg = 0; @@ -546,13 +646,13 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, GFP_KERNEL); if (!adapter->ixgbe_ieee_ets) return -ENOMEM; - /* initialize UP2TC mappings to invalid value */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) adapter->ixgbe_ieee_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS; /* if possible update UP2TC mappings from HW */ - ixgbe_dcb_read_rtrup2tc(&adapter->hw, + if (adapter->hw.mac.ops.get_rtrup2tc) + adapter->hw.mac.ops.get_rtrup2tc(&adapter->hw, adapter->ixgbe_ieee_ets->prio_tc); } @@ -571,15 +671,17 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs) return -EINVAL; - if (max_tc != netdev_get_num_tc(dev)) { + if (max_tc != netdev_get_num_tc(dev)) err = ixgbe_setup_tc(dev, max_tc); - if (err) - return err; - } else if (map_chg) { + else if (map_chg) ixgbe_dcbnl_devreset(dev); - } - return ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); + if (err) + goto err_out; + + err = ixgbe_dcb_hw_ets(&adapter->hw, ets, 
max_frame); +err_out: + return err; } static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, @@ -589,17 +691,16 @@ static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc; int i; - pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs; - /* No IEEE PFC settings available */ if (!my_pfc) - return 0; + return -EINVAL; + pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs; pfc->pfc_en = my_pfc->pfc_en; pfc->mbc = my_pfc->mbc; pfc->delay = my_pfc->delay; - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { pfc->requests[i] = adapter->stats.pxoffrxc[i]; pfc->indications[i] = adapter->stats.pxofftxc[i]; } @@ -628,14 +729,24 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, prio_tc = adapter->ixgbe_ieee_ets->prio_tc; memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc)); + /* Enable link flow control parameters if PFC is disabled */ if (pfc->pfc_en) - err = ixgbe_dcb_hw_pfc_config(hw, pfc->pfc_en, prio_tc); + err = ixgbe_dcb_config_pfc(hw, pfc->pfc_en, prio_tc); else err = hw->mac.ops.fc_enable(hw); + /* This is known driver so disable MDD before updating SRRCTL */ + if ((adapter->num_vfs) && (hw->mac.ops.disable_mdd) && + (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) + hw->mac.ops.disable_mdd(hw); + ixgbe_set_rx_drop_en(adapter); + if ((adapter->num_vfs) && (hw->mac.ops.enable_mdd) && + (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) + hw->mac.ops.enable_mdd(hw); + return err; } @@ -643,47 +754,30 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) { struct ixgbe_adapter *adapter = netdev_priv(dev); - int err; + int err = -EINVAL; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) - return -EINVAL; + return err; err = dcb_ieee_setapp(dev, app); - if (err) - return err; -#ifdef IXGBE_FCOE - if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && +#if IS_ENABLED(CONFIG_FCOE) + if (!err && app->selector == 
IEEE_8021QAZ_APP_SEL_ETHERTYPE && app->protocol == ETH_P_FCOE) { u8 app_mask = dcb_ieee_getapp_mask(dev, app); - if (app_mask & BIT(adapter->fcoe.up)) - return 0; + if (app_mask & (1 << adapter->fcoe.up)) + return err; adapter->fcoe.up = app->priority; + adapter->fcoe.up_set = adapter->fcoe.up; ixgbe_dcbnl_devreset(dev); } #endif - - /* VF devices should use default UP when available */ - if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && - app->protocol == 0) { - int vf; - - adapter->default_up = app->priority; - - for (vf = 0; vf < adapter->num_vfs; vf++) { - struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; - - if (!vfinfo->pf_qos) - ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, - app->priority, vf); - } - } - return 0; } +#ifdef HAVE_DCBNL_IEEE_DELAPP static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) { @@ -695,39 +789,22 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, err = dcb_ieee_delapp(dev, app); -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && app->protocol == ETH_P_FCOE) { u8 app_mask = dcb_ieee_getapp_mask(dev, app); - if (app_mask & BIT(adapter->fcoe.up)) - return 0; + if (app_mask & (1 << adapter->fcoe.up)) + return err; adapter->fcoe.up = app_mask ? - ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC; + ffs(app_mask) - 1 : IXGBE_FCOE_DEFUP; ixgbe_dcbnl_devreset(dev); } #endif - /* IF default priority is being removed clear VF default UP */ - if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && - app->protocol == 0 && adapter->default_up == app->priority) { - int vf; - long unsigned int app_mask = dcb_ieee_getapp_mask(dev, app); - int qos = app_mask ? 
find_first_bit(&app_mask, 8) : 0; - - adapter->default_up = qos; - - for (vf = 0; vf < adapter->num_vfs; vf++) { - struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; - - if (!vfinfo->pf_qos) - ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, - qos, vf); - } - } - return err; } +#endif /* HAVE_DCBNL_IEEE_DELAPP */ static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) { @@ -738,9 +815,8 @@ static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) { struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ieee_ets ets = {0}; - struct ieee_pfc pfc = {0}; - int err = 0; + struct ieee_ets ets = { .ets_cap = 0 }; + struct ieee_pfc pfc = { .pfc_en = 0 }; /* no support for LLD_MANAGED modes or CEE+IEEE */ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || @@ -761,7 +837,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) ixgbe_dcbnl_ieee_setets(dev, &ets); ixgbe_dcbnl_ieee_setpfc(dev, &pfc); } else if (mode & DCB_CAP_DCBX_VER_CEE) { - u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG; + u8 mask = (BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG); adapter->dcb_set_bitmap |= mask; ixgbe_dcbnl_set_all(dev); @@ -771,19 +847,25 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) */ ixgbe_dcbnl_ieee_setets(dev, &ets); ixgbe_dcbnl_ieee_setpfc(dev, &pfc); - err = ixgbe_setup_tc(dev, 0); + ixgbe_setup_tc(dev, 0); } - return err ? 
1 : 0; + return 0; } -const struct dcbnl_rtnl_ops dcbnl_ops = { +#endif + +struct dcbnl_rtnl_ops ixgbe_dcbnl_ops = { +#ifdef HAVE_DCBNL_IEEE .ieee_getets = ixgbe_dcbnl_ieee_getets, .ieee_setets = ixgbe_dcbnl_ieee_setets, .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc, .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc, .ieee_setapp = ixgbe_dcbnl_ieee_setapp, +#ifdef HAVE_DCBNL_IEEE_DELAPP .ieee_delapp = ixgbe_dcbnl_ieee_delapp, +#endif +#endif .getstate = ixgbe_dcbnl_get_state, .setstate = ixgbe_dcbnl_set_state, .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, @@ -803,7 +885,14 @@ const struct dcbnl_rtnl_ops dcbnl_ops = { .setnumtcs = ixgbe_dcbnl_setnumtcs, .getpfcstate = ixgbe_dcbnl_getpfcstate, .setpfcstate = ixgbe_dcbnl_setpfcstate, +#ifdef HAVE_DCBNL_OPS_GETAPP .getapp = ixgbe_dcbnl_getapp, + .setapp = ixgbe_dcbnl_setapp, +#endif +#ifdef HAVE_DCBNL_IEEE .getdcbx = ixgbe_dcbnl_getdcbx, .setdcbx = ixgbe_dcbnl_setdcbx, +#endif }; + +#endif /* CONFIG_DCB */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_debugfs.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_debugfs.c index 5e2c1e35e517..66f52e211040 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_debugfs.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
- You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -25,11 +21,13 @@ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ -#include -#include #include "ixgbe.h" +#ifdef HAVE_IXGBE_DEBUG_FS +#include +#include + static struct dentry *ixgbe_dbg_root; static char ixgbe_dbg_reg_ops_buf[256] = ""; @@ -205,7 +203,11 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, ixgbe_dbg_netdev_ops_buf[len] = '\0'; if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { +#ifdef HAVE_NET_DEVICE_OPS adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); +#else + adapter->netdev->tx_timeout(adapter->netdev); +#endif /* HAVE_NET_DEVICE_OPS */ e_dev_info("tx_timeout called\n"); } else { e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf); @@ -215,7 +217,7 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, return count; } -static const struct file_operations ixgbe_dbg_netdev_ops_fops = { +static struct file_operations ixgbe_dbg_netdev_ops_fops = { .owner = THIS_MODULE, .open = simple_open, .read = ixgbe_dbg_netdev_ops_read, @@ -253,7 +255,8 @@ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) **/ void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) { - debugfs_remove_recursive(adapter->ixgbe_dbg_adapter); + if (adapter->ixgbe_dbg_adapter) + debugfs_remove_recursive(adapter->ixgbe_dbg_adapter); adapter->ixgbe_dbg_adapter = NULL; } @@ -274,3 +277,5 @@ void ixgbe_dbg_exit(void) { debugfs_remove_recursive(ixgbe_dbg_root); } + +#endif /* HAVE_IXGBE_DEBUG_FS */ diff --git 
a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ethtool.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ethtool.c index 6b23b7406f27..a446f227820c 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ethtool.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
@@ -28,99 +24,122 @@ /* ethtool support for ixgbe */ -#include #include #include -#include #include #include #include #include #include -#include + +#ifdef SIOCETHTOOL +#include #include "ixgbe.h" +#ifdef ETHTOOL_GMODULEINFO #include "ixgbe_phy.h" +#endif +#ifdef HAVE_ETHTOOL_GET_TS_INFO +#include +#endif +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif #define IXGBE_ALL_RAR_ENTRIES 16 -enum {NETDEV_STATS, IXGBE_STATS}; - +#ifdef ETHTOOL_OPS_COMPAT +#include "kcompat_ethtool.c" +#endif +#ifdef ETHTOOL_GSTATS struct ixgbe_stats { char stat_string[ETH_GSTRING_LEN]; - int type; int sizeof_stat; int stat_offset; }; -#define IXGBE_STAT(m) IXGBE_STATS, \ - sizeof(((struct ixgbe_adapter *)0)->m), \ - offsetof(struct ixgbe_adapter, m) -#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ - sizeof(((struct rtnl_link_stats64 *)0)->m), \ - offsetof(struct rtnl_link_stats64, m) - -static const struct ixgbe_stats ixgbe_gstrings_stats[] = { - {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, - {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, - {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, - {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)}, - {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, - {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, - {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, - {"tx_bytes_nic", IXGBE_STAT(stats.gotc)}, - {"lsc_int", IXGBE_STAT(lsc_int)}, - {"tx_busy", IXGBE_STAT(tx_busy)}, - {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, - {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)}, - {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)}, - {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)}, - {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)}, - {"multicast", IXGBE_NETDEV_STAT(multicast)}, - {"broadcast", IXGBE_STAT(stats.bprc)}, - {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, - {"collisions", IXGBE_NETDEV_STAT(collisions)}, - {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)}, - {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)}, - {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)}, - 
{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, - {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, - {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, - {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, - {"fdir_overflow", IXGBE_STAT(fdir_overflow)}, - {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, - {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, - {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, - {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)}, - {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)}, - {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, - {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, - {"tx_restart_queue", IXGBE_STAT(restart_queue)}, - {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, - {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, - {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, - {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, - {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, - {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, - {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, - {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, - {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, - {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, - {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)}, - {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)}, - {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)}, - {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)}, -#ifdef IXGBE_FCOE - {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, - {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, - {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, - {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, - {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)}, - {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)}, - {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)}, - {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, -#endif /* IXGBE_FCOE */ +#define IXGBE_NETDEV_STAT(_net_stat) { \ + 
.stat_string = #_net_stat, \ + .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +} +static const struct ixgbe_stats ixgbe_gstrings_net_stats[] = { + IXGBE_NETDEV_STAT(rx_packets), + IXGBE_NETDEV_STAT(tx_packets), + IXGBE_NETDEV_STAT(rx_bytes), + IXGBE_NETDEV_STAT(tx_bytes), + IXGBE_NETDEV_STAT(rx_errors), + IXGBE_NETDEV_STAT(tx_errors), + IXGBE_NETDEV_STAT(rx_dropped), + IXGBE_NETDEV_STAT(tx_dropped), + IXGBE_NETDEV_STAT(multicast), + IXGBE_NETDEV_STAT(collisions), + IXGBE_NETDEV_STAT(rx_over_errors), + IXGBE_NETDEV_STAT(rx_crc_errors), + IXGBE_NETDEV_STAT(rx_frame_errors), + IXGBE_NETDEV_STAT(rx_fifo_errors), + IXGBE_NETDEV_STAT(rx_missed_errors), + IXGBE_NETDEV_STAT(tx_aborted_errors), + IXGBE_NETDEV_STAT(tx_carrier_errors), + IXGBE_NETDEV_STAT(tx_fifo_errors), + IXGBE_NETDEV_STAT(tx_heartbeat_errors), +}; + +#define IXGBE_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(struct ixgbe_adapter, _stat), \ + .stat_offset = offsetof(struct ixgbe_adapter, _stat) \ +} +static struct ixgbe_stats ixgbe_gstrings_stats[] = { + IXGBE_STAT("rx_pkts_nic", stats.gprc), + IXGBE_STAT("tx_pkts_nic", stats.gptc), + IXGBE_STAT("rx_bytes_nic", stats.gorc), + IXGBE_STAT("tx_bytes_nic", stats.gotc), + IXGBE_STAT("lsc_int", lsc_int), + IXGBE_STAT("tx_busy", tx_busy), + IXGBE_STAT("non_eop_descs", non_eop_descs), + IXGBE_STAT("broadcast", stats.bprc), + IXGBE_STAT("rx_no_buffer_count", stats.rnbc[0]) , + IXGBE_STAT("tx_timeout_count", tx_timeout_count), + IXGBE_STAT("tx_restart_queue", restart_queue), + IXGBE_STAT("rx_long_length_errors", stats.roc), + IXGBE_STAT("rx_short_length_errors", stats.ruc), + IXGBE_STAT("tx_flow_control_xon", stats.lxontxc), + IXGBE_STAT("rx_flow_control_xon", stats.lxonrxc), + IXGBE_STAT("tx_flow_control_xoff", stats.lxofftxc), + IXGBE_STAT("rx_flow_control_xoff", stats.lxoffrxc), + IXGBE_STAT("rx_csum_offload_errors", hw_csum_rx_error), + 
IXGBE_STAT("alloc_rx_page_failed", alloc_rx_page_failed), + IXGBE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + IXGBE_STAT("rx_no_dma_resources", hw_rx_no_dma_resources), + IXGBE_STAT("hw_rsc_aggregated", rsc_total_count), + IXGBE_STAT("hw_rsc_flushed", rsc_total_flush), +#ifdef HAVE_TX_MQ + IXGBE_STAT("fdir_match", stats.fdirmatch), + IXGBE_STAT("fdir_miss", stats.fdirmiss), + IXGBE_STAT("fdir_overflow", fdir_overflow), +#endif /* HAVE_TX_MQ */ +#if IS_ENABLED(CONFIG_FCOE) + IXGBE_STAT("fcoe_bad_fccrc", stats.fccrc), + IXGBE_STAT("fcoe_last_errors", stats.fclast), + IXGBE_STAT("rx_fcoe_dropped", stats.fcoerpdc), + IXGBE_STAT("rx_fcoe_packets", stats.fcoeprc), + IXGBE_STAT("rx_fcoe_dwords", stats.fcoedwrc), + IXGBE_STAT("fcoe_noddp", stats.fcoe_noddp), + IXGBE_STAT("fcoe_noddp_ext_buff", stats.fcoe_noddp_ext_buff), + IXGBE_STAT("tx_fcoe_packets", stats.fcoeptc), + IXGBE_STAT("tx_fcoe_dwords", stats.fcoedwtc), +#endif /* CONFIG_FCOE */ + IXGBE_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IXGBE_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IXGBE_STAT("os2bmc_tx_by_host", stats.o2bspc), + IXGBE_STAT("os2bmc_rx_by_host", stats.b2ogprc), +#ifdef HAVE_PTP_1588_CLOCK + IXGBE_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IXGBE_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + IXGBE_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +#endif /* HAVE_PTP_1588_CLOCK */ }; /* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so @@ -128,58 +147,292 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = { * used because we do not have a good way to get the max number of * rx queues with CONFIG_RPS disabled. 
*/ +#ifdef HAVE_TX_MQ +#ifdef HAVE_NETDEV_SELECT_QUEUE #define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues +#define IXGBE_NUM_TX_QUEUES netdev->num_tx_queues +#else +#define IXGBE_NUM_RX_QUEUES adapter->indices +#define IXGBE_NUM_TX_QUEUES adapter->indices +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else /* HAVE_TX_MQ */ +#define IXGBE_NUM_TX_QUEUES 1 +#define IXGBE_NUM_RX_QUEUES ( \ + ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) +#endif /* HAVE_TX_MQ */ #define IXGBE_QUEUE_STATS_LEN ( \ - (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \ - (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) -#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) + (IXGBE_NUM_TX_QUEUES + IXGBE_NUM_RX_QUEUES) * \ + (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) +#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) +#define IXGBE_NETDEV_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_net_stats) #define IXGBE_PB_STATS_LEN ( \ - (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ - sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ - sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ - sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ - / sizeof(u64)) + (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ + / sizeof(u64)) +#define IXGBE_VF_STATS_LEN \ + ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_vfs) * \ + (sizeof(struct vf_stats) / sizeof(u64))) #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ + IXGBE_NETDEV_STATS_LEN + \ IXGBE_PB_STATS_LEN + \ - IXGBE_QUEUE_STATS_LEN) + IXGBE_QUEUE_STATS_LEN + \ + IXGBE_VF_STATS_LEN) +#endif /* ETHTOOL_GSTATS */ +#ifdef ETHTOOL_TEST static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Eeprom test (offline)", "Interrupt test (offline)", "Loopback test (offline)", "Link test (on/offline)" }; 
-#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN +#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) +#endif /* ETHTOOL_TEST */ + +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT +static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IXGBE_PRIV_FLAGS_FD_ATR BIT(0) + "flow-director-atr", +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC +#define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(1) + "legacy-rx", +#endif +}; + +#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings) +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ /* currently supported speeds for 10G */ -#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \ - SUPPORTED_10000baseKX4_Full | \ - SUPPORTED_10000baseKR_Full) - -#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane) - -static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw) -{ - if (!ixgbe_isbackplane(hw->phy.media_type)) - return SUPPORTED_10000baseT_Full; - - switch (hw->device_id) { - case IXGBE_DEV_ID_82598: - case IXGBE_DEV_ID_82599_KX4: - case IXGBE_DEV_ID_82599_KX4_MEZZ: - case IXGBE_DEV_ID_X550EM_X_KX4: - return SUPPORTED_10000baseKX4_Full; - case IXGBE_DEV_ID_82598_BX: - case IXGBE_DEV_ID_82599_KR: - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_X_XFI: - return SUPPORTED_10000baseKR_Full; - default: - return SUPPORTED_10000baseKX4_Full | - SUPPORTED_10000baseKR_Full; +#define ADVERTISED_MASK_10G (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full | SUPPORTED_10000baseKR_Full) + +#define ixgbe_isbackplane(type) ((type == ixgbe_media_type_backplane)? 
true : false) + +static __u32 ixgbe_backplane_type(struct ixgbe_hw *hw) +{ + __u32 mode = 0x00; + switch(hw->device_id) + { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_X550EM_X_KX4: + mode = SUPPORTED_10000baseKX4_Full; + break; + case IXGBE_DEV_ID_82598_BX: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_XFI: + mode = SUPPORTED_10000baseKR_Full; + break; + default: + mode = (SUPPORTED_10000baseKX4_Full | SUPPORTED_10000baseKR_Full); + break; } + return mode; } +#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +static int ixgbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + ixgbe_link_speed supported_link; + bool autoneg = false; + u32 supported, advertising; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + + hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); + + /* set the supported link speeds */ + if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) + supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? + ixgbe_backplane_type(hw) : + SUPPORTED_10000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) + supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? 
+ SUPPORTED_1000baseKX_Full : + SUPPORTED_1000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_100_FULL) + supported |= SUPPORTED_100baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_10_FULL) + supported |= SUPPORTED_10baseT_Full; + + /* default advertised speed if phy.autoneg_advertised isn't set */ + advertising = supported; + + /* set the advertised speeds */ + if (hw->phy.autoneg_advertised) { + advertising = 0; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) + advertising |= ADVERTISED_10baseT_Full; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + advertising |= ADVERTISED_100baseT_Full; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + advertising |= supported & ADVERTISED_MASK_10G; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { + if (supported & SUPPORTED_1000baseKX_Full) + advertising |= ADVERTISED_1000baseKX_Full; + else + advertising |= ADVERTISED_1000baseT_Full; + } + } else { + if (hw->phy.multispeed_fiber && !autoneg) { + if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) + advertising = ADVERTISED_10000baseT_Full; + } + } + + if (autoneg) { + supported |= SUPPORTED_Autoneg; + advertising |= ADVERTISED_Autoneg; + cmd->base.autoneg = AUTONEG_ENABLE; + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + } + + /* Determine the remaining settings based on the PHY type. 
*/ + switch (adapter->hw.phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_aq: + case ixgbe_phy_x550em_ext_t: + case ixgbe_phy_fw: + case ixgbe_phy_cu_unknown: + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + cmd->base.port = PORT_TP; + break; + case ixgbe_phy_qt: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; + break; + case ixgbe_phy_nl: + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + case ixgbe_phy_qsfp_passive_unknown: + case ixgbe_phy_qsfp_active_unknown: + case ixgbe_phy_qsfp_intel: + case ixgbe_phy_qsfp_unknown: + switch (adapter->hw.phy.sfp_type) { + /* SFP+ devices, further checking needed */ + case ixgbe_sfp_type_da_cu: + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_DA; + break; + case ixgbe_sfp_type_sr: + case ixgbe_sfp_type_lr: + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + case ixgbe_sfp_type_1g_lx_core0: + case ixgbe_sfp_type_1g_lx_core1: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; + break; + case ixgbe_sfp_type_not_present: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_NONE; + break; + case ixgbe_sfp_type_1g_cu_core0: + case ixgbe_sfp_type_1g_cu_core1: + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + cmd->base.port = PORT_TP; + break; + case ixgbe_sfp_type_unknown: + default: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_OTHER; + break; + } + break; + case ixgbe_phy_xaui: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_NONE; + break; + case 
ixgbe_phy_unknown: + case ixgbe_phy_generic: + case ixgbe_phy_sfp_unsupported: + default: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_OTHER; + break; + } + + /* Indicate pause support */ + supported |= SUPPORTED_Pause; + + switch (hw->fc.requested_mode) { + case ixgbe_fc_full: + advertising |= ADVERTISED_Pause; + break; + case ixgbe_fc_rx_pause: + advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; + break; + case ixgbe_fc_tx_pause: + advertising |= ADVERTISED_Asym_Pause; + break; + default: + advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + } + + if (netif_carrier_ok(netdev)) { + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + cmd->base.speed = SPEED_10000; + break; + case IXGBE_LINK_SPEED_5GB_FULL: + cmd->base.speed = SPEED_5000; + break; +#ifdef SUPPORTED_2500baseX_Full + case IXGBE_LINK_SPEED_2_5GB_FULL: + cmd->base.speed = SPEED_2500; + break; +#endif /* SUPPORTED_2500baseX_Full */ + case IXGBE_LINK_SPEED_1GB_FULL: + cmd->base.speed = SPEED_1000; + break; + case IXGBE_LINK_SPEED_100_FULL: + cmd->base.speed = SPEED_100; + break; + case IXGBE_LINK_SPEED_10_FULL: + cmd->base.speed = SPEED_10; + break; + default: + break; + } + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + supported); + + return 0; +} +#else /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ static int ixgbe_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { @@ -192,25 +445,30 @@ static int ixgbe_get_settings(struct net_device *netdev, /* set the supported link speeds */ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->supported |= ixgbe_get_supported_10gtypes(hw); + ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? 
+ ixgbe_backplane_type(hw) : + SUPPORTED_10000baseT_Full; if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? - SUPPORTED_1000baseKX_Full : - SUPPORTED_1000baseT_Full; + SUPPORTED_1000baseKX_Full : + SUPPORTED_1000baseT_Full; if (supported_link & IXGBE_LINK_SPEED_100_FULL) - ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ? - SUPPORTED_1000baseKX_Full : - SUPPORTED_100baseT_Full; + ecmd->supported |= SUPPORTED_100baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_10_FULL) + ecmd->supported |= SUPPORTED_10baseT_Full; /* default advertised speed if phy.autoneg_advertised isn't set */ ecmd->advertising = ecmd->supported; + /* set the advertised speeds */ if (hw->phy.autoneg_advertised) { ecmd->advertising = 0; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) + ecmd->advertising |= ADVERTISED_10baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) ecmd->advertising |= ADVERTISED_100baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising |= ecmd->supported & ADVRTSD_MSK_10G; + ecmd->advertising |= (ecmd->supported & ADVERTISED_MASK_10G); if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { if (ecmd->supported & SUPPORTED_1000baseKX_Full) ecmd->advertising |= ADVERTISED_1000baseKX_Full; @@ -228,8 +486,9 @@ static int ixgbe_get_settings(struct net_device *netdev, ecmd->supported |= SUPPORTED_Autoneg; ecmd->advertising |= ADVERTISED_Autoneg; ecmd->autoneg = AUTONEG_ENABLE; - } else + } else { ecmd->autoneg = AUTONEG_DISABLE; + } ecmd->transceiver = XCVR_EXTERNAL; @@ -260,8 +519,8 @@ static int ixgbe_get_settings(struct net_device *netdev, case ixgbe_phy_qsfp_active_unknown: case ixgbe_phy_qsfp_intel: case ixgbe_phy_qsfp_unknown: - /* SFP+ devices, further checking needed */ switch (adapter->hw.phy.sfp_type) { + /* SFP+ devices, further checking needed */ case ixgbe_sfp_type_da_cu: case ixgbe_sfp_type_da_cu_core0: case 
ixgbe_sfp_type_da_cu_core1: @@ -339,15 +598,23 @@ static int ixgbe_get_settings(struct net_device *netdev, case IXGBE_LINK_SPEED_10GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_10000); break; + case IXGBE_LINK_SPEED_5GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_5000); + break; +#ifdef SUPPORTED_2500baseX_Full case IXGBE_LINK_SPEED_2_5GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_2500); break; +#endif /* SUPPORTED_2500baseX_Full */ case IXGBE_LINK_SPEED_1GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_1000); break; case IXGBE_LINK_SPEED_100_FULL: ethtool_cmd_speed_set(ecmd, SPEED_100); break; + case IXGBE_LINK_SPEED_10_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_10); + break; default: break; } @@ -359,7 +626,80 @@ static int ixgbe_get_settings(struct net_device *netdev, return 0; } +#endif /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ + +#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +static int ixgbe_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 advertised, old; + s32 err = 0; + u32 supported, advertising; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if ((hw->phy.media_type == ixgbe_media_type_copper) || + (hw->phy.multispeed_fiber)) { + /* + * this function does not support duplex forcing, but can + * limit the advertising of the adapter to the specified speed + */ + if (advertising & ~supported) + return -EINVAL; + + /* only allow one speed at a time if no autoneg */ + if (!cmd->base.autoneg && hw->phy.multispeed_fiber) { + if (advertising == + (ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full)) + return -EINVAL; + } + + old = hw->phy.autoneg_advertised; + advertised = 0; + if (advertising & ADVERTISED_10000baseT_Full) + advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (advertising & 
ADVERTISED_1000baseT_Full) + advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (advertising & ADVERTISED_100baseT_Full) + advertised |= IXGBE_LINK_SPEED_100_FULL; + + if (advertising & ADVERTISED_10baseT_Full) + advertised |= IXGBE_LINK_SPEED_10_FULL; + + if (old == advertised) + return err; + /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + hw->mac.autotry_restart = true; + err = hw->mac.ops.setup_link(hw, advertised, true); + if (err) { + e_info(probe, "setup link failed with code %d\n", err); + hw->mac.ops.setup_link(hw, old, true); + } + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); + } else { + /* in this case we currently only support 10Gb/FULL */ + u32 speed = cmd->base.speed; + if ((cmd->base.autoneg == AUTONEG_ENABLE) || + (advertising != ADVERTISED_10000baseT_Full) || + (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL)) + return -EINVAL; + } + + return err; +} +#else /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ static int ixgbe_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { @@ -412,9 +752,11 @@ static int ixgbe_set_settings(struct net_device *netdev, hw->mac.ops.setup_link(hw, old, true); } clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); - } else { + } + else { /* in this case we currently only support 10Gb/FULL */ u32 speed = ethtool_cmd_speed(ecmd); + if ((ecmd->autoneg == AUTONEG_ENABLE) || (ecmd->advertising != ADVERTISED_10000baseT_Full) || (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) @@ -423,6 +765,7 @@ static int ixgbe_set_settings(struct net_device *netdev, return err; } +#endif /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ static void ixgbe_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) @@ -458,18 +801,19 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, (adapter->flags & IXGBE_FLAG_DCB_ENABLED)) return -EINVAL; - /* some devices do not support autoneg of link flow 
control */ + + /* some devices do not support autoneg of flow control */ if ((pause->autoneg == AUTONEG_ENABLE) && !ixgbe_device_supports_autoneg_fc(hw)) - return -EINVAL; + return -EINVAL; fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) fc.requested_mode = ixgbe_fc_full; - else if (pause->rx_pause && !pause->tx_pause) + else if (pause->rx_pause) fc.requested_mode = ixgbe_fc_rx_pause; - else if (!pause->rx_pause && pause->tx_pause) + else if (pause->tx_pause) fc.requested_mode = ixgbe_fc_tx_pause; else fc.requested_mode = ixgbe_fc_none; @@ -495,16 +839,12 @@ static u32 ixgbe_get_msglevel(struct net_device *netdev) static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - adapter->msg_enable = data; - - /* 2018/11/14 pega-julia modified start */ - /* Purpose : Add for light OOB LED static. */ - struct ixgbe_hw *hw = &adapter->hw; u16 regVal; s32 rc; - /* For M88E1512, write 3 in (page 0,register 22)[Page Address Register] to goto page 3 */ + adapter->msg_enable = data; + regVal = 0x03; rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); if (rc) @@ -548,19 +888,18 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) if (rc) hw_err(hw, "page register write failed, rc:%x\n", rc); - /* 2018/11/14 pega-julia modified end */ } -static int ixgbe_get_regs_len(struct net_device *netdev) +static int ixgbe_get_regs_len(struct net_device __always_unused *netdev) { -#define IXGBE_REGS_LEN 1139 +#define IXGBE_REGS_LEN 1129 return IXGBE_REGS_LEN * sizeof(u32); } -#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ +#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_) -static void ixgbe_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *p) +static void ixgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw 
= &adapter->hw; @@ -573,197 +912,166 @@ static void ixgbe_get_regs(struct net_device *netdev, hw->device_id; /* General Registers */ - regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); - regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); - regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); - regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); - regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); - regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); - regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); + regs_buff[0] = IXGBE_R32_Q(hw, IXGBE_CTRL); + regs_buff[1] = IXGBE_R32_Q(hw, IXGBE_STATUS); + regs_buff[2] = IXGBE_R32_Q(hw, IXGBE_CTRL_EXT); + regs_buff[3] = IXGBE_R32_Q(hw, IXGBE_ESDP); + regs_buff[4] = IXGBE_R32_Q(hw, IXGBE_EODSDP); + regs_buff[5] = IXGBE_R32_Q(hw, IXGBE_LEDCTL); + regs_buff[6] = IXGBE_R32_Q(hw, IXGBE_FRTIMER); + regs_buff[7] = IXGBE_R32_Q(hw, IXGBE_TCPTIMER); /* NVM Register */ - regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); - regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); - regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw)); - regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); - regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); - regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); - regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); - regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); - regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); - regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw)); + regs_buff[8] = IXGBE_R32_Q(hw, IXGBE_EEC); + regs_buff[9] = IXGBE_R32_Q(hw, IXGBE_EERD); + regs_buff[10] = IXGBE_R32_Q(hw, IXGBE_FLA); + regs_buff[11] = IXGBE_R32_Q(hw, IXGBE_EEMNGCTL); + regs_buff[12] = IXGBE_R32_Q(hw, IXGBE_EEMNGDATA); + regs_buff[13] = IXGBE_R32_Q(hw, IXGBE_FLMNGCTL); + regs_buff[14] = IXGBE_R32_Q(hw, IXGBE_FLMNGDATA); + regs_buff[15] = IXGBE_R32_Q(hw, IXGBE_FLMNGCNT); + regs_buff[16] = IXGBE_R32_Q(hw, IXGBE_FLOP); + regs_buff[17] = IXGBE_R32_Q(hw, IXGBE_GRC); /* Interrupt */ /* don't read EICR 
because it can clear interrupt causes, instead * read EICS which is a shadow but doesn't clear EICR */ - regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); - regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); - regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); - regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); - regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); - regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); - regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); - regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); - regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); - regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); - regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); - regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); + regs_buff[18] = IXGBE_R32_Q(hw, IXGBE_EICS); + regs_buff[19] = IXGBE_R32_Q(hw, IXGBE_EICS); + regs_buff[20] = IXGBE_R32_Q(hw, IXGBE_EIMS); + regs_buff[21] = IXGBE_R32_Q(hw, IXGBE_EIMC); + regs_buff[22] = IXGBE_R32_Q(hw, IXGBE_EIAC); + regs_buff[23] = IXGBE_R32_Q(hw, IXGBE_EIAM); + regs_buff[24] = IXGBE_R32_Q(hw, IXGBE_EITR(0)); + regs_buff[25] = IXGBE_R32_Q(hw, IXGBE_IVAR(0)); + regs_buff[26] = IXGBE_R32_Q(hw, IXGBE_MSIXT); + regs_buff[27] = IXGBE_R32_Q(hw, IXGBE_MSIXPBA); + regs_buff[28] = IXGBE_R32_Q(hw, IXGBE_PBACL(0)); + regs_buff[29] = IXGBE_R32_Q(hw, IXGBE_GPIE); /* Flow Control */ - regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); - for (i = 0; i < 4; i++) - regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i)); + regs_buff[30] = IXGBE_R32_Q(hw, IXGBE_PFCTOP); + regs_buff[31] = IXGBE_R32_Q(hw, IXGBE_FCTTV(0)); + regs_buff[32] = IXGBE_R32_Q(hw, IXGBE_FCTTV(1)); + regs_buff[33] = IXGBE_R32_Q(hw, IXGBE_FCTTV(2)); + regs_buff[34] = IXGBE_R32_Q(hw, IXGBE_FCTTV(3)); for (i = 0; i < 8; i++) { switch (hw->mac.type) { case ixgbe_mac_82598EB: - regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); - regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); + regs_buff[35 + i] = IXGBE_R32_Q(hw, IXGBE_FCRTL(i)); + regs_buff[43 + i] = IXGBE_R32_Q(hw, 
IXGBE_FCRTH(i)); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); - regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); + regs_buff[35 + i] = IXGBE_R32_Q(hw, + IXGBE_FCRTL_82599(i)); + regs_buff[43 + i] = IXGBE_R32_Q(hw, + IXGBE_FCRTH_82599(i)); break; default: break; } } - regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); - regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); + regs_buff[51] = IXGBE_R32_Q(hw, IXGBE_FCRTV); + regs_buff[52] = IXGBE_R32_Q(hw, IXGBE_TFCS); /* Receive DMA */ for (i = 0; i < 64; i++) - regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); + regs_buff[53 + i] = IXGBE_R32_Q(hw, IXGBE_RDBAL(i)); for (i = 0; i < 64; i++) - regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); + regs_buff[117 + i] = IXGBE_R32_Q(hw, IXGBE_RDBAH(i)); for (i = 0; i < 64; i++) - regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); + regs_buff[181 + i] = IXGBE_R32_Q(hw, IXGBE_RDLEN(i)); for (i = 0; i < 64; i++) - regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); + regs_buff[245 + i] = IXGBE_R32_Q(hw, IXGBE_RDH(i)); for (i = 0; i < 64; i++) - regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); + regs_buff[309 + i] = IXGBE_R32_Q(hw, IXGBE_RDT(i)); for (i = 0; i < 64; i++) - regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + regs_buff[373 + i] = IXGBE_R32_Q(hw, IXGBE_RXDCTL(i)); for (i = 0; i < 16; i++) - regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); + regs_buff[437 + i] = IXGBE_R32_Q(hw, IXGBE_SRRCTL(i)); for (i = 0; i < 16; i++) - regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); - regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + regs_buff[453 + i] = IXGBE_R32_Q(hw, IXGBE_DCA_RXCTRL(i)); + regs_buff[469] = IXGBE_R32_Q(hw, IXGBE_RDRXCTL); for (i = 0; i < 8; i++) - regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); - regs_buff[478] = IXGBE_READ_REG(hw, 
IXGBE_RXCTRL); - regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); + regs_buff[470 + i] = IXGBE_R32_Q(hw, IXGBE_RXPBSIZE(i)); + regs_buff[478] = IXGBE_R32_Q(hw, IXGBE_RXCTRL); + regs_buff[479] = IXGBE_R32_Q(hw, IXGBE_DROPEN); /* Receive */ - regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); - regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); + regs_buff[480] = IXGBE_R32_Q(hw, IXGBE_RXCSUM); + regs_buff[481] = IXGBE_R32_Q(hw, IXGBE_RFCTL); for (i = 0; i < 16; i++) - regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); + regs_buff[482 + i] = IXGBE_R32_Q(hw, IXGBE_RAL(i)); for (i = 0; i < 16; i++) - regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); - regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); - regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); - regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); - regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); - regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + regs_buff[498 + i] = IXGBE_R32_Q(hw, IXGBE_RAH(i)); + regs_buff[514] = IXGBE_R32_Q(hw, IXGBE_PSRTYPE(0)); + regs_buff[515] = IXGBE_R32_Q(hw, IXGBE_FCTRL); + regs_buff[516] = IXGBE_R32_Q(hw, IXGBE_VLNCTRL); + regs_buff[517] = IXGBE_R32_Q(hw, IXGBE_MCSTCTRL); + regs_buff[518] = IXGBE_R32_Q(hw, IXGBE_MRQC); + regs_buff[519] = IXGBE_R32_Q(hw, IXGBE_VMD_CTL); for (i = 0; i < 8; i++) - regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); + regs_buff[520 + i] = IXGBE_R32_Q(hw, IXGBE_IMIR(i)); for (i = 0; i < 8; i++) - regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); - regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); + regs_buff[528 + i] = IXGBE_R32_Q(hw, IXGBE_IMIREXT(i)); + regs_buff[536] = IXGBE_R32_Q(hw, IXGBE_IMIRVP); /* Transmit */ for (i = 0; i < 32; i++) - regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); + regs_buff[537 + i] = IXGBE_R32_Q(hw, IXGBE_TDBAL(i)); for (i = 0; i < 32; i++) - regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); + regs_buff[569 + i] = 
IXGBE_R32_Q(hw, IXGBE_TDBAH(i)); for (i = 0; i < 32; i++) - regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); + regs_buff[601 + i] = IXGBE_R32_Q(hw, IXGBE_TDLEN(i)); for (i = 0; i < 32; i++) - regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); + regs_buff[633 + i] = IXGBE_R32_Q(hw, IXGBE_TDH(i)); for (i = 0; i < 32; i++) - regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); + regs_buff[665 + i] = IXGBE_R32_Q(hw, IXGBE_TDT(i)); for (i = 0; i < 32; i++) - regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + regs_buff[697 + i] = IXGBE_R32_Q(hw, IXGBE_TXDCTL(i)); for (i = 0; i < 32; i++) - regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); + regs_buff[729 + i] = IXGBE_R32_Q(hw, IXGBE_TDWBAL(i)); for (i = 0; i < 32; i++) - regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); - regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); + regs_buff[761 + i] = IXGBE_R32_Q(hw, IXGBE_TDWBAH(i)); + regs_buff[793] = IXGBE_R32_Q(hw, IXGBE_DTXCTL); for (i = 0; i < 16; i++) - regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); - regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); + regs_buff[794 + i] = IXGBE_R32_Q(hw, IXGBE_DCA_TXCTRL(i)); + regs_buff[810] = IXGBE_R32_Q(hw, IXGBE_TIPG); for (i = 0; i < 8; i++) - regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); - regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); + regs_buff[811 + i] = IXGBE_R32_Q(hw, IXGBE_TXPBSIZE(i)); + regs_buff[819] = IXGBE_R32_Q(hw, IXGBE_MNGTXMAP); /* Wake Up */ - regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); - regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); - regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); - regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); - regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); - regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); - regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); - regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); - regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); + regs_buff[820] = IXGBE_R32_Q(hw, IXGBE_WUC); + 
regs_buff[821] = IXGBE_R32_Q(hw, IXGBE_WUFC); + regs_buff[822] = IXGBE_R32_Q(hw, IXGBE_WUS); + regs_buff[823] = IXGBE_R32_Q(hw, IXGBE_IPAV); + regs_buff[824] = IXGBE_R32_Q(hw, IXGBE_IP4AT); + regs_buff[825] = IXGBE_R32_Q(hw, IXGBE_IP6AT); + regs_buff[826] = IXGBE_R32_Q(hw, IXGBE_WUPL); + regs_buff[827] = IXGBE_R32_Q(hw, IXGBE_WUPM); + regs_buff[828] = IXGBE_R32_Q(hw, IXGBE_FHFT(0)); /* DCB */ - regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */ - regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */ - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); - regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); - for (i = 0; i < 8; i++) - regs_buff[833 + i] = - IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); - for (i = 0; i < 8; i++) - regs_buff[841 + i] = - IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); - for (i = 0; i < 8; i++) - regs_buff[849 + i] = - IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); - for (i = 0; i < 8; i++) - regs_buff[857 + i] = - IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS); - regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS); - for (i = 0; i < 8; i++) - regs_buff[833 + i] = - IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i)); - for (i = 0; i < 8; i++) - regs_buff[841 + i] = - IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i)); - for (i = 0; i < 8; i++) - regs_buff[849 + i] = - IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i)); - for (i = 0; i < 8; i++) - regs_buff[857 + i] = - IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i)); - break; - default: - break; - } - + regs_buff[829] = IXGBE_R32_Q(hw, IXGBE_RMCS); + regs_buff[830] = IXGBE_R32_Q(hw, IXGBE_DPMCS); + regs_buff[831] = IXGBE_R32_Q(hw, IXGBE_PDPMCS); + regs_buff[832] = IXGBE_R32_Q(hw, IXGBE_RUPPBMR); + for (i = 0; i < 8; i++) + regs_buff[833 + i] = IXGBE_R32_Q(hw, IXGBE_RT2CR(i)); + for (i = 0; i < 8; i++) + 
regs_buff[841 + i] = IXGBE_R32_Q(hw, IXGBE_RT2SR(i)); for (i = 0; i < 8; i++) - regs_buff[865 + i] = - IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */ + regs_buff[849 + i] = IXGBE_R32_Q(hw, IXGBE_TDTQ2TCCR(i)); for (i = 0; i < 8; i++) - regs_buff[873 + i] = - IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */ + regs_buff[857 + i] = IXGBE_R32_Q(hw, IXGBE_TDTQ2TCSR(i)); + for (i = 0; i < 8; i++) + regs_buff[865 + i] = IXGBE_R32_Q(hw, IXGBE_TDPT2TCCR(i)); + for (i = 0; i < 8; i++) + regs_buff[873 + i] = IXGBE_R32_Q(hw, IXGBE_TDPT2TCSR(i)); /* Statistics */ regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); @@ -797,10 +1105,8 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); - regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc); - regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32); - regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc); - regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32); + regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); + regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); for (i = 0; i < 8; i++) regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); @@ -810,8 +1116,7 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); - regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor); - regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32); + regs_buff[961] = IXGBE_GET_STAT(adapter, tor); regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); @@ -833,89 +1138,77 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); /* MAC */ - 
regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); - regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); - regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); - regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0); - regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); - regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); - regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); - regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); - regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); - regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); - regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); - regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); - regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); - regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); - regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); - regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); - regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); - regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); - regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); - regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); - regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); - regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); - regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); - regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); - regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); - regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); - regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); - regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); - regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); - regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); - regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); - regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + regs_buff[1038] = IXGBE_R32_Q(hw, IXGBE_PCS1GCFIG); + regs_buff[1039] = IXGBE_R32_Q(hw, IXGBE_PCS1GLCTL); + regs_buff[1040] = IXGBE_R32_Q(hw, IXGBE_PCS1GLSTA); + regs_buff[1041] = IXGBE_R32_Q(hw, IXGBE_PCS1GDBG0); + 
regs_buff[1042] = IXGBE_R32_Q(hw, IXGBE_PCS1GDBG1); + regs_buff[1043] = IXGBE_R32_Q(hw, IXGBE_PCS1GANA); + regs_buff[1044] = IXGBE_R32_Q(hw, IXGBE_PCS1GANLP); + regs_buff[1045] = IXGBE_R32_Q(hw, IXGBE_PCS1GANNP); + regs_buff[1046] = IXGBE_R32_Q(hw, IXGBE_PCS1GANLPNP); + regs_buff[1047] = IXGBE_R32_Q(hw, IXGBE_HLREG0); + regs_buff[1048] = IXGBE_R32_Q(hw, IXGBE_HLREG1); + regs_buff[1049] = IXGBE_R32_Q(hw, IXGBE_PAP); + regs_buff[1050] = IXGBE_R32_Q(hw, IXGBE_MACA); + regs_buff[1051] = IXGBE_R32_Q(hw, IXGBE_APAE); + regs_buff[1052] = IXGBE_R32_Q(hw, IXGBE_ARD); + regs_buff[1053] = IXGBE_R32_Q(hw, IXGBE_AIS); + regs_buff[1054] = IXGBE_R32_Q(hw, IXGBE_MSCA); + regs_buff[1055] = IXGBE_R32_Q(hw, IXGBE_MSRWD); + regs_buff[1056] = IXGBE_R32_Q(hw, IXGBE_MLADD); + regs_buff[1057] = IXGBE_R32_Q(hw, IXGBE_MHADD); + regs_buff[1058] = IXGBE_R32_Q(hw, IXGBE_TREG); + regs_buff[1059] = IXGBE_R32_Q(hw, IXGBE_PCSS1); + regs_buff[1060] = IXGBE_R32_Q(hw, IXGBE_PCSS2); + regs_buff[1061] = IXGBE_R32_Q(hw, IXGBE_XPCSS); + regs_buff[1062] = IXGBE_R32_Q(hw, IXGBE_SERDESC); + regs_buff[1063] = IXGBE_R32_Q(hw, IXGBE_MACS); + regs_buff[1064] = IXGBE_R32_Q(hw, IXGBE_AUTOC); + regs_buff[1065] = IXGBE_R32_Q(hw, IXGBE_LINKS); + regs_buff[1066] = IXGBE_R32_Q(hw, IXGBE_AUTOC2); + regs_buff[1067] = IXGBE_R32_Q(hw, IXGBE_AUTOC3); + regs_buff[1068] = IXGBE_R32_Q(hw, IXGBE_ANLP1); + regs_buff[1069] = IXGBE_R32_Q(hw, IXGBE_ANLP2); + regs_buff[1070] = IXGBE_R32_Q(hw, IXGBE_ATLASCTL); /* Diagnostic */ - regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); - for (i = 0; i < 8; i++) - regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); - regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); - for (i = 0; i < 4; i++) - regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); - regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); - regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); + regs_buff[1071] = IXGBE_R32_Q(hw, IXGBE_RDSTATCTL); for (i = 0; i < 8; i++) - regs_buff[1087 + i] = 
IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); - regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); + regs_buff[1072 + i] = IXGBE_R32_Q(hw, IXGBE_RDSTAT(i)); + regs_buff[1080] = IXGBE_R32_Q(hw, IXGBE_RDHMPN); for (i = 0; i < 4; i++) - regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); - regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); - regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); + regs_buff[1081 + i] = IXGBE_R32_Q(hw, IXGBE_RIC_DW(i)); + regs_buff[1085] = IXGBE_R32_Q(hw, IXGBE_RDPROBE); + regs_buff[1095] = IXGBE_R32_Q(hw, IXGBE_TDHMPN); for (i = 0; i < 4; i++) - regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i)); - regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); - for (i = 0; i < 4; i++) - regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i)); + regs_buff[1096 + i] = IXGBE_R32_Q(hw, IXGBE_TIC_DW(i)); + regs_buff[1100] = IXGBE_R32_Q(hw, IXGBE_TDPROBE); + regs_buff[1101] = IXGBE_R32_Q(hw, IXGBE_TXBUFCTRL); + regs_buff[1102] = IXGBE_R32_Q(hw, IXGBE_TXBUFDATA0); + regs_buff[1103] = IXGBE_R32_Q(hw, IXGBE_TXBUFDATA1); + regs_buff[1104] = IXGBE_R32_Q(hw, IXGBE_TXBUFDATA2); + regs_buff[1105] = IXGBE_R32_Q(hw, IXGBE_TXBUFDATA3); + regs_buff[1106] = IXGBE_R32_Q(hw, IXGBE_RXBUFCTRL); + regs_buff[1107] = IXGBE_R32_Q(hw, IXGBE_RXBUFDATA0); + regs_buff[1108] = IXGBE_R32_Q(hw, IXGBE_RXBUFDATA1); + regs_buff[1109] = IXGBE_R32_Q(hw, IXGBE_RXBUFDATA2); + regs_buff[1110] = IXGBE_R32_Q(hw, IXGBE_RXBUFDATA3); for (i = 0; i < 8; i++) - regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); - regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); - regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); - regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); - regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); - regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); - regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); - regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); - regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); - regs_buff[1127] = 
IXGBE_READ_REG(hw, IXGBE_PBRXECC); + regs_buff[1111 + i] = IXGBE_R32_Q(hw, IXGBE_PCIE_DIAG(i)); + regs_buff[1119] = IXGBE_R32_Q(hw, IXGBE_RFVAL); + regs_buff[1120] = IXGBE_R32_Q(hw, IXGBE_MDFTC1); + regs_buff[1121] = IXGBE_R32_Q(hw, IXGBE_MDFTC2); + regs_buff[1122] = IXGBE_R32_Q(hw, IXGBE_MDFTFIFO1); + regs_buff[1123] = IXGBE_R32_Q(hw, IXGBE_MDFTFIFO2); + regs_buff[1124] = IXGBE_R32_Q(hw, IXGBE_MDFTS); + regs_buff[1125] = IXGBE_R32_Q(hw, IXGBE_PCIEECCCTL); + regs_buff[1126] = IXGBE_R32_Q(hw, IXGBE_PBTXECC); + regs_buff[1127] = IXGBE_R32_Q(hw, IXGBE_PBRXECC); /* 82599 X540 specific registers */ - regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN); - - /* 82599 X540 specific DCB registers */ - regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); - regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC); - for (i = 0; i < 4; i++) - regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i)); - regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM); - /* same as RTTQCNRM */ - regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD); - /* same as RTTQCNRR */ + regs_buff[1128] = IXGBE_R32_Q(hw, IXGBE_MFLCN); - /* X540 specific DCB registers */ - regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR); - regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG); } static int ixgbe_get_eeprom_len(struct net_device *netdev) @@ -948,7 +1241,7 @@ static int ixgbe_get_eeprom(struct net_device *netdev, return -ENOMEM; ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, - eeprom_buff); + eeprom_buff); /* Device's eeprom is always little-endian, word addressable */ for (i = 0; i < eeprom_len; i++) @@ -965,10 +1258,9 @@ static int ixgbe_set_eeprom(struct net_device *netdev, { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - u16 *eeprom_buff; - void *ptr; int max_len, first_word, last_word, ret_val = 0; - u16 i; + u16 *eeprom_buff, i; + void *ptr; if (eeprom->len == 0) return -EINVAL; @@ -997,7 +1289,7 @@ static int ixgbe_set_eeprom(struct 
net_device *netdev, ptr++; } - if ((eeprom->offset + eeprom->len) & 1) { + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { /* * need read/modify/write of last changed EEPROM word * only the first byte of the word is being modified @@ -1018,8 +1310,8 @@ static int ixgbe_set_eeprom(struct net_device *netdev, cpu_to_le16s(&eeprom_buff[i]); ret_val = hw->eeprom.ops.write_buffer(hw, first_word, - last_word - first_word + 1, - eeprom_buff); + last_word - first_word + 1, + eeprom_buff); /* Update the checksum */ if (ret_val == 0) @@ -1034,32 +1326,35 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - u32 nvm_track_id; - strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, ixgbe_driver_version, - sizeof(drvinfo->version)); + strncpy(drvinfo->driver, ixgbe_driver_name, + sizeof(drvinfo->driver) - 1); + strncpy(drvinfo->version, ixgbe_driver_version, + sizeof(drvinfo->version) - 1); - nvm_track_id = (adapter->eeprom_verh << 16) | - adapter->eeprom_verl; - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", - nvm_track_id); + strncpy(drvinfo->fw_version, adapter->eeprom_id, + sizeof(drvinfo->fw_version) - 1); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info) - 1); +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); + drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN; +#endif } static void ixgbe_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; - struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; ring->rx_max_pending = IXGBE_MAX_RXD; ring->tx_max_pending = IXGBE_MAX_TXD; - ring->rx_pending = rx_ring->count; - ring->tx_pending = tx_ring->count; + ring->rx_mini_max_pending = 0; + 
ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; } static int ixgbe_set_ringparam(struct net_device *netdev, @@ -1158,9 +1453,9 @@ static int ixgbe_set_ringparam(struct net_device *netdev, } goto err_setup; } - } + for (i = 0; i < adapter->num_rx_queues; i++) { ixgbe_free_rx_resources(adapter->rx_ring[i]); @@ -1179,69 +1474,86 @@ static int ixgbe_set_ringparam(struct net_device *netdev, return err; } +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT +static int ixgbe_get_stats_count(struct net_device *netdev) +{ + return IXGBE_STATS_LEN; +} + +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ static int ixgbe_get_sset_count(struct net_device *netdev, int sset) { +#ifdef HAVE_TX_MQ +#ifndef HAVE_NETDEV_SELECT_QUEUE + struct ixgbe_adapter *adapter = netdev_priv(netdev); +#endif +#endif + switch (sset) { case ETH_SS_TEST: return IXGBE_TEST_LEN; case ETH_SS_STATS: return IXGBE_STATS_LEN; + case ETH_SS_PRIV_FLAGS: + return IXGBE_PRIV_FLAGS_STR_LEN; default: return -EOPNOTSUPP; } } +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ static void ixgbe_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) + struct ethtool_stats __always_unused *stats, u64 *data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct rtnl_link_stats64 temp; - const struct rtnl_link_stats64 *net_stats; +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif + u64 *queue_stat; + int stat_count, k; +#ifdef HAVE_NDO_GET_STATS64 unsigned int start; +#endif struct ixgbe_ring *ring; int i, j; - char *p = NULL; + char *p; ixgbe_update_stats(adapter); - net_stats = dev_get_stats(netdev, &temp); - for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { - switch (ixgbe_gstrings_stats[i].type) { - case NETDEV_STATS: - p = (char *) net_stats + - 
ixgbe_gstrings_stats[i].stat_offset; - break; - case IXGBE_STATS: - p = (char *) adapter + - ixgbe_gstrings_stats[i].stat_offset; - break; - default: - data[i] = 0; - continue; - } - data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == + for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) { + p = (char *)net_stats + ixgbe_gstrings_net_stats[i].stat_offset; + data[i] = (ixgbe_gstrings_net_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IXGBE_GLOBAL_STATS_LEN; j++, i++) { + p = (char *)adapter + ixgbe_gstrings_stats[j].stat_offset; + data[i] = (ixgbe_gstrings_stats[j].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - for (j = 0; j < netdev->num_tx_queues; j++) { + for (j = 0; j < IXGBE_NUM_TX_QUEUES; j++) { ring = adapter->tx_ring[j]; if (!ring) { - data[i] = 0; - data[i+1] = 0; - i += 2; + data[i++] = 0; + data[i++] = 0; #ifdef BP_EXTENDED_STATS - data[i] = 0; - data[i+1] = 0; - data[i+2] = 0; - i += 3; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; #endif continue; } +#ifdef HAVE_NDO_GET_STATS64 do { start = u64_stats_fetch_begin_irq(&ring->syncp); +#endif data[i] = ring->stats.packets; data[i+1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +#endif i += 2; #ifdef BP_EXTENDED_STATS data[i] = ring->stats.yields; @@ -1253,23 +1565,25 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { ring = adapter->rx_ring[j]; if (!ring) { - data[i] = 0; - data[i+1] = 0; - i += 2; + data[i++] = 0; + data[i++] = 0; #ifdef BP_EXTENDED_STATS - data[i] = 0; - data[i+1] = 0; - data[i+2] = 0; - i += 3; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; #endif continue; } +#ifdef HAVE_NDO_GET_STATS64 do { start = u64_stats_fetch_begin_irq(&ring->syncp); +#endif data[i] = ring->stats.packets; data[i+1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +#endif i += 2; #ifdef 
BP_EXTENDED_STATS data[i] = ring->stats.yields; @@ -1278,7 +1592,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, i += 3; #endif } - for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { data[i++] = adapter->stats.pxontxc[j]; data[i++] = adapter->stats.pxofftxc[j]; @@ -1287,28 +1600,42 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i++] = adapter->stats.pxonrxc[j]; data[i++] = adapter->stats.pxoffrxc[j]; } + stat_count = sizeof(struct vf_stats) / sizeof(u64); + for (j = 0; j < adapter->num_vfs; j++) { + queue_stat = (u64 *)&adapter->vfinfo[j].vfstats; + for (k = 0; k < stat_count; k++) + data[i + k] = queue_stat[k]; + queue_stat = (u64 *)&adapter->vfinfo[j].saved_rst_vfstats; + for (k = 0; k < stat_count; k++) + data[i + k] += queue_stat[k]; + i += k; + } } static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { + struct ixgbe_adapter *adapter = netdev_priv(netdev); char *p = (char *)data; - int i; + unsigned int i; switch (stringset) { case ETH_SS_TEST: - for (i = 0; i < IXGBE_TEST_LEN; i++) { - memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } + memcpy(data, *ixgbe_gstrings_test, + IXGBE_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_STATS: + for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) { + memcpy(p, ixgbe_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { memcpy(p, ixgbe_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - for (i = 0; i < netdev->num_tx_queues; i++) { + for (i = 0; i < IXGBE_NUM_TX_QUEUES; i++) { sprintf(p, "tx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "tx_queue_%u_bytes", i); @@ -1348,8 +1675,26 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, sprintf(p, "rx_pb_%u_pxoff", i); p += ETH_GSTRING_LEN; } + for (i = 0; i < adapter->num_vfs; i++) { + sprintf(p, "VF %u Rx Packets", i); + p += ETH_GSTRING_LEN; + 
sprintf(p, "VF %u Rx Bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %u Tx Packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %u Tx Bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %u MC Packets", i); + p += ETH_GSTRING_LEN; + } /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ break; +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT + case ETH_SS_PRIV_FLAGS: + memcpy(data, ixgbe_priv_flags_strings, + IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ } } @@ -1359,13 +1704,12 @@ static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) bool link_up; u32 link_speed = 0; - if (ixgbe_removed(hw->hw_addr)) { + if (IXGBE_REMOVED(hw->hw_addr)) { *data = 1; return 1; } *data = 0; - - hw->mac.ops.check_link(hw, &link_speed, &link_up, true); + hw->mac.ops.check_link(hw, &link_speed, &link_up, true); if (link_up) return *data; else @@ -1400,7 +1744,7 @@ struct ixgbe_reg_test { #define TABLE64_TEST_HI 6 /* default 82599 register test */ -static const struct ixgbe_reg_test reg_test_82599[] = { +static struct ixgbe_reg_test reg_test_82599[] = { { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, @@ -1424,7 +1768,7 @@ static const struct ixgbe_reg_test reg_test_82599[] = { }; /* default 82598 register test */ -static const struct ixgbe_reg_test reg_test_82598[] = { +static struct ixgbe_reg_test reg_test_82598[] = { { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, @@ -1451,29 +1795,32 @@ static const struct ixgbe_reg_test reg_test_82598[] = { { .reg = 0 } }; + static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, u32 mask, u32 write) { u32 pat, val, before; static const u32 test_pattern[] = { - 0x5A5A5A5A, 0xA5A5A5A5, 
0x00000000, 0xFFFFFFFF}; + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; - if (ixgbe_removed(adapter->hw.hw_addr)) { + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { *data = 1; return true; } for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { - before = ixgbe_read_reg(&adapter->hw, reg); - ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write); - val = ixgbe_read_reg(&adapter->hw, reg); + before = IXGBE_READ_REG(&adapter->hw, reg); + IXGBE_WRITE_REG(&adapter->hw, reg, test_pattern[pat] & write); + val = IXGBE_READ_REG(&adapter->hw, reg); if (val != (test_pattern[pat] & write & mask)) { - e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", - reg, val, (test_pattern[pat] & write & mask)); + e_err(drv, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, test_pattern[pat] & write & mask); *data = reg; - ixgbe_write_reg(&adapter->hw, reg, before); + IXGBE_WRITE_REG(&adapter->hw, reg, before); return true; } - ixgbe_write_reg(&adapter->hw, reg, before); + IXGBE_WRITE_REG(&adapter->hw, reg, before); } return false; } @@ -1483,36 +1830,38 @@ static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, { u32 val, before; - if (ixgbe_removed(adapter->hw.hw_addr)) { + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { *data = 1; return true; } - before = ixgbe_read_reg(&adapter->hw, reg); - ixgbe_write_reg(&adapter->hw, reg, write & mask); - val = ixgbe_read_reg(&adapter->hw, reg); + before = IXGBE_READ_REG(&adapter->hw, reg); + IXGBE_WRITE_REG(&adapter->hw, reg, write & mask); + val = IXGBE_READ_REG(&adapter->hw, reg); if ((write & mask) != (val & mask)) { - e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + e_err(drv, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg, (val & mask), (write & mask)); *data = reg; - ixgbe_write_reg(&adapter->hw, reg, before); + IXGBE_WRITE_REG(&adapter->hw, reg, before); return true; } - ixgbe_write_reg(&adapter->hw, reg, before); 
+ IXGBE_WRITE_REG(&adapter->hw, reg, before); return false; } -static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) +static bool ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) { - const struct ixgbe_reg_test *test; + struct ixgbe_reg_test *test; + struct ixgbe_hw *hw = &adapter->hw; u32 value, before, after; u32 i, toggle; - if (ixgbe_removed(adapter->hw.hw_addr)) { + if (IXGBE_REMOVED(hw->hw_addr)) { e_err(drv, "Adapter removed - register test blocked\n"); *data = 1; - return 1; + return true; } - switch (adapter->hw.mac.type) { + switch (hw->mac.type) { case ixgbe_mac_82598EB: toggle = 0x7FFFF3FF; test = reg_test_82598; @@ -1521,13 +1870,13 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: toggle = 0x7FFFF30F; test = reg_test_82599; break; default: *data = 1; - return 1; + return true; } /* @@ -1536,18 +1885,19 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) * tests. Some bits are read-only, some toggle, and some * are writeable on newer MACs. 
*/ - before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS); - value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle); - ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle); - after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle; + before = IXGBE_READ_REG(hw, IXGBE_STATUS); + value = IXGBE_READ_REG(hw, IXGBE_STATUS) & toggle; + IXGBE_WRITE_REG(hw, IXGBE_STATUS, toggle); + after = IXGBE_READ_REG(hw, IXGBE_STATUS) & toggle; if (value != after) { - e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + e_err(drv, + "failed STATUS register test got: 0x%08X expected: 0x%08X\n", after, value); *data = 1; - return 1; + return true; } /* restore previous status */ - ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before); + IXGBE_WRITE_REG(hw, IXGBE_STATUS, before); /* * Perform the remainder of the register test, looping through @@ -1560,61 +1910,63 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) switch (test->test_type) { case PATTERN_TEST: b = reg_pattern_test(adapter, data, - test->reg + (i * 0x40), - test->mask, - test->write); - break; - case SET_READ_TEST: - b = reg_set_and_check(adapter, data, test->reg + (i * 0x40), test->mask, test->write); break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; case WRITE_NO_TEST: - ixgbe_write_reg(&adapter->hw, - test->reg + (i * 0x40), + IXGBE_WRITE_REG(hw, test->reg + (i * 0x40), test->write); break; case TABLE32_TEST: b = reg_pattern_test(adapter, data, - test->reg + (i * 4), - test->mask, - test->write); + test->reg + (i * 4), + test->mask, + test->write); break; case TABLE64_TEST_LO: b = reg_pattern_test(adapter, data, - test->reg + (i * 8), - test->mask, - test->write); + test->reg + (i * 8), + test->mask, + test->write); break; case TABLE64_TEST_HI: b = reg_pattern_test(adapter, data, - (test->reg + 4) + (i * 8), - test->mask, - test->write); + (test->reg + 4) + (i * 8), + test->mask, + 
test->write); break; } if (b) - return 1; + return true; } test++; } *data = 0; - return 0; + return false; } -static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) +static bool ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) { struct ixgbe_hw *hw = &adapter->hw; - if (hw->eeprom.ops.validate_checksum(hw, NULL)) + + if (hw->eeprom.ops.validate_checksum(hw, NULL)) { *data = 1; - else + return true; + } else { *data = 0; - return *data; + return false; + } } -static irqreturn_t ixgbe_test_intr(int irq, void *data) +static irqreturn_t ixgbe_test_intr(int __always_unused irq, void *data) { struct net_device *netdev = (struct net_device *) data; struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -1630,6 +1982,10 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) u32 mask, i = 0, shared_int = true; u32 irq = adapter->pdev->irq; + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return -1; + } *data = 0; /* Hook up test interrupt handler just for this test */ @@ -1638,21 +1994,21 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) return 0; } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { shared_int = false; - if (request_irq(irq, ixgbe_test_intr, 0, netdev->name, + if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name, netdev)) { *data = 1; return -1; } - } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, + } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED, netdev->name, netdev)) { shared_int = false; - } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED, + } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED, netdev->name, netdev)) { *data = 1; return -1; } - e_info(hw, "testing %s interrupt\n", shared_int ? - "shared" : "unshared"); + e_info(hw, "testing %s interrupt\n", + (shared_int ? 
"shared" : "unshared")); /* Disable all the interrupts */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); @@ -1662,7 +2018,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) /* Test each interrupt */ for (; i < 10; i++) { /* Interrupt to test */ - mask = BIT(i); + mask = 1 << i; if (!shared_int) { /* @@ -1747,20 +2103,18 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) /* shut down the DMA engines now so they can be reinitialized later */ /* first Rx */ - hw->mac.ops.disable_rx(hw); + ixgbe_disable_rx(hw); ixgbe_disable_rx_queue(adapter, rx_ring); /* now Tx */ - reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); - reg_ctl &= ~IXGBE_TXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), 0); switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); reg_ctl &= ~IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); @@ -1779,7 +2133,6 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) { struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; - struct ixgbe_hw *hw = &adapter->hw; u32 rctl, reg_data; int ret_val; int err; @@ -1787,7 +2140,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) /* Setup Tx descriptor ring and Tx buffers */ tx_ring->count = IXGBE_DEFAULT_TXD; tx_ring->queue_index = 0; - tx_ring->dev = &adapter->pdev->dev; + tx_ring->dev = pci_dev_to_dev(adapter->pdev); tx_ring->netdev = adapter->netdev; tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; @@ -1800,7 +2153,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: reg_data = 
IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); reg_data |= IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); @@ -1814,9 +2167,12 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) /* Setup Rx Descriptor ring and Rx buffers */ rx_ring->count = IXGBE_DEFAULT_RXD; rx_ring->queue_index = 0; - rx_ring->dev = &adapter->pdev->dev; + rx_ring->dev = pci_dev_to_dev(adapter->pdev); rx_ring->netdev = adapter->netdev; rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K; +#endif err = ixgbe_setup_rx_resources(rx_ring); if (err) { @@ -1824,15 +2180,14 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) goto err_nomem; } - hw->mac.ops.disable_rx(hw); + ixgbe_disable_rx(&adapter->hw); ixgbe_configure_rx_ring(adapter, rx_ring); - rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); + rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); rctl |= IXGBE_RXCTRL_DMBYPS; IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); - - hw->mac.ops.enable_rx(hw); + ixgbe_enable_rx(&adapter->hw); return 0; @@ -1856,12 +2211,12 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); - /* X540 and X550 needs to set the MACC.FLU bit to force link up */ + /* X540 needs to set the MACC.FLU bit to force link up */ switch (adapter->hw.mac.type) { - case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: + case ixgbe_mac_X540: reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); reg_data |= IXGBE_MACC_FLU; IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); @@ -1928,15 +2283,21 @@ static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer, frame_size >>= 1; +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + data = rx_buffer->skb->data; +#else data = kmap(rx_buffer->page) + rx_buffer->page_offset; +#endif 
if (data[3] != 0xFF || data[frame_size + 10] != 0xBE || data[frame_size + 12] != 0xAF) match = false; +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT kunmap(rx_buffer->page); +#endif return match; } @@ -1945,8 +2306,11 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, unsigned int size) { union ixgbe_adv_rx_desc *rx_desc; - struct ixgbe_rx_buffer *rx_buffer; - struct ixgbe_tx_buffer *tx_buffer; +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + const int bufsz = rx_ring->rx_buf_len; +#else + const int bufsz = ixgbe_rx_bufsz(rx_ring); +#endif u16 rx_ntc, tx_ntc, count = 0; /* initialize next to clean and descriptor values */ @@ -1954,14 +2318,21 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, tx_ntc = tx_ring->next_to_clean; rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); - while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) { + while (rx_desc->wb.upper.length) { + struct ixgbe_rx_buffer *rx_buffer; + struct ixgbe_tx_buffer *tx_buffer; + + /* unmap buffer on Tx side */ + tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); + /* check Rx buffer */ rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; /* sync Rx buffer for CPU read */ dma_sync_single_for_cpu(rx_ring->dev, rx_buffer->dma, - ixgbe_rx_bufsz(rx_ring), + bufsz, DMA_FROM_DEVICE); /* verify contents of skb */ @@ -1971,13 +2342,9 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, /* sync Rx buffer for device write */ dma_sync_single_for_device(rx_ring->dev, rx_buffer->dma, - ixgbe_rx_bufsz(rx_ring), + bufsz, DMA_FROM_DEVICE); - /* unmap buffer on Tx side */ - tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); - /* increment Rx/Tx next to clean counters */ rx_ntc++; if (rx_ntc == rx_ring->count) @@ -1990,8 +2357,6 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); } - netdev_tx_reset_queue(txring_txq(tx_ring)); - /* re-map buffers to 
ring, store next to clean values */ ixgbe_alloc_rx_buffers(rx_ring, count); rx_ring->next_to_clean = rx_ntc; @@ -2004,7 +2369,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) { struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; - int i, j, lc, good_cnt, ret_val = 0; + int i, j, lc, ret_val = 0; unsigned int size = 1024; netdev_tx_t tx_ret_val; struct sk_buff *skb; @@ -2034,6 +2399,8 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) lc = ((rx_ring->count / 64) * 2) + 1; for (j = 0; j <= lc; j++) { + unsigned int good_cnt; + /* reset count of good packets */ good_cnt = 0; @@ -2086,13 +2453,21 @@ static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) return *data; } +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT +static int ixgbe_diag_test_count(struct net_device __always_unused *netdev) +{ + return IXGBE_TEST_LEN; +} + +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ static void ixgbe_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); bool if_running = netif_running(netdev); + struct ixgbe_hw *hw = &adapter->hw; - if (ixgbe_removed(adapter->hw.hw_addr)) { + if (IXGBE_REMOVED(hw->hw_addr)) { e_err(hw, "Adapter removed - test blocked\n"); data[0] = 1; data[1] = 1; @@ -2104,13 +2479,14 @@ static void ixgbe_diag_test(struct net_device *netdev, } set_bit(__IXGBE_TESTING, &adapter->state); if (eth_test->flags == ETH_TEST_FL_OFFLINE) { - struct ixgbe_hw *hw = &adapter->hw; - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { int i; for (i = 0; i < adapter->num_vfs; i++) { if (adapter->vfinfo[i].clear_to_send) { - netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n"); + e_warn(drv, "Please take active VFS " + "offline and restart the " + "adapter before running NIC " + "diagnostics\n"); data[0] = 1; data[1] = 1; data[2] = 1; @@ -2128,14 +2504,13 @@ static void 
ixgbe_diag_test(struct net_device *netdev, e_info(hw, "offline testing starting\n"); /* Link test performed before hardware reset so autoneg doesn't - * interfere with test result - */ + * interfere with test result */ if (ixgbe_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; if (if_running) /* indicate we're in test mode */ - ixgbe_close(netdev); + dev_close(netdev); else ixgbe_reset(adapter); @@ -2157,7 +2532,7 @@ static void ixgbe_diag_test(struct net_device *netdev, * loopback diagnostic. */ if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) { - e_info(hw, "Skip MAC loopback diagnostic in VT mode\n"); + e_info(hw, "skip MAC loopback diagnostic in VT mode\n"); data[3] = 0; goto skip_loopback; } @@ -2173,7 +2548,7 @@ static void ixgbe_diag_test(struct net_device *netdev, /* clear testing bit and return adapter to previous state */ clear_bit(__IXGBE_TESTING, &adapter->state); if (if_running) - ixgbe_open(netdev); + dev_open(netdev); else if (hw->mac.ops.disable_tx_laser) hw->mac.ops.disable_tx_laser(hw); } else { @@ -2222,7 +2597,7 @@ static void ixgbe_get_wol(struct net_device *netdev, wol->wolopts = 0; if (ixgbe_wol_exclusion(adapter, wol) || - !device_can_wakeup(&adapter->pdev->dev)) + !device_can_wakeup(pci_dev_to_dev(adapter->pdev))) return; if (adapter->wol & IXGBE_WUFC_EX) @@ -2238,6 +2613,7 @@ static void ixgbe_get_wol(struct net_device *netdev, static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) return -EOPNOTSUPP; @@ -2256,7 +2632,9 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) if (wol->wolopts & WAKE_MAGIC) adapter->wol |= IXGBE_WUFC_MAG; - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + hw->wol_enabled = !!(adapter->wol); + + 
device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); return 0; } @@ -2271,16 +2649,14 @@ static int ixgbe_nway_reset(struct net_device *netdev) return 0; } +#ifdef HAVE_ETHTOOL_SET_PHYS_ID static int ixgbe_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - - /* Modified by hilbert for C22 MDI directly access */ - s32 rc; + s32 rc; u16 regVal; - /* Modified by hilbert done */ if (!hw->mac.ops.led_on || !hw->mac.ops.led_off) return -EOPNOTSUPP; @@ -2291,17 +2667,18 @@ static int ixgbe_set_phys_id(struct net_device *netdev, return 2; case ETHTOOL_ID_ON: - hw->mac.ops.led_on(hw, hw->mac.led_link_act); + if (hw->mac.ops.led_on(hw, hw->mac.led_link_act)) + return -EINVAL; break; case ETHTOOL_ID_OFF: - hw->mac.ops.led_off(hw, hw->mac.led_link_act); + if (hw->mac.ops.led_off(hw, hw->mac.led_link_act)) + return -EINVAL; break; case ETHTOOL_ID_INACTIVE: /* Restore LED settings */ - /* Modified by hilbert for C22 MDI directly access */ - if (hw->mac.type == ixgbe_mac_x550em_a) { + if (hw->mac.type == ixgbe_mac_X550EM_a) { /* For M88E1512, to select page 3 in register 22 */ regVal = 0x03; rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); @@ -2329,20 +2706,50 @@ static int ixgbe_set_phys_id(struct net_device *netdev, if (rc) { hw_err(hw, "page register write failed, rc:%x\n", rc); } - } else { - IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); - } + } + else + IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); break; } return 0; } +#else +static int ixgbe_phys_id(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + u32 i; + + if (!hw->mac.ops.led_on || !hw->mac.ops.led_off) + return -EOPNOTSUPP; + + if (!data || data > 300) + data = 300; + + for (i = 0; i < (data * 1000); i += 
400) { + if (hw->mac.ops.led_on(hw, hw->mac.led_link_act)) + return -EINVAL; + msleep_interruptible(200); + if (hw->mac.ops.led_off(hw, hw->mac.led_link_act)) + return -EINVAL; + msleep_interruptible(200); + } + + /* Restore LED settings */ + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + + return IXGBE_SUCCESS; +} +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ static int ixgbe_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; /* only valid if in constant ITR mode */ if (adapter->rx_itr_setting <= 1) ec->rx_coalesce_usecs = adapter->rx_itr_setting; @@ -2380,7 +2787,8 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter) adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; - e_info(probe, "rx-usecs value high enough to re-enable RSC\n"); + e_info(probe, "rx-usecs value high enough " + "to re-enable RSC\n"); return true; } /* if interrupt rate is too high then disable RSC */ @@ -2392,89 +2800,350 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter) return false; } -static int ixgbe_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) +static int ixgbe_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i; + u16 tx_itr_param, rx_itr_param; + u16 tx_itr_prev; + bool need_reset = false; + + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { + /* reject Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EINVAL; + tx_itr_prev = adapter->rx_itr_setting; + } else { + tx_itr_prev = adapter->tx_itr_setting; + } + + if (ec->tx_max_coalesced_frames_irq) + adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; + + if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || + 
(ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + rx_itr_param = IXGBE_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = IXGBE_12K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + /* mixed Rx/Tx */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + adapter->tx_itr_setting = adapter->rx_itr_setting; + + /* detect ITR changes that require update of TXDCTL.WTHRESH */ + if ((adapter->tx_itr_setting != 1) && + (adapter->tx_itr_setting < IXGBE_100K_ITR)) { + if ((tx_itr_prev == 1) || + (tx_itr_prev >= IXGBE_100K_ITR)) + need_reset = true; + } else { + if ((tx_itr_prev != 1) && + (tx_itr_prev < IXGBE_100K_ITR)) + need_reset = true; + } + + /* check the old value and enable RSC if necessary */ + need_reset |= ixgbe_update_rsc(adapter); + + if (adapter->hw.mac.dmac_config.watchdog_timer && + (!adapter->rx_itr_setting && !adapter->tx_itr_setting)) { + e_info(probe, + "Disabling DMA coalescing because interrupt throttling is disabled\n"); + adapter->hw.mac.dmac_config.watchdog_timer = 0; + ixgbe_dmac_config(&adapter->hw); + } + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->tx.work_limit = adapter->tx_work_limit; + q_vector->rx.work_limit = adapter->rx_work_limit; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + ixgbe_write_eitr(q_vector); + } + + /* + * do reset here at the end to make sure EITR==0 case is handled + * correctly w.r.t stopping tx, 
and changing TXDCTL.WTHRESH settings + * also locks in RSC enable/disable which requires reset + */ + if (need_reset) + ixgbe_do_reset(netdev); + + return 0; +} + +#ifndef HAVE_NDO_SET_FEATURES +static u32 ixgbe_get_rx_csum(struct net_device *netdev) +{ + return !!(netdev->features & NETIF_F_RXCSUM); +} + +static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + bool need_reset = false; + + if (data) + netdev->features |= NETIF_F_RXCSUM; + else + netdev->features &= ~NETIF_F_RXCSUM; + + /* LRO and RSC both depend on RX checksum to function */ + if (!data && (netdev->features & NETIF_F_LRO)) { + netdev->features &= ~NETIF_F_LRO; + + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + need_reset = true; + } + } + +#ifdef HAVE_VXLAN_RX_OFFLOAD + if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && data) { + netdev->hw_enc_features |= NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM; + if (!need_reset) + adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; + } else { + netdev->hw_enc_features &= ~(NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM); + ixgbe_clear_udp_tunnel_port(adapter, + IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); + } +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + + if (need_reset) + ixgbe_do_reset(netdev); + + return 0; +} + +static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); +#ifdef NETIF_F_IPV6_CSUM + u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; +#else + u32 feature_list = NETIF_F_IP_CSUM; +#endif + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: +#ifdef HAVE_ENCAP_TSO_OFFLOAD + if (data) + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; + else + netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL; + feature_list |= NETIF_F_GSO_UDP_TUNNEL; 
+#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + feature_list |= NETIF_F_SCTP_CSUM; + break; + default: + break; + } + + if (data) + netdev->features |= feature_list; + else + netdev->features &= ~feature_list; + + return 0; +} + +#ifdef NETIF_F_TSO +static int ixgbe_set_tso(struct net_device *netdev, u32 data) +{ +#ifdef NETIF_F_TSO6 + u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6; +#else + u32 feature_list = NETIF_F_TSO; +#endif + + if (data) + netdev->features |= feature_list; + else + netdev->features &= ~feature_list; + +#ifndef HAVE_NETDEV_VLAN_FEATURES + if (!data) { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct net_device *v_netdev; + int i; + + /* disable TSO on all VLANs if they're present */ + if (!adapter->vlgrp) + goto tso_out; + + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { + v_netdev = vlan_group_get_device(adapter->vlgrp, i); + if (!v_netdev) + continue; + + v_netdev->features &= ~feature_list; + vlan_group_set_device(adapter->vlgrp, i, v_netdev); + } + } + +tso_out: + +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + return 0; +} + +#endif /* NETIF_F_TSO */ +#ifdef ETHTOOL_GFLAGS +static int ixgbe_set_flags(struct net_device *netdev, u32 data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_q_vector *q_vector; - int i; - u16 tx_itr_param, rx_itr_param, tx_itr_prev; + u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN; + u32 changed = netdev->features ^ data; bool need_reset = false; + int rc; - if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { - /* reject Tx specific changes in case of mixed RxTx vectors */ - if (ec->tx_coalesce_usecs) - return -EINVAL; - tx_itr_prev = adapter->rx_itr_setting; - } else { - tx_itr_prev = adapter->tx_itr_setting; - } - - if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || - (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) +#ifndef HAVE_VLAN_RX_REGISTER + if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && + !(data & ETH_FLAG_RXVLAN)) return -EINVAL; - if 
(ec->rx_coalesce_usecs > 1) - adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; - else - adapter->rx_itr_setting = ec->rx_coalesce_usecs; +#endif + if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) + supported_flags |= ETH_FLAG_LRO; - if (adapter->rx_itr_setting == 1) - rx_itr_param = IXGBE_20K_ITR; - else - rx_itr_param = adapter->rx_itr_setting; +#ifdef ETHTOOL_GRXRINGS + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + case ixgbe_mac_X540: + case ixgbe_mac_82599EB: + supported_flags |= ETH_FLAG_NTUPLE; + default: + break; + } - if (ec->tx_coalesce_usecs > 1) - adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; - else - adapter->tx_itr_setting = ec->tx_coalesce_usecs; +#endif +#ifdef NETIF_F_RXHASH + supported_flags |= ETH_FLAG_RXHASH; - if (adapter->tx_itr_setting == 1) - tx_itr_param = IXGBE_12K_ITR; - else - tx_itr_param = adapter->tx_itr_setting; +#endif + rc = ethtool_op_set_flags(netdev, data, supported_flags); + if (rc) + return rc; - /* mixed Rx/Tx */ - if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) - adapter->tx_itr_setting = adapter->rx_itr_setting; +#ifndef HAVE_VLAN_RX_REGISTER + if (changed & ETH_FLAG_RXVLAN) + ixgbe_vlan_mode(netdev, netdev->features); +#endif - /* detect ITR changes that require update of TXDCTL.WTHRESH */ - if ((adapter->tx_itr_setting != 1) && - (adapter->tx_itr_setting < IXGBE_100K_ITR)) { - if ((tx_itr_prev == 1) || - (tx_itr_prev >= IXGBE_100K_ITR)) +#ifdef HAVE_VXLAN_RX_OFFLOAD + if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && + netdev->features & NETIF_F_RXCSUM) { + vxlan_get_rx_port(netdev); + else + ixgbe_clear_udp_tunnel_port(adapter, + IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); + } +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + + /* if state changes we need to update adapter->flags and reset */ + if (!(netdev->features & NETIF_F_LRO)) { + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) need_reset = true; - } else { - if ((tx_itr_prev != 1) && 
- (tx_itr_prev < IXGBE_100K_ITR)) + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && + !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { + if (adapter->rx_itr_setting == 1 || + adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; need_reset = true; + } else if (changed & ETH_FLAG_LRO) { + e_info(probe, "rx-usecs set too low, " + "disabling RSC\n"); + } } - /* check the old value and enable RSC if necessary */ - need_reset |= ixgbe_update_rsc(adapter); +#ifdef ETHTOOL_GRXRINGS + /* + * Check if Flow Director n-tuple support was enabled or disabled. If + * the state changed, we need to reset. + */ + switch (netdev->features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: + /* turn off ATR, enable perfect filters and reset */ + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + need_reset = true; - for (i = 0; i < adapter->num_q_vectors; i++) { - q_vector = adapter->q_vector[i]; - if (q_vector->tx.count && !q_vector->rx.count) - /* tx only */ - q_vector->itr = tx_itr_param; - else - /* rx only or mixed */ - q_vector->itr = rx_itr_param; - ixgbe_write_eitr(q_vector); + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) + need_reset = true; + + adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + + /* We cannot enable ATR if VMDq is enabled */ + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) + break; + + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + break; + + /* We cannot enable ATR if RSS is disabled */ + if (adapter->ring_feature[RING_F_RSS].limit <= 1) + break; + + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + break; + + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + break; } - /* - * do reset 
here at the end to make sure EITR==0 case is handled - * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings - * also locks in RSC enable/disable which requires reset - */ +#endif /* ETHTOOL_GRXRINGS */ if (need_reset) ixgbe_do_reset(netdev); return 0; } +#endif /* ETHTOOL_GFLAGS */ +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef ETHTOOL_GRXRINGS static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, struct ethtool_rxnfc *cmd) { @@ -2577,11 +3246,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, switch (cmd->flow_type) { case TCP_V4_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fallthrough */ + /* fall through */ case UDP_V4_FLOW: if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fallthrough */ + /* fall through */ case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: @@ -2591,11 +3260,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, break; case TCP_V6_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fallthrough */ + /* fall through */ case UDP_V6_FLOW: if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fallthrough */ + /* fall through */ case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case AH_V6_FLOW: @@ -2611,7 +3280,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, } static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, +#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS + void *rule_locs) +#else u32 *rule_locs) +#endif { struct ixgbe_adapter *adapter = netdev_priv(dev); int ret = -EOPNOTSUPP; @@ -2629,7 +3302,8 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); break; case ETHTOOL_GRXCLSRLALL: - ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs); + ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, + (u32 *)rule_locs); break; case ETHTOOL_GRXFH: ret = 
ixgbe_get_rss_hash_opts(adapter, cmd); @@ -2641,14 +3315,15 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } -int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, - struct ixgbe_fdir_filter *input, - u16 sw_idx) +static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ixgbe_fdir_filter *input, + u16 sw_idx) { struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; struct ixgbe_fdir_filter *rule, *parent; - int err = -EINVAL; + bool deleted = false; + s32 err; parent = NULL; rule = NULL; @@ -2663,24 +3338,32 @@ int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, /* if there is an old rule occupying our place remove it */ if (rule && (rule->sw_idx == sw_idx)) { - if (!input || (rule->filter.formatted.bkt_hash != - input->filter.formatted.bkt_hash)) { + /* hardware filters are only configured when interface is up, + * and we should not issue filter commands while the interface + * is down + */ + if (netif_running(adapter->netdev) && + (!input || (rule->filter.formatted.bkt_hash != + input->filter.formatted.bkt_hash))) { err = ixgbe_fdir_erase_perfect_filter_82599(hw, &rule->filter, sw_idx); + if (err) + return -EINVAL; } hlist_del(&rule->fdir_node); kfree(rule); adapter->fdir_filter_count--; + deleted = true; } - /* - * If no input this was a delete, err should be 0 if a rule was - * successfully found and removed from the list else -EINVAL + /* If we weren't given an input, then this was a request to delete a + * filter. We should return -EINVAL if the filter wasn't found, but + * return 0 if the rule was successfully deleted. */ if (!input) - return err; + return deleted ? 
0 : -EINVAL; /* initialize node and set software index */ INIT_HLIST_NODE(&input->fdir_node); @@ -2727,6 +3410,7 @@ static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; break; } + /* fall through */ default: return 0; } @@ -2746,35 +3430,18 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_fdir_filter *input; union ixgbe_atr_input mask; - u8 queue; int err; if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) return -EOPNOTSUPP; - /* ring_cookie is a masked into a set of queues and ixgbe pools or - * we use the drop index. + /* + * Don't allow programming if the action is a queue greater than + * the number of online Rx queues. */ - if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { - queue = IXGBE_FDIR_DROP_QUEUE; - } else { - u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); - u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); - - if (!vf && (ring >= adapter->num_rx_queues)) - return -EINVAL; - else if (vf && - ((vf > adapter->num_vfs) || - ring >= adapter->num_rx_queues_per_pool)) - return -EINVAL; - - /* Map the ring onto the absolute queue index */ - if (!vf) - queue = adapter->rx_ring[ring]->reg_idx; - else - queue = ((vf - 1) * - adapter->num_rx_queues_per_pool) + ring; - } + if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && + (fsp->ring_cookie >= adapter->num_rx_queues)) + return -EINVAL; /* Don't allow indexes to exist outside of available space */ if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { @@ -2837,24 +3504,33 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, if (hlist_empty(&adapter->fdir_filter_list)) { /* save mask and program input mask into HW */ memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); - err = ixgbe_fdir_set_input_mask_82599(hw, &mask); + err = ixgbe_fdir_set_input_mask_82599(hw, &mask, adapter->cloud_mode); if (err) { e_err(drv, "Error writing mask\n"); goto 
err_out_w_lock; } } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { - e_err(drv, "Only one mask supported per port\n"); + e_err(drv, "Hardware only supports one mask per port. To change the mask you must first delete all the rules.\n"); goto err_out_w_lock; } /* apply mask and compute/store hash */ ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); - /* program filters to filter memory */ - err = ixgbe_fdir_write_perfect_filter_82599(hw, - &input->filter, input->sw_idx, queue); - if (err) - goto err_out_w_lock; + /* only program filters to hardware if the net device is running, as + * we store the filters in the Rx buffer which is not allocated when + * the device is down + */ + if (netif_running(adapter->netdev)) { + err = ixgbe_fdir_write_perfect_filter_82599(hw, + &input->filter, input->sw_idx, + (input->action == IXGBE_FDIR_DROP_QUEUE) ? + IXGBE_FDIR_DROP_QUEUE : + adapter->rx_ring[input->action]->reg_idx, + adapter->cloud_mode); + if (err) + goto err_out_w_lock; + } ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); @@ -2882,6 +3558,19 @@ static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, return err; } +#ifdef ETHTOOL_SRXNTUPLE +/* + * We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid + * a null pointer dereference as it was assumend if the NETIF_F_NTUPLE flag + * was defined that this function was present. 
+ */ +static int ixgbe_set_rx_ntuple(struct net_device __always_unused *dev, + struct ethtool_rx_ntuple __always_unused *cmd) +{ + return -EOPNOTSUPP; +} + +#endif #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, @@ -2968,7 +3657,8 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, if ((flags2 & UDP_RSS_FLAGS) && !(adapter->flags2 & UDP_RSS_FLAGS)) - e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + e_warn(drv, "enabling UDP RSS: fragmented packets" + " may arrive out of order to the stack above\n"); adapter->flags2 = flags2; @@ -3019,6 +3709,7 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter) { if (adapter->hw.mac.type < ixgbe_mac_X550) @@ -3029,9 +3720,7 @@ static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter) static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - return sizeof(adapter->rss_key); + return IXGBE_RSS_KEY_SIZE; } static u32 ixgbe_rss_indir_size(struct net_device *netdev) @@ -3053,13 +3742,19 @@ static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir) indir[i] = adapter->rss_indir_tbl[i] & rss_m; } +#ifdef HAVE_RXFH_HASHFUNC static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +#else +static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#endif { struct ixgbe_adapter *adapter = netdev_priv(netdev); +#ifdef HAVE_RXFH_HASHFUNC if (hfunc) *hfunc = ETH_RSS_HASH_TOP; +#endif if (indir) ixgbe_get_reta(adapter, indir); @@ -3070,15 +3765,26 @@ static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, return 0; } +#ifdef HAVE_RXFH_HASHFUNC static int 
ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) +#else +#ifdef HAVE_RXFH_NONCONST +static int ixgbe_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#else +static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key) +#endif /* HAVE_RXFH_NONCONST */ +#endif /* HAVE_RXFH_HASHFUNC */ { struct ixgbe_adapter *adapter = netdev_priv(netdev); int i; u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); +#ifdef HAVE_RXFH_HASHFUNC if (hfunc) return -EINVAL; +#endif /* Fill out the redirection table */ if (indir) { @@ -3100,27 +3806,32 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, } /* Fill out the rss hash key */ - if (key) + if (key) { memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev)); + ixgbe_store_key(adapter); + } ixgbe_store_reta(adapter); return 0; } +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#ifdef HAVE_ETHTOOL_GET_TS_INFO static int ixgbe_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { struct ixgbe_adapter *adapter = netdev_priv(dev); /* we always support timestamping disabled */ - info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + info->rx_filters = 1 << HWTSTAMP_FILTER_NONE; switch (adapter->hw.mac.type) { +#ifdef HAVE_PTP_1588_CLOCK case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); + case ixgbe_mac_X550EM_a: + info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL; /* fallthrough */ case ixgbe_mac_X540: case ixgbe_mac_82599EB: @@ -3138,20 +3849,33 @@ static int ixgbe_get_ts_info(struct net_device *dev, info->phc_index = -1; info->tx_types = - BIT(HWTSTAMP_TX_OFF) | - BIT(HWTSTAMP_TX_ON); + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); info->rx_filters |= - BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) 
| + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); break; +#endif /* HAVE_PTP_1588_CLOCK */ default: return ethtool_op_get_ts_info(dev, info); + break; } return 0; } +#endif /* HAVE_ETHTOOL_GET_TS_INFO */ +#endif /* ETHTOOL_GRXRINGS */ +#ifdef ETHTOOL_SCHANNELS static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) { unsigned int max_combined; @@ -3179,7 +3903,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) /* support up to 64 queues with ATR */ max_combined = IXGBE_MAX_FDIR_INDICES; } else { - /* support up to 16 queues with RSS */ + /* support up to max allowed queues with RSS */ max_combined = ixgbe_max_rss_indices(adapter); } @@ -3250,23 +3974,25 @@ static int ixgbe_set_channels(struct net_device *dev, count = max_rss_indices; adapter->ring_feature[RING_F_RSS].limit = count; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* cap FCoE limit at 8 */ if (count > IXGBE_FCRETA_SIZE) count = IXGBE_FCRETA_SIZE; adapter->ring_feature[RING_F_FCOE].limit = count; +#endif /* CONFIG_FCOE */ -#endif /* use setup TC to update any traffic class queue mapping */ return ixgbe_setup_tc(dev, netdev_get_num_tc(dev)); } +#endif /* ETHTOOL_SCHANNELS */ +#ifdef ETHTOOL_GMODULEINFO static int ixgbe_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; - s32 status; + u32 status; u8 sff8472_rev, addr_mode; bool page_swap = false; @@ -3274,14 +4000,14 @@ static int ixgbe_get_module_info(struct net_device *dev, status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_COMP, &sff8472_rev); - if (status) + if 
(status != 0) return -EIO; /* addressing mode is not supported */ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_SWAP, &addr_mode); - if (status) + if (status != 0) return -EIO; if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { @@ -3308,7 +4034,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; - s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 status = IXGBE_ERR_PHY_ADDR_INVALID; u8 databyte = 0xFF; int i = 0; @@ -3325,7 +4051,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, else status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); - if (status) + if (status != 0) return -EIO; data[i - ee->offset] = databyte; @@ -3333,47 +4059,371 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, return 0; } +#endif /* ETHTOOL_GMODULEINFO */ + +#ifdef ETHTOOL_GEEE + +static const struct { + ixgbe_link_speed mac_speed; + u32 supported; +} ixgbe_ls_map[] = { + { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full }, + { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full }, + { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full }, + { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full }, + { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full }, +}; + +static const struct { + u32 lp_advertised; + u32 mac_speed; +} ixgbe_lp_map[] = { + { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full }, + { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full }, + { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full }, + { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full }, + { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full }, + { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full}, +}; + +static int +ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata) +{ + u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; + struct ixgbe_hw *hw = &adapter->hw; + s32 rc; + u16 i; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info); + if 
(rc) + return rc; + + edata->lp_advertised = 0; + for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) { + if (info[0] & ixgbe_lp_map[i].lp_advertised) + edata->lp_advertised |= ixgbe_lp_map[i].mac_speed; + } + + edata->supported = 0; + for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { + if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed) + edata->supported |= ixgbe_ls_map[i].supported; + } + + edata->advertised = 0; + for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { + if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed) + edata->advertised |= ixgbe_ls_map[i].supported; + } + + edata->eee_enabled = !!edata->advertised; + edata->tx_lpi_enabled = edata->eee_enabled; + if (edata->advertised & edata->lp_advertised) + edata->eee_active = true; + + return 0; +} + +static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (!hw->mac.ops.setup_eee) + return -EOPNOTSUPP; + + if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) + return -EOPNOTSUPP; + + if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw) + return ixgbe_get_eee_fw(adapter, edata); + + return -EOPNOTSUPP; +} +#endif /* ETHTOOL_GEEE */ + +#ifdef ETHTOOL_SEEE +static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct ethtool_eee eee_data; + s32 ret_val; + + if (!(hw->mac.ops.setup_eee && + (adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))) + return -EOPNOTSUPP; + + memset(&eee_data, 0, sizeof(struct ethtool_eee)); + + ret_val = ixgbe_get_eee(netdev, &eee_data); + if (ret_val) + return ret_val; + + if (eee_data.eee_enabled && !edata->eee_enabled) { + if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { + e_dev_err("Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) { + 
e_dev_err("Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + + if (eee_data.advertised != edata->advertised) { + e_dev_err("Setting EEE advertised speeds is not supported\n"); + return -EINVAL; + } + + } + + if (eee_data.eee_enabled != edata->eee_enabled) { + + if (edata->eee_enabled) { + adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; + hw->phy.eee_speeds_advertised = + hw->phy.eee_speeds_supported; + } else { + adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; + hw->phy.eee_speeds_advertised = 0; + } + + /* reset link */ + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + else + ixgbe_reset(adapter); + } + + return 0; +} +#endif /* ETHTOOL_SEEE */ + +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT +/** + * ixgbe_get_priv_flags - report device private flags + * @netdev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the ixgbe_priv_flags_strings + * array. + * + * Returns a u32 bitmap of flags. 
+ **/ +static u32 ixgbe_get_priv_flags(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) + priv_flags |= IXGBE_PRIV_FLAGS_FD_ATR; +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + + if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) + priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX; +#endif + + return priv_flags; +} + +/** + * ixgbe_set_priv_flags - set private flags + * @netdev: network interface device structure + * @flags: bit flags to be set + **/ +static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + unsigned int flags2 = adapter->flags2; +#endif + unsigned int flags = adapter->flags; + + /* allow the user to control the state of the Flow + * Director ATR (Application Targeted Routing) feature + * of the driver + */ + flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + if (priv_flags & IXGBE_PRIV_FLAGS_FD_ATR) { + /* We cannot enable ATR if VMDq is enabled */ + if (flags & IXGBE_FLAG_VMDQ_ENABLED) + return -EINVAL; + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + return -EINVAL; + /* We cannot enable ATR if RSS is disabled */ + if (adapter->ring_feature[RING_F_RSS].limit <= 1) + return -EINVAL; + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + return -EINVAL; + flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + } +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + + flags2 &= ~IXGBE_FLAG2_RX_LEGACY; + if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX) + flags2 |= IXGBE_FLAG2_RX_LEGACY; +#endif + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* ATR state change requires a reset */ + ixgbe_do_reset(netdev); +#ifndef HAVE_SWIOTLB_SKIP_CPU_SYNC + } +#else + } else if (flags2 != adapter->flags2) { + adapter->flags2 = flags2; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + 
ixgbe_reinit_locked(adapter); + } +#endif + + return 0; +} -static const struct ethtool_ops ixgbe_ethtool_ops = { - .get_settings = ixgbe_get_settings, - .set_settings = ixgbe_set_settings, - .get_drvinfo = ixgbe_get_drvinfo, - .get_regs_len = ixgbe_get_regs_len, - .get_regs = ixgbe_get_regs, - .get_wol = ixgbe_get_wol, - .set_wol = ixgbe_set_wol, - .nway_reset = ixgbe_nway_reset, - .get_link = ethtool_op_get_link, - .get_eeprom_len = ixgbe_get_eeprom_len, - .get_eeprom = ixgbe_get_eeprom, - .set_eeprom = ixgbe_set_eeprom, - .get_ringparam = ixgbe_get_ringparam, - .set_ringparam = ixgbe_set_ringparam, - .get_pauseparam = ixgbe_get_pauseparam, - .set_pauseparam = ixgbe_set_pauseparam, - .get_msglevel = ixgbe_get_msglevel, - .set_msglevel = ixgbe_set_msglevel, - .self_test = ixgbe_diag_test, - .get_strings = ixgbe_get_strings, - .set_phys_id = ixgbe_set_phys_id, - .get_sset_count = ixgbe_get_sset_count, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ +static struct ethtool_ops ixgbe_ethtool_ops = { +#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE + .get_link_ksettings = ixgbe_get_link_ksettings, + .set_link_ksettings = ixgbe_set_link_ksettings, +#else + .get_settings = ixgbe_get_settings, + .set_settings = ixgbe_set_settings, +#endif + .get_drvinfo = ixgbe_get_drvinfo, + .get_regs_len = ixgbe_get_regs_len, + .get_regs = ixgbe_get_regs, + .get_wol = ixgbe_get_wol, + .set_wol = ixgbe_set_wol, + .nway_reset = ixgbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = ixgbe_get_eeprom_len, + .get_eeprom = ixgbe_get_eeprom, + .set_eeprom = ixgbe_set_eeprom, + .get_ringparam = ixgbe_get_ringparam, + .set_ringparam = ixgbe_set_ringparam, + .get_pauseparam = ixgbe_get_pauseparam, + .set_pauseparam = ixgbe_set_pauseparam, + .get_msglevel = ixgbe_get_msglevel, + .set_msglevel = ixgbe_set_msglevel, +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .self_test_count = ixgbe_diag_test_count, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .self_test = ixgbe_diag_test, + .get_strings = 
ixgbe_get_strings, +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef HAVE_ETHTOOL_SET_PHYS_ID + .set_phys_id = ixgbe_set_phys_id, +#else + .phys_id = ixgbe_phys_id, +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .get_stats_count = ixgbe_get_stats_count, +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .get_sset_count = ixgbe_get_sset_count, + .get_priv_flags = ixgbe_get_priv_flags, + .set_priv_flags = ixgbe_set_priv_flags, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ .get_ethtool_stats = ixgbe_get_ethtool_stats, - .get_coalesce = ixgbe_get_coalesce, - .set_coalesce = ixgbe_set_coalesce, +#ifdef HAVE_ETHTOOL_GET_PERM_ADDR + .get_perm_addr = ethtool_op_get_perm_addr, +#endif + .get_coalesce = ixgbe_get_coalesce, + .set_coalesce = ixgbe_set_coalesce, +#ifndef HAVE_NDO_SET_FEATURES + .get_rx_csum = ixgbe_get_rx_csum, + .set_rx_csum = ixgbe_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ixgbe_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, + .set_tso = ixgbe_set_tso, +#endif +#ifdef ETHTOOL_GFLAGS + .get_flags = ethtool_op_get_flags, + .set_flags = ixgbe_set_flags, +#endif +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef ETHTOOL_GRXRINGS .get_rxnfc = ixgbe_get_rxnfc, .set_rxnfc = ixgbe_set_rxnfc, +#ifdef ETHTOOL_SRXNTUPLE + .set_rx_ntuple = ixgbe_set_rx_ntuple, +#endif +#endif /* ETHTOOL_GRXRINGS */ +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef ETHTOOL_GEEE + .get_eee = ixgbe_get_eee, +#endif /* ETHTOOL_GEEE */ +#ifdef ETHTOOL_SEEE + .set_eee = ixgbe_set_eee, +#endif /* ETHTOOL_SEEE */ +#ifdef ETHTOOL_SCHANNELS + .get_channels = ixgbe_get_channels, + .set_channels = ixgbe_set_channels, +#endif +#ifdef ETHTOOL_GMODULEINFO + .get_module_info = ixgbe_get_module_info, + .get_module_eeprom = ixgbe_get_module_eeprom, +#endif +#ifdef HAVE_ETHTOOL_GET_TS_INFO + .get_ts_info = ixgbe_get_ts_info, +#endif 
+#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) .get_rxfh_indir_size = ixgbe_rss_indir_size, .get_rxfh_key_size = ixgbe_get_rxfh_key_size, .get_rxfh = ixgbe_get_rxfh, .set_rxfh = ixgbe_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +}; + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +static const struct ethtool_ops_ext ixgbe_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + .get_ts_info = ixgbe_get_ts_info, + .set_phys_id = ixgbe_set_phys_id, .get_channels = ixgbe_get_channels, .set_channels = ixgbe_set_channels, - .get_ts_info = ixgbe_get_ts_info, +#ifdef ETHTOOL_GMODULEINFO .get_module_info = ixgbe_get_module_info, .get_module_eeprom = ixgbe_get_module_eeprom, +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_indir_size = ixgbe_rss_indir_size, + .get_rxfh_key_size = ixgbe_get_rxfh_key_size, + .get_rxfh = ixgbe_get_rxfh, + .set_rxfh = ixgbe_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#ifdef ETHTOOL_GEEE + .get_eee = ixgbe_get_eee, +#endif /* ETHTOOL_GEEE */ +#ifdef ETHTOOL_SEEE + .set_eee = ixgbe_set_eee, +#endif /* ETHTOOL_SEEE */ }; +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ void ixgbe_set_ethtool_ops(struct net_device *netdev) { +#ifndef ETHTOOL_OPS_COMPAT netdev->ethtool_ops = &ixgbe_ethtool_ops; +#else + SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops); +#endif + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + set_ethtool_ops_ext(netdev, &ixgbe_ethtool_ops_ext); +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ } +#endif /* SIOCETHTOOL */ + diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.c index 2a653ec954f5..abd12a9a7f75 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.c +++ 
b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -27,9 +23,12 @@ *******************************************************************************/ #include "ixgbe.h" + +#if IS_ENABLED(CONFIG_FCOE) +#if IS_ENABLED(CONFIG_DCB) +#include "ixgbe_dcb_82599.h" +#endif /* CONFIG_DCB */ #include -#include -#include #include #include #include @@ -39,7 +38,7 @@ /** * ixgbe_fcoe_clear_ddp - clear the given ddp context - * @ddp: ptr to the ixgbe_fcoe_ddp + * @ddp - ptr to the ixgbe_fcoe_ddp * * Returns : none * @@ -67,72 +66,76 @@ static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) */ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) { - int len; + int len = 0; struct ixgbe_fcoe *fcoe; struct ixgbe_adapter *adapter; - struct ixgbe_fcoe_ddp *ddp; struct ixgbe_hw *hw; + struct ixgbe_fcoe_ddp *ddp; u32 fcbuff; if (!netdev) - return 0; + goto out_ddp_put; - if (xid >= netdev->fcoe_ddp_xid) - return 0; + if (xid > netdev->fcoe_ddp_xid) + goto out_ddp_put; adapter = netdev_priv(netdev); + hw = &adapter->hw; fcoe = &adapter->fcoe; ddp = &fcoe->ddp[xid]; if (!ddp->udl) - return 
0; + goto out_ddp_put; - hw = &adapter->hw; len = ddp->len; - /* if no error then skip ddp context invalidation */ - if (!ddp->err) - goto skip_ddpinv; - - if (hw->mac.type == ixgbe_mac_X550) { - /* X550 does not require DDP FCoE lock */ - - IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0); - IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), - (xid | IXGBE_FCFLTRW_WE)); - - /* program FCBUFF */ - IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0); - - /* program FCDMARW */ - IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), - (xid | IXGBE_FCDMARW_WE)); - - /* read FCBUFF to check context invalidated */ - IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), - (xid | IXGBE_FCDMARW_RE)); - fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid)); - } else { - /* other hardware requires DDP FCoE lock */ - spin_lock_bh(&fcoe->lock); - IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0); - IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, - (xid | IXGBE_FCFLTRW_WE)); - IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0); - IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, - (xid | IXGBE_FCDMARW_WE)); - - /* guaranteed to be invalidated after 100us */ - IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, - (xid | IXGBE_FCDMARW_RE)); - fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF); - spin_unlock_bh(&fcoe->lock); + /* if there an error, force to invalidate ddp context */ + if (ddp->err) { + switch (hw->mac.type) { + case ixgbe_mac_X550: + /* X550 does not require DDP FCoE lock */ + + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), + (xid | IXGBE_FCFLTRW_WE)); + + /* program FCBUFF */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0); + + /* program FCDMARW */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), + (xid | IXGBE_FCDMARW_WE)); + + /* read FCBUFF to check context invalidated */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), + (xid | IXGBE_FCDMARW_RE)); + fcbuff = IXGBE_READ_REG(hw, + IXGBE_FCDDC(2, xid)); + break; + default: + /* other hardware requires DDP FCoE lock */ + spin_lock_bh(&fcoe->lock); + + IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0); + IXGBE_WRITE_REG(hw, 
IXGBE_FCFLTRW, + (xid | IXGBE_FCFLTRW_WE)); + IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, + (xid | IXGBE_FCDMARW_WE)); + + /* read FCBUFF to check context invalidated */ + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, + (xid | IXGBE_FCDMARW_RE)); + fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF); + + spin_unlock_bh(&fcoe->lock); + break; } - if (fcbuff & IXGBE_FCBUFF_VALID) - usleep_range(100, 150); - -skip_ddpinv: + /* guaranteed to be invalidated after 100us */ + if (fcbuff & IXGBE_FCBUFF_VALID) + udelay(100); + } if (ddp->sgl) - dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, + dma_unmap_sg(pci_dev_to_dev(adapter->pdev), ddp->sgl, ddp->sgc, DMA_FROM_DEVICE); if (ddp->pool) { dma_pool_free(ddp->pool, ddp->udl, ddp->udp); @@ -141,6 +144,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) ixgbe_fcoe_clear_ddp(ddp); +out_ddp_put: return len; } @@ -173,11 +177,11 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, u32 fcbuff, fcdmarw, fcfltrw, fcrxctl; dma_addr_t addr = 0; - if (!netdev || !sgl) + if (!netdev || !sgl || !sgc) return 0; adapter = netdev_priv(netdev); - if (xid >= netdev->fcoe_ddp_xid) { + if (xid > netdev->fcoe_ddp_xid) { e_warn(drv, "xid=0x%x out-of-range\n", xid); return 0; } @@ -191,7 +195,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, ddp = &fcoe->ddp[xid]; if (ddp->sgl) { e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", - xid, ddp->sgl, ddp->sgc); + xid, ddp->sgl, ddp->sgc); return 0; } ixgbe_fcoe_clear_ddp(ddp); @@ -209,7 +213,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, } /* setup dma from scsi command sgl */ - dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); + dmacount = dma_map_sg(pci_dev_to_dev(adapter->pdev), sgl, sgc, DMA_FROM_DEVICE); if (dmacount == 0) { e_err(drv, "xid 0x%x DMA map error\n", xid); goto out_noddp; @@ -267,8 +271,9 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, lastsize 
= thisoff + thislen; /* - * lastsize can not be buffer len. + * lastsize can not be bufflen. * If it is then adding another buffer with lastsize = 1. + * Since lastsize is 1 there will be no HW access to this buffer. */ if (lastsize == bufflen) { if (j >= IXGBE_BUFFCNT_MAX) { @@ -309,7 +314,8 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl); } - if (hw->mac.type == ixgbe_mac_X550) { + switch (hw->mac.type) { + case ixgbe_mac_X550: /* X550 does not require DDP lock */ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid), @@ -321,8 +327,14 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID); IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0); IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw); - } else { - /* DDP lock for indirect DDP context access */ + /* + * TBD: SMAC and FCID info not available with current + * netdev APIs, add code to pull that from skb later + * and then program that here before enabling DDP context. 
+ */ + break; + default: + /* other devices require DDP lock with direct DDP context access */ spin_lock_bh(&fcoe->lock); IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); @@ -335,6 +347,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); spin_unlock_bh(&fcoe->lock); + break; } return 1; @@ -344,7 +357,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, ixgbe_fcoe_clear_ddp(ddp); out_noddp_unmap: - dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); + dma_unmap_sg(pci_dev_to_dev(adapter->pdev), sgl, sgc, DMA_FROM_DEVICE); out_noddp: put_cpu(); return 0; @@ -370,6 +383,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); } +#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET /** * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode * @netdev: the corresponding net_device @@ -386,11 +400,12 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, * Returns : 1 for success and 0 for no ddp */ int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc) + struct scatterlist *sgl, unsigned int sgc) { return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); } +#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */ /** * ixgbe_fcoe_ddp - check ddp status and mark it done * @adapter: ixgbe adapter @@ -399,7 +414,7 @@ int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, * * This checks ddp status. * - * Returns : < 0 indicates an error or not a FCiE ddp, 0 indicates + * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates * not passing the skb to ULD, > 0 indicates is the length of data * being ddped. 
*/ @@ -407,14 +422,12 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - int rc = -EINVAL; - struct ixgbe_fcoe *fcoe; + struct ixgbe_fcoe *fcoe = &adapter->fcoe; struct ixgbe_fcoe_ddp *ddp; struct fc_frame_header *fh; - struct fcoe_crc_eof *crc; + int rc = -EINVAL, ddp_max; __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR); __le32 ddp_err; - int ddp_max; u32 fctl; u16 xid; @@ -423,35 +436,36 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, else skb->ip_summed = CHECKSUM_UNNECESSARY; - if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)) - fh = (struct fc_frame_header *)(skb->data + - sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr)); - else - fh = (struct fc_frame_header *)(skb->data + - sizeof(struct fcoe_hdr)); + /* verify header contains at least the FCOE header */ + BUG_ON(skb_headlen(skb) < FCOE_HEADER_LEN); + + fh = (struct fc_frame_header *)(skb->data + sizeof(struct fcoe_hdr)); + + if (skb->protocol == htons(ETH_P_8021Q)) + fh = (struct fc_frame_header *)((char *)fh + VLAN_HLEN); fctl = ntoh24(fh->fh_f_ctl); if (fctl & FC_FC_EX_CTX) - xid = be16_to_cpu(fh->fh_ox_id); + xid = ntohs(fh->fh_ox_id); else - xid = be16_to_cpu(fh->fh_rx_id); + xid = ntohs(fh->fh_rx_id); ddp_max = IXGBE_FCOE_DDP_MAX; /* X550 has different DDP Max limit */ if (adapter->hw.mac.type == ixgbe_mac_X550) ddp_max = IXGBE_FCOE_DDP_MAX_X550; + if (xid >= ddp_max) - return -EINVAL; + goto ddp_out; - fcoe = &adapter->fcoe; ddp = &fcoe->ddp[xid]; if (!ddp->udl) - return -EINVAL; + goto ddp_out; ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE | IXGBE_RXDADV_ERR_FCERR); if (ddp_err) - return -EINVAL; + goto ddp_out; switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) { /* return 0 to bypass going to ULD for DDPed data */ @@ -462,7 +476,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, break; /* unmap the sg list when FCPRSP is received */ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): 
- dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, + dma_unmap_sg(pci_dev_to_dev(adapter->pdev), ddp->sgl, ddp->sgc, DMA_FROM_DEVICE); ddp->err = ddp_err; ddp->sgl = NULL; @@ -490,11 +504,12 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, */ if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && (fctl & FC_FC_END_SEQ)) { + struct fcoe_crc_eof *crc; skb_linearize(skb); crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); crc->fcoe_eof = FC_EOF_T; } - +ddp_out: return rc; } @@ -517,15 +532,17 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens; u32 fcoe_sof_eof = 0; u32 mss_l4len_idx; - u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE; u8 sof, eof; + u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE; - if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { - dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", - skb_shinfo(skb)->gso_type); +#ifdef NETIF_F_FSO + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) { + dev_err(tx_ring->dev, "Wrong gso type %d:expecting " + "SKB_GSO_FCOE\n", skb_shinfo(skb)->gso_type); return -EINVAL; } +#endif /* resets the header to point fcoe/fc */ skb_set_network_header(skb, skb->mac_len); skb_set_transport_header(skb, skb->mac_len + @@ -635,7 +652,7 @@ static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, struct dma_pool *pool; char pool_name[32]; - snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu); + snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu); pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX, IXGBE_FCPTR_ALIGN, PAGE_SIZE); @@ -662,11 +679,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) { struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; struct ixgbe_hw *hw = &adapter->hw; - int i, fcoe_q, fcoe_i, fcoe_q_h = 0; - int fcreta_size; + int i, fcoe_i; + u32 fcoe_q, fcoe_q_h = 0; u32 etqf; + int fcreta_size; - /* Minimal functionality for FCoE requires at least CRC offloads */ + /* Minimal funcionality for FCoE requires at least CRC offloads */ if 
(!(adapter->netdev->features & NETIF_F_FCOE_CRC)) return; @@ -679,7 +697,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf); IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); - /* leave registers un-configured if FCoE is disabled */ + /* leave remaining registers unconfigued if FCoE is disabled */ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) return; @@ -696,7 +714,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) & IXGBE_FCRETA_ENTRY_HIGH_MASK; } - fcoe_i = fcoe->offset + (i % fcoe->indices); fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; @@ -753,7 +770,7 @@ void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) for_each_possible_cpu(cpu) ixgbe_fcoe_dma_pool_free(fcoe, cpu); - dma_unmap_single(&adapter->pdev->dev, + dma_unmap_single(pci_dev_to_dev(adapter->pdev), fcoe->extra_ddp_buffer_dma, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); @@ -774,7 +791,7 @@ void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) { struct ixgbe_fcoe *fcoe = &adapter->fcoe; - struct device *dev = &adapter->pdev->dev; + struct device *dev = pci_dev_to_dev(adapter->pdev); void *buffer; dma_addr_t dma; unsigned int cpu; @@ -785,8 +802,10 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) /* Extra buffer to be shared by all DDPs for HW work around */ buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); - if (!buffer) + if (!buffer) { + e_err(drv, "failed to allocate extra DDP buffer\n"); return -ENOMEM; + } dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); if (dma_mapping_error(dev, dma)) { @@ -812,7 +831,11 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) return 0; } +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) +#else static int 
ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) +#endif { struct ixgbe_fcoe *fcoe = &adapter->fcoe; @@ -827,6 +850,7 @@ static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) } adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + /* X550 has different DDP Max limit */ if (adapter->hw.mac.type == ixgbe_mac_X550) adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1; @@ -834,7 +858,11 @@ static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) return 0; } +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) +#else static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) +#endif { struct ixgbe_fcoe *fcoe = &adapter->fcoe; @@ -847,6 +875,7 @@ static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) fcoe->ddp_pool = NULL; } +#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE /** * ixgbe_fcoe_enable - turn on FCoE offload feature * @netdev: the corresponding netdev @@ -934,7 +963,28 @@ int ixgbe_fcoe_disable(struct net_device *netdev) return 0; } +#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ +#if IS_ENABLED(CONFIG_DCB) +#ifdef HAVE_DCBNL_OPS_GETAPP +/** + * ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE + * @netdev: the corresponding net_device + * + * Finds out the corresponding user priority bitmap from the current + * traffic class that FCoE belongs to. Returns 0 as the invalid user + * priority bitmap to indicate an error. 
+ * + * Returns : 802.1p user priority bitmap for FCoE + */ +u8 ixgbe_fcoe_getapp(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + return 1 << adapter->fcoe.up; +} +#endif /* HAVE_DCBNL_OPS_GETAPP */ +#endif /* CONFIG_DCB */ +#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN /** * ixgbe_fcoe_get_wwn - get world wide name for the node or the port * @netdev : ixgbe adapter @@ -949,6 +999,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev) */ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) { + int rc = -EINVAL; u16 prefix = 0xffff; struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_mac_info *mac = &adapter->hw.mac; @@ -973,97 +1024,12 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) ((u64) mac->san_addr[3] << 16) | ((u64) mac->san_addr[4] << 8) | ((u64) mac->san_addr[5]); - return 0; - } - return -EINVAL; -} - -/** - * ixgbe_fcoe_get_hbainfo - get FCoE HBA information - * @netdev : ixgbe adapter - * @info : HBA information - * - * Returns ixgbe HBA information - * - * Returns : 0 on success - */ -int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, - struct netdev_fcoe_hbainfo *info) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int i, pos; - u8 buf[8]; - - if (!info) - return -EINVAL; - - /* Don't return information on unsupported devices */ - if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) - return -EINVAL; - - /* Manufacturer */ - snprintf(info->manufacturer, sizeof(info->manufacturer), - "Intel Corporation"); - - /* Serial Number */ - - /* Get the PCI-e Device Serial Number Capability */ - pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN); - if (pos) { - pos += 4; - for (i = 0; i < 8; i++) - pci_read_config_byte(adapter->pdev, pos + i, &buf[i]); - - snprintf(info->serial_number, sizeof(info->serial_number), - "%02X%02X%02X%02X%02X%02X%02X%02X", - buf[7], buf[6], buf[5], buf[4], - buf[3], buf[2], buf[1], 
buf[0]); - } else - snprintf(info->serial_number, sizeof(info->serial_number), - "Unknown"); - - /* Hardware Version */ - snprintf(info->hardware_version, - sizeof(info->hardware_version), - "Rev %d", hw->revision_id); - /* Driver Name/Version */ - snprintf(info->driver_version, - sizeof(info->driver_version), - "%s v%s", - ixgbe_driver_name, - ixgbe_driver_version); - /* Firmware Version */ - snprintf(info->firmware_version, - sizeof(info->firmware_version), - "0x%08x", - (adapter->eeprom_verh << 16) | - adapter->eeprom_verl); - - /* Model */ - if (hw->mac.type == ixgbe_mac_82599EB) { - snprintf(info->model, - sizeof(info->model), - "Intel 82599"); - } else if (hw->mac.type == ixgbe_mac_X550) { - snprintf(info->model, - sizeof(info->model), - "Intel X550"); - } else { - snprintf(info->model, - sizeof(info->model), - "Intel X540"); + rc = 0; } - - /* Model Description */ - snprintf(info->model_description, - sizeof(info->model_description), - "%s", - ixgbe_default_device_descr); - - return 0; + return rc; } +#endif /* HAVE_NETDEV_OPS_FCOE_GETWWN */ /** * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to * @adapter - pointer to the device adapter structure @@ -1072,9 +1038,6 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, */ u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter) { -#ifdef CONFIG_IXGBE_DCB return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); -#else - return 0; -#endif } +#endif /* CONFIG_FCOE */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.h index 38385876effb..08de8d3dfc95 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.h +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.h @@ -1,7 +1,7 @@ 
/******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -26,8 +22,10 @@ *******************************************************************************/ -#ifndef _IXGBE_FCOE_H -#define _IXGBE_FCOE_H +#ifndef _IXGBE_FCOE_H_ +#define _IXGBE_FCOE_H_ + +#if IS_ENABLED(CONFIG_FCOE) #include #include @@ -38,7 +36,7 @@ /* ddp user buffer */ #define IXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */ #define IXGBE_FCPTR_ALIGN 16 -#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t)) +#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t)) #define IXGBE_FCBUFF_4KB 0x0 #define IXGBE_FCBUFF_8KB 0x1 #define IXGBE_FCBUFF_16KB 0x2 @@ -48,11 +46,16 @@ #define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */ #define IXGBE_FCOE_DDP_MAX_X550 2048 /* 11 bits xid */ -/* Default traffic class to use for FCoE */ -#define IXGBE_FCOE_DEFTC 3 +/* Default user priority to use for FCoE */ +#define IXGBE_FCOE_DEFUP 3 /* fcerr */ -#define IXGBE_FCERR_BADCRC 0x00100000 +#define IXGBE_FCERR_BADCRC 0x00100000 +#define IXGBE_FCERR_EOFSOF 0x00200000 +#define IXGBE_FCERR_NOFIRST 0x00300000 +#define IXGBE_FCERR_OOOSEQ 0x00400000 +#define IXGBE_FCERR_NODMA 0x00500000 +#define IXGBE_FCERR_PKTLOST 0x00600000 /* FCoE DDP for target mode */ #define 
__IXGBE_FCOE_TARGET 1 @@ -83,6 +86,8 @@ struct ixgbe_fcoe { dma_addr_t extra_ddp_buffer_dma; unsigned long mode; u8 up; + u8 up_set; }; +#endif /* CONFIG_FCOE */ #endif /* _IXGBE_FCOE_H */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.c new file mode 100644 index 000000000000..278646355210 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.c @@ -0,0 +1,210 @@ +/******************************************************************************* + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe_hv_vf.h" + +/** + * Hyper-V variant - just a stub. + */ +s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr next, + bool clear) +{ + UNREFERENCED_5PARAMETER(hw, mc_addr_list, mc_addr_count, next, clear); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +/** + * Hyper-V variant - just a stub. 
+ */ +s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) +{ + UNREFERENCED_2PARAMETER(hw, xcast_mode); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +/** + * Hyper-V variant - just a stub. + */ +s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) +{ + UNREFERENCED_5PARAMETER(hw, vlan, vind, vlan_on, vlvf_bypass); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) +{ + UNREFERENCED_3PARAMETER(hw, index, addr); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +/** + * Hyper-V variant; there is no mailbox communication. + */ +s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, + bool autoneg_wait_to_complete) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + struct ixgbe_mac_info *mac = &hw->mac; + u32 links_reg; + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + /* If we were hit with a reset drop the link */ + if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + + /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs + * before the link status is correct + */ + if (mac->type == ixgbe_mac_82599_vf) { + int i; + + for (i = 0; i < 5; i++) { + udelay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + } + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + if (hw->mac.type >= ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case 
IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (hw->mac.type == ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + /* Reserved for pre-x550 devices */ + if (hw->mac.type >= ixgbe_mac_X550) + *speed = IXGBE_LINK_SPEED_10_FULL; + break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link + */ + mac->get_link_status = false; + +out: + *link_up = !mac->get_link_status; + return IXGBE_SUCCESS; +} + +/** + * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + * Hyper-V variant. + **/ +s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) +{ + u32 reg; + + /* If we are on Hyper-V, we implement this functionality + * differently. + */ + reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0)); + /* CRC == 4 */ + reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN); + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version + * @hw: pointer to the HW structure + * @api: integer containing requested API version + * Hyper-V version - only ixgbe_mbox_api_10 supported. 
+ **/ +int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) +{ + UNREFERENCED_1PARAMETER(hw); + + /* Hyper-V only supports api version ixgbe_mbox_api_10 */ + if (api != ixgbe_mbox_api_10) + return IXGBE_ERR_INVALID_ARGUMENT; + + return IXGBE_SUCCESS; +} + +/** + * ixgbevf_hv_init_ops_vf - Initialize the pointers for vf + * @hw: pointer to hardware structure + * + * This will assign function pointers, adapter-specific functions can + * override the assignment of generic function pointers by assigning + * their own adapter-specific function pointers. + * Does not touch the hardware. + **/ +s32 ixgbevf_hv_init_ops_vf(struct ixgbe_hw *hw) +{ + /* Set defaults for VF then override applicable Hyper-V + * specific functions + */ + ixgbe_init_ops_vf(hw); + + hw->mac.ops.reset_hw = ixgbevf_hv_reset_hw_vf; + hw->mac.ops.check_link = ixgbevf_hv_check_mac_link_vf; + hw->mac.ops.negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf; + hw->mac.ops.set_rar = ixgbevf_hv_set_rar_vf; + hw->mac.ops.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf; + hw->mac.ops.update_xcast_mode = ixgbevf_hv_update_xcast_mode; + hw->mac.ops.set_uc_addr = ixgbevf_hv_set_uc_addr_vf; + hw->mac.ops.set_vfta = ixgbevf_hv_set_vfta_vf; + hw->mac.ops.set_rlpml = ixgbevf_hv_set_rlpml_vf; + + return IXGBE_SUCCESS; +} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.h new file mode 100644 index 000000000000..387ed1f36601 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.h @@ -0,0 +1,51 @@ +/******************************************************************************* + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_HV_VF_H_ +#define _IXGBE_HV_VF_H_ + +/* On Hyper-V, to reset, we need to read from this offset + * from the PCI config space. This is the mechanism used on + * Hyper-V to support PF/VF communication. + */ +#define IXGBE_HV_RESET_OFFSET 0x201 + +#include "ixgbe_vf.h" + +s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool autoneg_wait_to_complete); +s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr); +s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr, + bool clear); +s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode); +s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass); +s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size); +int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api); + +extern s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw); +extern s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, + u32 vmdq, u32 enable_addr); +#endif /* _IXGBE_HV_VF_H_ */ diff --git 
a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_lib.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_lib.c index 10d29678d65e..2a0d91dd454c 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_lib.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_lib.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -29,52 +25,79 @@ #include "ixgbe.h" #include "ixgbe_sriov.h" -#ifdef CONFIG_IXGBE_DCB +#ifdef HAVE_TX_MQ /** - * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV + * ixgbe_cache_ring_dcb_vmdq - Descriptor ring to register mapping for VMDq * @adapter: board private structure to initialize * - * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It + * Cache the descriptor ring offsets for VMDq to the assigned rings. It * will also try to cache the proper offsets if RSS/FCoE are enabled along * with VMDq. 
* **/ -static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) +static bool ixgbe_cache_ring_dcb_vmdq(struct ixgbe_adapter *adapter) { -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; -#endif /* IXGBE_FCOE */ +#endif /* CONFIG_FCOE */ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; int i; u16 reg_idx; u8 tcs = netdev_get_num_tc(adapter->netdev); - /* verify we have DCB queueing enabled before proceeding */ + /* verify we have DCB enabled before proceeding */ if (tcs <= 1) return false; /* verify we have VMDq enabled before proceeding */ - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) return false; - /* start at VMDq register offset for SR-IOV enabled setups */ - reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); - for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { - /* If we are greater than indices move to next pool */ - if ((reg_idx & ~vmdq->mask) >= tcs) - reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); - adapter->rx_ring[i]->reg_idx = reg_idx; - } + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + /* + * The bits on the 82598 are reversed compared to the other + * adapters. The DCB bits are the higher order bits and the + * lower bits belong to the VMDq pool. 
In order to sort + * this out we have to swap the bits to get the correct layout + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + reg_idx = ((i >> 3) | (i << 3)) & 0x3F; + adapter->rx_ring[i]->reg_idx = reg_idx; + } + for (i = 0; i < adapter->num_tx_queues; i++) { + reg_idx = ((i >> 4) | (i << 2)) & 0x1F; + adapter->tx_ring[i]->reg_idx = reg_idx; + } + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } - reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); - for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { - /* If we are greater than indices move to next pool */ - if ((reg_idx & ~vmdq->mask) >= tcs) - reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); - adapter->tx_ring[i]->reg_idx = reg_idx; + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + + break; + default: + break; } -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* nothing to do if FCoE is disabled */ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) return true; @@ -102,8 +125,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) reg_idx++; } } +#endif /* CONFIG_FCOE */ -#endif /* IXGBE_FCOE */ return true; } @@ -111,8 +134,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, unsigned 
int *tx, unsigned int *rx) { - struct net_device *dev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; + struct net_device *dev = adapter->netdev; u8 num_tcs = netdev_get_num_tc(dev); *tx = 0; @@ -128,7 +151,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: +case ixgbe_mac_X550EM_a: if (num_tcs > 4) { /* * TCs : TC0/1 TC2/3 TC4-7 @@ -168,12 +191,11 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, **/ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) { - struct net_device *dev = adapter->netdev; - unsigned int tx_idx, rx_idx; int tc, offset, rss_i, i; + unsigned int tx_idx, rx_idx; + struct net_device *dev = adapter->netdev; u8 num_tcs = netdev_get_num_tc(dev); - /* verify we have DCB queueing enabled before proceeding */ if (num_tcs <= 1) return false; @@ -192,20 +214,21 @@ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) return true; } -#endif +#endif /* HAVE_TX_MQ */ /** - * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov + * ixgbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq * @adapter: board private structure to initialize * - * SR-IOV doesn't use any descriptor rings but changes the default if - * no other mapping is used. + * Cache the descriptor ring offsets for VMDq to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE/SRIOV are enabled along + * with VMDq. 
* - */ -static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) + **/ +static bool ixgbe_cache_ring_vmdq(struct ixgbe_adapter *adapter) { -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; -#endif /* IXGBE_FCOE */ +#endif /* CONFIG_FCOE */ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; int i; @@ -218,26 +241,26 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) /* start at VMDq register offset for SR-IOV enabled setups */ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* Allow first FCoE queue to be mapped as RSS */ if (fcoe->offset && (i > fcoe->offset)) break; -#endif +#endif /* CONFIG_FCOE */ /* If we are greater than indices move to next pool */ if ((reg_idx & ~vmdq->mask) >= rss->indices) reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); adapter->rx_ring[i]->reg_idx = reg_idx; } -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* FCoE uses a linear block of queues so just assigning 1:1 */ for (; i < adapter->num_rx_queues; i++, reg_idx++) adapter->rx_ring[i]->reg_idx = reg_idx; +#endif /* CONFIG_FCOE */ -#endif reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* Allow first FCoE queue to be mapped as RSS */ if (fcoe->offset && (i > fcoe->offset)) break; @@ -248,12 +271,11 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) adapter->tx_ring[i]->reg_idx = reg_idx; } -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* FCoE uses a linear block of queues so just assigning 1:1 */ for (; i < adapter->num_tx_queues; i++, reg_idx++) adapter->tx_ring[i]->reg_idx = reg_idx; - -#endif +#endif /* CONFIG_FCOE */ return true; } @@ -262,7 +284,7 @@ static 
bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS * @adapter: board private structure to initialize * - * Cache the descriptor ring offsets for RSS to the assigned rings. + * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. * **/ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) @@ -271,6 +293,7 @@ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->reg_idx = i; + for (i = 0; i < adapter->num_tx_queues; i++) adapter->tx_ring[i]->reg_idx = i; @@ -290,19 +313,15 @@ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) **/ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) { - /* start with default case */ - adapter->rx_ring[0]->reg_idx = 0; - adapter->tx_ring[0]->reg_idx = 0; - -#ifdef CONFIG_IXGBE_DCB - if (ixgbe_cache_ring_dcb_sriov(adapter)) +#ifdef HAVE_TX_MQ + if (ixgbe_cache_ring_dcb_vmdq(adapter)) return; if (ixgbe_cache_ring_dcb(adapter)) return; #endif - if (ixgbe_cache_ring_sriov(adapter)) + if (ixgbe_cache_ring_vmdq(adapter)) return; ixgbe_cache_ring_rss(adapter); @@ -315,54 +334,73 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) #define IXGBE_RSS_2Q_MASK 0x1 #define IXGBE_RSS_DISABLED_MASK 0x0 -#ifdef CONFIG_IXGBE_DCB +#ifdef HAVE_TX_MQ /** - * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB + * ixgbe_set_dcb_vmdq_queues: Allocate queues for VMDq devices w/ DCB * @adapter: board private structure to initialize * - * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues * and VM pools where appropriate. Also assign queues based on DCB * priorities and map accordingly.. 
* **/ -static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) +static bool ixgbe_set_dcb_vmdq_queues(struct ixgbe_adapter *adapter) { int i; u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; u16 vmdq_m = 0; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) u16 fcoe_i = 0; #endif u8 tcs = netdev_get_num_tc(adapter->netdev); - /* verify we have DCB queueing enabled before proceeding */ + /* verify we have DCB enabled before proceeding */ if (tcs <= 1) return false; /* verify we have VMDq enabled before proceeding */ - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) return false; - /* Add starting offset to total pool count */ - vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + /* 4 pools w/ 8TC per pool */ + vmdq_i = min_t(u16, vmdq_i, 4); + vmdq_m = 0x7; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* 16 pools w/ 8 TC per pool */ + if (tcs > 4) { + vmdq_i = min_t(u16, vmdq_i, 16); + vmdq_m = IXGBE_82599_VMDQ_8Q_MASK; + /* 32 pools w/ 4 TC per pool */ + } else { + vmdq_i = min_t(u16, vmdq_i, 32); + vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; + } - /* 16 pools w/ 8 TC per pool */ - if (tcs > 4) { - vmdq_i = min_t(u16, vmdq_i, 16); - vmdq_m = IXGBE_82599_VMDQ_8Q_MASK; - /* 32 pools w/ 4 TC per pool */ - } else { - vmdq_i = min_t(u16, vmdq_i, 32); - vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; - } +#if IS_ENABLED(CONFIG_FCOE) + /* queues in the remaining pools are available for FCoE */ + fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; +#endif /* CONFIG_FCOE */ -#ifdef IXGBE_FCOE - /* queues in the remaining pools are available for FCoE */ - fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; + /* remove the starting offset from the pool count */ + vmdq_i -= 
adapter->ring_feature[RING_F_VMDQ].offset; -#endif - /* remove the starting offset from the pool count */ - vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + break; + default: + /* unknown hardware, only support one pool w/ one queue */ + vmdq_i = 1; + tcs = 1; + break; + } /* save features for later use */ adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; @@ -375,16 +413,16 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) adapter->ring_feature[RING_F_RSS].indices = 1; adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK; - /* disable ATR as it is not supported when VMDq is enabled */ - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->num_rx_pools = vmdq_i; adapter->num_rx_queues_per_pool = tcs; adapter->num_tx_queues = vmdq_i * tcs; adapter->num_rx_queues = vmdq_i * tcs; -#ifdef IXGBE_FCOE + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + +#if IS_ENABLED(CONFIG_FCOE) if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { struct ixgbe_ring_feature *fcoe; @@ -412,8 +450,8 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) fcoe->offset = 0; } } +#endif /* CONFIG_FCOE */ -#endif /* IXGBE_FCOE */ /* configure TC to queue mapping */ for (i = 0; i < tcs; i++) netdev_set_tc_queue(adapter->netdev, i, 1, i); @@ -421,6 +459,17 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) return true; } +/** + * ixgbe_set_dcb_queues: Allocate queues for a DCB-enabled device + * @adapter: board private structure to initialize + * + * When DCB (Data Center Bridging) is enabled, allocate queues for + * each traffic class. If multiqueue isn't available,then abort DCB + * initialization. + * + * This function handles all combinations of DCB, RSS, and FCoE. 
+ * + **/ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) { struct net_device *dev = adapter->netdev; @@ -431,12 +480,15 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) /* Map queue offset and counts onto allocated tx queues */ tcs = netdev_get_num_tc(dev); - /* verify we have DCB queueing enabled before proceeding */ if (tcs <= 1) return false; /* determine the upper limit for our current DCB mode */ +#ifndef HAVE_NETDEV_SELECT_QUEUE + rss_i = adapter->indices; +#else rss_i = dev->num_tx_queues / tcs; +#endif if (adapter->hw.mac.type == ixgbe_mac_82598EB) { /* 8 TC w/ 4 queues per TC */ rss_i = min_t(u16, rss_i, 4); @@ -453,16 +505,17 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) /* set RSS mask and indices */ f = &adapter->ring_feature[RING_F_RSS]; - rss_i = min_t(int, rss_i, f->limit); + rss_i = min_t(u16, rss_i, f->limit); f->indices = rss_i; f->mask = rss_m; - /* disable ATR as it is not supported when multiple TCs are enabled */ + /* disable ATR as it is not supported when DCB is enabled */ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; -#ifdef IXGBE_FCOE - /* FCoE enabled queues require special configuration indexed - * by feature specific indices and offset. Here we map FCoE +#if IS_ENABLED(CONFIG_FCOE) + /* + * FCoE enabled queues require special configuration indexed + * by feature specific indices and mask. Here we map FCoE * indices onto the DCB queue pairs allowing FCoE to own * configuration later. 
*/ @@ -473,8 +526,8 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) f->indices = min_t(u16, rss_i, f->limit); f->offset = rss_i * tc; } +#endif /* CONFIG_FCOE */ -#endif /* IXGBE_FCOE */ for (i = 0; i < tcs; i++) netdev_set_tc_queue(dev, i, rss_i, rss_i * i); @@ -486,55 +539,81 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) #endif /** - * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices + * ixgbe_set_vmdq_queues: Allocate queues for VMDq devices * @adapter: board private structure to initialize * - * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues * and VM pools where appropriate. If RSS is available, then also try and * enable RSS and map accordingly. * **/ -static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) +static bool ixgbe_set_vmdq_queues(struct ixgbe_adapter *adapter) { u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; u16 vmdq_m = 0; u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; u16 rss_m = IXGBE_RSS_DISABLED_MASK; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) u16 fcoe_i = 0; #endif - bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); - /* only proceed if SR-IOV is enabled */ - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) return false; - /* Add starting offset to total pool count */ - vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; - - /* double check we are limited to maximum pools */ - vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + vmdq_i = min_t(u16, vmdq_i, 16); + /* 16 pool mode with 1 queue per pool */ + if ((vmdq_i > 4) || (rss_i == 1)) { + vmdq_m = 0x0F; + rss_i = 1; + /* 4 pool mode with 8 queue per pool */ + } else { + vmdq_m = 0x18; + rss_m = IXGBE_RSS_8Q_MASK; + rss_i = min_t(u16, rss_i, 8); 
+ } + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); + + /* 64 pool mode with 2 queues per pool */ + if (vmdq_i > 32) { + vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; + rss_m = IXGBE_RSS_2Q_MASK; + rss_i = min_t(u16, rss_i, 2); + /* 32 pool mode with up to 4 queues per pool */ + } else { + vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; + rss_m = IXGBE_RSS_4Q_MASK; + /* We can support 4, 2, or 1 queues */ + rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1; + } - /* 64 pool mode with 2 queues per pool */ - if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) { - vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; - rss_m = IXGBE_RSS_2Q_MASK; - rss_i = min_t(u16, rss_i, 2); - /* 32 pool mode with up to 4 queues per pool */ - } else { - vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; - rss_m = IXGBE_RSS_4Q_MASK; - /* We can support 4, 2, or 1 queues */ - rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 
2 : 1; - } +#if IS_ENABLED(CONFIG_FCOE) + /* queues in the remaining pools are available for FCoE */ + fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); +#endif -#ifdef IXGBE_FCOE - /* queues in the remaining pools are available for FCoE */ - fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; -#endif - /* remove the starting offset from the pool count */ - vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + break; + default: + /* unknown hardware, support one pool w/ one queue */ + vmdq_i = 1; + rss_i = 1; + break; + } /* save features for later use */ adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; @@ -548,12 +627,16 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) adapter->num_rx_queues_per_pool = rss_i; adapter->num_rx_queues = vmdq_i * rss_i; +#ifdef HAVE_TX_MQ adapter->num_tx_queues = vmdq_i * rss_i; +#else + adapter->num_tx_queues = vmdq_i; +#endif /* HAVE_TX_MQ */ /* disable ATR as it is not supported when VMDq is enabled */ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* * FCoE can use rings from adjacent buffers to allow RSS * like behavior. To account for this we need to add the @@ -582,7 +665,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) /* attempt to reserve some queues for just FCoE */ fcoe->indices = min_t(u16, fcoe_i, fcoe->limit); fcoe->offset = fcoe_i - fcoe->indices; - fcoe_i -= rss_i; } @@ -590,13 +672,13 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) adapter->num_tx_queues += fcoe_i; adapter->num_rx_queues += fcoe_i; } +#endif /* CONFIG_FCOE */ -#endif return true; } /** - * ixgbe_set_rss_queues - Allocate queues for RSS + * ixgbe_set_rss_queues: Allocate queues for RSS * @adapter: board private structure to initialize * * This is our "base" multiqueue mode. 
RSS (Receive Side Scaling) will try @@ -614,7 +696,6 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) rss_i = f->limit; f->indices = rss_i; - if (hw->mac.type < ixgbe_mac_X550) f->mask = IXGBE_RSS_16Q_MASK; else @@ -637,7 +718,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; } -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* * FCoE can exist on the same rings as standard network traffic * however it is preferred to avoid that if possible. In order @@ -665,16 +746,18 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) f->offset = fcoe_i - f->indices; rss_i = max_t(u16, fcoe_i, rss_i); } +#endif /* CONFIG_FCOE */ -#endif /* IXGBE_FCOE */ adapter->num_rx_queues = rss_i; +#ifdef HAVE_TX_MQ adapter->num_tx_queues = rss_i; +#endif return true; } -/** - * ixgbe_set_num_queues - Allocate queues for device, feature dependent +/* + * ixgbe_set_num_queues: Allocate queues for device, feature dependent * @adapter: board private structure to initialize * * This is the top level queue allocation routine. 
The order here is very @@ -692,15 +775,15 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) adapter->num_rx_pools = adapter->num_rx_queues; adapter->num_rx_queues_per_pool = 1; -#ifdef CONFIG_IXGBE_DCB - if (ixgbe_set_dcb_sriov_queues(adapter)) +#ifdef HAVE_TX_MQ + if (ixgbe_set_dcb_vmdq_queues(adapter)) return; if (ixgbe_set_dcb_queues(adapter)) return; #endif - if (ixgbe_set_sriov_queues(adapter)) + if (ixgbe_set_vmdq_queues(adapter)) return; ixgbe_set_rss_queues(adapter); @@ -719,6 +802,9 @@ static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; int i, vectors, vector_threshold; + if (!(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE)) + return -EOPNOTSUPP; + /* We start by asking for one vector per queue pair */ vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); @@ -759,8 +845,7 @@ static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter) if (vectors < 0) { /* A negative count of allocated vectors indicates an error in - * acquiring within the specified range of MSI-X vectors - */ + * acquiring within the specified range of MSI-X vectors */ e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", vectors); @@ -785,6 +870,7 @@ static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter) return 0; } + static void ixgbe_add_ring(struct ixgbe_ring *ring, struct ixgbe_ring_container *head) { @@ -806,23 +892,27 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring, * We allocate one q_vector. If allocation fails we return -ENOMEM. 
**/ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, - int v_count, int v_idx, - int txr_count, int txr_idx, - int rxr_count, int rxr_idx) + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) { struct ixgbe_q_vector *q_vector; struct ixgbe_ring *ring; - int node = NUMA_NO_NODE; + int node = -1; +#ifdef HAVE_IRQ_AFFINITY_HINT int cpu = -1; - int ring_count, size; u8 tcs = netdev_get_num_tc(adapter->netdev); +#endif + int ring_count, size; + /* note this will allocate space for the ring structure as well! */ ring_count = txr_count + rxr_count; size = sizeof(struct ixgbe_q_vector) + (sizeof(struct ixgbe_ring) * ring_count); +#ifdef HAVE_IRQ_AFFINITY_HINT /* customize cpu for Flow Director mapping */ - if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { + if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) { u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; if (rss_i > 1 && adapter->atr_sample_rate) { if (cpu_online(v_idx)) { @@ -832,6 +922,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, } } +#endif /* allocate q_vector and rings */ q_vector = kzalloc_node(size, GFP_KERNEL, node); if (!q_vector) @@ -840,20 +931,25 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, return -ENOMEM; /* setup affinity mask and node */ +#ifdef HAVE_IRQ_AFFINITY_HINT if (cpu != -1) cpumask_set_cpu(cpu, &q_vector->affinity_mask); +#endif q_vector->numa_node = node; -#ifdef CONFIG_IXGBE_DCA /* initialize CPU for DCA */ q_vector->cpu = -1; -#endif /* initialize NAPI */ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll, 64); +#ifndef HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#ifdef HAVE_NDO_BUSY_POLL + napi_hash_add(&q_vector->napi); +#endif +#endif -#ifdef CONFIG_NET_RX_BUSY_POLL +#ifdef HAVE_NDO_BUSY_POLL /* initialize busy poll */ atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE); @@ -865,6 +961,7 @@ static int 
ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, /* initialize work limits */ q_vector->tx.work_limit = adapter->tx_work_limit; + q_vector->rx.work_limit = adapter->rx_work_limit; /* initialize pointer to rings */ ring = q_vector->ring; @@ -886,7 +983,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, while (txr_count) { /* assign generic ring traits */ - ring->dev = &adapter->pdev->dev; + ring->dev = pci_dev_to_dev(adapter->pdev); ring->netdev = adapter->netdev; /* configure backlink on ring */ @@ -897,11 +994,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, /* apply Tx specific ring traits */ ring->count = adapter->tx_ring_count; - if (adapter->num_rx_pools > 1) - ring->queue_index = - txr_idx % adapter->num_rx_queues_per_pool; - else - ring->queue_index = txr_idx; + ring->queue_index = txr_idx; /* assign ring to adapter */ adapter->tx_ring[txr_idx] = ring; @@ -916,7 +1009,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, while (rxr_count) { /* assign generic ring traits */ - ring->dev = &adapter->pdev->dev; + ring->dev = pci_dev_to_dev(adapter->pdev); ring->netdev = adapter->netdev; /* configure backlink on ring */ @@ -932,23 +1025,21 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, if (adapter->hw.mac.type == ixgbe_mac_82599EB) set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); -#ifdef IXGBE_FCOE - if (adapter->netdev->features & NETIF_F_FCOE_MTU) { +#if IS_ENABLED(CONFIG_FCOE) + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { struct ixgbe_ring_feature *f; f = &adapter->ring_feature[RING_F_FCOE]; + if ((rxr_idx >= f->offset) && - (rxr_idx < f->offset + f->indices)) + (rxr_idx < f->offset + f->indices)) { set_bit(__IXGBE_RX_FCOE, &ring->state); + } } +#endif /* CONFIG_FCOE */ -#endif /* IXGBE_FCOE */ /* apply Rx specific ring traits */ ring->count = adapter->rx_ring_count; - if (adapter->num_rx_pools > 1) - ring->queue_index = - rxr_idx % adapter->num_rx_queues_per_pool; - else - 
ring->queue_index = rxr_idx; + ring->queue_index = rxr_idx; /* assign ring to adapter */ adapter->rx_ring[rxr_idx] = ring; @@ -985,13 +1076,10 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) adapter->rx_ring[ring->queue_index] = NULL; adapter->q_vector[v_idx] = NULL; +#ifdef HAVE_NDO_BUSY_POLL napi_hash_del(&q_vector->napi); +#endif netif_napi_del(&q_vector->napi); - - /* - * ixgbe_get_stats64() might access the rings on this vector, - * we must wait a grace period before freeing it. - */ kfree_rcu(q_vector, rcu); } @@ -1004,21 +1092,16 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) **/ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) { - int q_vectors = adapter->num_q_vectors; - int rxr_remaining = adapter->num_rx_queues; - int txr_remaining = adapter->num_tx_queues; - int rxr_idx = 0, txr_idx = 0, v_idx = 0; + unsigned int q_vectors = adapter->num_q_vectors; + unsigned int rxr_remaining = adapter->num_rx_queues; + unsigned int txr_remaining = adapter->num_tx_queues; + unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; int err; - /* only one q_vector if MSI-X is disabled. 
*/ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) - q_vectors = 1; - if (q_vectors >= (rxr_remaining + txr_remaining)) { for (; rxr_remaining; v_idx++) { err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, 0, 0, 1, rxr_idx); - if (err) goto err_out; @@ -1045,7 +1128,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) txr_idx++; } - return 0; + return IXGBE_SUCCESS; err_out: adapter->num_tx_queues = 0; @@ -1078,7 +1161,7 @@ static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) ixgbe_free_q_vector(adapter, v_idx); } -static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) +void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) { if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; @@ -1098,7 +1181,7 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) * Attempt to configure the interrupts using the best available * capabilities of the hardware and the kernel. **/ -static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) +void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) { int err; @@ -1127,9 +1210,15 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) adapter->dcb_cfg.num_tcs.pg_tcs = 1; adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + /* Disable VMDq support */ + e_dev_warn("Disabling VMQd support\n"); + adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; + +#ifdef CONFIG_PCI_IOV /* Disable SR-IOV support */ e_dev_warn("Disabling SR-IOV support\n"); ixgbe_disable_sriov(adapter); +#endif /* CONFIG_PCI_IOV */ /* Disable RSS */ e_dev_warn("Disabling RSS support\n"); @@ -1141,6 +1230,9 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) ixgbe_set_num_queues(adapter); adapter->num_q_vectors = 1; + if (!(adapter->flags & IXGBE_FLAG_MSI_CAPABLE)) + return; + err = pci_enable_msi(adapter->pdev); if (err) e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. 
Error: %d\n", @@ -1169,25 +1261,19 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) /* Set interrupt mode */ ixgbe_set_interrupt_capability(adapter); + /* Allocate memory for queues */ err = ixgbe_alloc_q_vectors(adapter); if (err) { - e_dev_err("Unable to allocate memory for queue vectors\n"); - goto err_alloc_q_vectors; + e_err(probe, "Unable to allocate memory for queue vectors\n"); + ixgbe_reset_interrupt_capability(adapter); + return err; } ixgbe_cache_ring_register(adapter); - e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", - (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", - adapter->num_rx_queues, adapter->num_tx_queues); - set_bit(__IXGBE_DOWN, &adapter->state); - return 0; - -err_alloc_q_vectors: - ixgbe_reset_interrupt_capability(adapter); - return err; + return IXGBE_SUCCESS; } /** @@ -1199,9 +1285,6 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) **/ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) { - adapter->num_tx_queues = 0; - adapter->num_rx_queues = 0; - ixgbe_free_q_vectors(adapter); ixgbe_reset_interrupt_capability(adapter); } diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_main.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_main.c index a5e3b62491e6..613c69250198 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_main.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_main.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -26,68 +22,70 @@ *******************************************************************************/ +/****************************************************************************** + Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code +******************************************************************************/ #include #include #include #include #include +#include #include #include -#include #include #include -#include #include #include -#include +#ifdef NETIF_F_TSO #include +#ifdef NETIF_F_TSO6 +#include #include -#include +#endif /* NETIF_F_TSO6 */ +#endif /* NETIF_F_TSO */ +#ifdef SIOCETHTOOL #include -#include -#include -#include +#endif + #include -#include -#include +#include "ixgbe.h" +#ifdef HAVE_UDP_ENC_RX_OFFLOAD #include -#include -#include -#include +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ +#ifdef HAVE_VXLAN_RX_OFFLOAD +#include +#endif /* HAVE_VXLAN_RX_OFFLOAD */ -#include "ixgbe.h" -#include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_sriov.h" -#include "ixgbe_model.h" +#define DRV_HW_PERF + +#define FPGA + +#define DRIVERIOV + +#define BYPASS_TAG + +#define RELEASE_TAG + +#define DRV_VERSION "5.2.4" \ + DRIVERIOV DRV_HW_PERF FPGA \ + BYPASS_TAG RELEASE_TAG +#define DRV_SUMMARY "Intel(R) 10GbE PCI Express Linux Network Driver" +const char ixgbe_driver_version[] = DRV_VERSION; +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME char ixgbe_driver_name[] = "ixgbe"; -static const char 
ixgbe_driver_string[] = - "Intel(R) 10 Gigabit PCI Express Network Driver"; -#ifdef IXGBE_FCOE -char ixgbe_default_device_descr[] = - "Intel(R) 10 Gigabit Network Connection"; #else -static char ixgbe_default_device_descr[] = - "Intel(R) 10 Gigabit Network Connection"; +const char ixgbe_driver_name[] = "ixgbe"; #endif -#define DRV_VERSION "4.4.0-k" -const char ixgbe_driver_version[] = DRV_VERSION; -static const char ixgbe_copyright[] = - "Copyright (c) 1999-2016 Intel Corporation."; - -static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter"; - -static const struct ixgbe_info *ixgbe_info_tbl[] = { - [board_82598] = &ixgbe_82598_info, - [board_82599] = &ixgbe_82599_info, - [board_X540] = &ixgbe_X540_info, - [board_X550] = &ixgbe_X550_info, - [board_X550EM_x] = &ixgbe_X550EM_x_info, - [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info, - [board_x550em_a] = &ixgbe_x550em_a_info, - [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info, -}; +static const char ixgbe_driver_string[] = DRV_SUMMARY; +static const char ixgbe_copyright[] = "Copyright(c) 1999 - 2017 Intel Corporation."; +static const char ixgbe_overheat_msg[] = + "Network adapter has been stopped because it has over heated. " + "Restart the computer. 
If the problem persists, " + "power off the system and replace the adapter"; /* ixgbe_pci_tbl - PCI Device ID Table * @@ -98,122 +96,119 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { * Class, Class Mask, private data (not used) } */ static const struct pci_device_id ixgbe_pci_tbl[] = { - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 
}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), 0}, + {PCI_VDEVICE(INTEL, 
IXGBE_DEV_ID_82598EB_SFP_LOM), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_QSFP), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_QSFP_N), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), 0}, + {PCI_VDEVICE(INTEL, 
IXGBE_DEV_ID_X550EM_A_1G_T), 0}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), 0}, /* required last entry */ - {0, } + { .device = 0 } }; MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); -#ifdef CONFIG_IXGBE_DCA +#if IS_ENABLED(CONFIG_DCA) static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, void *p); static struct notifier_block dca_notifier = { - .notifier_call = ixgbe_notify_dca, - .next = NULL, - .priority = 0 + .notifier_call = ixgbe_notify_dca, + .next = NULL, + .priority = 0 }; -#endif - -#ifdef CONFIG_PCI_IOV -static unsigned int max_vfs; -module_param(max_vfs, uint, 0); -MODULE_PARM_DESC(max_vfs, - "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)"); -#endif /* CONFIG_PCI_IOV */ - -static unsigned int allow_unsupported_sfp; -module_param(allow_unsupported_sfp, uint, 0); -MODULE_PARM_DESC(allow_unsupported_sfp, - "Allow unsupported and untested SFP+ modules on 82599-based adapters"); - -#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) -static int debug = -1; -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +#endif /* CONFIG_DCA */ +static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); MODULE_AUTHOR("Intel Corporation, "); -MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); +MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + static struct workqueue_struct *ixgbe_wq; +static bool ixgbe_is_sfp(struct ixgbe_hw *hw); static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); -static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); -static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, +static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_hw *hw, u32 reg, u16 *value) { + struct ixgbe_adapter *adapter = hw->back; struct pci_dev *parent_dev; struct 
pci_bus *parent_bus; + int pos; parent_bus = adapter->pdev->bus->parent; if (!parent_bus) - return -1; + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; parent_dev = parent_bus->self; if (!parent_dev) - return -1; + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; - if (!pci_is_pcie(parent_dev)) - return -1; + pos = pci_find_capability(parent_dev, PCI_CAP_ID_EXP); + if (!pos) + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; - pcie_capability_read_word(parent_dev, reg, value); + pci_read_config_word(parent_dev, pos + reg, value); if (*value == IXGBE_FAILED_READ_CFG_WORD && - ixgbe_check_cfg_remove(&adapter->hw, parent_dev)) - return -1; - return 0; + ixgbe_check_cfg_remove(hw, parent_dev)) + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; + return IXGBE_SUCCESS; } -static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) +/** + * ixgbe_get_parent_bus_info - Set PCI bus info beyond switch + * @hw: pointer to hardware structure + * + * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure + * when the device is behind a switch. 
+ **/ +static s32 ixgbe_get_parent_bus_info(struct ixgbe_hw *hw) { - struct ixgbe_hw *hw = &adapter->hw; u16 link_status = 0; int err; @@ -222,32 +217,30 @@ static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) /* Get the negotiated link width and speed from PCI config space of the * parent, as this device is behind a switch */ - err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status); + err = ixgbe_read_pci_cfg_word_parent(hw, 18, &link_status); - /* assume caller will handle error case */ + /* If the read fails, fallback to default */ if (err) - return err; + link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); - hw->bus.width = ixgbe_convert_bus_width(link_status); - hw->bus.speed = ixgbe_convert_bus_speed(link_status); + ixgbe_set_pci_config_data_generic(hw, link_status); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_check_from_parent - Determine whether PCIe info should come from parent + * ixgbe_check_from_parent - determine whether to use parent for PCIe info * @hw: hw specific details * - * This function is used by probe to determine whether a device's PCI-Express - * bandwidth details should be gathered from the parent bus instead of from the - * device. Used to ensure that various locations all have the correct device ID - * checks. + * This function is used by probe to determine whether a device's PCIe info + * (speed, width, etc) should be obtained from the parent bus or directly. This + * is useful for specialized device configurations containing PCIe bridges. 
*/ static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) { switch (hw->device_id) { - case IXGBE_DEV_ID_82599_SFP_SF_QP: case IXGBE_DEV_ID_82599_QSFP_SF_QP: + case IXGBE_DEV_ID_82599_SFP_SF_QP: return true; default: return false; @@ -292,7 +285,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, max_gts = 4 * width; break; case PCIE_SPEED_8_0GT: - /* 128b/130b encoding reduces throughput by less than 2% */ + /* 128b/130b encoding has less than 2% impact on throughput */ max_gts = 8 * width; break; default: @@ -307,7 +300,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : "Unknown"), - width, + hw->bus.width, (speed == PCIE_SPEED_2_5GT ? "20%" : speed == PCIE_SPEED_5_0GT ? "20%" : speed == PCIE_SPEED_8_0GT ? "<2%" : @@ -321,14 +314,68 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, } } +/** + * ixgbe_enumerate_functions - Get the number of ports this device has + * @adapter: adapter structure + * + * This function enumerates the phsyical functions co-located on a single slot, + * in order to determine how many ports a device has. This is most useful in + * determining the required GT/s of PCIe bandwidth necessary for optimal + * performance. + **/ +static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) +{ + struct pci_dev *entry, *pdev = adapter->pdev; + int physfns = 0; + + /* Some cards can not use the generic count PCIe functions method, + * because they are behind a parent switch, so we hardcode these to + * correct number of ports. 
+ */ + if (ixgbe_pcie_from_parent(&adapter->hw)) { + physfns = 4; + } else { + list_for_each_entry(entry, &pdev->bus->devices, bus_list) { +#ifdef CONFIG_PCI_IOV + /* don't count virtual functions */ + if (entry->is_virtfn) + continue; +#endif + + /* When the devices on the bus don't all match our device ID, + * we can't reliably determine the correct number of + * functions. This can occur if a function has been direct + * attached to a virtual machine using VT-d, for example. In + * this case, simply return -1 to indicate this. + */ + if ((entry->vendor != pdev->vendor) || + (entry->device != pdev->device)) + return -1; + + physfns++; + } + } + + return physfns; +} + static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) { if (!test_bit(__IXGBE_DOWN, &adapter->state) && - !test_bit(__IXGBE_REMOVING, &adapter->state) && + !test_bit(__IXGBE_REMOVE, &adapter->state) && !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) queue_work(ixgbe_wq, &adapter->service_task); } +static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) +{ + BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchog */ + smp_mb__before_atomic(); + clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); +} + static void ixgbe_remove_adapter(struct ixgbe_hw *hw) { struct ixgbe_adapter *adapter = hw->back; @@ -355,30 +402,44 @@ static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) ixgbe_remove_adapter(hw); return; } - value = ixgbe_read_reg(hw, IXGBE_STATUS); + value = IXGBE_READ_REG(hw, IXGBE_STATUS); if (value == IXGBE_FAILED_READ_REG) ixgbe_remove_adapter(hw); } -/** - * ixgbe_read_reg - Read from device register - * @hw: hw specific details - * @reg: offset of register to read - * - * Returns : value read or IXGBE_FAILED_READ_REG if removed - * - * This function is used to read device registers. 
It checks for device - * removal by confirming any read that returns all ones by checking the - * status register value for all ones. This function avoids reading from - * the hardware if a removal was previously detected in which case it - * returns IXGBE_FAILED_READ_REG (all ones). - */ -u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) +static u32 +ixgbe_validate_register_read(struct ixgbe_hw *_hw, u32 reg, bool quiet) +{ + int i; + u32 value; + u8 __iomem *reg_addr; + struct ixgbe_adapter *adapter = _hw->back; + + reg_addr = ACCESS_ONCE(_hw->hw_addr); + if (IXGBE_REMOVED(reg_addr)) + return IXGBE_FAILED_READ_REG; + for (i = 0; i < IXGBE_DEAD_READ_RETRIES; ++i) { + value = readl(reg_addr + reg); + if (value != IXGBE_DEAD_READ_REG) + break; + } + if (quiet) + return value; + if (value == IXGBE_DEAD_READ_REG) + e_err(drv, "%s: register %x read unchanged\n", __func__, reg); + else + e_warn(hw, "%s: register %x read recovered after %d retries\n", + __func__, reg, i + 1); + return value; +} + +u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg, bool quiet) { - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); u32 value; + u8 __iomem *reg_addr; - if (ixgbe_removed(reg_addr)) + reg_addr = ACCESS_ONCE(hw->hw_addr); + if (IXGBE_REMOVED(reg_addr)) return IXGBE_FAILED_READ_REG; if (unlikely(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) { @@ -404,452 +465,11 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) value = readl(reg_addr + reg); if (unlikely(value == IXGBE_FAILED_READ_REG)) ixgbe_check_remove(hw, reg); + if (unlikely(value == IXGBE_DEAD_READ_REG)) + value = ixgbe_validate_register_read(hw, reg, quiet); return value; } -static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev) -{ - u16 value; - - pci_read_config_word(pdev, PCI_VENDOR_ID, &value); - if (value == IXGBE_FAILED_READ_CFG_WORD) { - ixgbe_remove_adapter(hw); - return true; - } - return false; -} - -u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg) -{ - struct 
ixgbe_adapter *adapter = hw->back; - u16 value; - - if (ixgbe_removed(hw->hw_addr)) - return IXGBE_FAILED_READ_CFG_WORD; - pci_read_config_word(adapter->pdev, reg, &value); - if (value == IXGBE_FAILED_READ_CFG_WORD && - ixgbe_check_cfg_remove(hw, adapter->pdev)) - return IXGBE_FAILED_READ_CFG_WORD; - return value; -} - -#ifdef CONFIG_PCI_IOV -static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg) -{ - struct ixgbe_adapter *adapter = hw->back; - u32 value; - - if (ixgbe_removed(hw->hw_addr)) - return IXGBE_FAILED_READ_CFG_DWORD; - pci_read_config_dword(adapter->pdev, reg, &value); - if (value == IXGBE_FAILED_READ_CFG_DWORD && - ixgbe_check_cfg_remove(hw, adapter->pdev)) - return IXGBE_FAILED_READ_CFG_DWORD; - return value; -} -#endif /* CONFIG_PCI_IOV */ - -void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value) -{ - struct ixgbe_adapter *adapter = hw->back; - - if (ixgbe_removed(hw->hw_addr)) - return; - pci_write_config_word(adapter->pdev, reg, value); -} - -static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) -{ - BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); - - /* flush memory to make sure state is correct before next watchdog */ - smp_mb__before_atomic(); - clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); -} - -struct ixgbe_reg_info { - u32 ofs; - char *name; -}; - -static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { - - /* General Registers */ - {IXGBE_CTRL, "CTRL"}, - {IXGBE_STATUS, "STATUS"}, - {IXGBE_CTRL_EXT, "CTRL_EXT"}, - - /* Interrupt Registers */ - {IXGBE_EICR, "EICR"}, - - /* RX Registers */ - {IXGBE_SRRCTL(0), "SRRCTL"}, - {IXGBE_DCA_RXCTRL(0), "DRXCTL"}, - {IXGBE_RDLEN(0), "RDLEN"}, - {IXGBE_RDH(0), "RDH"}, - {IXGBE_RDT(0), "RDT"}, - {IXGBE_RXDCTL(0), "RXDCTL"}, - {IXGBE_RDBAL(0), "RDBAL"}, - {IXGBE_RDBAH(0), "RDBAH"}, - - /* TX Registers */ - {IXGBE_TDBAL(0), "TDBAL"}, - {IXGBE_TDBAH(0), "TDBAH"}, - {IXGBE_TDLEN(0), "TDLEN"}, - {IXGBE_TDH(0), "TDH"}, - {IXGBE_TDT(0), "TDT"}, - 
{IXGBE_TXDCTL(0), "TXDCTL"}, - - /* List Terminator */ - { .name = NULL } -}; - - -/* - * ixgbe_regdump - register printout routine - */ -static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) -{ - int i = 0, j = 0; - char rname[16]; - u32 regs[64]; - - switch (reginfo->ofs) { - case IXGBE_SRRCTL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); - break; - case IXGBE_DCA_RXCTRL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); - break; - case IXGBE_RDLEN(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); - break; - case IXGBE_RDH(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); - break; - case IXGBE_RDT(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); - break; - case IXGBE_RXDCTL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); - break; - case IXGBE_RDBAL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); - break; - case IXGBE_RDBAH(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); - break; - case IXGBE_TDBAL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); - break; - case IXGBE_TDBAH(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); - break; - case IXGBE_TDLEN(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); - break; - case IXGBE_TDH(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); - break; - case IXGBE_TDT(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); - break; - case IXGBE_TXDCTL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); - break; - default: - pr_info("%-15s %08x\n", reginfo->name, - IXGBE_READ_REG(hw, reginfo->ofs)); - return; - } - - for (i = 0; i < 8; i++) { - snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7); - 
pr_err("%-15s", rname); - for (j = 0; j < 8; j++) - pr_cont(" %08x", regs[i*8+j]); - pr_cont("\n"); - } - -} - -/* - * ixgbe_dump - Print registers, tx-rings and rx-rings - */ -static void ixgbe_dump(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_reg_info *reginfo; - int n = 0; - struct ixgbe_ring *tx_ring; - struct ixgbe_tx_buffer *tx_buffer; - union ixgbe_adv_tx_desc *tx_desc; - struct my_u0 { u64 a; u64 b; } *u0; - struct ixgbe_ring *rx_ring; - union ixgbe_adv_rx_desc *rx_desc; - struct ixgbe_rx_buffer *rx_buffer_info; - u32 staterr; - int i = 0; - - if (!netif_msg_hw(adapter)) - return; - - /* Print netdevice Info */ - if (netdev) { - dev_info(&adapter->pdev->dev, "Net device Info\n"); - pr_info("Device Name state " - "trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", - netdev->name, - netdev->state, - dev_trans_start(netdev), - netdev->last_rx); - } - - /* Print Registers */ - dev_info(&adapter->pdev->dev, "Register Dump\n"); - pr_info(" Register Name Value\n"); - for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; - reginfo->name; reginfo++) { - ixgbe_regdump(hw, reginfo); - } - - /* Print TX Ring Summary */ - if (!netdev || !netif_running(netdev)) - return; - - dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); - pr_info(" %s %s %s %s\n", - "Queue [NTU] [NTC] [bi(ntc)->dma ]", - "leng", "ntw", "timestamp"); - for (n = 0; n < adapter->num_tx_queues; n++) { - tx_ring = adapter->tx_ring[n]; - tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; - pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", - n, tx_ring->next_to_use, tx_ring->next_to_clean, - (u64)dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - tx_buffer->next_to_watch, - (u64)tx_buffer->time_stamp); - } - - /* Print TX Rings */ - if (!netif_msg_tx_done(adapter)) - goto rx_ring_summary; - - dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); - - /* Transmit Descriptor 
Formats - * - * 82598 Advanced Transmit Descriptor - * +--------------------------------------------------------------+ - * 0 | Buffer Address [63:0] | - * +--------------------------------------------------------------+ - * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | - * +--------------------------------------------------------------+ - * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 - * - * 82598 Advanced Transmit Descriptor (Write-Back Format) - * +--------------------------------------------------------------+ - * 0 | RSV [63:0] | - * +--------------------------------------------------------------+ - * 8 | RSV | STA | NXTSEQ | - * +--------------------------------------------------------------+ - * 63 36 35 32 31 0 - * - * 82599+ Advanced Transmit Descriptor - * +--------------------------------------------------------------+ - * 0 | Buffer Address [63:0] | - * +--------------------------------------------------------------+ - * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN | - * +--------------------------------------------------------------+ - * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0 - * - * 82599+ Advanced Transmit Descriptor (Write-Back Format) - * +--------------------------------------------------------------+ - * 0 | RSV [63:0] | - * +--------------------------------------------------------------+ - * 8 | RSV | STA | RSV | - * +--------------------------------------------------------------+ - * 63 36 35 32 31 0 - */ - - for (n = 0; n < adapter->num_tx_queues; n++) { - tx_ring = adapter->tx_ring[n]; - pr_info("------------------------------------\n"); - pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); - pr_info("------------------------------------\n"); - pr_info("%s%s %s %s %s %s\n", - "T [desc] [address 63:0 ] ", - "[PlPOIdStDDt Ln] [bi->dma ] ", - "leng", "ntw", "timestamp", "bi->skb"); - - for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { - tx_desc = IXGBE_TX_DESC(tx_ring, i); - tx_buffer = 
&tx_ring->tx_buffer_info[i]; - u0 = (struct my_u0 *)tx_desc; - if (dma_unmap_len(tx_buffer, len) > 0) { - pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p", - i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - (u64)dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - tx_buffer->next_to_watch, - (u64)tx_buffer->time_stamp, - tx_buffer->skb); - if (i == tx_ring->next_to_use && - i == tx_ring->next_to_clean) - pr_cont(" NTC/U\n"); - else if (i == tx_ring->next_to_use) - pr_cont(" NTU\n"); - else if (i == tx_ring->next_to_clean) - pr_cont(" NTC\n"); - else - pr_cont("\n"); - - if (netif_msg_pktdata(adapter) && - tx_buffer->skb) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, 16, 1, - tx_buffer->skb->data, - dma_unmap_len(tx_buffer, len), - true); - } - } - } - - /* Print RX Rings Summary */ -rx_ring_summary: - dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); - pr_info("Queue [NTU] [NTC]\n"); - for (n = 0; n < adapter->num_rx_queues; n++) { - rx_ring = adapter->rx_ring[n]; - pr_info("%5d %5X %5X\n", - n, rx_ring->next_to_use, rx_ring->next_to_clean); - } - - /* Print RX Rings */ - if (!netif_msg_rx_status(adapter)) - return; - - dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); - - /* Receive Descriptor Formats - * - * 82598 Advanced Receive Descriptor (Read) Format - * 63 1 0 - * +-----------------------------------------------------+ - * 0 | Packet Buffer Address [63:1] |A0/NSE| - * +----------------------------------------------+------+ - * 8 | Header Buffer Address [63:1] | DD | - * +-----------------------------------------------------+ - * - * - * 82598 Advanced Receive Descriptor (Write-Back) Format - * - * 63 48 47 32 31 30 21 20 16 15 4 3 0 - * +------------------------------------------------------+ - * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS | - * | Packet | IP | | | | Type | Type | - * | Checksum | Ident | | | | | | - * +------------------------------------------------------+ - * 8 | VLAN Tag | Length | Extended 
Error | Extended Status | - * +------------------------------------------------------+ - * 63 48 47 32 31 20 19 0 - * - * 82599+ Advanced Receive Descriptor (Read) Format - * 63 1 0 - * +-----------------------------------------------------+ - * 0 | Packet Buffer Address [63:1] |A0/NSE| - * +----------------------------------------------+------+ - * 8 | Header Buffer Address [63:1] | DD | - * +-----------------------------------------------------+ - * - * - * 82599+ Advanced Receive Descriptor (Write-Back) Format - * - * 63 48 47 32 31 30 21 20 17 16 4 3 0 - * +------------------------------------------------------+ - * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS | - * |/ RTT / PCoE_PARAM | | | CNT | Type | Type | - * |/ Flow Dir Flt ID | | | | | | - * +------------------------------------------------------+ - * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP | - * +------------------------------------------------------+ - * 63 48 47 32 31 20 19 0 - */ - - for (n = 0; n < adapter->num_rx_queues; n++) { - rx_ring = adapter->rx_ring[n]; - pr_info("------------------------------------\n"); - pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); - pr_info("------------------------------------\n"); - pr_info("%s%s%s", - "R [desc] [ PktBuf A0] ", - "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", - "<-- Adv Rx Read format\n"); - pr_info("%s%s%s", - "RWB[desc] [PcsmIpSHl PtRs] ", - "[vl er S cks ln] ---------------- [bi->skb ] ", - "<-- Adv Rx Write-Back format\n"); - - for (i = 0; i < rx_ring->count; i++) { - rx_buffer_info = &rx_ring->rx_buffer_info[i]; - rx_desc = IXGBE_RX_DESC(rx_ring, i); - u0 = (struct my_u0 *)rx_desc; - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - if (staterr & IXGBE_RXD_STAT_DD) { - /* Descriptor Done */ - pr_info("RWB[0x%03X] %016llX " - "%016llX ---------------- %p", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - rx_buffer_info->skb); - } else { - pr_info("R [0x%03X] %016llX " - "%016llX %016llX %p", i, - 
le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - (u64)rx_buffer_info->dma, - rx_buffer_info->skb); - - if (netif_msg_pktdata(adapter) && - rx_buffer_info->dma) { - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, 16, 1, - page_address(rx_buffer_info->page) + - rx_buffer_info->page_offset, - ixgbe_rx_bufsz(rx_ring), true); - } - } - - if (i == rx_ring->next_to_use) - pr_cont(" NTU\n"); - else if (i == rx_ring->next_to_clean) - pr_cont(" NTC\n"); - else - pr_cont("\n"); - - } - } -} - static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) { u32 ctrl_ext; @@ -870,7 +490,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); } -/** +/* * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors * @adapter: pointer to adapter struct * @direction: 0 for Rx, 1 for Tx, -1 for other causes @@ -898,7 +518,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; @@ -937,7 +557,7 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: mask = (qmask & 0xFFFFFFFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); mask = (qmask >> 32); @@ -967,7 +587,7 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring, tx_buffer->next_to_watch = NULL; tx_buffer->skb = NULL; dma_unmap_len_set(tx_buffer, len, 0); - /* tx_buffer must be completely set up in the transmit path */ + /* tx_buffer_info must be completely set up in the transmit path */ } static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) @@ -1004,13 +624,14 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; 
struct ixgbe_hw_stats *hwstats = &adapter->stats; u32 xoff[8] = {0}; - u8 tc; int i; bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; +#ifdef HAVE_DCBNL_IEEE if (adapter->ixgbe_ieee_pfc) pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); +#endif if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) { ixgbe_update_xoff_rx_lfc(adapter); return; @@ -1018,27 +639,22 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) /* update stats for each tc, only valid with PFC enabled */ for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { - u32 pxoffrxc; - switch (hw->mac.type) { case ixgbe_mac_82598EB: - pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); break; default: - pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); } - hwstats->pxoffrxc[i] += pxoffrxc; - /* Get the TC for given UP */ - tc = netdev_get_prio_tc_map(adapter->netdev, i); - xoff[tc] += pxoffrxc; + hwstats->pxoffrxc[i] += xoff[i]; } /* disarm tx queues that have received xoff frames */ for (i = 0; i < adapter->num_tx_queues; i++) { struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + u8 tc = tx_ring->dcb_tc; - tc = tx_ring->dcb_tc; - if (xoff[tc]) + if ((tc <= 7) && (xoff[tc])) clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); } } @@ -1050,31 +666,21 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) { - struct ixgbe_adapter *adapter; - struct ixgbe_hw *hw; - u32 head, tail; - - if (ring->l2_accel_priv) - adapter = ring->l2_accel_priv->real_adapter; - else - adapter = netdev_priv(ring->netdev); - - hw = &adapter->hw; - head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); - tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); + struct ixgbe_adapter *adapter = ring->q_vector->adapter; + struct ixgbe_hw *hw = &adapter->hw; - if (head != tail) - return (head < tail) ? 
- tail - head : (tail + ring->count - head); + u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); + u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); - return 0; + return ((head <= tail) ? tail : tail + ring->count) - head; } -static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) +static bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) { u32 tx_done = ixgbe_get_tx_completed(tx_ring); u32 tx_done_old = tx_ring->tx_stats.tx_done_old; u32 tx_pending = ixgbe_get_tx_pending(tx_ring); + bool ret = false; clear_check_for_tx_hang(tx_ring); @@ -1086,20 +692,22 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) * bit is cleared if a pause frame is received to remove * false hang detection due to PFC or 802.3x frames. By * requiring this to fail twice we avoid races with - * pfc clearing the ARMED bit and conditions where we + * PFC clearing the ARMED bit and conditions where we * run the check_tx_hang logic with a transmit completion * pending but without time to complete it yet. 
*/ - if (tx_done_old == tx_done && tx_pending) + if ((tx_done_old == tx_done) && tx_pending) { /* make sure it is true for two checks in a row */ - return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, - &tx_ring->state); - /* update completed stats and continue */ - tx_ring->tx_stats.tx_done_old = tx_done; - /* reset the countdown */ - clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); + ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, + &tx_ring->state); + } else { + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); + } - return false; + return ret; } /** @@ -1112,49 +720,46 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) /* Do the reset outside of interrupt context */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) { set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); - e_warn(drv, "initiating reset due to tx timeout\n"); ixgbe_service_event_schedule(adapter); } } /** - * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate + * ixgbe_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure **/ -static int ixgbe_tx_maxrate(struct net_device *netdev, - int queue_index, u32 maxrate) +static void ixgbe_tx_timeout(struct net_device *netdev) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u32 bcnrc_val = ixgbe_link_mbps(adapter); - - if (!maxrate) - return 0; - - /* Calculate the rate factor values to set */ - bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; - bcnrc_val /= maxrate; +struct ixgbe_adapter *adapter = netdev_priv(netdev); + bool real_tx_hang = false; + int i; - /* clear everything but the rate factor */ - bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | - IXGBE_RTTBCNRC_RF_DEC_MASK; - - /* enable the rate scheduler */ - bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; +#define TX_TIMEO_LIMIT 16000 + for (i = 0; i < adapter->num_tx_queues; i++) { + struct 
ixgbe_ring *tx_ring = adapter->tx_ring[i]; + if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) + real_tx_hang = true; + } - IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index); - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + if (real_tx_hang) { + ixgbe_tx_timeout_reset(adapter); + } else { + e_info(drv, "Fake Tx hang detected with timeout of %d " + "seconds\n", netdev->watchdog_timeo/HZ); - return 0; + /* fake Tx hang - increase the kernel timeout */ + if (netdev->watchdog_timeo < TX_TIMEO_LIMIT) + netdev->watchdog_timeo *= 2; + } } /** * ixgbe_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: structure containing interrupt and ring information * @tx_ring: tx ring to clean - * @napi_budget: Used to determine if we are in netpoll **/ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring *tx_ring, int napi_budget) + struct ixgbe_ring *tx_ring) { struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_tx_buffer *tx_buffer; @@ -1178,7 +783,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - smp_rmb(); + read_barrier_depends(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) @@ -1192,7 +797,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, total_packets += tx_buffer->gso_segs; /* free the skb */ - napi_consume_skb(tx_buffer->skb, napi_budget); + dev_kfree_skb_any(tx_buffer->skb); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -1258,23 +863,23 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, " Tx Queue <%d>\n" " TDH, TDT <%x>, <%x>\n" " next_to_use <%x>\n" - " next_to_clean <%x>\n" - "tx_buffer_info[next_to_clean]\n" - " time_stamp <%lx>\n" - " jiffies <%lx>\n", + " next_to_clean <%x>\n", tx_ring->queue_index, IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), IXGBE_READ_REG(hw, 
IXGBE_TDT(tx_ring->reg_idx)), - tx_ring->next_to_use, i, + tx_ring->next_to_use, i); + e_err(drv, "tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", tx_ring->tx_buffer_info[i].time_stamp, jiffies); - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + netif_stop_subqueue(netdev_ring(tx_ring), + ring_queue_index(tx_ring)); e_info(probe, "tx hang %d detected on queue %d, resetting adapter\n", - adapter->tx_timeout_count + 1, tx_ring->queue_index); + adapter->tx_timeout_count + 1, tx_ring->queue_index); - /* schedule immediate reset if we believe we hung */ ixgbe_tx_timeout_reset(adapter); /* the adapter is about to reset, no point in enabling stuff */ @@ -1285,25 +890,33 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, total_packets, total_bytes); #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) - if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + if (unlikely(total_packets && netif_carrier_ok(netdev_ring(tx_ring)) && (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. 
*/ smp_mb(); - if (__netif_subqueue_stopped(tx_ring->netdev, - tx_ring->queue_index) - && !test_bit(__IXGBE_DOWN, &adapter->state)) { - netif_wake_subqueue(tx_ring->netdev, - tx_ring->queue_index); +#ifdef HAVE_TX_MQ + if (__netif_subqueue_stopped(netdev_ring(tx_ring), + ring_queue_index(tx_ring)) + && !test_bit(__IXGBE_DOWN, &q_vector->adapter->state)) { + netif_wake_subqueue(netdev_ring(tx_ring), + ring_queue_index(tx_ring)); + ++tx_ring->tx_stats.restart_queue; + } +#else + if (netif_queue_stopped(netdev_ring(tx_ring)) && + !test_bit(__IXGBE_DOWN, &q_vector->adapter->state)) { + netif_wake_queue(netdev_ring(tx_ring)); ++tx_ring->tx_stats.restart_queue; } +#endif } return !!budget; } -#ifdef CONFIG_IXGBE_DCA +#if IS_ENABLED(CONFIG_DCA) static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring, int cpu) @@ -1395,7 +1008,7 @@ static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) { - int i; + int v_idx; /* always use CB2 mode, difference is masked in the CB driver */ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) @@ -1405,9 +1018,9 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, IXGBE_DCA_CTRL_DCA_DISABLE); - for (i = 0; i < adapter->num_q_vectors; i++) { - adapter->q_vector[i]->cpu = -1; - ixgbe_update_dca(adapter->q_vector[i]); + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + adapter->q_vector[v_idx]->cpu = -1; + ixgbe_update_dca(adapter->q_vector[v_idx]); } } @@ -1424,41 +1037,41 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) /* if we're already enabled, don't do it again */ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) break; - if (dca_add_requester(dev) == 0) { + if (dca_add_requester(dev) == IXGBE_SUCCESS) { adapter->flags |= IXGBE_FLAG_DCA_ENABLED; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, - IXGBE_DCA_CTRL_DCA_MODE_CB2); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 
2); break; } - /* Fall Through since DCA is disabled. */ + /* fall through - DCA is disabled */ case DCA_PROVIDER_REMOVE: if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { dca_remove_requester(dev); adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, - IXGBE_DCA_CTRL_DCA_DISABLE); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); } break; } - return 0; + return IXGBE_SUCCESS; } +#endif /* CONFIG_DCA */ -#endif /* CONFIG_IXGBE_DCA */ - +#ifdef NETIF_F_RXHASH #define IXGBE_RSS_L4_TYPES_MASK \ ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ - (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX)) static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - u16 rss_type; + u16 rss_type; - if (!(ring->netdev->features & NETIF_F_RXHASH)) + if (!(netdev_ring(ring)->features & NETIF_F_RXHASH)) return; rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & @@ -1471,8 +1084,9 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } +#endif /* NETIF_F_RXHASH */ -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /** * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type * @ring: structure containing ring specific data @@ -1490,8 +1104,8 @@ static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring, (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE << IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); } +#endif /* CONFIG_FCOE */ -#endif /* IXGBE_FCOE */ /** * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: structure containing ring specific data @@ -1508,13 +1122,16 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, skb_checksum_none_assert(skb); /* Rx csum disabled */ - if (!(ring->netdev->features & NETIF_F_RXCSUM)) + if (!(netdev_ring(ring)->features & NETIF_F_RXCSUM)) return; - /* check for VXLAN and Geneve packets */ + /* check for VXLAN or Geneve packet type */ if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) { encap_pkt = true; +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) skb->encapsulation = 1; +#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ + skb->ip_summed = CHECKSUM_NONE; } /* if IP and error */ @@ -1528,6 +1145,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, return; if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { + /* * 82599 errata, UDP frames with a 0 checksum can be marked as * checksum errors. 
@@ -1545,14 +1163,81 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, if (encap_pkt) { if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS)) return; - + if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) { skb->ip_summed = CHECKSUM_NONE; return; } +#ifdef HAVE_SKBUFF_CSUM_LEVEL /* If we checked the outer header let the stack know */ skb->csum_level = 1; +#endif /* HAVE_SKBUFF_CSUM_LEVEL */ + } +} + +static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; +#endif + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(val, rx_ring->tail); +} + +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT +static bool ixgbe_alloc_mapped_skb(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi) +{ + struct sk_buff *skb = bi->skb; + dma_addr_t dma = bi->dma; + + if (unlikely(dma)) + return true; + + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring), + rx_ring->rx_buf_len); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->skb = skb; + + } + + dma = dma_map_single(rx_ring->dev, skb->data, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + dev_kfree_skb_any(skb); + bi->skb = NULL; + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; } + + bi->dma = dma; + return true; +} + +#else /* !CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ +static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? 
IXGBE_SKB_PAD : 0; } static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, @@ -1566,15 +1251,18 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, return true; /* alloc new page for storage */ - page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring)); + page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP, + ixgbe_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_rx_page_failed++; return false; } /* map page for use */ - dma = dma_map_page(rx_ring->dev, page, 0, - ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); /* * if mapping failed free memory back to system since @@ -1589,11 +1277,13 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, bi->dma = dma; bi->page = page; - bi->page_offset = 0; + bi->page_offset = ixgbe_rx_offset(rx_ring); + bi->pagecnt_bias = 1; return true; } +#endif /* !CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ /** * ixgbe_alloc_rx_buffers - Replace used receive buffers * @rx_ring: ring to place buffers on @@ -1604,6 +1294,9 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *bi; u16 i = rx_ring->next_to_use; +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + u16 bufsz; +#endif /* nothing to do */ if (!cleaned_count) @@ -1612,16 +1305,34 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) rx_desc = IXGBE_RX_DESC(rx_ring, i); bi = &rx_ring->rx_buffer_info[i]; i -= rx_ring->count; +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + + bufsz = ixgbe_rx_bufsz(rx_ring); +#endif do { +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + if (!ixgbe_alloc_mapped_skb(rx_ring, bi)) + break; +#else if (!ixgbe_alloc_mapped_page(rx_ring, bi)) break; + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); +#endif + /* 
* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); +#else rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); +#endif rx_desc++; bi++; @@ -1632,34 +1343,129 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) i -= rx_ring->count; } - /* clear the status bits for the next_to_use descriptor */ - rx_desc->wb.upper.status_error = 0; + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; cleaned_count--; } while (cleaned_count); i += rx_ring->count; - if (rx_ring->next_to_use != i) { - rx_ring->next_to_use = i; + if (rx_ring->next_to_use != i) + ixgbe_release_rx_desc(rx_ring, i); +} + +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT +/** + * ixgbe_merge_active_tail - merge active tail into lro skb + * @tail: pointer to active tail in frag_list + * + * This function merges the length and data of an active tail into the + * skb containing the frag_list. It resets the tail's pointer to the head, + * but it leaves the heads pointer to tail intact. + **/ +static inline struct sk_buff *ixgbe_merge_active_tail(struct sk_buff *tail) +{ + struct sk_buff *head = IXGBE_CB(tail)->head; - /* update next to alloc since we have filled the ring */ - rx_ring->next_to_alloc = i; + if (!head) + return tail; - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
- */ - wmb(); - writel(i, rx_ring->tail); + head->len += tail->len; + head->data_len += tail->len; + head->truesize += tail->truesize; + + IXGBE_CB(tail)->head = NULL; + + return head; +} + +/** + * ixgbe_add_active_tail - adds an active tail into the skb frag_list + * @head: pointer to the start of the skb + * @tail: pointer to active tail to add to frag_list + * + * This function adds an active tail to the end of the frag list. This tail + * will still be receiving data so we cannot yet ad it's stats to the main + * skb. That is done via ixgbe_merge_active_tail. + **/ +static inline void ixgbe_add_active_tail(struct sk_buff *head, + struct sk_buff *tail) +{ + struct sk_buff *old_tail = IXGBE_CB(head)->tail; + + if (old_tail) { + ixgbe_merge_active_tail(old_tail); + old_tail->next = tail; + } else { + skb_shinfo(head)->frag_list = tail; + } + + IXGBE_CB(tail)->head = head; + IXGBE_CB(head)->tail = tail; +} + +/** + * ixgbe_close_active_frag_list - cleanup pointers on a frag_list skb + * @head: pointer to head of an active frag list + * + * This function will clear the frag_tail_tracker pointer on an active + * frag_list and returns true if the pointer was actually set + **/ +static inline bool ixgbe_close_active_frag_list(struct sk_buff *head) +{ + struct sk_buff *tail = IXGBE_CB(head)->tail; + + if (!tail) + return false; + + ixgbe_merge_active_tail(tail); + + IXGBE_CB(head)->tail = NULL; + + return true; +} + +#endif +#ifdef HAVE_VLAN_RX_REGISTER +/** + * ixgbe_receive_skb - Send a completed packet up the stack + * @q_vector: structure containing interrupt and ring information + * @skb: packet to send up + **/ +static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) +{ + u16 vlan_tag = IXGBE_CB(skb)->vid; + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + if (vlan_tag & VLAN_VID_MASK) { + /* by placing vlgrp at start of structure we can alias it */ + struct vlan_group **vlgrp = netdev_priv(skb->dev); + if 
(!*vlgrp) + dev_kfree_skb_any(skb); + else if (q_vector->netpoll_rx) + vlan_hwaccel_rx(skb, *vlgrp, vlan_tag); + else + vlan_gro_receive(&q_vector->napi, + *vlgrp, vlan_tag, skb); + } else { +#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ + if (q_vector->netpoll_rx) + netif_rx(skb); + else + napi_gro_receive(&q_vector->napi, skb); +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) } +#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ } -static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, +#endif /* HAVE_VLAN_RX_REGISTER */ +#ifdef NETIF_F_GSO +static void ixgbe_set_rsc_gso_size(struct ixgbe_ring __maybe_unused *ring, struct sk_buff *skb) { - u16 hdr_len = skb_headlen(skb); + u16 hdr_len = eth_get_headlen(skb->data, skb_headlen(skb)); /* set gso_size to avoid messing up TCP MSS */ skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), @@ -1667,6 +1473,7 @@ static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; } +#endif /* NETIF_F_GSO */ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { @@ -1677,12 +1484,35 @@ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; rx_ring->rx_stats.rsc_flush++; +#ifdef NETIF_F_GSO ixgbe_set_rsc_gso_size(rx_ring, skb); +#endif /* gso_size is computed using append_cnt so always clear it last */ IXGBE_CB(skb)->append_cnt = 0; } +static void ixgbe_rx_vlan(struct ixgbe_ring *ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ +#ifdef NETIF_F_HW_VLAN_CTAG_RX + if ((netdev_ring(ring)->features & NETIF_F_HW_VLAN_CTAG_RX) && +#else + if ((netdev_ring(ring)->features & NETIF_F_HW_VLAN_RX) && +#endif + ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) +#ifndef HAVE_VLAN_RX_REGISTER + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + le16_to_cpu(rx_desc->wb.upper.vlan)); +#else + IXGBE_CB(skb)->vid = 
le16_to_cpu(rx_desc->wb.upper.vlan); + else + IXGBE_CB(skb)->vid = 0; +#endif +} + /** * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on @@ -1697,37 +1527,53 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - struct net_device *dev = rx_ring->netdev; +#ifdef HAVE_PTP_1588_CLOCK u32 flags = rx_ring->q_vector->adapter->flags; +#endif ixgbe_update_rsc_stats(rx_ring, skb); +#ifdef NETIF_F_RXHASH ixgbe_rx_hash(rx_ring, rx_desc, skb); +#endif /* NETIF_F_RXHASH */ ixgbe_rx_checksum(rx_ring, rx_desc, skb); - +#ifdef HAVE_PTP_1588_CLOCK if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED)) ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); - if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && - ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { - u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); - } +#endif + ixgbe_rx_vlan(rx_ring, rx_desc, skb); - skb_record_rx_queue(skb, rx_ring->queue_index); + skb_record_rx_queue(skb, ring_queue_index(rx_ring)); - skb->protocol = eth_type_trans(skb, dev); + skb->protocol = eth_type_trans(skb, netdev_ring(rx_ring)); } static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { +#ifdef HAVE_NDO_BUSY_POLL skb_mark_napi_id(skb, &q_vector->napi); - if (ixgbe_qv_busy_polling(q_vector)) + + if (ixgbe_qv_busy_polling(q_vector) || q_vector->netpoll_rx) { netif_receive_skb(skb); - else + /* exit early if we busy polled */ + return; + } +#endif + +#ifdef HAVE_VLAN_RX_REGISTER + ixgbe_receive_skb(q_vector, skb); +#else napi_gro_receive(&q_vector->napi, skb); +#endif +#ifndef NETIF_F_GRO + + netdev_ring(rx_ring)->last_rx = jiffies; +#endif } /** @@ -1745,6 +1591,9 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, 
struct sk_buff *skb) { +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + struct sk_buff *next_skb; +#endif u32 ntc = rx_ring->next_to_clean + 1; /* fetch, update, and store next to clean */ @@ -1776,15 +1625,22 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, return false; /* place skb in next buffer to be received */ +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + next_skb = rx_ring->rx_buffer_info[ntc].skb; + + ixgbe_add_active_tail(skb, next_skb); + IXGBE_CB(next_skb)->head = skb; +#else rx_ring->rx_buffer_info[ntc].skb = skb; +#endif rx_ring->rx_stats.non_eop_descs++; return true; } +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT /** * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on * @skb: pointer to current skb being adjusted * * This function is an ixgbe specific version of __pskb_pull_tail. The @@ -1794,8 +1650,7 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, * As a result we can do things like drop a frag and maintain an accurate * truesize for the skb. 
*/ -static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, - struct sk_buff *skb) +static void ixgbe_pull_tail(struct sk_buff *skb) { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; unsigned char *va; @@ -1839,19 +1694,19 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, { /* if the page was released unmap it, else just sync our portion */ if (unlikely(IXGBE_CB(skb)->page_released)) { - dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, - ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); - IXGBE_CB(skb)->page_released = false; + dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); } else { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; dma_sync_single_range_for_cpu(rx_ring->dev, IXGBE_CB(skb)->dma, frag->page_offset, - ixgbe_rx_bufsz(rx_ring), + skb_frag_size(frag), DMA_FROM_DEVICE); } - IXGBE_CB(skb)->dma = 0; } /** @@ -1872,30 +1727,27 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, * * Returns true if an error was encountered and skb was freed. 
**/ -static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, +static bool ixgbe_cleanup_headers(struct ixgbe_ring __maybe_unused *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - struct net_device *netdev = rx_ring->netdev; - /* verify that the packet does not have any known errors */ if (unlikely(ixgbe_test_staterr(rx_desc, - IXGBE_RXDADV_ERR_FRAME_ERR_MASK) && - !(netdev->features & NETIF_F_RXALL))) { + IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { dev_kfree_skb_any(skb); return true; } /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - ixgbe_pull_tail(rx_ring, skb); + if (!skb_headlen(skb)) + ixgbe_pull_tail(skb); -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* do not attempt to pad FCoE Frames as this will disrupt DDP */ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) return false; - #endif + /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@ -1922,14 +1774,14 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, nta++; rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - /* transfer page from old buffer to new buffer */ - *new_buff = *old_buff; - - /* sync the buffer for use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, - new_buff->page_offset, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls and unnecessary copy of skb. 
+ */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; } static inline bool ixgbe_page_is_reserved(struct page *page) @@ -1937,6 +1789,57 @@ static inline bool ixgbe_page_is_reserved(struct page *page) return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); } +static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote pages */ + if (unlikely(ixgbe_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) +#else + if (unlikely((page_count(page) - pagecnt_bias) > 1)) +#endif + return false; +#else + /* The last offset is a bit aggressive in that we assume the + * worst case of FCoE being enabled and using a 3K buffer. + * However this should have minimal impact as the 1K extra is + * still less than one buffer in size. + */ +#define IXGBE_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K) + if (rx_buffer->page_offset > IXGBE_LAST_OFFSET) + return false; +#endif + +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(!pagecnt_bias)) { + page_ref_add(page, USHRT_MAX); + rx_buffer->pagecnt_bias = USHRT_MAX; + } +#else + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. 
+ */ + if (likely(!pagecnt_bias)) { + page_ref_inc(page); + rx_buffer->pagecnt_bias = 1; + } +#endif + + return true; +} + /** * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on @@ -1952,150 +1855,182 @@ static inline bool ixgbe_page_is_reserved(struct page *page) * The function will then update the page offset if necessary and return * true if the buffer can be reused by the adapter. **/ -static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, +static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) + struct sk_buff *skb, + unsigned int size) { - struct page *page = rx_buffer->page; - unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) - unsigned int truesize = ixgbe_rx_bufsz(rx_ring); + unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); - unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - - ixgbe_rx_bufsz(rx_ring); + unsigned int truesize = ring_uses_build_skb(rx_ring) ? 
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) : + SKB_DATA_ALIGN(size); #endif - if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; - - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - - /* page is not reserved, we can reuse buffer as-is */ - if (likely(!ixgbe_page_is_reserved(page))) - return true; - - /* this page cannot be reused so discard it */ - __free_pages(page, ixgbe_rx_pg_order(rx_ring)); - return false; - } - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, size, truesize); - /* avoid re-using remote pages */ - if (unlikely(ixgbe_page_is_reserved(page))) - return false; - #if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) - return false; - - /* flip page offset to other buffer */ rx_buffer->page_offset ^= truesize; #else - /* move offset up to the next cache line */ rx_buffer->page_offset += truesize; - - if (rx_buffer->page_offset > last_offset) - return false; #endif - - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. - */ - page_ref_inc(page); - - return true; } -static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc) +static struct ixgbe_rx_buffer * +ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, struct sk_buff **skb, + const unsigned int size) { struct ixgbe_rx_buffer *rx_buffer; - struct sk_buff *skb; - struct page *page; rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; - page = rx_buffer->page; - prefetchw(page); + prefetchw(rx_buffer->page); + *skb = rx_buffer->skb; - skb = rx_buffer->skb; + /* Delay unmapping of the first packet. It carries the header + * information, HW may still access the header after the writeback. 
+ * Only unmap it when EOP is reached + */ + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) { + if (!*skb) + goto skip_sync; + } else { + if (*skb) + ixgbe_dma_sync_frag(rx_ring, *skb); + } - if (likely(!skb)) { - void *page_addr = page_address(page) + - rx_buffer->page_offset; + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); +skip_sync: + rx_buffer->pagecnt_bias--; - /* prefetch first cache line of first page */ - prefetch(page_addr); -#if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); -#endif + return rx_buffer; +} - /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rx_ring->q_vector->napi, - IXGBE_RX_HDR_SIZE); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_rx_buff_failed++; - return NULL; +static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + struct sk_buff *skb) +{ + if (ixgbe_can_reuse_rx_page(rx_buffer)) { + /* hand second half of page back to the ring */ + ixgbe_reuse_rx_page(rx_ring, rx_buffer); + } else { + if (IXGBE_CB(skb)->dma == rx_buffer->dma) { + /* the page has been released from the ring */ + IXGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); } + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } - /* - * we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + rx_buffer->skb = NULL; +} - /* - * Delay unmapping of the first packet. It carries the - * header information, HW may still access the header - * after the writeback. 
Only unmap it when EOP is - * reached - */ - if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) - goto dma_sync; +static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + union ixgbe_adv_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(size); +#endif + struct sk_buff *skb; - IXGBE_CB(skb)->dma = rx_buffer->dma; - } else { - if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) - ixgbe_dma_sync_frag(rx_ring, skb); + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif -dma_sync: - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; - rx_buffer->skb = NULL; - } + if (size > IXGBE_RX_HDR_SIZE) { + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) + IXGBE_CB(skb)->dma = rx_buffer->dma; - /* pull page into skb */ - if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { - /* hand second half of page back to the ring */ - ixgbe_reuse_rx_page(rx_ring, rx_buffer); - } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { - /* the page has been released from the ring */ - IXGBE_CB(skb)->page_released = true; + skb_add_rx_frag(skb, 0, rx_buffer->page, + rx_buffer->page_offset, + size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE); + 
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + rx_buffer->pagecnt_bias++; } - /* clear contents of buffer_info */ - rx_buffer->page = NULL; - return skb; } -/** - * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC +static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + union ixgbe_adv_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(IXGBE_SKB_PAD + size); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* build an skb around the page buffer */ + skb = build_skb(va - IXGBE_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, IXGBE_SKB_PAD); + __skb_put(skb, size); + + /* record DMA address if this is the start of a chain of buffers */ + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) + IXGBE_CB(skb)->dma = rx_buffer->dma; + + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return skb; +} + +#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ +/** + * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @q_vector: structure containing interrupt and ring information * @rx_ring: rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process @@ -2105,23 +2040,24 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, * expensive overhead for IOMMU access this provides a means of avoiding * it by maintaining the mapping of the page to the 
syste. * - * Returns amount of work completed + * Returns amount of work completed. **/ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *rx_ring, - const int budget) + int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; -#ifdef IXGBE_FCOE - struct ixgbe_adapter *adapter = q_vector->adapter; +#if IS_ENABLED(CONFIG_FCOE) int ddp_bytes; unsigned int mss = 0; -#endif /* IXGBE_FCOE */ +#endif /* CONFIG_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); while (likely(total_rx_packets < budget)) { union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *rx_buffer; struct sk_buff *skb; + unsigned int size; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { @@ -2130,8 +2066,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); - - if (!rx_desc->wb.upper.status_error) + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) break; /* This memory barrier is needed to keep us from reading @@ -2140,13 +2076,28 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ dma_rmb(); + rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size); + /* retrieve a buffer from the ring */ - skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc); + if (skb) + ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + else if (ring_uses_build_skb(rx_ring)) + skb = ixgbe_build_skb(rx_ring, rx_buffer, + rx_desc, size); +#endif + else + skb = ixgbe_construct_skb(rx_ring, rx_buffer, rx_desc, + size); /* exit if we failed to retrieve a buffer */ - if (!skb) + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; break; + } + ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb); cleaned_count++; /* place incomplete frames back on ring for completion */ @@ -2163,14 +2114,15 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, /* populate 
checksum, timestamp, VLAN, and protocol */ ixgbe_process_skb_fields(rx_ring, rx_desc, skb); -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* if ddp, not passing to ULD unless for FCP_RSP or error */ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { - ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); + ddp_bytes = ixgbe_fcoe_ddp(q_vector->adapter, + rx_desc, skb); /* include DDPed FCoE data */ if (ddp_bytes > 0) { if (!mss) { - mss = rx_ring->netdev->mtu - + mss = netdev_ring(rx_ring)->mtu - sizeof(struct fcoe_hdr) - sizeof(struct fc_frame_header) - sizeof(struct fcoe_crc_eof); @@ -2183,12 +2135,15 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } if (!ddp_bytes) { dev_kfree_skb_any(skb); +#ifndef NETIF_F_GRO + netdev_ring(rx_ring)->last_rx = jiffies; +#endif continue; } } +#endif /* CONFIG_FCOE */ -#endif /* IXGBE_FCOE */ - ixgbe_rx_skb(q_vector, skb); + ixgbe_rx_skb(q_vector, rx_ring, rx_desc, skb); /* update budget accounting */ total_rx_packets++; @@ -2204,9 +2159,165 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, return total_rx_packets; } -#ifdef CONFIG_NET_RX_BUSY_POLL +#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ +/** + * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - legacy + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a legacy approach to Rx interrupt + * handling. This version will perform better on systems with a low cost + * dma mapping API. + * + * Returns amount of work completed. 
+ **/ +static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; +#if IS_ENABLED(CONFIG_FCOE) + int ddp_bytes; + unsigned int mss = 0; +#endif /* CONFIG_FCOE */ + u16 len = 0; + u16 cleaned_count = ixgbe_desc_unused(rx_ring); + + while (likely(total_rx_packets < budget)) { + struct ixgbe_rx_buffer *rx_buffer; + union ixgbe_adv_rx_desc *rx_desc; + struct sk_buff *skb; + u16 ntc; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { + ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + ntc = rx_ring->next_to_clean; + rx_desc = IXGBE_RX_DESC(rx_ring, ntc); + rx_buffer = &rx_ring->rx_buffer_info[ntc]; + + if (!rx_desc->wb.upper.length) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + skb = rx_buffer->skb; + + prefetch(skb->data); + + len = le16_to_cpu(rx_desc->wb.upper.length); + /* pull the header of the skb in */ + __skb_put(skb, len); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header after + * the writeback. 
Only unmap it when EOP is reached + */ + if (!IXGBE_CB(skb)->head) { + IXGBE_CB(skb)->dma = rx_buffer->dma; + } else { + skb = ixgbe_merge_active_tail(skb); + dma_unmap_single(rx_ring->dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + } + + /* clear skb reference in buffer info structure */ + rx_buffer->skb = NULL; + rx_buffer->dma = 0; + + cleaned_count++; + + if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + dma_unmap_single(rx_ring->dev, + IXGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + + IXGBE_CB(skb)->dma = 0; + + if (ixgbe_close_active_frag_list(skb) && + !IXGBE_CB(skb)->append_cnt) { + /* if we got here without RSC the packet is invalid */ + dev_kfree_skb_any(skb); + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(ixgbe_test_staterr(rx_desc, + IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { + dev_kfree_skb_any(skb); + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + ixgbe_process_skb_fields(rx_ring, rx_desc, skb); + +#if IS_ENABLED(CONFIG_FCOE) + /* if ddp, not passing to ULD unless for FCP_RSP or error */ + if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { + ddp_bytes = ixgbe_fcoe_ddp(q_vector->adapter, + rx_desc, skb); + /* include DDPed FCoE data */ + if (ddp_bytes > 0) { + if (!mss) { + mss = netdev_ring(rx_ring)->mtu - + sizeof(struct fcoe_hdr) - + sizeof(struct fc_frame_header) - + sizeof(struct fcoe_crc_eof); + if (mss > 512) + mss &= ~511; + } + total_rx_bytes += ddp_bytes; + total_rx_packets += DIV_ROUND_UP(ddp_bytes, + mss); + } + if (!ddp_bytes) { + dev_kfree_skb_any(skb); +#ifndef NETIF_F_GRO + netdev_ring(rx_ring)->last_rx = jiffies; +#endif + continue; + } + } + +#endif /* CONFIG_FCOE */ + ixgbe_rx_skb(q_vector, rx_ring, rx_desc, skb); + + /* update budget accounting */ + total_rx_packets++; + } + + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += 
total_rx_bytes; + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + if (cleaned_count) + ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_rx_packets; +} + +#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ +#ifdef HAVE_NDO_BUSY_POLL /* must be called with local_bh_disable()d */ -static int ixgbe_low_latency_recv(struct napi_struct *napi) +static int ixgbe_busy_poll_recv(struct napi_struct *napi) { struct ixgbe_q_vector *q_vector = container_of(napi, struct ixgbe_q_vector, napi); @@ -2236,8 +2347,8 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi) return found; } -#endif /* CONFIG_NET_RX_BUSY_POLL */ +#endif /* HAVE_NDO_BUSY_POLL */ /** * ixgbe_configure_msix - Configure MSI-X hardware * @adapter: board private structure @@ -2247,13 +2358,12 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi) **/ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) { - struct ixgbe_q_vector *q_vector; int v_idx; u32 mask; /* Populate MSIX to EITR Select */ - if (adapter->num_vfs > 32) { - u32 eitrsel = BIT(adapter->num_vfs - 32) - 1; + if (adapter->num_vfs >= 32) { + u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); } @@ -2262,8 +2372,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) * corresponding register. 
*/ for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; struct ixgbe_ring *ring; - q_vector = adapter->q_vector[v_idx]; ixgbe_for_each_ring(ring, q_vector->rx) ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); @@ -2283,7 +2393,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: ixgbe_set_ivar(adapter, -1, 1, v_idx); break; default: @@ -2343,23 +2453,25 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, timepassed_us = q_vector->itr >> 2; if (timepassed_us == 0) return; - bytes_perint = bytes / timepassed_us; /* bytes/usec */ switch (itr_setting) { case lowest_latency: - if (bytes_perint > 10) + if (bytes_perint > 10) { itr_setting = low_latency; + } break; case low_latency: - if (bytes_perint > 20) + if (bytes_perint > 20) { itr_setting = bulk_latency; - else if (bytes_perint <= 10) + } else if (bytes_perint <= 10) { itr_setting = lowest_latency; + } break; case bulk_latency: - if (bytes_perint <= 20) + if (bytes_perint <= 20) { itr_setting = low_latency; + } break; } @@ -2395,7 +2507,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: /* * set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt @@ -2458,8 +2570,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) if (test_bit(__IXGBE_DOWN, &adapter->state)) return; - if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && - !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) + if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) return; adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; @@ -2473,7 +2584,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) * - We may have missed 
the interrupt so always have to * check if we got a LSC */ - if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) && + if (!(eicr & IXGBE_EICR_GPI_SDP0) && !(eicr & IXGBE_EICR_LSC)) return; @@ -2501,7 +2612,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) default: if (adapter->hw.mac.type >= ixgbe_mac_X540) return; - if (!(eicr & IXGBE_EICR_GPI_SDP0(hw))) + if (!(eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw))) return; break; } @@ -2515,17 +2626,15 @@ static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) struct ixgbe_hw *hw = &adapter->hw; if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && - (eicr & IXGBE_EICR_GPI_SDP1(hw))) { + (eicr & IXGBE_EICR_GPI_SDP1)) { e_crit(probe, "Fan has stopped, replace the adapter\n"); /* write to clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); } } static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) { - struct ixgbe_hw *hw = &adapter->hw; - if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) return; @@ -2535,8 +2644,7 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) * Need to check link state so complete overtemp check * on service task */ - if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) || - (eicr & IXGBE_EICR_LSC)) && + if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) && (!test_bit(__IXGBE_DOWN, &adapter->state))) { adapter->interrupt_event = eicr; adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; @@ -2544,7 +2652,7 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) return; } return; - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) { adapter->interrupt_event = eicr; adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; @@ -2567,37 +2675,13 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) e_crit(drv, "%s\n", ixgbe_overheat_msg); } 
-static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) -{ - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - if (hw->phy.type == ixgbe_phy_nl) - return true; - return false; - case ixgbe_mac_82599EB: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - switch (hw->mac.ops.get_media_type(hw)) { - case ixgbe_media_type_fiber: - case ixgbe_media_type_fiber_qsfp: - return true; - default: - return false; - } - default: - return false; - } -} - static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) { struct ixgbe_hw *hw = &adapter->hw; - u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw); + u32 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); if (!ixgbe_is_sfp(hw)) return; - - /* Later MAC's use different SDP */ if (hw->mac.type >= ixgbe_mac_X540) eicr_mask = IXGBE_EICR_GPI_SDP0_X540; @@ -2612,9 +2696,9 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) } if (adapter->hw.mac.type == ixgbe_mac_82599EB && - (eicr & IXGBE_EICR_GPI_SDP1(hw))) { + (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { /* Clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); if (!test_bit(__IXGBE_DOWN, &adapter->state)) { adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; ixgbe_service_event_schedule(adapter); @@ -2636,8 +2720,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) } } -static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, - u64 qmask) +static void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, u64 qmask) { u32 mask; struct ixgbe_hw *hw = &adapter->hw; @@ -2651,7 +2734,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); @@ -2665,35 +2748,6 @@ static inline void ixgbe_irq_enable_queues(struct 
ixgbe_adapter *adapter, /* skip the flush */ } -static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, - u64 qmask) -{ - u32 mask; - struct ixgbe_hw *hw = &adapter->hw; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - mask = (IXGBE_EIMS_RTX_QUEUE & qmask); - IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - mask = (qmask & 0xFFFFFFFF); - if (mask) - IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); - mask = (qmask >> 32); - if (mask) - IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); - break; - default: - break; - } - /* skip the flush */ -} - /** * ixgbe_irq_enable - Enable default interrupt generation settings * @adapter: board private structure @@ -2701,7 +2755,6 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, bool flush) { - struct ixgbe_hw *hw = &adapter->hw; u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); /* don't reenable LSC while waiting for link */ @@ -2711,36 +2764,39 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: - mask |= IXGBE_EIMS_GPI_SDP0(hw); + case ixgbe_mac_X550EM_a: + mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(&adapter->hw); break; case ixgbe_mac_X540: case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: mask |= IXGBE_EIMS_TS; break; default: break; } if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) - mask |= IXGBE_EIMS_GPI_SDP1(hw); + mask |= IXGBE_EIMS_GPI_SDP1; switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: - mask |= IXGBE_EIMS_GPI_SDP1(hw); - mask |= IXGBE_EIMS_GPI_SDP2(hw); + mask |= IXGBE_EIMS_GPI_SDP1; + mask |= IXGBE_EIMS_GPI_SDP2; /* fall through */ case ixgbe_mac_X540: case ixgbe_mac_X550: case 
ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP || adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) - mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw); + mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(&adapter->hw); if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) mask |= IXGBE_EICR_GPI_SDP0_X540; mask |= IXGBE_EIMS_ECC; mask |= IXGBE_EIMS_MAILBOX; +#ifdef HAVE_PTP_1588_CLOCK + mask |= IXGBE_EIMS_TIMESYNC; +#endif + break; default: break; @@ -2757,22 +2813,22 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, IXGBE_WRITE_FLUSH(&adapter->hw); } -static irqreturn_t ixgbe_msix_other(int irq, void *data) +static irqreturn_t ixgbe_msix_other(int __always_unused irq, void *data) { struct ixgbe_adapter *adapter = data; struct ixgbe_hw *hw = &adapter->hw; u32 eicr; /* - * Workaround for Silicon errata. Use clear-by-write instead - * of clear-by-read. Reading with EICS will return the + * Workaround for Silicon errata #26 on 82598. Use clear-by-write + * instead of clear-by-read. Reading with EICS will return the * interrupt causes without clearing, which later be done * with the write to EICR. */ eicr = IXGBE_READ_REG(hw, IXGBE_EICS); /* The lower 16bits of the EICR register are for the queue interrupts - * which should be masked here in order to not accidentally clear them if + * which should be masked here in order to not accidently clear them if * the bits are high when ixgbe_msix_other is called. 
There is a race * condition otherwise which results in possible performance loss * especially if the ixgbe_msix_other interrupt is triggering @@ -2793,7 +2849,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: if (hw->phy.type == ixgbe_phy_x550em_ext_t && (eicr & IXGBE_EICR_GPI_SDP0_X540)) { adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; @@ -2802,28 +2858,36 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) IXGBE_EICR_GPI_SDP0_X540); } if (eicr & IXGBE_EICR_ECC) { - e_info(link, "Received ECC Err, initiating reset\n"); + e_info(link, "Received unrecoverable ECC Err," + "initiating reset.\n"); set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); ixgbe_service_event_schedule(adapter); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); } +#ifdef HAVE_TX_MQ /* Handle Flow Director Full threshold interrupt */ if (eicr & IXGBE_EICR_FLOW_DIR) { int reinit_count = 0; int i; for (i = 0; i < adapter->num_tx_queues; i++) { struct ixgbe_ring *ring = adapter->tx_ring[i]; - if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, - &ring->state)) + if (test_and_clear_bit( + __IXGBE_TX_FDIR_INIT_DONE, + &ring->state)) reinit_count++; } if (reinit_count) { - /* no more flow director interrupts until after init */ - IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); - adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; + /* no more flow director interrupts until + * after init + */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, + IXGBE_EIMC_FLOW_DIR); + adapter->flags2 |= + IXGBE_FLAG2_FDIR_REQUIRES_REINIT; ixgbe_service_event_schedule(adapter); } } +#endif ixgbe_check_sfp_event(adapter, eicr); ixgbe_check_overtemp_event(adapter, eicr); break; @@ -2833,8 +2897,10 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) ixgbe_check_fan_failure(adapter, eicr); +#ifdef HAVE_PTP_1588_CLOCK if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) - 
ixgbe_ptp_check_pps_event(adapter); + ixgbe_ptp_check_pps_event(adapter); +#endif /* re-enable the original interrupt state, no lsc, no queues */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) @@ -2843,7 +2909,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) +static irqreturn_t ixgbe_msix_clean_rings(int __always_unused irq, void *data) { struct ixgbe_q_vector *q_vector = data; @@ -2856,34 +2922,41 @@ static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) } /** - * ixgbe_poll - NAPI Rx polling callback - * @napi: structure for representing this polling device - * @budget: how many packets driver is allowed to clean + * ixgbe_poll - NAPI polling RX/TX cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets * - * This function is used for legacy and MSI, NAPI mode + * This function will clean all queues associated with a q_vector. 
**/ int ixgbe_poll(struct napi_struct *napi, int budget) { struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); + container_of(napi, struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_ring *ring; int per_ring_budget, work_done = 0; bool clean_complete = true; -#ifdef CONFIG_IXGBE_DCA +#if IS_ENABLED(CONFIG_DCA) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ixgbe_update_dca(q_vector); -#endif +#endif /* CONFIG_DCA */ - ixgbe_for_each_ring(ring, q_vector->tx) { - if (!ixgbe_clean_tx_irq(q_vector, ring, budget)) - clean_complete = false; - } + ixgbe_for_each_ring(ring, q_vector->tx) + clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); + +#ifdef HAVE_NDO_BUSY_POLL + if (test_bit(NAPI_STATE_NPSVC, &napi->state)) + return budget; /* Exit if we are called by netpoll or busy polling is active */ if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) return budget; +#else + /* Exit if we are called by netpoll */ + if (budget <= 0) + return budget; +#endif /* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling */ @@ -2895,23 +2968,29 @@ int ixgbe_poll(struct napi_struct *napi, int budget) ixgbe_for_each_ring(ring, q_vector->rx) { int cleaned = ixgbe_clean_rx_irq(q_vector, ring, per_ring_budget); - work_done += cleaned; - if (cleaned >= per_ring_budget) - clean_complete = false; + clean_complete &= (cleaned < per_ring_budget); } +#ifdef HAVE_NDO_BUSY_POLL ixgbe_qv_unlock_napi(q_vector); +#endif + +#ifndef HAVE_NETDEV_NAPI_LIST + if (!netif_running(adapter->netdev)) + clean_complete = true; + +#endif /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; /* all work done, exit the polling mode */ napi_complete_done(napi, work_done); - if (adapter->rx_itr_setting & 1) + if (adapter->rx_itr_setting == 1) ixgbe_set_itr(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) - 
ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx)); + ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); return min(work_done, budget - 1); } @@ -2926,23 +3005,23 @@ int ixgbe_poll(struct napi_struct *napi, int budget) static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; + unsigned int ri = 0, ti = 0; int vector, err; - int ri = 0, ti = 0; for (vector = 0; vector < adapter->num_q_vectors; vector++) { struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; struct msix_entry *entry = &adapter->msix_entries[vector]; if (q_vector->tx.ring && q_vector->rx.ring) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-%s-%d", netdev->name, "TxRx", ri++); + snprintf(q_vector->name, sizeof(q_vector->name), + "%s-TxRx-%u", netdev->name, ri++); ti++; } else if (q_vector->rx.ring) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-%s-%d", netdev->name, "rx", ri++); + snprintf(q_vector->name, sizeof(q_vector->name), + "%s-rx-%u", netdev->name, ri++); } else if (q_vector->tx.ring) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-%s-%d", netdev->name, "tx", ti++); + snprintf(q_vector->name, sizeof(q_vector->name), + "%s-tx-%u", netdev->name, ti++); } else { /* skip this unused q_vector */ continue; @@ -2950,16 +3029,18 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0, q_vector->name, q_vector); if (err) { - e_err(probe, "request_irq failed for MSIX interrupt " - "Error: %d\n", err); + e_err(probe, "request_irq failed for MSIX interrupt '%s' " + "Error: %d\n", q_vector->name, err); goto free_queue_irqs; } +#ifdef HAVE_IRQ_AFFINITY_HINT /* If Flow Director is enabled, set interrupt affinity */ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { /* assign the mask for this irq */ irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask); } +#endif /* HAVE_IRQ_AFFINITY_HINT */ } err 
= request_irq(adapter->msix_entries[vector].vector, @@ -2969,13 +3050,15 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) goto free_queue_irqs; } - return 0; + return IXGBE_SUCCESS; free_queue_irqs: while (vector) { vector--; +#ifdef HAVE_IRQ_AFFINITY_HINT irq_set_affinity_hint(adapter->msix_entries[vector].vector, NULL); +#endif free_irq(adapter->msix_entries[vector].vector, adapter->q_vector[vector]); } @@ -2991,7 +3074,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) * @irq: interrupt number * @data: pointer to a network interface device structure **/ -static irqreturn_t ixgbe_intr(int irq, void *data) +static irqreturn_t ixgbe_intr(int __always_unused irq, void *data) { struct ixgbe_adapter *adapter = data; struct ixgbe_hw *hw = &adapter->hw; @@ -3025,18 +3108,19 @@ static irqreturn_t ixgbe_intr(int irq, void *data) switch (hw->mac.type) { case ixgbe_mac_82599EB: - ixgbe_check_sfp_event(adapter, eicr); - /* Fall through */ case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: + if (eicr & IXGBE_EICR_ECC) { - e_info(link, "Received ECC Err, initiating reset\n"); + e_info(link, "Received unrecoverable ECC Err," + "initiating reset.\n"); set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); ixgbe_service_event_schedule(adapter); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); } + ixgbe_check_sfp_event(adapter, eicr); ixgbe_check_overtemp_event(adapter, eicr); break; default: @@ -3044,8 +3128,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data) } ixgbe_check_fan_failure(adapter, eicr); +#ifdef HAVE_PTP_1588_CLOCK if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) - ixgbe_ptp_check_pps_event(adapter); + ixgbe_ptp_check_pps_event(adapter); +#endif /* would disable interrupts here but EIAM disabled it */ napi_schedule_irqoff(&q_vector->napi); @@ -3075,10 +3161,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter) if (adapter->flags & 
IXGBE_FLAG_MSIX_ENABLED) err = ixgbe_request_msix_irqs(adapter); else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) - err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, + err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0, netdev->name, adapter); else - err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, + err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED, netdev->name, adapter); if (err) @@ -3096,6 +3182,9 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) return; } + if (!adapter->msix_entries) + return; + for (vector = 0; vector < adapter->num_q_vectors; vector++) { struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; struct msix_entry *entry = &adapter->msix_entries[vector]; @@ -3104,9 +3193,11 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) if (!q_vector->rx.ring && !q_vector->tx.ring) continue; +#ifdef HAVE_IRQ_AFFINITY_HINT /* clear the affinity_mask in the IRQ descriptor */ irq_set_affinity_hint(entry->vector, NULL); +#endif free_irq(entry->vector, q_vector); } @@ -3127,7 +3218,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); @@ -3181,18 +3272,27 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, u8 reg_idx = ring->reg_idx; /* disable queue to avoid issues while updating state */ - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); IXGBE_WRITE_FLUSH(hw); - IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), - (tdba & DMA_BIT_MASK(32))); - IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32)); + IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), tdba >> 32); 
IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), ring->count * sizeof(union ixgbe_adv_tx_desc)); + + /* disable head writeback */ + IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(reg_idx), 0); + + /* reset head and tail pointers */ IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx); + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + /* * set WTHRESH to encourage burst writeback, it should not be set * higher than 1 when: @@ -3204,15 +3304,15 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, * currently 40. */ if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) - txdctl |= 1u << 16; /* WTHRESH = 1 */ + txdctl |= (1 << 16); /* WTHRESH = 1 */ else - txdctl |= 8u << 16; /* WTHRESH = 8 */ + txdctl |= (8 << 16); /* WTHRESH = 8 */ /* * Setting PTHRESH to 32 both improves performance * and avoids a TX hang with DFP enabled */ - txdctl |= (1u << 8) | /* HTHRESH = 1 */ + txdctl |= (1 << 8) | /* HTHRESH = 1 */ 32; /* PTHRESH = 32 */ /* reinitialize flowdirector state */ @@ -3229,7 +3329,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, struct ixgbe_q_vector *q_vector = ring->q_vector; if (q_vector) - netif_set_xps_queue(ring->netdev, + netif_set_xps_queue(adapter->netdev, &q_vector->affinity_mask, ring->queue_index); } @@ -3246,7 +3346,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, /* poll to verify queue is enabled */ do { - usleep_range(1000, 2000); + msleep(1); txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) @@ -3268,7 +3368,7 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); /* set transmit pool layout */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { 
mtqc = IXGBE_MTQC_VT_ENA; if (tcs > 4) mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; @@ -3314,6 +3414,13 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) u32 dmatxctl; u32 i; +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + if (adapter->num_tx_queues > 1) + adapter->netdev->features |= NETIF_F_MULTI_QUEUE; + else + adapter->netdev->features &= ~NETIF_F_MULTI_QUEUE; + +#endif ixgbe_setup_mtqc(adapter); if (hw->mac.type != ixgbe_mac_82598EB) { @@ -3352,18 +3459,16 @@ static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); } -#ifdef CONFIG_IXGBE_DCB void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) -#else -static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) -#endif { int i; bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; +#ifdef HAVE_DCBNL_IEEE if (adapter->ixgbe_ieee_pfc) pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); +#endif /* * We should set the drop enable bit if: * SR-IOV is enabled @@ -3383,8 +3488,6 @@ static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) } } -#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 - static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring) { @@ -3395,18 +3498,34 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, if (hw->mac.type == ixgbe_mac_82598EB) { u16 mask = adapter->ring_feature[RING_F_RSS].mask; + /* program one srrctl register per VMDq index */ + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) + mask = adapter->ring_feature[RING_F_VMDQ].mask; + /* * if VMDq is not active we must program one srrctl register * per RSS queue since we have enabled RDRXCTL.MVMEN */ reg_idx &= mask; + + /* divide by the first bit of the mask to get the indices */ + if (reg_idx) + reg_idx /= ((~mask) + 1) & mask; } /* configure header buffer length, needed for RSC */ srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; /* configure the packet buffer length */ - srrctl |= ixgbe_rx_bufsz(rx_ring) >> 
IXGBE_SRRCTL_BSIZEPKT_SHIFT; +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> + IXGBE_SRRCTL_BSIZEPKT_SHIFT; +#else + if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) + srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; +#endif /* configure descriptor type */ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; @@ -3432,6 +3551,43 @@ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) return 512; } +/** + * ixgbe_store_key - Write the RSS key to HW + * @adapter: device handle + * + * Write the RSS key stored in adapter.rss_key to HW. + */ +void ixgbe_store_key(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); +} + +/** + * ixgbe_init_rss_key - Initialize adapter RSS key + * @adapter: device handle + * + * Allocates and initializes the RSS key if it is not allocated. 
+ **/ +static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter) +{ + u32 *rss_key; + + if (!adapter->rss_key) { + rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL); + if (unlikely(!rss_key)) + return -ENOMEM; + + netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE); + adapter->rss_key = rss_key; + } + + return 0; +} + /** * ixgbe_store_reta - Write the RETA table to HW * @adapter: device handle @@ -3497,7 +3653,6 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) { - struct ixgbe_hw *hw = &adapter->hw; u32 i, j; u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; @@ -3510,8 +3665,7 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) rss_i = 4; /* Fill out hash function seeds */ - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); + ixgbe_store_key(adapter); /* Fill out redirection table */ memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); @@ -3536,7 +3690,7 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) /* Fill out hash function seeds */ for (i = 0; i < 10; i++) IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), - adapter->rss_key[i]); + *(adapter->rss_key + i)); /* Fill out the redirection table */ for (i = 0, j = 0; i < 64; i++, j++) { @@ -3549,11 +3703,13 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) ixgbe_store_vfreta(adapter); } + static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 mrqc = 0, rss_field = 0, vfmrqc = 0; u32 rxcsum; + u32 mrqc = 0, rss_field = 0; + u32 vfmrqc = 0; /* Disable indicating checksum in descriptor, enables RSS hash */ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); @@ -3566,7 +3722,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) } else { u8 tcs = netdev_get_num_tc(adapter->netdev); - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + if 
(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { if (tcs > 4) mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */ else if (tcs > 1) @@ -3584,10 +3740,13 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) else mrqc = IXGBE_MRQC_RSSEN; } + + /* Enable L3/L4 for Tx Switched packets */ + mrqc |= IXGBE_MRQC_L3L4TXSWEN; } /* Perform hash on these packet types */ - rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 | + rss_field = IXGBE_MRQC_RSS_FIELD_IPV4 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP | IXGBE_MRQC_RSS_FIELD_IPV6 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; @@ -3597,7 +3756,6 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; - netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); if ((hw->mac.type >= ixgbe_mac_X550) && (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { unsigned int pf_pool = adapter->num_vfs; @@ -3618,13 +3776,32 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) } } +/** + * ixgbe_clear_rscctl - disable RSC for the indicated ring + * @adapter: address of board private structure + * @ring: structure containing ring specific data + **/ +void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rscctrl; + u8 reg_idx = ring->reg_idx; + + rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); + rscctrl &= ~IXGBE_RSCCTL_RSCEN; + IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); + + clear_ring_rsc_enabled(ring); +} + /** * ixgbe_configure_rscctl - enable RSC for the indicated ring * @adapter: address of board private structure - * @index: index of ring to set + * @ring: structure containing ring specific data **/ -static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) +void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; u32 rscctrl; @@ -3640,11 +3817,27 @@ static void 
ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, * total size of max desc * buf_len is not greater * than 65536 */ +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT +#if (MAX_SKB_FRAGS >= 16) rscctrl |= IXGBE_RSCCTL_MAXDESC_16; +#elif (MAX_SKB_FRAGS >= 8) + rscctrl |= IXGBE_RSCCTL_MAXDESC_8; +#elif (MAX_SKB_FRAGS >= 4) + rscctrl |= IXGBE_RSCCTL_MAXDESC_4; +#else + rscctrl |= IXGBE_RSCCTL_MAXDESC_1; +#endif +#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + if (ring->rx_buf_len <= IXGBE_RXBUFFER_4K) + rscctrl |= IXGBE_RSCCTL_MAXDESC_16; + else if (ring->rx_buf_len <= IXGBE_RXBUFFER_8K) + rscctrl |= IXGBE_RSCCTL_MAXDESC_8; + else + rscctrl |= IXGBE_RSCCTL_MAXDESC_4; +#endif /* !CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); } -#define IXGBE_MAX_RX_DESC_POLL 10 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { @@ -3653,7 +3846,7 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, u32 rxdctl; u8 reg_idx = ring->reg_idx; - if (ixgbe_removed(hw->hw_addr)) + if (IXGBE_REMOVED(hw->hw_addr)) return; /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ if (hw->mac.type == ixgbe_mac_82598EB && @@ -3661,13 +3854,13 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, return; do { - usleep_range(1000, 2000); + msleep(1); rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); if (!wait_loop) { - e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " - "the polling period\n", reg_idx); + e_err(drv, "RXDCTL.ENABLE on Rx queue %d " + "not set within the polling period\n", reg_idx); } } @@ -3679,7 +3872,7 @@ void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, u32 rxdctl; u8 reg_idx = ring->reg_idx; - if (ixgbe_removed(hw->hw_addr)) + if (IXGBE_REMOVED(hw->hw_addr)) return; rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); rxdctl &= ~IXGBE_RXDCTL_ENABLE; @@ -3707,6 +3900,7 @@ 
void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; + union ixgbe_adv_rx_desc *rx_desc; u64 rdba = ring->dma; u32 rxdctl; u8 reg_idx = ring->reg_idx; @@ -3715,21 +3909,31 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); ixgbe_disable_rx_queue(adapter, ring); - IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); - IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32)); + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), rdba >> 32); IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), ring->count * sizeof(union ixgbe_adv_rx_desc)); /* Force flushing of IXGBE_RDLEN to prevent MDD */ IXGBE_WRITE_FLUSH(hw); + /* reset head and tail pointers */ IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + ring->next_to_alloc = 0; +#endif + ixgbe_configure_srrctl(adapter, ring); + /* In ESX, RSCCTL configuration is done by on demand */ ixgbe_configure_rscctl(adapter, ring); - if (hw->mac.type == ixgbe_mac_82598EB) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: /* * enable cache line friendly hardware writes: * PTHRESH=32 descriptors (half the internal cache), @@ -3739,8 +3943,37 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, */ rxdctl &= ~0x3FFFFF; rxdctl |= 0x080420; + break; + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT +#if (PAGE_SIZE < 8192) + rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | + IXGBE_RXDCTL_RLPML_EN); + + /* Limit the maximum frame size so we don't overrun the skb */ + if (ring_uses_build_skb(ring) && + 
!test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) + rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB | + IXGBE_RXDCTL_RLPML_EN; +#endif +#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + /* If operating in IOV mode set RLPML */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + break; + rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN; +#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + break; + default: + break; } + /* initialize Rx descriptor 0 */ + rx_desc = IXGBE_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + /* enable receive descriptor ring */ rxdctl |= IXGBE_RXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); @@ -3753,7 +3986,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int rss_i = adapter->ring_feature[RING_F_RSS].indices; - u16 pool; + int p; /* PSRTYPE must be initialized in non 82598 adapters */ u32 psrtype = IXGBE_PSRTYPE_TCPHDR | @@ -3766,12 +3999,71 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) return; if (rss_i > 3) - psrtype |= 2u << 29; + psrtype |= 2 << 29; else if (rss_i > 1) - psrtype |= 1u << 29; + psrtype |= 1 << 29; + + for (p = 0; p < adapter->num_rx_pools; p++) + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)), psrtype); +} + +/** + * ixgbe_configure_bridge_mode - common settings for configuring bridge mode + * @adapter - the private structure + * + * This function's purpose is to remove code duplication and configure some + * settings require to switch bridge modes. + **/ +static void ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw * hw = &adapter->hw; + unsigned int p; + u32 vmdctl; + + if (adapter->flags & IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE) { + /* disable Tx loopback, rely on switch hairpin mode */ + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, 0); + + /* must enable Rx switching replication to allow multicast + * packet reception on all VFs, and to enable source address + * pruning. 
+ */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + vmdctl |= IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); + + /* enable Rx source address pruning. Note, this requires + * replication to be enabled or else it does nothing. + */ + for (p = 0; p < (adapter->num_vfs + adapter->num_rx_pools); p++) { + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, + true, + p); + } + } else { + /* enable Tx loopback for internal VF/PF communication */ + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + + /* disable Rx switching replication unless we have SR-IOV + * virtual functions + */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + if (!adapter->num_vfs) + vmdctl &= ~IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); + + /* disable Rx source address pruning, since we don't expect to + * be receiving external loopback of our transmitted frames. + */ + for (p = 0; p < (adapter->num_vfs + adapter->num_rx_pools); p++) { + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, + false, + p); + } + } - for_each_set_bit(pool, &adapter->fwd_bitmask, 32) - IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); } static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) @@ -3781,32 +4073,60 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) u32 gcr_ext, vmdctl; int i; - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) return; - vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); - vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; - vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; - vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT; - vmdctl |= IXGBE_VT_CTL_REPLEN; - IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); - - vf_shift = VMDQ_P(0) % 32; - reg_offset = (VMDQ_P(0) >= 32) ? 
1 : 0; - - /* Enable only the PF's pool for Tx/Rx */ - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift)); - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift)); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); - if (adapter->bridge_mode == BRIDGE_MODE_VEB) - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + vmdctl |= IXGBE_VT_CTL_VT_ENABLE; + vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; + vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT; + if (adapter->num_vfs) + vmdctl |= IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); - /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ - hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); + for (i = 1; i < adapter->num_rx_pools; i++) { + u32 vmolr; + int pool = VMDQ_P(i); - /* clear VLAN promisc flag so VFTA will be updated if necessary */ - adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; + /* accept untagged packets until a vlan tag is + * specifically set for the VMDQ queue/pool + */ + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); + vmolr |= IXGBE_VMOLR_AUPE; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); + } + + vf_shift = VMDQ_P(0) % 32; + reg_offset = (VMDQ_P(0) >= 32) ? 
1 : 0; + + /* Enable only the PF pools for Tx/Rx */ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); + + /* clear VLAN promisc flag so VFTA + * will be updated if necessary + */ + adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; + break; + default: + break; + } + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return; /* * Set up VF register offsets for selected VT Mode, @@ -3826,15 +4146,21 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); + /* configure default bridge settings */ + ixgbe_configure_bridge_mode(adapter); +#if IS_ENABLED(CONFIG_PCI_IOV) for (i = 0; i < adapter->num_vfs; i++) { /* configure spoof checking */ ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, adapter->vfinfo[i].spoofchk_enabled); +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN /* Enable/Disable RSS query feature */ ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, - adapter->vfinfo[i].rss_query_enabled); + adapter->vfinfo[i].rss_query_enabled); +#endif /* HAVE_NDO_SET_VF_RSS_QUERY_EN */ } +#endif /* CONFIG_PCI_IOV */ } static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) @@ -3845,14 +4171,25 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) struct ixgbe_ring *rx_ring; int i; u32 mhadd, hlreg0; +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + int rx_buf_len; +#endif + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + max_frame += IXGBE_TS_HDR_LEN; + default: + break; + } -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* adjust max frame to be able to do baby jumbo for FCoE */ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; - -#endif /* IXGBE_FCOE */ 
+#endif /* CONFIG_FCOE */ /* adjust max frame to be at least the size of a standard frame */ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) @@ -3866,8 +4203,32 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); } +#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + /* MHADD will allow an extra 4 bytes past for vlan tagged frames */ + max_frame += VLAN_HLEN; + + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && + (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) { + rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; + /* + * Make best use of allocation by using all but 1K of a + * power of 2 allocation that will be used for skb->head. + */ + } else if (max_frame <= IXGBE_RXBUFFER_3K) { + rx_buf_len = IXGBE_RXBUFFER_3K; + } else if (max_frame <= IXGBE_RXBUFFER_7K) { + rx_buf_len = IXGBE_RXBUFFER_7K; + } else if (max_frame <= IXGBE_RXBUFFER_15K) { + rx_buf_len = IXGBE_RXBUFFER_15K; + } else { + rx_buf_len = IXGBE_MAX_RXBUFFER; + } + +#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ + /* set jumbo enable since MHADD.MFS is keeping size locked at + * max_frame + */ hlreg0 |= IXGBE_HLREG0_JUMBOEN; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); @@ -3877,10 +4238,48 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) */ for (i = 0; i < adapter->num_rx_queues; i++) { rx_ring = adapter->rx_ring[i]; + + clear_ring_rsc_enabled(rx_ring); if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) set_ring_rsc_enabled(rx_ring); - else - clear_ring_rsc_enabled(rx_ring); + +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); + clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); +#if IS_ENABLED(CONFIG_FCOE) + + if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) + set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); +#endif +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + + if (adapter->flags2 & 
IXGBE_FLAG2_RX_LEGACY) + continue; + + set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); + +#if (PAGE_SIZE < 8192) + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); + + if (IXGBE_2K_TOO_SMALL_WITH_PADDING || + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) + set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); +#endif +#else /* !HAVE_SWIOTLB_SKIP_CPU_SYNC */ + + adapter->flags2 |= IXGBE_FLAG2_RX_LEGACY; +#endif /* !HAVE_SWIOTLB_SKIP_CPU_SYNC */ +#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + + rx_ring->rx_buf_len = rx_buf_len; +#if IS_ENABLED(CONFIG_FCOE) + + if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state) && + (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)) + rx_ring->rx_buf_len = IXGBE_FCOE_JUMBO_FRAME_SIZE; +#endif /* CONFIG_FCOE */ +#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ } } @@ -3890,25 +4289,12 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); switch (hw->mac.type) { - case ixgbe_mac_82598EB: - /* - * For VMDq support of different descriptor types or - * buffer sizes through the use of multiple SRRCTL - * registers, RDRXCTL.MVMEN must be set to 1 - * - * also, the manual doesn't mention it clearly but DCA hints - * will only use queue 0's tags unless this bit is set. 
Side - * effects of setting this bit are only that SRRCTL must be - * fully programmed [0..15] - */ - rdrxctl |= IXGBE_RDRXCTL_MVMEN; - break; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: if (adapter->num_vfs) rdrxctl |= IXGBE_RDRXCTL_PSP; - /* fall through for older HW */ + /* fall through */ case ixgbe_mac_82599EB: case ixgbe_mac_X540: /* Disable RSC for ACK packets */ @@ -3919,6 +4305,19 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; break; + case ixgbe_mac_82598EB: + /* + * For VMDq support of different descriptor types or + * buffer sizes through the use of multiple SRRCTL + * registers, RDRXCTL.MVMEN must be set to 1 + * + * also, the manual doesn't mention it clearly but DCA hints + * will only use queue 0's tags unless this bit is set. Side + * effects of setting this bit are only that SRRCTL must be + * fully programmed [0..15] + */ + rdrxctl |= IXGBE_RDRXCTL_MVMEN; + break; default: /* We should do nothing since we don't know this hardware */ return; @@ -3940,7 +4339,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) u32 rxctrl, rfctl; /* disable receives while setting up the descriptors */ - hw->mac.ops.disable_rx(hw); + ixgbe_disable_rx(hw); ixgbe_setup_psrtype(adapter); ixgbe_setup_rdrxctl(adapter); @@ -3978,22 +4377,73 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) hw->mac.ops.enable_rx_dma(hw, rxctrl); } +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_TX static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, - __be16 proto, u16 vid) + __always_unused __be16 proto, u16 vid) +#else /* !NETIF_F_HW_VLAN_CTAG_TX */ +static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif /* NETIF_F_HW_VLAN_CTAG_TX */ +#else /* !HAVE_INT_NDO_VLAN_RX_ADD_VID */ 
+static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); /* add VID to filter table */ - if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) - hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid); + if (hw->mac.ops.set_vfta) { +#ifndef HAVE_VLAN_RX_REGISTER + if (vid < VLAN_N_VID) + set_bit(vid, adapter->active_vlans); +#endif + + if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + hw->mac.ops.set_vfta(hw, vid, pool_ndx, true, !!vid); + + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED && + adapter->hw.mac.type != ixgbe_mac_82598EB) { + int i; - set_bit(vid, adapter->active_vlans); + /* enable vlan id for all pools */ + for (i = 1; i < adapter->num_rx_pools; i++) + hw->mac.ops.set_vfta(hw, vid, VMDQ_P(i), true, +#ifdef HAVE_VLAN_RX_REGISTER + false); +#else + true); +#endif + } + } +#ifndef HAVE_NETDEV_VLAN_FEATURES + /* + * Copy feature flags from netdev to the vlan netdev for this vid. + * This allows things like TSO to bubble down to our vlan device. + * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so + * we will not have a netdev that needs updating. + */ + if (adapter->vlgrp) { + struct vlan_group *vlgrp = adapter->vlgrp; + struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid); + if (v_netdev) { + v_netdev->features |= netdev->features; + vlan_group_set_device(vlgrp, vid, v_netdev); + } + } +#endif /* HAVE_NETDEV_VLAN_FEATURES */ +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID return 0; +#endif } +#if defined(HAVE_VLAN_RX_REGISTER) && defined(CONFIG_PCI_IOV) +int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) +#else static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) +#endif { u32 vlvf; int idx; @@ -4026,7 +4476,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) * entry other than the PF. 
*/ word = idx * 2 + (VMDQ_P(0) / 32); - bits = ~BIT(VMDQ_P(0) % 32); + bits = ~(1 << (VMDQ_P(0)) % 32); bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); /* Disable the filter so this falls into the default pool. */ @@ -4037,30 +4487,77 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) } } +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_RX static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, - __be16 proto, u16 vid) + __always_unused __be16 proto, u16 vid) +#else /* !NETIF_F_HW_VLAN_CTAG_RX */ +static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ +#else +static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); + + /* User is not allowed to remove vlan ID 0 */ + if (!vid) +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#else + return; +#endif + +#ifdef HAVE_VLAN_RX_REGISTER + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_disable(adapter); + vlan_group_set_device(adapter->vlgrp, vid, NULL); + + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable(adapter, true, true); + +#endif /* HAVE_VLAN_RX_REGISTER */ /* remove VID from filter table */ - if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) - hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true); + if (hw->mac.ops.set_vfta) { + if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + hw->mac.ops.set_vfta(hw, vid, pool_ndx, false, true); - clear_bit(vid, adapter->active_vlans); + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED && + adapter->hw.mac.type != ixgbe_mac_82598EB) { + int i; + + /* remove vlan id from all pools */ + for (i = 1; i < adapter->num_rx_pools; i++) + hw->mac.ops.set_vfta(hw, vid, VMDQ_P(i), false, + true); + } + } +#ifndef HAVE_VLAN_RX_REGISTER + clear_bit(vid, adapter->active_vlans); +#endif +#ifdef 
HAVE_INT_NDO_VLAN_RX_ADD_VID return 0; +#endif } +#ifdef HAVE_8021P_SUPPORT /** - * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping + * ixgbe_vlan_strip_disable - helper to disable vlan tag stripping * @adapter: driver data */ -static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) +void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 vlnctrl; - int i, j; + int i; + + /* leave vlan tag stripping enabled for DCB */ + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) + return; switch (hw->mac.type) { case ixgbe_mac_82598EB: @@ -4072,16 +4569,12 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *ring = adapter->rx_ring[i]; - - if (ring->l2_accel_priv) - continue; - j = ring->reg_idx; - vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); + u8 reg_idx = adapter->rx_ring[i]->reg_idx; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); vlnctrl &= ~IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), vlnctrl); } break; default: @@ -4089,15 +4582,16 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) } } +#endif /* HAVE_8021P_SUPPORT */ /** - * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping + * ixgbe_vlan_strip_enable - helper to enable vlan tag stripping * @adapter: driver data */ -static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) +void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 vlnctrl; - int i, j; + int i; switch (hw->mac.type) { case ixgbe_mac_82598EB: @@ -4109,16 +4603,12 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + 
case ixgbe_mac_X550EM_a: for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *ring = adapter->rx_ring[i]; - - if (ring->l2_accel_priv) - continue; - j = ring->reg_idx; - vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); + u8 reg_idx = adapter->rx_ring[i]->reg_idx; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); vlnctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), vlnctrl); } break; default: @@ -4126,6 +4616,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) } } +#ifndef HAVE_VLAN_RX_REGISTER static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -4134,7 +4625,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { - /* For VMDq and SR-IOV we must leave VLAN filtering enabled */ + /* we need to keep the VLAN filter on in SRIOV */ vlnctrl |= IXGBE_VLNCTRL_VFE; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); } else { @@ -4159,7 +4650,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); u32 vlvfb = IXGBE_READ_REG(hw, reg_offset); - vlvfb |= BIT(VMDQ_P(0) % 32); + vlvfb |= 1 << (VMDQ_P(0) % 32); IXGBE_WRITE_REG(hw, reg_offset, vlvfb); } @@ -4189,7 +4680,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) if (vlvf) { /* record VLAN ID in VFTA */ - vfta[(vid - vid_start) / 32] |= BIT(vid % 32); + vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); /* if PF is part of this then continue */ if (test_bit(vid, adapter->active_vlans)) @@ -4198,7 +4689,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) /* remove PF from the pool */ word = i * 2 + VMDQ_P(0) / 32; - bits = ~BIT(VMDQ_P(0) % 32); + bits = ~(1 << (VMDQ_P(0) % 32)); bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); 
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits); } @@ -4220,7 +4711,7 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; u32 vlnctrl, i; - /* Set VLAN filtering to enabled */ + /* configure vlan filtering */ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); vlnctrl |= IXGBE_VLNCTRL_VFE; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); @@ -4239,44 +4730,184 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE) ixgbe_scrub_vfta(adapter, i); } +#endif /* HAVE_VLAN_RX_REGISTER */ -static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) +#ifdef HAVE_VLAN_RX_REGISTER +static void ixgbe_vlan_mode(struct net_device *netdev, struct vlan_group *grp) +#else +void ixgbe_vlan_mode(struct net_device *netdev, u32 features) +#endif { - u16 vid = 1; +#if defined(HAVE_VLAN_RX_REGISTER) || defined(HAVE_8021P_SUPPORT) + struct ixgbe_adapter *adapter = netdev_priv(netdev); +#endif +#ifdef HAVE_8021P_SUPPORT + bool enable; +#endif - ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); +#ifdef HAVE_VLAN_RX_REGISTER + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_disable(adapter); - for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) - ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); + adapter->vlgrp = grp; + + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable(adapter, true, true); +#endif +#ifdef HAVE_8021P_SUPPORT +#ifdef HAVE_VLAN_RX_REGISTER + enable = (grp || (adapter->flags & IXGBE_FLAG_DCB_ENABLED)); +#else +#ifdef NETIF_F_HW_VLAN_CTAG_RX + enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); +#else + enable = !!(features & NETIF_F_HW_VLAN_RX); +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ +#endif /* HAVE_VLAN_RX_REGISTER */ + if (enable) + /* enable VLAN tag insert/strip */ + ixgbe_vlan_strip_enable(adapter); + else + /* disable VLAN tag insert/strip */ + ixgbe_vlan_strip_disable(adapter); + +#endif /* 
HAVE_8021P_SUPPORT */ } -/** - * ixgbe_write_mc_addr_list - write multicast addresses to MTA - * @netdev: network interface device structure - * - * Writes multicast address list to the MTA hash table. - * Returns: -ENOMEM on failure - * 0 on no addresses written - * X on writing X addresses to MTA - **/ -static int ixgbe_write_mc_addr_list(struct net_device *netdev) +static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; + u16 vid = 1; +#ifdef HAVE_VLAN_RX_REGISTER - if (!netif_running(netdev)) - return 0; + ixgbe_vlan_mode(adapter->netdev, adapter->vlgrp); - if (hw->mac.ops.update_mc_addr_list) - hw->mac.ops.update_mc_addr_list(hw, netdev); + /* + * add vlan ID 0 and enable vlan tag stripping so we + * always accept priority-tagged traffic + */ +#ifdef NETIF_F_HW_VLAN_CTAG_RX + ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); +#else + ixgbe_vlan_rx_add_vid(adapter->netdev, 0); +#endif +#ifndef HAVE_8021P_SUPPORT + ixgbe_vlan_strip_enable(adapter); +#endif + + if (adapter->vlgrp) { + for (; vid < VLAN_N_VID; vid++) { + if (!vlan_group_get_device(adapter->vlgrp, vid)) + continue; +#ifdef NETIF_F_HW_VLAN_CTAG_RX + ixgbe_vlan_rx_add_vid(adapter->netdev, + htons(ETH_P_8021Q), vid); +#else + ixgbe_vlan_rx_add_vid(adapter->netdev, vid); +#endif + } + } +#else /* !HAVE_VLAN_RX_REGISTER */ + +#ifdef NETIF_F_HW_VLAN_CTAG_RX + ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); +#else + ixgbe_vlan_rx_add_vid(adapter->netdev, 0); +#endif + + for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) +#ifdef NETIF_F_HW_VLAN_CTAG_RX + ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +#else + ixgbe_vlan_rx_add_vid(adapter->netdev, vid); +#endif +#endif /* HAVE_VLAN_RX_REGISTER */ +} + +#endif +static u8 *ixgbe_addr_list_itr(struct ixgbe_hw __maybe_unused *hw, u8 **mc_addr_ptr, u32 *vmdq) +{ +#ifdef NETDEV_HW_ADDR_T_MULTICAST + struct 
netdev_hw_addr *mc_ptr; +#else + struct dev_mc_list *mc_ptr; +#endif +#ifdef CONFIG_PCI_IOV + struct ixgbe_adapter *adapter = hw->back; +#endif /* CONFIG_PCI_IOV */ + u8 *addr = *mc_addr_ptr; + + /* VMDQ_P implicitely uses the adapter struct when CONFIG_PCI_IOV is + * defined, so we have to wrap the pointer above correctly to prevent + * a warning. + */ + *vmdq = VMDQ_P(0); + +#ifdef NETDEV_HW_ADDR_T_MULTICAST + mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); + if (mc_ptr->list.next) { + struct netdev_hw_addr *ha; + + ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list); + *mc_addr_ptr = ha->addr; + } +#else + mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]); + if (mc_ptr->next) + *mc_addr_ptr = mc_ptr->next->dmi_addr; +#endif else + *mc_addr_ptr = NULL; + + return addr; +} + +/** + * ixgbe_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +int ixgbe_write_mc_addr_list(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; +#ifdef NETDEV_HW_ADDR_T_MULTICAST + struct netdev_hw_addr *ha; +#endif + u8 *addr_list = NULL; + int addr_count = 0; + + if (!hw->mac.ops.update_mc_addr_list) return -ENOMEM; + if (!netif_running(netdev)) + return 0; + + + if (netdev_mc_empty(netdev)) { + hw->mac.ops.update_mc_addr_list(hw, NULL, 0, + ixgbe_addr_list_itr, true); + } else { +#ifdef NETDEV_HW_ADDR_T_MULTICAST + ha = list_first_entry(&netdev->mc.list, + struct netdev_hw_addr, list); + addr_list = ha->addr; +#else + addr_list = netdev->mc_list->dmi_addr; +#endif + addr_count = netdev_mc_count(netdev); + + hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, + ixgbe_addr_list_itr, true); + } + #ifdef CONFIG_PCI_IOV ixgbe_restore_vf_multicasts(adapter); #endif - - 
return netdev_mc_count(netdev); + return addr_count; } #ifdef CONFIG_PCI_IOV @@ -4298,8 +4929,8 @@ void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter) hw->mac.ops.clear_rar(hw, i); } } - #endif + static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) { struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; @@ -4322,21 +4953,7 @@ static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) } } -static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) -{ - struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; - struct ixgbe_hw *hw = &adapter->hw; - int i; - - for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { - mac_table->state |= IXGBE_MAC_STATE_MODIFIED; - mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; - } - - ixgbe_sync_mac_table(adapter); -} - -static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool) +int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool) { struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; @@ -4365,7 +4982,7 @@ static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter) struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; - memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN); + ether_addr_copy(mac_table->addr, hw->mac.addr); mac_table->pool = VMDQ_P(0); mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE; @@ -4385,8 +5002,9 @@ int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, return -EINVAL; for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { - if (mac_table->state & IXGBE_MAC_STATE_IN_USE) + if (mac_table->state & IXGBE_MAC_STATE_IN_USE) { continue; + } ether_addr_copy(mac_table->addr, addr); mac_table->pool = pool; @@ -4402,6 +5020,20 @@ int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, return -ENOMEM; } +static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) +{ + struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; + 
struct ixgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { + mac_table->state |= IXGBE_MAC_STATE_MODIFIED; + mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; + } + + ixgbe_sync_mac_table(adapter); +} + int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, const u8 *addr, u16 pool) { @@ -4412,7 +5044,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, if (is_zero_ether_addr(addr)) return -EINVAL; - /* search table for addr, if found clear IN_USE flag and sync */ + /* search table for addr, if found clear IN USE flag and sync */ for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { /* we can only delete an entry if it is in use */ if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE)) @@ -4434,6 +5066,8 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, return -ENOMEM; } + +#ifdef HAVE_SET_RX_MODE /** * ixgbe_write_uc_addr_list - write unicast addresses to RAR table * @netdev: network interface device structure @@ -4443,7 +5077,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, * 0 on no addresses written * X on writing X addresses to the RAR table **/ -static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) +int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int count = 0; @@ -4453,10 +5087,19 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) return -ENOMEM; if (!netdev_uc_empty(netdev)) { +#ifdef NETDEV_HW_ADDR_T_UNICAST struct netdev_hw_addr *ha; +#else + struct dev_mc_list *ha; +#endif netdev_for_each_uc_addr(ha, netdev) { +#ifdef NETDEV_HW_ADDR_T_UNICAST ixgbe_del_mac_filter(adapter, ha->addr, vfn); ixgbe_add_mac_filter(adapter, ha->addr, vfn); +#else + ixgbe_del_mac_filter(adapter, ha->da_addr, vfn); + ixgbe_add_mac_filter(adapter, ha->da_addr, vfn); +#endif count++; } } @@ -4482,6 +5125,7 @@ static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) 
return 0; } +#endif /** * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure @@ -4496,33 +5140,62 @@ void ixgbe_set_rx_mode(struct net_device *netdev) struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; +#if defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) + u32 vlnctrl; +#endif +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) || defined(NETIF_F_HW_VLAN_FILTER) netdev_features_t features = netdev->features; +#endif int count; /* Check for Promiscuous and All Multicast modes */ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); +#if defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); +#endif /* set all bits that we expect to always be set */ - fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ fctrl |= IXGBE_FCTRL_BAM; fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ fctrl |= IXGBE_FCTRL_PMCF; /* clear the bits we are changing the status of */ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); +#if defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) + vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); +#endif if (netdev->flags & IFF_PROMISC) { hw->addr_ctrl.user_set_promisc = true; fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); vmolr |= IXGBE_VMOLR_MPE; +#ifdef HAVE_VLAN_RX_REGISTER + /* Only disable hardware filter vlans in promiscuous mode + * if SR-IOV and VMDQ are disabled - otherwise ensure + * that hardware VLAN filters remain enabled. 
+ */ + if ((adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | + IXGBE_FLAG_SRIOV_ENABLED))) + vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); +#endif +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; +#endif +#ifdef NETIF_F_HW_VLAN_FILTER + features &= ~NETIF_F_HW_VLAN_FILTER; +#endif } else { if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; vmolr |= IXGBE_VMOLR_MPE; } hw->addr_ctrl.user_set_promisc = false; +#if defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) + /* enable hardware vlan filtering */ + vlnctrl |= IXGBE_VLNCTRL_VFE; +#endif } +#ifdef HAVE_SET_RX_MODE /* * Write addresses to available RAR registers, if there is not * sufficient space to store all the addresses then enable @@ -4533,7 +5206,9 @@ void ixgbe_set_rx_mode(struct net_device *netdev) vmolr |= IXGBE_VMOLR_ROPE; } - /* Write addresses to the MTA, if the attempt fails +#endif + /* + * Write addresses to the MTA, if the attempt fails * then we should just turn on promiscuous mode so * that we can at least receive multicast traffic */ @@ -4552,55 +5227,102 @@ void ixgbe_set_rx_mode(struct net_device *netdev) IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr); } - /* This is useful for sniffing bad packets. 
*/ - if (features & NETIF_F_RXALL) { - /* UPE and MPE will be handled by normal PROMISC logic - * in e1000e_set_rx_mode */ - fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */ - IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */ - IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */ - - fctrl &= ~(IXGBE_FCTRL_DPF); - /* NOTE: VLAN filtering is disabled by setting PROMISC */ - } - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); +#ifdef HAVE_8021P_SUPPORT +#ifdef NETIF_F_HW_VLAN_CTAG_RX if (features & NETIF_F_HW_VLAN_CTAG_RX) +#else + if (features & NETIF_F_HW_VLAN_RX) +#endif ixgbe_vlan_strip_enable(adapter); else ixgbe_vlan_strip_disable(adapter); +#endif /* HAVE_8021P_SUPPORT */ +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) if (features & NETIF_F_HW_VLAN_CTAG_FILTER) ixgbe_vlan_promisc_disable(adapter); else ixgbe_vlan_promisc_enable(adapter); +#elif defined(NETIF_F_HW_VLAN_FILTER) && !defined(HAVE_VLAN_RX_REGISTER) + if (features & NETIF_F_HW_VLAN_FILTER) + ixgbe_vlan_promisc_disable(adapter); + else + ixgbe_vlan_promisc_enable(adapter); +#elif defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); +#endif /* NETIF_F_HW_VLAN_CTAG_FILTER */ } static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) { + struct ixgbe_q_vector *q_vector; int q_idx; for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; +#ifdef HAVE_NDO_BUSY_POLL ixgbe_qv_init_lock(adapter->q_vector[q_idx]); - napi_enable(&adapter->q_vector[q_idx]->napi); +#endif + napi_enable(&q_vector->napi); } } static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) { + struct ixgbe_q_vector *q_vector; int q_idx; for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { - napi_disable(&adapter->q_vector[q_idx]->napi); - while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) { + q_vector = adapter->q_vector[q_idx]; + napi_disable(&q_vector->napi); +#ifdef HAVE_NDO_BUSY_POLL + while(!ixgbe_qv_disable(adapter->q_vector[q_idx])) { pr_info("QV 
%d locked\n", q_idx); usleep_range(1000, 20000); } +#endif + } +} + +#ifdef HAVE_DCBNL_IEEE +s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame) +{ + __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; + __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; + int i; + + /* naively give each TC a bwg to map onto CEE hardware */ + __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; + + /* Map TSA onto CEE prio type */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + prio_type[i] = 2; + break; + case IEEE_8021QAZ_TSA_ETS: + prio_type[i] = 0; + break; + default: + /* Hardware only supports priority strict or + * ETS transmission selection algorithms if + * we receive some other value from dcbnl + * throw an error + */ + return -EINVAL; + } } + + ixgbe_dcb_calculate_tc_credits(ets->tc_tx_bw, refill, max, max_frame); + return ixgbe_dcb_hw_config(hw, refill, max, + bwg_id, prio_type, ets->prio_tc); } +#endif /* HAVE_DCBNL_IEEE */ -static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask) +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) +void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask) { struct ixgbe_hw *hw = &adapter->hw; u32 vxlanctrl; @@ -4614,53 +5336,127 @@ static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask) if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK) adapter->vxlan_port = 0; - +#ifdef HAVE_UDP_ENC_RX_OFFLOAD if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK) adapter->geneve_port = 0; +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ } +#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ -#ifdef CONFIG_IXGBE_DCB -/** - * ixgbe_configure_dcb - Configure DCB hardware +#ifdef NETIF_F_GSO_PARTIAL +/* NETIF_F_GSO_IPXIP4/6 may not be defined in all distributions */ +#ifndef NETIF_F_GSO_IPXIP4 +#define NETIF_F_GSO_IPXIP4 0 +#endif +#ifndef NETIF_F_GSO_IPXIP6 +#define NETIF_F_GSO_IPXIP6 0 
+#endif +#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) +#endif /* NETIF_F_GSO_PARTIAL */ + +static inline unsigned long ixgbe_tso_features(void) +{ + unsigned long features = 0; + +#ifdef NETIF_F_TSO + features |= NETIF_F_TSO; +#endif /* NETIF_F_TSO */ +#ifdef NETIF_F_TSO6 + features |= NETIF_F_TSO6; +#endif /* NETIF_F_TSO6 */ +#ifdef NETIF_F_GSO_PARTIAL + features |= NETIF_F_GSO_PARTIAL | IXGBE_GSO_PARTIAL_FEATURES; +#endif + + return features; +} + +/* + * ixgbe_configure_dcb - Configure DCB hardware support * @adapter: ixgbe adapter struct * - * This is called by the driver on open to configure the DCB hardware. - * This is also called by the gennetlink interface when reconfiguring - * the DCB state. + * Called when the driver opens or needs to reconfigure DCB related bits. */ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + struct net_device *netdev = adapter->netdev; + + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + /* The following workaround for 82598EB was originaly hidden inside a + * kcompat definition of netif_set_gso_max_size. This workaround is + * necessary as the 82598EB hardware does not support TSO and DCB + * unless the stack TSO maximum segment size can be reduced. Older + * kernels do not support the requisite interface, and thus need TSO + * disabled if we want to support DCB. + */ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { - if (hw->mac.type == ixgbe_mac_82598EB) - netif_set_gso_max_size(adapter->netdev, 65536); + if (hw->mac.type == ixgbe_mac_82598EB) { +#ifdef NETDEV_CAN_SET_GSO_MAX_SIZE + netif_set_gso_max_size(netdev, 65536); +#else + /* We previously disabled TSO, so we should enable it + * now. 
*/ + netdev->features |= ixgbe_tso_features(); +#ifdef NETIF_F_GSO_PARTIAL + netdev->gso_partial_features = + IXGBE_GSO_PARTIAL_FEATURES; +#endif +#endif /* NETDEV_CAN_SET_GSO_MAX_SIZE */ + } return; } - if (hw->mac.type == ixgbe_mac_82598EB) - netif_set_gso_max_size(adapter->netdev, 32768); - -#ifdef IXGBE_FCOE - if (adapter->netdev->features & NETIF_F_FCOE_MTU) - max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); + if (hw->mac.type == ixgbe_mac_82598EB) { +#ifdef NETDEV_CAN_SET_GSO_MAX_SIZE + netif_set_gso_max_size(netdev, 32768); +#else + /* Simply disable TSO since we cannot change the maximum + * segment size. */ + netdev->features &= ~ixgbe_tso_features(); +#ifdef NETIF_F_GSO_PARTIAL + netdev->gso_partial_features = 0; #endif +#endif /* NETDEV_CAN_SET_GSO_MAX_SIZE */ + } + +#if IS_ENABLED(CONFIG_FCOE) + if (netdev->features & NETIF_F_FCOE_MTU) + max_frame = max_t(int, max_frame, + IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif /* CONFIG_FCOE */ + +#ifdef HAVE_DCBNL_IEEE + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) { + if (adapter->ixgbe_ieee_ets) + ixgbe_dcb_hw_ets(&adapter->hw, + adapter->ixgbe_ieee_ets, + max_frame); + + if (adapter->ixgbe_ieee_pfc && adapter->ixgbe_ieee_ets) { + struct ieee_pfc *pfc = adapter->ixgbe_ieee_pfc; + u8 *tc = adapter->ixgbe_ieee_ets->prio_tc; - /* reconfigure the hardware */ - if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) { - ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, - DCB_TX_CONFIG); - ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, - DCB_RX_CONFIG); - ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); - } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) { - ixgbe_dcb_hw_ets(&adapter->hw, - adapter->ixgbe_ieee_ets, - max_frame); - ixgbe_dcb_hw_pfc_config(&adapter->hw, - adapter->ixgbe_ieee_pfc->pfc_en, - adapter->ixgbe_ieee_ets->prio_tc); + ixgbe_dcb_config_pfc(&adapter->hw, pfc->pfc_en, tc); + } + } else +#endif /* HAVE_DCBNL_IEEE */ + { + ixgbe_dcb_calculate_tc_credits_cee(hw, + 
&adapter->dcb_cfg, + max_frame, + IXGBE_DCB_TX_CONFIG); + ixgbe_dcb_calculate_tc_credits_cee(hw, + &adapter->dcb_cfg, + max_frame, + IXGBE_DCB_RX_CONFIG); + ixgbe_dcb_hw_config_cee(hw, &adapter->dcb_cfg); } /* Enable RSS Hash per TC */ @@ -4677,16 +5473,149 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111); } } -#endif +#ifndef IXGBE_NO_LLI +static void ixgbe_configure_lli_82599(struct ixgbe_adapter *adapter) +{ + u16 port; + + if (adapter->lli_etype) { + IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), + (IXGBE_IMIR_LLI_EN_82599 | + IXGBE_IMIR_SIZE_BP_82599 | + IXGBE_IMIR_CTRL_BP_82599)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQS(0), IXGBE_ETQS_LLI); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQF(0), + (adapter->lli_etype | IXGBE_ETQF_FILTER_EN)); + } + + if (adapter->lli_port) { + port = swab16(adapter->lli_port); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), + (IXGBE_IMIR_LLI_EN_82599 | + IXGBE_IMIR_SIZE_BP_82599 | + IXGBE_IMIR_CTRL_BP_82599)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0), + (IXGBE_FTQF_POOL_MASK_EN | + (IXGBE_FTQF_PRIORITY_MASK << + IXGBE_FTQF_PRIORITY_SHIFT) | + (IXGBE_FTQF_DEST_PORT_MASK << + IXGBE_FTQF_5TUPLE_MASK_SHIFT))); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_SDPQF(0), (port << 16)); + } + + if (adapter->flags & IXGBE_FLAG_LLI_PUSH) { + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), + (IXGBE_IMIR_LLI_EN_82599 | + IXGBE_IMIR_SIZE_BP_82599 | + IXGBE_IMIR_CTRL_PSH_82599 | + IXGBE_IMIR_CTRL_SYN_82599 | + IXGBE_IMIR_CTRL_URG_82599 | + IXGBE_IMIR_CTRL_ACK_82599 | + IXGBE_IMIR_CTRL_RST_82599 | + IXGBE_IMIR_CTRL_FIN_82599)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_LLITHRESH, + 0xfc000000); + break; + case ixgbe_mac_X540: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), + (IXGBE_IMIR_LLI_EN_82599 | + IXGBE_IMIR_SIZE_BP_82599 | + IXGBE_IMIR_CTRL_PSH_82599)); + break; + default: + break; + } + 
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0), + (IXGBE_FTQF_POOL_MASK_EN | + (IXGBE_FTQF_PRIORITY_MASK << + IXGBE_FTQF_PRIORITY_SHIFT) | + (IXGBE_FTQF_5TUPLE_MASK_MASK << + IXGBE_FTQF_5TUPLE_MASK_SHIFT))); + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_SYNQF, 0x80000100); + } + + if (adapter->lli_size) { + IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), + (IXGBE_IMIR_LLI_EN_82599 | + IXGBE_IMIR_CTRL_BP_82599)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_LLITHRESH, + adapter->lli_size); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0), + (IXGBE_FTQF_POOL_MASK_EN | + (IXGBE_FTQF_PRIORITY_MASK << + IXGBE_FTQF_PRIORITY_SHIFT) | + (IXGBE_FTQF_5TUPLE_MASK_MASK << + IXGBE_FTQF_5TUPLE_MASK_SHIFT))); + } + + if (adapter->lli_vlan_pri) { + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIRVP, + (IXGBE_IMIRVP_PRIORITY_EN | + adapter->lli_vlan_pri)); + } +} + +static void ixgbe_configure_lli(struct ixgbe_adapter *adapter) +{ + u16 port; + + /* lli should only be enabled with MSI-X and MSI */ + if (!(adapter->flags & IXGBE_FLAG_MSI_ENABLED) && + !(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + return; + /* LLI not supported on X550 and X550EM_x*/ + if ((adapter->hw.mac.type == ixgbe_mac_X550) || + (adapter->hw.mac.type == ixgbe_mac_X550EM_x)) + return; + /* LLI not supported on X550EM_a */ + if (adapter->hw.mac.type == ixgbe_mac_X550EM_a) + return; + if (adapter->hw.mac.type != ixgbe_mac_82598EB) { + ixgbe_configure_lli_82599(adapter); + return; + } + + if (adapter->lli_port) { + /* use filter 0 for port */ + port = swab16(adapter->lli_port); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(0), + (port | IXGBE_IMIR_PORT_IM_EN)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(0), + (IXGBE_IMIREXT_SIZE_BP | + IXGBE_IMIREXT_CTRL_BP)); + } + + if (adapter->flags & IXGBE_FLAG_LLI_PUSH) { + /* use filter 1 for push flag */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(1), + (IXGBE_IMIR_PORT_BP | IXGBE_IMIR_PORT_IM_EN)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(1), + (IXGBE_IMIREXT_SIZE_BP | + 
IXGBE_IMIREXT_CTRL_PSH)); + } + + if (adapter->lli_size) { + /* use filter 2 for size */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(2), + (IXGBE_IMIR_PORT_BP | IXGBE_IMIR_PORT_IM_EN)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(2), + (adapter->lli_size | IXGBE_IMIREXT_CTRL_BP)); + } +} + +#endif /* IXGBE_NO_LLI */ /* Additional bittime to account for IXGBE framing */ #define IXGBE_ETH_FRAMING 20 -/** +/* * ixgbe_hpbthresh - calculate high water mark for flow control * * @adapter: board private structure to calculate for - * @pb: packet buffer to calculate + * @pb - packet buffer to calculate */ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) { @@ -4698,20 +5627,20 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) /* Calculate max LAN frame size */ tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* FCoE traffic class uses FCOE jumbo frames */ if ((dev->features & NETIF_F_FCOE_MTU) && (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && - (pb == ixgbe_fcoe_get_tc(adapter))) + (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; -#endif +#endif /* CONFIG_FCOE */ /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: dv_id = IXGBE_DV_X540(link, tc); break; default: @@ -4735,7 +5664,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) */ if (marker < 0) { e_warn(drv, "Packet Buffer(%i) can not provide enough" - "headroom to support flow control." + "headroom to suppport flow control." 
"Decrease MTU or number of traffic classes\n", pb); marker = tc + 1; } @@ -4743,13 +5672,13 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) return marker; } -/** +/* * ixgbe_lpbthresh - calculate low water mark for for flow control * * @adapter: board private structure to calculate for - * @pb: packet buffer to calculate + * @pb - packet buffer to calculate */ -static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) +static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int __maybe_unused pb) { struct ixgbe_hw *hw = &adapter->hw; struct net_device *dev = adapter->netdev; @@ -4759,20 +5688,20 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) /* Calculate max LAN frame size */ tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* FCoE traffic class uses FCOE jumbo frames */ if ((dev->features & NETIF_F_FCOE_MTU) && (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; -#endif +#endif /* CONFIG_FCOE */ /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: dv_id = IXGBE_LOW_DV_X540(tc); break; default: @@ -4796,6 +5725,7 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter) if (!num_tc) num_tc = 1; + for (i = 0; i < num_tc; i++) { hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i); @@ -4805,7 +5735,7 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter) hw->fc.low_water[i] = 0; } - for (; i < MAX_TRAFFIC_CLASS; i++) + for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) hw->fc.high_water[i] = 0; } @@ -4821,7 +5751,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) else hdrm = 0; - hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); + hw->mac.ops.setup_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); 
ixgbe_pbthresh_setup(adapter); } @@ -4834,7 +5764,8 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) spin_lock(&adapter->fdir_perfect_lock); if (!hlist_empty(&adapter->fdir_filter_list)) - ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); + ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask, + adapter->cloud_mode); hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { @@ -4843,277 +5774,94 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) filter->sw_idx, (filter->action == IXGBE_FDIR_DROP_QUEUE) ? IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[filter->action]->reg_idx); + adapter->rx_ring[filter->action]->reg_idx, + adapter->cloud_mode); } spin_unlock(&adapter->fdir_perfect_lock); } -static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, - struct ixgbe_adapter *adapter) +static void ixgbe_configure(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 vmolr; - /* No unicast promiscuous support for VMDQ devices. 
*/ - vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); - vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE); - - /* clear the affected bit */ - vmolr &= ~IXGBE_VMOLR_MPE; - - if (dev->flags & IFF_ALLMULTI) { - vmolr |= IXGBE_VMOLR_MPE; - } else { - vmolr |= IXGBE_VMOLR_ROMPE; - hw->mac.ops.update_mc_addr_list(hw, dev); - } - ixgbe_write_uc_addr_list(adapter->netdev, pool); - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); -} + ixgbe_configure_pb(adapter); + ixgbe_configure_dcb(adapter); -static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) -{ - struct ixgbe_adapter *adapter = vadapter->real_adapter; - int rss_i = adapter->num_rx_queues_per_pool; - struct ixgbe_hw *hw = &adapter->hw; - u16 pool = vadapter->pool; - u32 psrtype = IXGBE_PSRTYPE_TCPHDR | - IXGBE_PSRTYPE_UDPHDR | - IXGBE_PSRTYPE_IPV4HDR | - IXGBE_PSRTYPE_L2HDR | - IXGBE_PSRTYPE_IPV6HDR; + /* + * We must restore virtualization before VLANs or else + * the VLVF registers will not be populated + */ + ixgbe_configure_virtualization(adapter); - if (hw->mac.type == ixgbe_mac_82598EB) - return; + ixgbe_set_rx_mode(adapter->netdev); +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + ixgbe_restore_vlan(adapter); +#endif - if (rss_i > 3) - psrtype |= 2u << 29; - else if (rss_i > 1) - psrtype |= 1u << 29; - - IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); -} - -/** - * ixgbe_clean_rx_ring - Free Rx Buffers per Queue - * @rx_ring: ring to free buffers from - **/ -static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) -{ - struct device *dev = rx_ring->dev; - unsigned long size; - u16 i; - - /* ring already cleared, nothing to do */ - if (!rx_ring->rx_buffer_info) - return; - - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; - - if (rx_buffer->skb) { - struct sk_buff *skb = rx_buffer->skb; - if (IXGBE_CB(skb)->page_released) - dma_unmap_page(dev, - 
IXGBE_CB(skb)->dma, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); - dev_kfree_skb(skb); - rx_buffer->skb = NULL; - } - - if (!rx_buffer->page) - continue; - - dma_unmap_page(dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); - __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring)); - - rx_buffer->page = NULL; - } - - size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; - memset(rx_ring->rx_buffer_info, 0, size); - - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); - - rx_ring->next_to_alloc = 0; - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; -} - -static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter, - struct ixgbe_ring *rx_ring) -{ - struct ixgbe_adapter *adapter = vadapter->real_adapter; - int index = rx_ring->queue_index + vadapter->rx_base_queue; - - /* shutdown specific queue receive and wait for dma to settle */ - ixgbe_disable_rx_queue(adapter, rx_ring); - usleep_range(10000, 20000); - ixgbe_irq_disable_queues(adapter, BIT_ULL(index)); - ixgbe_clean_rx_ring(rx_ring); - rx_ring->l2_accel_priv = NULL; -} - -static int ixgbe_fwd_ring_down(struct net_device *vdev, - struct ixgbe_fwd_adapter *accel) -{ - struct ixgbe_adapter *adapter = accel->real_adapter; - unsigned int rxbase = accel->rx_base_queue; - unsigned int txbase = accel->tx_base_queue; - int i; - - netif_tx_stop_all_queues(vdev); - - for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { - ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); - adapter->rx_ring[rxbase + i]->netdev = adapter->netdev; - } - - for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { - adapter->tx_ring[txbase + i]->l2_accel_priv = NULL; - adapter->tx_ring[txbase + i]->netdev = adapter->netdev; - } - - - return 0; -} - -static int ixgbe_fwd_ring_up(struct net_device *vdev, - struct ixgbe_fwd_adapter *accel) -{ - struct ixgbe_adapter *adapter = accel->real_adapter; - unsigned int rxbase, txbase, queues; - int i, baseq, err = 0; - - if 
(!test_bit(accel->pool, &adapter->fwd_bitmask)) - return 0; - - baseq = accel->pool * adapter->num_rx_queues_per_pool; - netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", - accel->pool, adapter->num_rx_pools, - baseq, baseq + adapter->num_rx_queues_per_pool, - adapter->fwd_bitmask); - - accel->netdev = vdev; - accel->rx_base_queue = rxbase = baseq; - accel->tx_base_queue = txbase = baseq; - - for (i = 0; i < adapter->num_rx_queues_per_pool; i++) - ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); - - for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { - adapter->rx_ring[rxbase + i]->netdev = vdev; - adapter->rx_ring[rxbase + i]->l2_accel_priv = accel; - ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]); - } - - for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { - adapter->tx_ring[txbase + i]->netdev = vdev; - adapter->tx_ring[txbase + i]->l2_accel_priv = accel; - } - - queues = min_t(unsigned int, - adapter->num_rx_queues_per_pool, vdev->num_tx_queues); - err = netif_set_real_num_tx_queues(vdev, queues); - if (err) - goto fwd_queue_err; - - err = netif_set_real_num_rx_queues(vdev, queues); - if (err) - goto fwd_queue_err; - - if (is_valid_ether_addr(vdev->dev_addr)) - ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool); - - ixgbe_fwd_psrtype(accel); - ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter); - return err; -fwd_queue_err: - ixgbe_fwd_ring_down(vdev, accel); - return err; -} - -static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) -{ - struct net_device *upper; - struct list_head *iter; - int err; - - netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { - if (netif_is_macvlan(upper)) { - struct macvlan_dev *dfwd = netdev_priv(upper); - struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; - - if (dfwd->fwd_priv) { - err = ixgbe_fwd_ring_up(upper, vadapter); - if (err) - continue; - } - } - } -} - -static void ixgbe_configure(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw 
= &adapter->hw; - - ixgbe_configure_pb(adapter); -#ifdef CONFIG_IXGBE_DCB - ixgbe_configure_dcb(adapter); -#endif - /* - * We must restore virtualization before VLANs or else - * the VLVF registers will not be populated - */ - ixgbe_configure_virtualization(adapter); - ixgbe_set_rx_mode(adapter->netdev); - ixgbe_restore_vlan(adapter); - - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - hw->mac.ops.disable_rx_buff(hw); - break; - default: - break; - } + if (adapter->hw.mac.type == ixgbe_mac_82599EB || + adapter->hw.mac.type == ixgbe_mac_X540) + hw->mac.ops.disable_sec_rx_path(hw); if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { ixgbe_init_fdir_signature_82599(&adapter->hw, adapter->fdir_pballoc); } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { ixgbe_init_fdir_perfect_82599(&adapter->hw, - adapter->fdir_pballoc); + adapter->fdir_pballoc, adapter->cloud_mode); ixgbe_fdir_filter_restore(adapter); } - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - hw->mac.ops.enable_rx_buff(hw); - break; - default: - break; + if (adapter->hw.mac.type == ixgbe_mac_82599EB || + adapter->hw.mac.type == ixgbe_mac_X540) + hw->mac.ops.enable_sec_rx_path(hw); + + /* Enable EEE only when supported and enabled */ + if (hw->mac.ops.setup_eee && + (adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) { + bool eee_enable = !!(adapter->flags2 & IXGBE_FLAG2_EEE_ENABLED); + + hw->mac.ops.setup_eee(hw, eee_enable); } -#ifdef CONFIG_IXGBE_DCA + +#if IS_ENABLED(CONFIG_DCA) /* configure DCA */ if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) ixgbe_setup_dca(adapter); -#endif /* CONFIG_IXGBE_DCA */ +#endif -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* configure FCoE L2 filters, redirection table, and Rx control */ ixgbe_configure_fcoe(adapter); +#endif /* CONFIG_FCOE */ -#endif /* IXGBE_FCOE */ ixgbe_configure_tx(adapter); ixgbe_configure_rx(adapter); - ixgbe_configure_dfwd(adapter); +} + +static bool ixgbe_is_sfp(struct ixgbe_hw *hw) +{ + 
switch (hw->mac.type) { + case ixgbe_mac_82598EB: + if (hw->phy.type == ixgbe_phy_nl) + return true; + return false; + case ixgbe_mac_82599EB: + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + case ixgbe_media_type_fiber_qsfp: + return true; + default: + return false; + } + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) + return true; + return false; + default: + return false; + } } /** @@ -5123,7 +5871,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) { /* - * We are assuming the worst case scenario here, and that + * We are assuming the worst case scenerio here, and that * is that an SFP was inserted/removed after the reset * but before SFP detection was enabled. As such the best * solution is to just start searching as soon as we start @@ -5145,27 +5893,68 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) { u32 speed; bool autoneg, link_up = false; - int ret = IXGBE_ERR_LINK_SETUP; + u32 ret = IXGBE_ERR_LINK_SETUP; if (hw->mac.ops.check_link) ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); if (ret) - return ret; + goto link_cfg_out; speed = hw->phy.autoneg_advertised; if ((!speed) && (hw->mac.ops.get_link_capabilities)) ret = hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); if (ret) - return ret; + goto link_cfg_out; if (hw->mac.ops.setup_link) ret = hw->mac.ops.setup_link(hw, speed, link_up); - +link_cfg_out: return ret; } +/** + * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset + * @adapter: board private structure + * + * On a reset we need to clear out the VF stats or accounting gets + * messed up because they're not clear on read. 
+ **/ +static void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < adapter->num_vfs; i++) { + adapter->vfinfo[i].last_vfstats.gprc = + IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i)); + adapter->vfinfo[i].saved_rst_vfstats.gprc += + adapter->vfinfo[i].vfstats.gprc; + adapter->vfinfo[i].vfstats.gprc = 0; + adapter->vfinfo[i].last_vfstats.gptc = + IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i)); + adapter->vfinfo[i].saved_rst_vfstats.gptc += + adapter->vfinfo[i].vfstats.gptc; + adapter->vfinfo[i].vfstats.gptc = 0; + adapter->vfinfo[i].last_vfstats.gorc = + IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i)); + adapter->vfinfo[i].saved_rst_vfstats.gorc += + adapter->vfinfo[i].vfstats.gorc; + adapter->vfinfo[i].vfstats.gorc = 0; + adapter->vfinfo[i].last_vfstats.gotc = + IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i)); + adapter->vfinfo[i].saved_rst_vfstats.gotc += + adapter->vfinfo[i].vfstats.gotc; + adapter->vfinfo[i].vfstats.gotc = 0; + adapter->vfinfo[i].last_vfstats.mprc = + IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i)); + adapter->vfinfo[i].saved_rst_vfstats.mprc += + adapter->vfinfo[i].vfstats.mprc; + adapter->vfinfo[i].vfstats.mprc = 0; + } +} + static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -5187,7 +5976,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: default: IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); @@ -5199,9 +5988,6 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); } - /* XXX: to interrupt immediately for EICS writes, enable this */ - /* gpie |= IXGBE_GPIE_EIMEN; */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { gpie &= ~IXGBE_GPIE_VTMODE_MASK; @@ -5219,26 +6005,25 @@ static void ixgbe_setup_gpie(struct 
ixgbe_adapter *adapter) } /* Enable Thermal over heat sensor interrupt */ - if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: - gpie |= IXGBE_SDP0_GPIEN_8259X; + gpie |= IXGBE_SDP0_GPIEN; break; default: break; } - } /* Enable fan failure interrupt */ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) - gpie |= IXGBE_SDP1_GPIEN(hw); + gpie |= IXGBE_SDP1_GPIEN; switch (hw->mac.type) { case ixgbe_mac_82599EB: - gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X; + gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN; break; case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: gpie |= IXGBE_SDP0_GPIEN_X540; break; default: @@ -5265,17 +6050,18 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) /* enable the optics for 82599 SFP+ fiber */ if (hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw); - - if (hw->phy.ops.set_phy_power) - hw->phy.ops.set_phy_power(hw, true); + ixgbe_set_phy_power(hw, true); smp_mb__before_atomic(); clear_bit(__IXGBE_DOWN, &adapter->state); ixgbe_napi_enable_all(adapter); +#ifndef IXGBE_NO_LLI + ixgbe_configure_lli(adapter); +#endif if (ixgbe_is_sfp(hw)) { ixgbe_sfp_link_config(adapter); - } else { + } else if (!hw->phy.reset_disable) { err = ixgbe_non_sfp_link_config(hw); if (err) e_err(probe, "link_config FAILED %d\n", err); @@ -5295,12 +6081,16 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) e_crit(drv, "Fan has stopped, replace the adapter\n"); } + /* enable transmits */ + netif_tx_start_all_queues(adapter->netdev); + /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; adapter->link_check_timeout = jiffies; mod_timer(&adapter->service_timer, jiffies); + ixgbe_clear_vf_stats_counters(adapter); /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; @@ -5311,7 +6101,11 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) { WARN_ON(in_interrupt()); /* put off any impending NetWatchDogTimeout */ +#ifdef HAVE_NETIF_TRANS_UPDATE netif_trans_update(adapter->netdev); +#else + adapter->netdev->trans_start = jiffies; +#endif while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) usleep_range(1000, 2000); @@ -5341,10 +6135,12 @@ void ixgbe_up(struct ixgbe_adapter *adapter) void ixgbe_reset(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; +#ifdef HAVE_SET_RX_MODE struct net_device *netdev = adapter->netdev; +#endif int err; - if (ixgbe_removed(hw->hw_addr)) + if (IXGBE_REMOVED(hw->hw_addr)) return; /* lock SFP init bit to prevent race conditions with the watchdog */ while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) @@ -5357,7 +6153,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) err = hw->mac.ops.init_hw(hw); switch (err) { - case 0: + case IXGBE_SUCCESS: case IXGBE_ERR_SFP_NOT_PRESENT: case IXGBE_ERR_SFP_NOT_SUPPORTED: break; @@ -5367,12 +6163,15 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) case IXGBE_ERR_EEPROM_VERSION: /* We are running on a pre-production device, log a warning */ e_dev_warn("This device is a pre-production adapter/LOM. " - "Please be aware there may be issues associated with " - "your hardware. If you are experiencing problems " - "please contact your Intel or hardware " + "Please be aware there may be issues associated " + "with your hardware. 
If you are experiencing " + "problems please contact your Intel or hardware " "representative who provided you with this " "hardware.\n"); break; + case IXGBE_ERR_OVERTEMP: + e_crit(drv, "%s\n", ixgbe_overheat_msg); + break; default: e_dev_err("Hardware Error: %d\n", err); } @@ -5381,7 +6180,9 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) /* flush entries out of MAC table */ ixgbe_flush_sw_mac_table(adapter); +#ifdef HAVE_SET_RX_MODE __dev_uc_unsync(netdev, NULL); +#endif /* do not flush user set addresses */ ixgbe_mac_set_default_filter(adapter); @@ -5390,15 +6191,107 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) if (hw->mac.san_mac_rar_index) hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); + /* Clear saved DMA coalescing values except for watchdog_timer */ + hw->mac.dmac_config.fcoe_en = false; + hw->mac.dmac_config.link_speed = 0; + hw->mac.dmac_config.fcoe_tc = 0; + hw->mac.dmac_config.num_tcs = 0; + +#ifdef HAVE_PTP_1588_CLOCK if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) ixgbe_ptp_reset(adapter); +#endif - if (hw->phy.ops.set_phy_power) { - if (!netif_running(adapter->netdev) && !adapter->wol) - hw->phy.ops.set_phy_power(hw, false); - else - hw->phy.ops.set_phy_power(hw, true); + if (!netif_running(adapter->netdev) && !adapter->wol) + ixgbe_set_phy_power(hw, false); + else + ixgbe_set_phy_power(hw, true); +} + +/** + * ixgbe_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + if (IXGBE_CB(skb)->page_released) + dma_unmap_page_attrs(dev, + 
IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); +#else + /* We need to clean up RSC frag lists */ + skb = ixgbe_merge_active_tail(skb); + if (ixgbe_close_active_frag_list(skb)) + dma_unmap_single(dev, + IXGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + IXGBE_CB(skb)->dma = 0; +#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + dev_kfree_skb(skb); + rx_buffer->skb = NULL; + } + +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + if (!rx_buffer->page) + continue; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + + rx_buffer->page = NULL; +#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + if (!rx_buffer->dma) + continue; + + dma_unmap_single(dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer->dma = 0; +#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ } + + size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +#endif } /** @@ -5428,9 +6321,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) /* Zero out the descriptor ring */ memset(tx_ring->desc, 0, tx_ring->size); - - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; } /** @@ -5478,8 +6368,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; - struct net_device *upper; - struct list_head *iter; int i; /* signal that we are down to the 
interrupt handler */ @@ -5487,7 +6375,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) return; /* do nothing if already down */ /* disable receives */ - hw->mac.ops.disable_rx(hw); + ixgbe_disable_rx(hw); /* disable all enabled rx queues */ for (i = 0; i < adapter->num_rx_queues; i++) @@ -5502,25 +6390,12 @@ void ixgbe_down(struct ixgbe_adapter *adapter) netif_carrier_off(netdev); netif_tx_disable(netdev); - /* disable any upper devices */ - netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { - if (netif_is_macvlan(upper)) { - struct macvlan_dev *vlan = netdev_priv(upper); - - if (vlan->fwd_priv) { - netif_tx_stop_all_queues(upper); - netif_carrier_off(upper); - netif_tx_disable(upper); - } - } - } - ixgbe_irq_disable(adapter); ixgbe_napi_disable_all(adapter); + adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT); clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state); - adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; del_timer_sync(&adapter->service_timer); @@ -5531,7 +6406,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) /* Mark all the VFs as inactive */ for (i = 0 ; i < adapter->num_vfs; i++) - adapter->vfinfo[i].clear_to_send = false; + adapter->vfinfo[i].clear_to_send = 0; /* ping all the active vfs to let them know we are going down */ ixgbe_ping_all_vfs(adapter); @@ -5546,13 +6421,13 @@ void ixgbe_down(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); } - /* Disable the Tx DMA engine on 82599 and later MAC */ + /* Disable the Tx DMA engine on 82599 and X540 */ switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & ~IXGBE_DMATXCTL_TE)); @@ -5561,7 +6436,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter) break; } +#ifdef HAVE_PCI_ERS if 
(!pci_channel_offline(adapter->pdev)) +#endif ixgbe_reset(adapter); /* power down the optics for 82599 SFP+ fiber */ @@ -5573,68 +6450,71 @@ void ixgbe_down(struct ixgbe_adapter *adapter) } /** - * ixgbe_tx_timeout - Respond to a Tx Hang - * @netdev: network interface device structure + * ixgbe_eee_capable - helper function to determine EEE support on X550 + * **/ -static void ixgbe_tx_timeout(struct net_device *netdev) +static inline void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; - /* Do the reset outside of interrupt context */ - ixgbe_tx_timeout_reset(adapter); + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + if (!hw->phy.eee_speeds_supported) + break; + adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE; + if (!hw->phy.eee_speeds_advertised) + break; + adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; + break; + default: + adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE; + adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; + break; + } } -#ifdef CONFIG_IXGBE_DCB +#if IS_ENABLED(CONFIG_DCB) static void ixgbe_init_dcb(struct ixgbe_adapter *adapter) { - struct ixgbe_hw *hw = &adapter->hw; - struct tc_configuration *tc; - int j; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - case ixgbe_mac_82599EB: - adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; - adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; - break; - case ixgbe_mac_X540: - case ixgbe_mac_X550: - adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; - adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; - break; - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - default: - adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS; - adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS; - break; - } + struct ixgbe_dcb_tc_config *tc; + int j, bwg_pct; /* Configure DCB traffic classes */ - for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { + bwg_pct = 100 / 
adapter->dcb_cfg.num_tcs.pg_tcs; + for (j = 0; j < adapter->dcb_cfg.num_tcs.pg_tcs; j++) { tc = &adapter->dcb_cfg.tc_config[j]; - tc->path[DCB_TX_CONFIG].bwg_id = 0; - tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); - tc->path[DCB_RX_CONFIG].bwg_id = 0; - tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); - tc->dcb_pfc = pfc_disabled; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = 0; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bwg_pct; + tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = 0; + tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = bwg_pct; + tc->pfc = ixgbe_dcb_pfc_disabled; } - /* Initialize default user to priority mapping, UPx->TC0 */ + /* reset back to TC 0 */ tc = &adapter->dcb_cfg.tc_config[0]; - tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; - tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; - adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; - adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; + /* total of all TCs bandwidth needs to be 100 */ + bwg_pct += 100 % adapter->dcb_cfg.num_tcs.pg_tcs; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bwg_pct; + tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = bwg_pct; + + /* Initialize default user to priority mapping, UPx->TC0 */ + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; + + adapter->dcb_cfg.bw_percentage[IXGBE_DCB_TX_CONFIG][0] = 100; + adapter->dcb_cfg.bw_percentage[IXGBE_DCB_RX_CONFIG][0] = 100; + adapter->dcb_cfg.rx_pba_cfg = ixgbe_dcb_pba_equal; adapter->dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.round_robin_enable = false; adapter->dcb_set_bitmap = 0x00; if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, sizeof(adapter->temp_dcb_cfg)); } -#endif +#endif /*CONFIG_DCB*/ /** * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) @@ -5644,93 +6524,107 @@ static void ixgbe_init_dcb(struct ixgbe_adapter 
*adapter) * Fields are initialized based on PCI device information and * OS network device settings (MTU size). **/ -static int ixgbe_sw_init(struct ixgbe_adapter *adapter) +static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - unsigned int rss, fdir; + int err; + unsigned int fdir; u32 fwsm; - int i; + u16 device_caps; /* PCI config space info */ hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; - hw->revision_id = pdev->revision; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + if (hw->revision_id == IXGBE_FAILED_READ_CFG_BYTE && + ixgbe_check_cfg_remove(hw, pdev)) { + e_err(probe, "read of revision id failed\n"); + err = -ENODEV; + goto out; + } hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; + err = ixgbe_init_shared_code(hw); + if (err) { + e_err(probe, "init_shared_code failed: %d\n", err); + goto out; + } + adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * + hw->mac.num_rar_entries, + GFP_ATOMIC); + if (!adapter->mac_table) { + err = IXGBE_ERR_OUT_OF_MEM; + e_err(probe, "mac_table allocation failed: %d\n", err); + goto out; + } + + if (ixgbe_init_rss_key(adapter)) { + err = IXGBE_ERR_OUT_OF_MEM; + e_err(probe, "rss_key allocation failed: %d\n", err); + goto out; + } + /* Set common capability flags and settings */ - rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); - adapter->ring_feature[RING_F_RSS].limit = rss; - adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; - adapter->max_q_vectors = MAX_Q_VECTORS_82599; - adapter->atr_sample_rate = 20; - fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); - adapter->ring_feature[RING_F_FDIR].limit = fdir; - adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; -#ifdef CONFIG_IXGBE_DCA +#if IS_ENABLED(CONFIG_DCA) adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; #endif -#ifdef CONFIG_IXGBE_DCB +#if IS_ENABLED(CONFIG_DCB) 
adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; #endif -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; -#ifdef CONFIG_IXGBE_DCB +#if IS_ENABLED(CONFIG_DCB) /* Default traffic class to use for FCoE */ - adapter->fcoe.up = IXGBE_FCOE_DEFTC; -#endif /* CONFIG_IXGBE_DCB */ -#endif /* IXGBE_FCOE */ - - /* initialize static ixgbe jump table entries */ - adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]), - GFP_KERNEL); - if (!adapter->jump_tables[0]) - return -ENOMEM; - adapter->jump_tables[0]->mat = ixgbe_ipv4_fields; - - for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) - adapter->jump_tables[i] = NULL; - - adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * - hw->mac.num_rar_entries, - GFP_ATOMIC); - if (!adapter->mac_table) - return -ENOMEM; + adapter->fcoe.up = IXGBE_FCOE_DEFUP; + adapter->fcoe.up_set = IXGBE_FCOE_DEFUP; +#endif /* CONFIG_DCB */ +#endif /* CONFIG_FCOE */ + adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; + fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); + adapter->ring_feature[RING_F_FDIR].limit = fdir; + adapter->max_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82599; /* Set MAC specific capability flags and exceptions */ switch (hw->mac.type) { case ixgbe_mac_82598EB: + adapter->flags |= IXGBE_FLAGS_82598_INIT; adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; if (hw->device_id == IXGBE_DEV_ID_82598AT) adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; - adapter->max_q_vectors = MAX_Q_VECTORS_82598; + adapter->max_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82598; adapter->ring_feature[RING_F_FDIR].limit = 0; - adapter->atr_sample_rate = 0; - adapter->fdir_pballoc = 0; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; - adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; -#ifdef CONFIG_IXGBE_DCB +#if IS_ENABLED(CONFIG_DCB) adapter->fcoe.up = 0; + adapter->fcoe.up_set = 0; #endif /* 
IXGBE_DCB */ -#endif /* IXGBE_FCOE */ +#endif /* CONFIG_FCOE */ break; case ixgbe_mac_82599EB: + adapter->flags |= IXGBE_FLAGS_82599_INIT; if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; +#ifndef IXGBE_NO_SMART_SPEED + hw->phy.smart_speed = ixgbe_smart_speed_on; +#else + hw->phy.smart_speed = ixgbe_smart_speed_off; +#endif break; case ixgbe_mac_X540: - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + adapter->flags |= IXGBE_FLAGS_X540_INIT; + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); if (fwsm & IXGBE_FWSM_TS_ENABLED) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE; switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_1G_T: @@ -5740,68 +6634,78 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) default: break; } - /* fall through */ + /* fall through */ case ixgbe_mac_X550EM_x: -#ifdef CONFIG_IXGBE_DCB +#if IS_ENABLED(CONFIG_DCB) adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; #endif -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; -#ifdef CONFIG_IXGBE_DCB +#if IS_ENABLED(CONFIG_DCB) adapter->fcoe.up = 0; -#endif /* IXGBE_DCB */ -#endif /* IXGBE_FCOE */ - /* Fall Through */ + adapter->fcoe.up_set = 0; +#endif /* CONFIG_DCB */ +#endif /* CONFIG_FCOE */ + /* fall through */ case ixgbe_mac_X550: if (hw->mac.type == ixgbe_mac_X550) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; -#ifdef CONFIG_IXGBE_DCA + ixgbe_set_eee_capable(adapter); + adapter->flags |= IXGBE_FLAGS_X550_INIT; +#if IS_ENABLED(CONFIG_DCA) adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; -#endif - adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; - break; +#endif /* CONFIG_DCA */ default: break; } -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* FCoE support exists, always init the FCoE lock */ spin_lock_init(&adapter->fcoe.lock); +#endif /* CONFIG_FCOE */ -#endif /* n-tuple support exists, always 
init our spinlock */ spin_lock_init(&adapter->fdir_perfect_lock); -#ifdef CONFIG_IXGBE_DCB +#if IS_ENABLED(CONFIG_DCB) + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + case ixgbe_mac_82599EB: + adapter->dcb_cfg.num_tcs.pg_tcs = 8; + adapter->dcb_cfg.num_tcs.pfc_tcs = 8; + break; + case ixgbe_mac_X540: + case ixgbe_mac_X550: + adapter->dcb_cfg.num_tcs.pg_tcs = 4; + adapter->dcb_cfg.num_tcs.pfc_tcs = 4; + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + default: + adapter->dcb_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + break; + } ixgbe_init_dcb(adapter); -#endif + +#endif /* CONFIG_DCB */ + + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a || + hw->mac.type == ixgbe_mac_X540) + hw->mbx.ops.init_params(hw); /* default flow control settings */ hw->fc.requested_mode = ixgbe_fc_full; hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ + + adapter->last_lfc_mode = hw->fc.current_mode; ixgbe_pbthresh_setup(adapter); hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; hw->fc.send_xon = true; - hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw); - -#ifdef CONFIG_PCI_IOV - if (max_vfs > 0) - e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n"); - - /* assign number of SR-IOV VFs */ - if (hw->mac.type != ixgbe_mac_82598EB) { - if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) { - adapter->num_vfs = 0; - e_dev_warn("max_vfs parameter out of range. 
Not assigning any SR-IOV VFs\n"); - } else { - adapter->num_vfs = max_vfs; - } - } -#endif /* CONFIG_PCI_IOV */ - - /* enable itr by default in dynamic mode */ - adapter->rx_itr_setting = 1; - adapter->tx_itr_setting = 1; + hw->fc.disable_fc_autoneg = false; /* set default ring sizes */ adapter->tx_ring_count = IXGBE_DEFAULT_TXD; @@ -5809,18 +6713,26 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) /* set default work limits */ adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; + adapter->rx_work_limit = IXGBE_DEFAULT_RX_WORK; - /* initialize eeprom parameters */ - if (ixgbe_init_eeprom_params_generic(hw)) { - e_dev_err("EEPROM initialization failed\n"); - return -EIO; + /* Cache bit indicating need for crosstalk fix */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + hw->mac.ops.get_device_caps(hw, &device_caps); + if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) + adapter->need_crosstalk_fix = false; + else + adapter->need_crosstalk_fix = true; + break; + default: + adapter->need_crosstalk_fix = false; + break; } - - /* PF holds first pool slot */ - set_bit(0, &adapter->fwd_bitmask); set_bit(__IXGBE_DOWN, &adapter->state); - - return 0; +out: + return err; } /** @@ -5833,27 +6745,25 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) { struct device *dev = tx_ring->dev; int orig_node = dev_to_node(dev); - int ring_node = -1; + int numa_node = -1; int size; size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; if (tx_ring->q_vector) - ring_node = tx_ring->q_vector->numa_node; + numa_node = tx_ring->q_vector->numa_node; - tx_ring->tx_buffer_info = vzalloc_node(size, ring_node); + tx_ring->tx_buffer_info = vzalloc_node(size, numa_node); if (!tx_ring->tx_buffer_info) tx_ring->tx_buffer_info = vzalloc(size); if (!tx_ring->tx_buffer_info) goto err; - u64_stats_init(&tx_ring->syncp); - /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 
tx_ring->size = ALIGN(tx_ring->size, 4096); - set_dev_node(dev, ring_node); + set_dev_node(dev, numa_node); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, @@ -5865,8 +6775,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) if (!tx_ring->desc) goto err; - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; return 0; err: @@ -5891,6 +6799,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) int i, err = 0; for (i = 0; i < adapter->num_tx_queues; i++) { + err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); if (!err) continue; @@ -5917,27 +6826,25 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) { struct device *dev = rx_ring->dev; int orig_node = dev_to_node(dev); - int ring_node = -1; + int numa_node = -1; int size; size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; if (rx_ring->q_vector) - ring_node = rx_ring->q_vector->numa_node; + numa_node = rx_ring->q_vector->numa_node; - rx_ring->rx_buffer_info = vzalloc_node(size, ring_node); + rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); if (!rx_ring->rx_buffer_info) rx_ring->rx_buffer_info = vzalloc(size); if (!rx_ring->rx_buffer_info) goto err; - u64_stats_init(&rx_ring->syncp); - /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); - set_dev_node(dev, ring_node); + set_dev_node(dev, numa_node); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, @@ -5949,9 +6856,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) if (!rx_ring->desc) goto err; - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; - return 0; err: vfree(rx_ring->rx_buffer_info); @@ -5976,14 +6880,15 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); - if (!err) + if (!err) { continue; + } e_err(probe, "Allocation for Rx Queue %u 
failed\n", i); goto err_setup_rx; } -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) err = ixgbe_setup_fcoe_ddp_resources(adapter); if (!err) #endif @@ -6014,7 +6919,6 @@ void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); - tx_ring->desc = NULL; } @@ -6029,8 +6933,7 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) int i; for (i = 0; i < adapter->num_tx_queues; i++) - if (adapter->tx_ring[i]->desc) - ixgbe_free_tx_resources(adapter->tx_ring[i]); + ixgbe_free_tx_resources(adapter->tx_ring[i]); } /** @@ -6066,13 +6969,12 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) { int i; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) ixgbe_free_fcoe_ddp_resources(adapter); - #endif + for (i = 0; i < adapter->num_rx_queues; i++) - if (adapter->rx_ring[i]->desc) - ixgbe_free_rx_resources(adapter->rx_ring[i]); + ixgbe_free_rx_resources(adapter->rx_ring[i]); } /** @@ -6085,12 +6987,16 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbe_adapter *adapter = netdev_priv(netdev); +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; +#endif +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU /* MTU < 68 is an error and causes problems on some kernels */ if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) return -EINVAL; +#endif /* * For 82599EB we cannot allow legacy VFs to enable their receive * paths when MTU greater than 1500 is configured. 
So display a @@ -6098,7 +7004,11 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) */ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (adapter->hw.mac.type == ixgbe_mac_82599EB) && +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) +#else + (new_mtu > ETH_DATA_LEN)) +#endif e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); @@ -6124,67 +7034,68 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. **/ -int ixgbe_open(struct net_device *netdev) +static int ixgbe_open(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int err, queues; - + int err; + /* disallow open during test */ if (test_bit(__IXGBE_TESTING, &adapter->state)) return -EBUSY; - + netif_carrier_off(netdev); - + /* allocate transmit descriptors */ err = ixgbe_setup_all_tx_resources(adapter); if (err) goto err_setup_tx; - + /* allocate receive descriptors */ err = ixgbe_setup_all_rx_resources(adapter); if (err) goto err_setup_rx; - + ixgbe_configure(adapter); + err = ixgbe_request_irq(adapter); if (err) goto err_req_irq; - - /* Notify the stack of the actual queue counts. */ - if (adapter->num_rx_pools > 1) - queues = adapter->num_rx_queues_per_pool; - else - queues = adapter->num_tx_queues; - err = netif_set_real_num_tx_queues(netdev, queues); + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, + adapter->num_rx_pools > 1 ? 
1 : + adapter->num_tx_queues); if (err) goto err_set_queues; - if (adapter->num_rx_pools > 1 && - adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES) - queues = IXGBE_MAX_L2A_QUEUES; - else - queues = adapter->num_rx_queues; - err = netif_set_real_num_rx_queues(netdev, queues); + err = netif_set_real_num_rx_queues(netdev, + adapter->num_rx_pools > 1 ? 1 : + adapter->num_rx_queues); if (err) goto err_set_queues; +#ifdef HAVE_PTP_1588_CLOCK ixgbe_ptp_init(adapter); +#endif /* HAVE_PTP_1588_CLOCK*/ ixgbe_up_complete(adapter); +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); +#endif +#ifdef HAVE_UDP_ENC_RX_OFFLOAD udp_tunnel_get_rx_info(netdev); - - return 0; +#elif defined(HAVE_VXLAN_RX_OFFLOAD) + vxlan_get_rx_port(netdev); +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ + return IXGBE_SUCCESS; err_set_queues: ixgbe_free_irq(adapter); err_req_irq: ixgbe_free_all_rx_resources(adapter); - if (hw->phy.ops.set_phy_power && !adapter->wol) - hw->phy.ops.set_phy_power(&adapter->hw, false); + if (!adapter->wol) + ixgbe_set_phy_power(&adapter->hw, false); err_setup_rx: ixgbe_free_all_tx_resources(adapter); err_setup_tx: @@ -6193,23 +7104,31 @@ int ixgbe_open(struct net_device *netdev) return err; } +/** + * ixgbe_close_suspend - actions necessary to both suspend and close flows + * @adapter: the private adapter struct + * + * This function should contain the necessary work common to both suspending + * and closing of the device. 
+ */ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) { +#ifdef HAVE_PTP_1588_CLOCK ixgbe_ptp_suspend(adapter); +#endif if (adapter->hw.phy.ops.enter_lplu) { adapter->hw.phy.reset_disable = true; ixgbe_down(adapter); - adapter->hw.phy.ops.enter_lplu(&adapter->hw); + ixgbe_enter_lplu(&adapter->hw); adapter->hw.phy.reset_disable = false; } else { ixgbe_down(adapter); } - ixgbe_free_irq(adapter); - ixgbe_free_all_tx_resources(adapter); ixgbe_free_all_rx_resources(adapter); + ixgbe_free_all_tx_resources(adapter); } /** @@ -6223,11 +7142,13 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. **/ -int ixgbe_close(struct net_device *netdev) +static int ixgbe_close(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); +#ifdef HAVE_PTP_1588_CLOCK ixgbe_ptp_stop(adapter); +#endif if (netif_device_present(netdev)) ixgbe_close_suspend(adapter); @@ -6240,12 +7161,21 @@ int ixgbe_close(struct net_device *netdev) } #ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT +static int ixgbe_resume(struct device *dev) +#else static int ixgbe_resume(struct pci_dev *pdev) +#endif /* USE_LEGACY_PM_SUPPORT */ { - struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; + struct ixgbe_adapter *adapter; + struct net_device *netdev; u32 err; +#ifndef USE_LEGACY_PM_SUPPORT + struct pci_dev *pdev = to_pci_dev(dev); +#endif + adapter = pci_get_drvdata(pdev); + netdev = adapter->netdev; adapter->hw.hw_addr = adapter->io_addr; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); @@ -6271,6 +7201,7 @@ static int ixgbe_resume(struct pci_dev *pdev) IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); rtnl_lock(); + err = ixgbe_init_interrupt_scheme(adapter); if (!err && netif_running(netdev)) err = ixgbe_open(netdev); @@ -6278,12 +7209,82 @@ static int ixgbe_resume(struct pci_dev 
*pdev) if (!err) netif_device_attach(netdev); + rtnl_unlock(); return err; } + +#ifndef USE_LEGACY_PM_SUPPORT +/** + * ixgbe_freeze - quiesce the device (no IRQ's or DMA) + * @dev: The port's netdev + */ +static int ixgbe_freeze(struct device *dev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = adapter->netdev; + bool lplu_enabled = !!adapter->hw.phy.ops.enter_lplu; + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) { + if (lplu_enabled) { + adapter->hw.phy.reset_disable = true; + ixgbe_down(adapter); + adapter->hw.phy.reset_disable = false; + } else { + ixgbe_down(adapter); + } + ixgbe_free_irq(adapter); + } + + ixgbe_reset_interrupt_capability(adapter); + rtnl_unlock(); + + return 0; +} + +/** + * ixgbe_thaw - un-quiesce the device + * @dev: The port's netdev + */ +static int ixgbe_thaw(struct device *dev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = adapter->netdev; + bool lplu_enabled = !!adapter->hw.phy.ops.enter_lplu; + + ixgbe_set_interrupt_capability(adapter); + + if (netif_running(netdev)) { + u32 err = ixgbe_request_irq(adapter); + if (err) + return err; + + if (lplu_enabled) { + adapter->hw.phy.reset_disable = true; + ixgbe_up(adapter); + adapter->hw.phy.reset_disable = false; + } else { + ixgbe_up(adapter); + } + } + + netif_device_attach(netdev); + + return 0; +} +#endif /* USE_LEGACY_PM_SUPPORT */ #endif /* CONFIG_PM */ +/* + * __ixgbe_shutdown is not used when power management + * is disabled on older kernels (<2.6.12). causes a compile + * warning/error, because it is defined and not used. 
+ */ +#if defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); @@ -6310,8 +7311,10 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) return retval; #endif - if (hw->mac.ops.stop_link_on_d3) - hw->mac.ops.stop_link_on_d3(hw); + + /* this won't stop link if manageability or WoL is enabled */ + if (hw->mac.type == ixgbe_mac_82599EB) + ixgbe_stop_mac_link_on_d3_82599(hw); if (wufc) { ixgbe_set_rx_mode(netdev); @@ -6345,7 +7348,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: pci_wake_from_d3(pdev, !!wufc); break; default: @@ -6353,8 +7356,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) } *enable_wake = !!wufc; - if (hw->phy.ops.set_phy_power && !*enable_wake) - hw->phy.ops.set_phy_power(hw, false); + if (!*enable_wake) + ixgbe_set_phy_power(hw, false); ixgbe_release_hw_control(adapter); @@ -6363,12 +7366,21 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) return 0; } +#endif /* defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) */ #ifdef CONFIG_PM -static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) +#ifndef USE_LEGACY_PM_SUPPORT +static int ixgbe_suspend(struct device *dev) +#else +static int ixgbe_suspend(struct pci_dev *pdev, + pm_message_t __always_unused state) +#endif /* USE_LEGACY_PM_SUPPORT */ { int retval; bool wake; +#ifndef USE_LEGACY_PM_SUPPORT + struct pci_dev *pdev = to_pci_dev(dev); +#endif retval = __ixgbe_shutdown(pdev, &wake); if (retval) @@ -6385,6 +7397,7 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) } #endif /* CONFIG_PM */ +#ifndef USE_REBOOT_NOTIFIER static void ixgbe_shutdown(struct pci_dev *pdev) { bool wake; @@ -6397,13 +7410,105 @@ static void ixgbe_shutdown(struct
pci_dev *pdev) } } +#endif +#ifdef HAVE_NDO_GET_STATS64 +/** + * ixgbe_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: storage space for 64bit statistics + * + * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This + * function replaces ixgbe_get_stats for kernels which support it. + */ +#ifdef HAVE_VOID_NDO_GET_STATS64 +static void ixgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + rcu_read_unlock(); + /* following stats updated by ixgbe_watchdog_task() */ + stats->multicast = netdev->stats.multicast; + stats->rx_errors = netdev->stats.rx_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; +#ifndef HAVE_VOID_NDO_GET_STATS64 + return stats; +#endif +} +#else +/** + * ixgbe_get_stats - Get System 
Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + **/ +static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* update the stats data */ + ixgbe_update_stats(adapter); + +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + /* only return the current stats */ + return &netdev->stats; +#else + /* only return the current stats */ + return &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +} +#endif /** * ixgbe_update_stats - Update the board statistics counters. * @adapter: board private structure **/ void ixgbe_update_stats(struct ixgbe_adapter *adapter) { - struct net_device *netdev = adapter->netdev; +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &adapter->netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw_stats *hwstats = &adapter->stats; u64 total_mpc = 0; @@ -6435,13 +7540,14 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hw_csum_rx_error += rx_ring->rx_stats.csum_err; bytes += rx_ring->stats.bytes; packets += rx_ring->stats.packets; + } adapter->non_eop_descs = non_eop_descs; adapter->alloc_rx_page_failed = alloc_rx_page_failed; adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; adapter->hw_csum_rx_error = hw_csum_rx_error; - netdev->stats.rx_bytes = bytes; - netdev->stats.rx_packets = packets; + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; bytes = 0; packets = 0; @@ -6455,8 +7561,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) } adapter->restart_queue = restart_queue; adapter->tx_busy = tx_busy; - netdev->stats.tx_bytes = bytes; - netdev->stats.tx_packets = packets; + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; 
hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); @@ -6481,7 +7587,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: hwstats->pxonrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); break; @@ -6495,10 +7601,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); if ((hw->mac.type == ixgbe_mac_82599EB) || - (hw->mac.type == ixgbe_mac_X540) || (hw->mac.type == ixgbe_mac_X550) || (hw->mac.type == ixgbe_mac_X550EM_x) || - (hw->mac.type == ixgbe_mac_x550em_a)) { + (hw->mac.type == ixgbe_mac_X550EM_a) || + (hw->mac.type == ixgbe_mac_X540)) { hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); @@ -6523,12 +7629,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - /* OS2BMC stats are X540 and later */ + case ixgbe_mac_X550EM_a: + /* OS2BMC stats are X540 only*/ hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); + /* fall through */ case ixgbe_mac_82599EB: for (i = 0; i < 16; i++) adapter->hw_rx_no_dma_resources += @@ -6540,16 +7647,19 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); +#ifdef HAVE_TX_MQ hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); -#ifdef IXGBE_FCOE +#endif /* HAVE_TX_MQ */ +#if IS_ENABLED(CONFIG_FCOE) 
hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); + hwstats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); - /* Add up per cpu counters for total ddp aloc fail */ + /* Add up per cpu counters for total ddp alloc fail */ if (adapter->fcoe.ddp_pool) { struct ixgbe_fcoe *fcoe = &adapter->fcoe; struct ixgbe_fcoe_ddp_pool *ddp_pool; @@ -6563,7 +7673,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->fcoe_noddp = noddp; hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; } -#endif /* IXGBE_FCOE */ + +#endif /* CONFIG_FCOE */ break; default: break; @@ -6606,21 +7717,49 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); - /* Fill out the OS statistics structure */ - netdev->stats.multicast = hwstats->mprc; + net_stats->multicast = hwstats->mprc; /* Rx Errors */ - netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; - netdev->stats.rx_dropped = 0; - netdev->stats.rx_length_errors = hwstats->rlec; - netdev->stats.rx_crc_errors = hwstats->crcerrs; - netdev->stats.rx_missed_errors = total_mpc; + net_stats->rx_errors = hwstats->crcerrs + + hwstats->rlec; + net_stats->rx_dropped = 0; + net_stats->rx_length_errors = hwstats->rlec; + net_stats->rx_crc_errors = hwstats->crcerrs; + net_stats->rx_missed_errors = total_mpc; + + /* + * VF Stats Collection - skip while resetting because these + * are not clear on read and otherwise you'll sometimes get + * crazy values. 
+ */ + if (!test_bit(__IXGBE_RESETTING, &adapter->state)) { + for (i = 0; i < adapter->num_vfs; i++) { + UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i), \ + adapter->vfinfo[i].last_vfstats.gprc, \ + adapter->vfinfo[i].vfstats.gprc); + UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i), \ + adapter->vfinfo[i].last_vfstats.gptc, \ + adapter->vfinfo[i].vfstats.gptc); + UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i), \ + IXGBE_PVFGORC_MSB(i), \ + adapter->vfinfo[i].last_vfstats.gorc, \ + adapter->vfinfo[i].vfstats.gorc); + UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i), \ + IXGBE_PVFGOTC_MSB(i), \ + adapter->vfinfo[i].last_vfstats.gotc, \ + adapter->vfinfo[i].vfstats.gotc); + UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i), \ + adapter->vfinfo[i].last_vfstats.mprc, \ + adapter->vfinfo[i].vfstats.mprc); + } + } } +#ifdef HAVE_TX_MQ /** * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table - * @adapter: pointer to the device adapter structure + * @adapter - pointer to the device adapter structure **/ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) { @@ -6642,7 +7781,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) adapter->fdir_overflow++; - if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { + if (ixgbe_reinit_fdir_tables_82599(hw) == IXGBE_SUCCESS) { for (i = 0; i < adapter->num_tx_queues; i++) set_bit(__IXGBE_TX_FDIR_INIT_DONE, &(adapter->tx_ring[i]->state)); @@ -6654,9 +7793,10 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) } } +#endif /* HAVE_TX_MQ */ /** * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts - * @adapter: pointer to the device adapter structure + * @adapter - pointer to the device adapter structure * * This function serves two purposes. First it strobes the interrupt lines * in order to make certain interrupts are occurring. 
Secondly it sets the @@ -6669,9 +7809,9 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) u64 eics = 0; int i; - /* If we're down, removing or resetting, just bail */ + /* If we're down or resetting, just bail */ if (test_bit(__IXGBE_DOWN, &adapter->state) || - test_bit(__IXGBE_REMOVING, &adapter->state) || + test_bit(__IXGBE_REMOVE, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) return; @@ -6694,7 +7834,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_q_vectors; i++) { struct ixgbe_q_vector *qv = adapter->q_vector[i]; if (qv->rx.ring || qv->tx.ring) - eics |= BIT_ULL(i); + eics |= ((u64)1 << i); } } @@ -6704,8 +7844,8 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) /** * ixgbe_watchdog_update_link - update the link status - * @adapter: pointer to the device adapter structure - * @link_speed: pointer to a u32 to store the link_speed + * @adapter - pointer to the device adapter structure + * @link_speed - pointer to a u32 to store the link_speed **/ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) { @@ -6725,9 +7865,37 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) link_up = true; } + /* If Crosstalk fix enabled do the sanity check of making sure + * the SFP+ cage is empty. 
+ */ + if (adapter->need_crosstalk_fix) { + u32 sfp_cage_full; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP2; + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP0; + break; + default: + /* Non-SFP+ system - sanity check */ + sfp_cage_full = false; + break; + } + + if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full) + link_up = false; + } + +#ifdef HAVE_DCBNL_IEEE if (adapter->ixgbe_ieee_pfc) pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); +#endif if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) { hw->mac.ops.fc_enable(hw); ixgbe_set_rx_drop_en(adapter); @@ -6743,36 +7911,58 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) adapter->link_up = link_up; adapter->link_speed = link_speed; + if (hw->mac.ops.dmac_config && hw->mac.dmac_config.watchdog_timer) { + u8 num_tcs = netdev_get_num_tc(adapter->netdev); +#if IS_ENABLED(CONFIG_FCOE) + u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter); + bool fcoe_en = !!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED); +#endif /* CONFIG_FCOE */ + + if (hw->mac.dmac_config.link_speed != link_speed || +#if IS_ENABLED(CONFIG_FCOE) + hw->mac.dmac_config.fcoe_tc != fcoe_tc || + hw->mac.dmac_config.fcoe_en != fcoe_en || +#endif /* CONFIG_FCOE */ + hw->mac.dmac_config.num_tcs != num_tcs) { + hw->mac.dmac_config.link_speed = link_speed; + hw->mac.dmac_config.num_tcs = num_tcs; +#if IS_ENABLED(CONFIG_FCOE) + hw->mac.dmac_config.fcoe_en = fcoe_en; + hw->mac.dmac_config.fcoe_tc = fcoe_tc; +#endif /* CONFIG_FCOE */ + hw->mac.ops.dmac_config(hw); + } + } } static void ixgbe_update_default_up(struct ixgbe_adapter *adapter) { -#ifdef CONFIG_IXGBE_DCB + u8 up = 0; +#ifdef HAVE_DCBNL_IEEE struct net_device *netdev = adapter->netdev; struct dcb_app app = { - .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE, + .selector = DCB_APP_IDTYPE_ETHTYPE, .protocol = 0, }; - u8 up = 0; - 
- if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) - up = dcb_ieee_getapp_mask(netdev, &app); + up = dcb_getapp(netdev, &app); +#endif +#if IS_ENABLED(CONFIG_FCOE) adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0; +#else + adapter->default_up = up; #endif } /** * ixgbe_watchdog_link_is_up - update netif_carrier status and * print link up message - * @adapter: pointer to the device adapter structure + * @adapter - pointer to the device adapter structure **/ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; - struct net_device *upper; - struct list_head *iter; u32 link_speed = adapter->link_speed; const char *speed_str; bool flow_rx, flow_tx; @@ -6791,11 +7981,11 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); } break; - case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - case ixgbe_mac_82599EB: { + case ixgbe_mac_X550EM_a: + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: { u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); @@ -6808,15 +7998,20 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) break; } +#ifdef HAVE_PTP_1588_CLOCK adapter->last_rx_ptp_check = jiffies; if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) ixgbe_ptp_start_cyclecounter(adapter); +#endif switch (link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: speed_str = "10 Gbps"; break; + case IXGBE_LINK_SPEED_5GB_FULL: + speed_str = "5 Gbps"; + break; case IXGBE_LINK_SPEED_2_5GB_FULL: speed_str = "2.5 Gbps"; break; @@ -6839,22 +8034,15 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) (flow_tx ? 
"TX" : "None")))); netif_carrier_on(netdev); +#ifdef IFLA_VF_MAX ixgbe_check_vf_rate_limit(adapter); +#endif /* IFLA_VF_MAX */ + /* Turn on malicious driver detection */ + if ((adapter->num_vfs) && (hw->mac.ops.enable_mdd) && + (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) + hw->mac.ops.enable_mdd(hw); - /* enable transmits */ - netif_tx_wake_all_queues(adapter->netdev); - - /* enable any upper devices */ - rtnl_lock(); - netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { - if (netif_is_macvlan(upper)) { - struct macvlan_dev *vlan = netdev_priv(upper); - - if (vlan->fwd_priv) - netif_tx_wake_all_queues(upper); - } - } - rtnl_unlock(); + netif_tx_wake_all_queues(netdev); /* update the default user priority for VFs */ ixgbe_update_default_up(adapter); @@ -6866,7 +8054,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) /** * ixgbe_watchdog_link_is_down - update netif_carrier status and * print link down message - * @adapter: pointer to the adapter structure + * @adapter - pointer to the adapter structure **/ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) { @@ -6884,11 +8072,14 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; +#ifdef HAVE_PTP_1588_CLOCK if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) ixgbe_ptp_start_cyclecounter(adapter); +#endif e_info(drv, "NIC Link is Down\n"); netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); /* ping all the active vfs to let them know link has changed */ ixgbe_ping_all_vfs(adapter); @@ -6919,16 +8110,15 @@ static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) if (!adapter->num_vfs) return false; - /* resetting the PF is only needed for MAC before X550 */ + /* resetting the PF is only needed for MACs < X550 */ if (hw->mac.type >= ixgbe_mac_X550) return false; - for (i = 0; i < adapter->num_vfs; i++) { for (j = 0; j < 
q_per_pool; j++) { u32 h, t; - h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j)); - t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j)); + h = IXGBE_READ_REG(hw, IXGBE_PVFTDHn(q_per_pool, i, j)); + t = IXGBE_READ_REG(hw, IXGBE_PVFTDTn(q_per_pool, i, j)); if (h != t) return true; @@ -6940,7 +8130,7 @@ static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) /** * ixgbe_watchdog_flush_tx - flush queues on link down - * @adapter: pointer to the device adapter structure + * @adapter - pointer to the device adapter structure **/ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) { @@ -6952,7 +8142,7 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) * to get done, so reset controller to flush Tx. * (Do the reset outside of interrupt context). */ - e_warn(drv, "initiating reset to clear Tx work after link loss\n"); + e_warn(drv, "initiating reset due to lost link with pending Tx work\n"); set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); } } @@ -6962,12 +8152,29 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter, struct pci_dev *vfdev) { - if (!pci_wait_for_pending_transaction(vfdev)) - e_dev_warn("Issuing VFLR with pending transactions\n"); + int pos, i; + u16 status; - e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); - pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); + /* wait for pending transactions on the bus */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(vfdev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + goto clear; + } + + e_dev_warn("Issuing VFLR with pending transactions\n"); + +clear: + pos = pci_find_capability(vfdev, PCI_CAP_ID_EXP); + if (!pos) + return; + e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); + pci_write_config_word(vfdev, pos + PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_BCR_FLR); msleep(100); } @@ 
-6984,7 +8191,8 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); if (gpc) /* If incrementing then no need for the check below */ return; - /* Check to see if a bad DMA write target from an errant or + /* + * Check to see if a bad DMA write target from an errant or * malicious VF has caused a PCIe error. If so then we can * issue a VFLR to the offending VF(s) and then resume without * requesting a full slot reset. @@ -7025,29 +8233,20 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) if (!ssvpc) return; - e_warn(drv, "%u Spoofed packets detected\n", ssvpc); -} -#else -static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter) -{ + e_warn(drv, "%d Spoofed packets detected\n", ssvpc); } -static void -ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter) -{ -} #endif /* CONFIG_PCI_IOV */ - /** * ixgbe_watchdog_subtask - check and bring link up - * @adapter: pointer to the device adapter structure + * @adapter - pointer to the device adapter structure **/ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) { - /* if interface is down, removing or resetting, do nothing */ + /* if interface is down do nothing */ if (test_bit(__IXGBE_DOWN, &adapter->state) || - test_bit(__IXGBE_REMOVING, &adapter->state) || + test_bit(__IXGBE_REMOVE, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) return; @@ -7057,9 +8256,10 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) ixgbe_watchdog_link_is_up(adapter); else ixgbe_watchdog_link_is_down(adapter); - - ixgbe_check_for_bad_vf(adapter); +#ifdef CONFIG_PCI_IOV ixgbe_spoof_check(adapter); + ixgbe_check_for_bad_vf(adapter); +#endif /* CONFIG_PCI_IOV */ ixgbe_update_stats(adapter); ixgbe_watchdog_flush_tx(adapter); @@ -7067,13 +8267,36 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) /** * ixgbe_sfp_detection_subtask - poll for SFP+ cable - * @adapter: the ixgbe 
adapter structure + * @adapter - the ixgbe adapter structure **/ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; s32 err; + /* If crosstalk fix enabled verify the SFP+ cage is full */ + if (adapter->need_crosstalk_fix) { + u32 sfp_cage_full; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP2; + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP0; + break; + default: + /* Non-SFP+ system - sanity check */ + sfp_cage_full = false; + break; + } + if (!sfp_cage_full) + return; + } + /* not searching for SFP so there is nothing to do here */ if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) @@ -7081,7 +8304,7 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) if (adapter->sfp_poll_time && time_after(adapter->sfp_poll_time, jiffies)) - return; /* If not yet time to poll for SFP */ + return; /* If not yet time to poll for SFP */ /* someone else is in init, wait until next service event */ if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) @@ -7129,18 +8352,19 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && - (adapter->netdev->reg_state == NETREG_REGISTERED)) { + adapter->netdev_registered) { e_dev_err("failed to initialize because an unsupported " "SFP+ module type was detected.\n"); e_dev_err("Reload the driver after installing a " "supported module.\n"); unregister_netdev(adapter->netdev); + adapter->netdev_registered = false; } } /** * ixgbe_sfp_link_config_subtask - set up link SFP after module install - * @adapter: the ixgbe adapter structure + * @adapter - the ixgbe adapter structure **/ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter 
*adapter) { @@ -7160,7 +8384,6 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) speed = hw->phy.autoneg_advertised; if ((!speed) && (hw->mac.ops.get_link_capabilities)) { hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); - /* setup the highest link when no autoneg */ if (!autoneg) { if (speed & IXGBE_LINK_SPEED_10GB_FULL) @@ -7199,21 +8422,14 @@ static void ixgbe_service_timer(unsigned long data) static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter) { - struct ixgbe_hw *hw = &adapter->hw; u32 status; if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT)) return; - adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT; - - if (!hw->phy.ops.handle_lasi) - return; - - status = hw->phy.ops.handle_lasi(&adapter->hw); + status = ixgbe_handle_lasi(&adapter->hw); if (status != IXGBE_ERR_OVERTEMP) return; - e_crit(drv, "%s\n", ixgbe_overheat_msg); } @@ -7222,13 +8438,12 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state)) return; - /* If we're already down, removing or resetting, just bail */ + /* If we're already down or resetting, just bail */ if (test_bit(__IXGBE_DOWN, &adapter->state) || - test_bit(__IXGBE_REMOVING, &adapter->state) || + test_bit(__IXGBE_REMOVE, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) return; - ixgbe_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); adapter->tx_timeout_count++; @@ -7246,7 +8461,7 @@ static void ixgbe_service_task(struct work_struct *work) struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter, service_task); - if (ixgbe_removed(adapter->hw.hw_addr)) { + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { if (!test_bit(__IXGBE_DOWN, &adapter->state)) { rtnl_lock(); ixgbe_down(adapter); @@ -7255,29 +8470,41 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_service_event_complete(adapter); return; } +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || 
defined(HAVE_VXLAN_RX_OFFLOAD) if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) { rtnl_lock(); adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; +#ifdef HAVE_UDP_ENC_RX_OFFLOAD udp_tunnel_get_rx_info(adapter->netdev); +#else + vxlan_get_rx_port(adapter->netdev); +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ rtnl_unlock(); } +#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ ixgbe_reset_subtask(adapter); ixgbe_phy_interrupt_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); ixgbe_sfp_link_config_subtask(adapter); ixgbe_check_overtemp_subtask(adapter); ixgbe_watchdog_subtask(adapter); +#ifdef HAVE_TX_MQ ixgbe_fdir_reinit_subtask(adapter); +#endif ixgbe_check_hang_subtask(adapter); - +#ifdef HAVE_PTP_1588_CLOCK if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { ixgbe_ptp_overflow_check(adapter); - ixgbe_ptp_rx_hang(adapter); + if (unlikely(adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)) + ixgbe_ptp_rx_hang(adapter); + ixgbe_ptp_tx_hang(adapter); } +#endif /* HAVE_PTP_1588_CLOCK */ ixgbe_service_event_complete(adapter); } +#ifdef NETIF_F_GSO_PARTIAL static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, u8 *hdr_len) @@ -7314,15 +8541,11 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, /* initialize outer IP header fields */ if (ip.v4->version == 4) { - unsigned char *csum_start = skb_checksum_start(skb); - unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); - /* IP header will have to cancel out any data that * is not a part of the outer IP header */ - ip.v4->check = csum_fold(csum_partial(trans_start, - csum_start - trans_start, - 0)); + ip.v4->check = csum_fold(csum_add(lco_csum(skb), + csum_unfold(l4.tcp->check))); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; ip.v4->tot_len = 0; @@ -7420,13 +8643,232 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0); } +#else +static int ixgbe_tso(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + 
u8 *hdr_len) +{ +#ifndef NETIF_F_TSO + return 0; +#else + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + if (skb_header_cloned(skb)) { + int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; + + if (first->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + first->tx_flags |= IXGBE_TX_FLAGS_TSO | + IXGBE_TX_FLAGS_CSUM | + IXGBE_TX_FLAGS_IPV4; +#ifdef NETIF_F_TSO6 + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + first->tx_flags |= IXGBE_TX_FLAGS_TSO | + IXGBE_TX_FLAGS_CSUM; +#endif /* NETIF_F_TSO6 */ + } + + /* compute header lengths */ + l4len = tcp_hdrlen(skb); + *hdr_len = skb_transport_offset(skb) + l4len; + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* mss_l4len_id: use 0 as index for TSO */ + mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + vlan_macip_lens = skb_network_header_len(skb); + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, + mss_l4len_idx); + + return 1; +#endif /* !NETIF_F_TSO */ +} + +static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first) +{ + 
struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { + if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && + !(first->tx_flags & IXGBE_TX_FLAGS_CC)) + return; + vlan_macip_lens = skb_network_offset(skb) << + IXGBE_ADVTXD_MACLEN_SHIFT; + } else { + u8 l4_hdr = 0; +#ifdef HAVE_ENCAP_TSO_OFFLOAD + union { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + u8 *raw; + } network_hdr; + union { + struct tcphdr *tcphdr; + u8 *raw; + } transport_hdr; + __be16 frag_off; + + if (skb->encapsulation) { + network_hdr.raw = skb_inner_network_header(skb); + transport_hdr.raw = skb_inner_transport_header(skb); + vlan_macip_lens = skb_inner_network_offset(skb) << + IXGBE_ADVTXD_MACLEN_SHIFT; + } else { + network_hdr.raw = skb_network_header(skb); + transport_hdr.raw = skb_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + IXGBE_ADVTXD_MACLEN_SHIFT; + } + + /* use first 4 bits to determine IP version */ + switch (network_hdr.ipv4->version) { + case IPVERSION: + vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + l4_hdr = network_hdr.ipv4->protocol; + break; + case 6: + vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; + l4_hdr = network_hdr.ipv6->nexthdr; + if (likely((transport_hdr.raw - network_hdr.raw) == + sizeof(struct ipv6hdr))) + break; + ipv6_skip_exthdr(skb, network_hdr.raw - skb->data + + sizeof(struct ipv6hdr), + &l4_hdr, &frag_off); + if (unlikely(frag_off)) + l4_hdr = NEXTHDR_FRAGMENT; + break; + default: + break; + } + +#else /* HAVE_ENCAP_TSO_OFFLOAD */ + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + vlan_macip_lens |= skb_network_header_len(skb); + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + l4_hdr = ip_hdr(skb)->protocol; + break; +#ifdef NETIF_F_IPV6_CSUM + case __constant_htons(ETH_P_IPV6): + vlan_macip_lens |= skb_network_header_len(skb); + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; 
+#endif /* NETIF_F_IPV6_CSUM */ + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but proto=%x!\n", + first->protocol); + } + break; + } +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + + switch (l4_hdr) { + case IPPROTO_TCP: + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; +#ifdef HAVE_ENCAP_TSO_OFFLOAD + mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << + IXGBE_ADVTXD_L4LEN_SHIFT; +#else + mss_l4len_idx = tcp_hdrlen(skb) << + IXGBE_ADVTXD_L4LEN_SHIFT; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; + mss_l4len_idx = sizeof(struct sctphdr) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; +#endif /* HAVE_SCTP */ + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + default: +#ifdef HAVE_ENCAP_TSO_OFFLOAD + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum, version=%d, l4 proto=%x\n", + network_hdr.ipv4->version, l4_hdr); + } + skb_checksum_help(skb); + goto no_csum; +#else + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but l4 proto=%x!\n", + l4_hdr); + } +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + break; + } + + /* update TX checksum flag */ + first->tx_flags |= IXGBE_TX_FLAGS_CSUM; + } + +#ifdef HAVE_ENCAP_TSO_OFFLOAD +no_csum: +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + /* vlan_macip_lens: MACLEN, VLAN tag */ +#ifndef HAVE_ENCAP_TSO_OFFLOAD + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; +#endif /* !HAVE_ENCAP_TSO_OFFLOAD */ + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, + type_tucmd, mss_l4len_idx); +} +#endif /* NETIF_F_GSO_PARTIAL */ #define IXGBE_SET_FLAG(_input, _flag, _result) \ ((_flag <= _result) ? 
\ ((u32)(_input & _flag) * (_result / _flag)) : \ ((u32)(_input & _flag) / (_flag / _result))) -static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +static u32 ixgbe_tx_cmd_type(u32 tx_flags) { /* set type for advanced descriptor with frame checksum insertion */ u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA | @@ -7445,9 +8887,6 @@ static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP, IXGBE_ADVTXD_MAC_TSTAMP); - /* insert frame checksum */ - cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS); - return cmd_type; } @@ -7510,9 +8949,9 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ IXGBE_TXD_CMD_RS) -static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *first, - const u8 hdr_len) +static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + const u8 hdr_len) { struct sk_buff *skb = first->skb; struct ixgbe_tx_buffer *tx_buffer; @@ -7521,7 +8960,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, dma_addr_t dma; unsigned int data_len, size; u32 tx_flags = first->tx_flags; - u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags); + u32 cmd_type = ixgbe_tx_cmd_type(tx_flags); u16 i = tx_ring->next_to_use; tx_desc = IXGBE_TX_DESC(tx_ring, i); @@ -7531,7 +8970,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, size = skb_headlen(skb); data_len = skb->data_len; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) if (tx_flags & IXGBE_TX_FLAGS_FCOE) { if (data_len < sizeof(struct fcoe_crc_eof)) { size -= sizeof(struct fcoe_crc_eof) - data_len; @@ -7540,8 +8979,8 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, data_len -= sizeof(struct fcoe_crc_eof); } } +#endif /* CONFIG_FCOE */ -#endif dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); tx_buffer = first; @@ -7587,7 +9026,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, } 
tx_desc->read.olinfo_status = 0; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) size = min_t(unsigned int, data_len, skb_frag_size(frag)); #else size = skb_frag_size(frag); @@ -7609,6 +9048,9 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, /* set the timestamp */ first->time_stamp = jiffies; +#ifndef HAVE_TRANS_START_IN_QUEUE + netdev_ring(tx_ring)->trans_start = first->time_stamp; +#endif /* * Force memory writes to complete before letting h/w know there * are new descriptors to fetch. (Only applicable for weak-ordered @@ -7630,6 +9072,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); +#ifdef HAVE_SKB_XMIT_MORE if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); @@ -7638,10 +9081,20 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, */ mmiowb(); } +#else + /* notify HW of packet */ + writel(i, tx_ring->tail); - return; + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); +#endif /* HAVE_SKB_XMIT_MORE */ + + return 0; dma_error: dev_err(tx_ring->dev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; /* clear dma mappings for failed tx_buffer_info map */ for (;;) { @@ -7655,6 +9108,8 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, } tx_ring->next_to_use = i; + + return -1; } static void ixgbe_atr(struct ixgbe_ring *ring, @@ -7669,10 +9124,15 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct ipv6hdr *ipv6; } hdr; struct tcphdr *th; - unsigned int hlen; +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) struct sk_buff *skb; +#else +#define IXGBE_NO_VXLAN +#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ +#ifdef IXGBE_NO_VXLAN + unsigned int hlen; +#endif /* IXGBE_NO_VXLAN */ __be16 vlan_id; - int l4_proto; /* if ring doesn't have a interrupt vector, cannot perform ATR */ if (!q_vector) @@ -7684,52 +9144,85 @@ 
static void ixgbe_atr(struct ixgbe_ring *ring, ring->atr_count++; - /* currently only IPv4/IPv6 with TCP is supported */ - if ((first->protocol != htons(ETH_P_IP)) && - (first->protocol != htons(ETH_P_IPV6))) - return; - /* snag network header to get L4 type and address */ +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) skb = first->skb; hdr.network = skb_network_header(skb); + th = tcp_hdr(skb); + if (skb->encapsulation && first->protocol == htons(ETH_P_IP) && - hdr.ipv4->protocol != IPPROTO_UDP) { + hdr.ipv4->protocol == IPPROTO_UDP) { struct ixgbe_adapter *adapter = q_vector->adapter; - /* verify the port is recognized as VXLAN */ + /* verify the port is recognized as VXLAN or GENEVE*/ if (adapter->vxlan_port && - udp_hdr(skb)->dest == adapter->vxlan_port) + udp_hdr(skb)->dest == adapter->vxlan_port) { hdr.network = skb_inner_network_header(skb); + th = inner_tcp_hdr(skb); + } +#ifdef HAVE_UDP_ENC_RX_OFFLOAD if (adapter->geneve_port && - udp_hdr(skb)->dest == adapter->geneve_port) + udp_hdr(skb)->dest == adapter->geneve_port) { hdr.network = skb_inner_network_header(skb); + th = inner_tcp_hdr(skb); + } +#endif } /* Currently only IPv4/IPv6 with TCP is supported */ switch (hdr.ipv4->version) { case IPVERSION: - /* access ihl as u8 to avoid unaligned access on ia64 */ - hlen = (hdr.network[0] & 0x0F) << 2; - l4_proto = hdr.ipv4->protocol; + if (hdr.ipv4->protocol != IPPROTO_TCP) + return; break; case 6: - hlen = hdr.network - skb->data; - l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL); - hlen -= hdr.network - skb->data; + if (likely((unsigned char *)th - hdr.network == + sizeof(struct ipv6hdr))) { + if (hdr.ipv6->nexthdr != IPPROTO_TCP) + return; + } else { + __be16 frag_off; + u8 l4_hdr; + + ipv6_skip_exthdr(skb, hdr.network - skb->data + + sizeof(struct ipv6hdr), + &l4_hdr, &frag_off); + if (unlikely(frag_off)) + return; + if (l4_hdr != IPPROTO_TCP) + return; + } break; default: return; } - if (l4_proto != IPPROTO_TCP) 
+#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ +#ifdef IXGBE_NO_VXLAN + hdr.network = skb_network_header(first->skb); + /* Currently only IPv4/IPv6 with TCP is supported */ + if (first->protocol == htons(ETH_P_IP)) { + if (hdr.ipv4->protocol != IPPROTO_TCP) + return; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + } else if (first->protocol == htons(ETH_P_IPV6)) { + if (hdr.ipv6->nexthdr != IPPROTO_TCP) + return; + + hlen = sizeof(struct ipv6hdr); + } else { return; + } th = (struct tcphdr *)(hdr.network + hlen); - /* skip this packet since the socket is closing */ - if (th->fin) +#endif /* IXGBE_NO_VXLAN */ + /* skip this packet since it is invalid or the socket is closing */ + if (!th || th->fin) return; /* sample on all syn packets or once every atr sample count */ @@ -7760,6 +9253,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, common.port.src ^= th->dest ^ first->protocol; common.port.dst ^= th->source; +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) switch (hdr.ipv4->version) { case IPVERSION: input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; @@ -7782,40 +9276,64 @@ static void ixgbe_atr(struct ixgbe_ring *ring, if (hdr.network != skb_network_header(skb)) input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK; +#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ +#ifdef IXGBE_NO_VXLAN + if (first->protocol == htons(ETH_P_IP)) { + input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; + } else { + input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; + common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ + hdr.ipv6->saddr.s6_addr32[1] ^ + hdr.ipv6->saddr.s6_addr32[2] ^ + hdr.ipv6->saddr.s6_addr32[3] ^ + hdr.ipv6->daddr.s6_addr32[0] ^ + hdr.ipv6->daddr.s6_addr32[1] ^ + hdr.ipv6->daddr.s6_addr32[2] ^ + hdr.ipv6->daddr.s6_addr32[3]; + } +#endif /* IXGBE_NO_VXLAN */ /* This assumes the Rx queue and Tx queue 
are bound to the same CPU */ ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, input, common, ring->queue_index); } +#ifdef HAVE_NETDEV_SELECT_QUEUE +#if IS_ENABLED(CONFIG_FCOE) +#if defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + __always_unused void *accel, + select_queue_fallback_t fallback) +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL) +static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, + __always_unused void *accel) +#else +static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) +#endif /* HAVE_NDO_SELECT_QUEUE_ACCEL */ { - struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; -#ifdef IXGBE_FCOE - struct ixgbe_adapter *adapter; + struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_ring_feature *f; int txq; -#endif - - if (fwd_adapter) - return skb->queue_mapping + fwd_adapter->tx_base_queue; - -#ifdef IXGBE_FCOE /* * only execute the code below if protocol is FCoE * or FIP and we have FCoE enabled on the adapter */ switch (vlan_get_protocol(skb)) { - case htons(ETH_P_FCOE): - case htons(ETH_P_FIP): + case __constant_htons(ETH_P_FCOE): + case __constant_htons(ETH_P_FIP): adapter = netdev_priv(dev); if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) break; + /* fall through */ default: +#ifdef HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK return fallback(dev, skb); +#else + return __netdev_pick_tx(dev, skb); +#endif } f = &adapter->ring_feature[RING_F_FCOE]; @@ -7827,14 +9345,13 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, txq -= f->indices; return txq + f->offset; -#else - return fallback(dev, skb); -#endif } +#endif /* CONFIG_FCOE */ +#endif /* HAVE_NETDEV_SELECT_QUEUE */ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, - struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) + struct ixgbe_adapter __maybe_unused *adapter, + struct ixgbe_ring 
*tx_ring) { struct ixgbe_tx_buffer *first; int tso; @@ -7876,47 +9393,70 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, if (!vhdr) goto out_drop; + protocol = vhdr->h_vlan_encapsulated_proto; tx_flags |= ntohs(vhdr->h_vlan_TCI) << IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; } - protocol = vlan_get_protocol(skb); + skb_tx_timestamp(skb); + +#ifdef HAVE_PTP_1588_CLOCK +#ifdef SKB_SHARED_TX_IS_UNION + if (unlikely(skb_tx(skb)->hardware) && + adapter->ptp_clock) { + if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_tx(skb)->in_progress = 1; +#else if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - adapter->ptp_clock && - !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, + adapter->ptp_clock) { + if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state)) { - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - tx_flags |= IXGBE_TX_FLAGS_TSTAMP; + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; +#endif + tx_flags |= IXGBE_TX_FLAGS_TSTAMP; - /* schedule check for Tx timestamp */ - adapter->ptp_tx_skb = skb_get(skb); - adapter->ptp_tx_start = jiffies; - schedule_work(&adapter->ptp_tx_work); + /* schedule check for Tx timestamp */ + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + schedule_work(&adapter->ptp_tx_work); + } else { + adapter->tx_hwtstamp_skipped++; + } } - skb_tx_timestamp(skb); - +#endif #ifdef CONFIG_PCI_IOV /* * Use the l2switch_enable flag - would be false if the DMA * Tx switch had been disabled. */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + if (adapter->flags & IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE) tx_flags |= IXGBE_TX_FLAGS_CC; #endif - /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. 
*/ +#ifdef HAVE_TX_MQ if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) || (skb->priority != TC_PRIO_CONTROL))) { tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; - tx_flags |= (skb->priority & 0x7) << - IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; +#if IS_ENABLED(CONFIG_FCOE) + /* for FCoE with DCB, we force the priority to what + * was specified by the switch */ + if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && + ((protocol == htons(ETH_P_FCOE)) || + (protocol == htons(ETH_P_FIP)))) + tx_flags |= adapter->fcoe.up << + IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; + else +#endif /* CONFIG_FCOE */ + tx_flags |= skb->priority << + IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { struct vlan_ethhdr *vhdr; - - if (skb_cow_head(skb, 0)) + if (skb_header_cloned(skb) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) goto out_drop; vhdr = (struct vlan_ethhdr *)skb->data; vhdr->h_vlan_TCI = htons(tx_flags >> @@ -7926,11 +9466,12 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, } } +#endif /* HAVE_TX_MQ */ /* record initial flags and protocol */ first->tx_flags = tx_flags; first->protocol = protocol; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) /* setup tx offload for FCoE */ if ((protocol == htons(ETH_P_FCOE)) && (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { @@ -7939,9 +9480,25 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, goto out_drop; goto xmit_fcoe; + } else if (protocol == htons(ETH_P_FIP)) { + /* FCoE stack has a bug where it does not set the network + * header offset for FIP frames sent resulting into MACLEN + * being set to ZERO in the Tx context descriptor. + * This will cause MDD events when trying to Tx such frames. 
+ */ + if (!skb_network_offset(skb)) { + if (tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | + IXGBE_TX_FLAGS_SW_VLAN)) + skb_set_network_header(skb, + sizeof(struct ethhdr) + + sizeof(struct vlan_hdr)); + else + skb_set_network_header(skb, + sizeof(struct ethhdr)); + } } +#endif /* CONFIG_FCOE */ -#endif /* IXGBE_FCOE */ tso = ixgbe_tso(tx_ring, first, &hdr_len); if (tso < 0) goto out_drop; @@ -7952,26 +9509,47 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) ixgbe_atr(tx_ring, first); -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) xmit_fcoe: -#endif /* IXGBE_FCOE */ +#endif /* CONFIG_FCOE */ +#ifdef HAVE_PTP_1588_CLOCK + if (ixgbe_tx_map(tx_ring, first, hdr_len)) + goto cleanup_tx_tstamp; +#else ixgbe_tx_map(tx_ring, first, hdr_len); +#endif return NETDEV_TX_OK; out_drop: dev_kfree_skb_any(first->skb); first->skb = NULL; +#ifdef HAVE_PTP_1588_CLOCK +cleanup_tx_tstamp: + if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + cancel_work_sync(&adapter->ptp_tx_work); + clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); + } +#endif return NETDEV_TX_OK; } -static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, - struct net_device *netdev, - struct ixgbe_ring *ring) +static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *tx_ring; +#ifdef HAVE_TX_MQ + unsigned int r_idx = skb->queue_mapping; +#endif + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } /* * The minimum packet size for olinfo paylen is 17 so pad the skb @@ -7980,17 +9558,16 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, if (skb_put_padto(skb, 17)) return NETDEV_TX_OK; - tx_ring = ring ? 
ring : adapter->tx_ring[skb->queue_mapping]; - +#ifdef HAVE_TX_MQ + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + tx_ring = adapter->tx_ring[r_idx]; +#else + tx_ring = adapter->tx_ring[0]; +#endif return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); } -static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, - struct net_device *netdev) -{ - return __ixgbe_xmit_frame(skb, netdev, NULL); -} - /** * ixgbe_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure @@ -8015,67 +9592,24 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p) return 0; } -static int -ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u16 value; - int rc; - - if (prtad != hw->phy.mdio.prtad) - return -EINVAL; - rc = hw->phy.ops.read_reg(hw, addr, devad, &value); - if (!rc) - rc = value; - return rc; -} - -static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, - u16 addr, u16 value) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - - if (prtad != hw->phy.mdio.prtad) - return -EINVAL; - return hw->phy.ops.write_reg(hw, addr, devad, value); -} - -static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - switch (cmd) { - case SIOCSHWTSTAMP: - return ixgbe_ptp_set_ts_config(adapter, req); - case SIOCGHWTSTAMP: - return ixgbe_ptp_get_ts_config(adapter, req); - case SIOCGMIIPHY: - if (!adapter->hw.phy.ops.read_reg) - return -EOPNOTSUPP; - /* fall through */ - default: - return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); - } -} - +#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) /** * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding - * netdev->dev_addrs + * netdev->dev_addr_list * @netdev: 
network interface device structure * * Returns non-zero on failure **/ static int ixgbe_add_sanmac_netdev(struct net_device *dev) { - int err = 0; + int err = IXGBE_SUCCESS; struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; if (is_valid_ether_addr(hw->mac.san_addr)) { rtnl_lock(); - err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN); + err = dev_addr_add(dev, hw->mac.san_addr, + NETDEV_HW_ADDR_T_SAN); rtnl_unlock(); /* update SAN MAC vmdq pool selection */ @@ -8086,14 +9620,14 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev) /** * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding - * netdev->dev_addrs + * netdev->dev_addr_list * @netdev: network interface device structure * * Returns non-zero on failure **/ static int ixgbe_del_sanmac_netdev(struct net_device *dev) { - int err = 0; + int err = IXGBE_SUCCESS; struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_mac_info *mac = &adapter->hw.mac; @@ -8105,6 +9639,83 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev) return err; } +#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) */ + +static int ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, + u16 addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u16 value; + int rc; + + if (prtad != hw->phy.addr) + return -EINVAL; + rc = hw->phy.ops.read_reg(hw, addr, devad, &value); + if (!rc) + rc = value; + return rc; +} + +static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, + u16 addr, u16 value) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (prtad != hw->phy.addr) + return -EINVAL; + return hw->phy.ops.write_reg(hw, addr, devad, value); +} + +static int ixgbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct mii_ioctl_data *mii = (struct mii_ioctl_data *) 
&ifr->ifr_data; + int prtad, devad, ret; + + prtad = (mii->phy_id & MDIO_PHY_ID_PRTAD) >> 5; + devad = (mii->phy_id & MDIO_PHY_ID_DEVAD); + + if (cmd == SIOCGMIIREG) { + ret = ixgbe_mdio_read(netdev, prtad, devad, mii->reg_num); + if (ret < 0) + return ret; + mii->val_out = ret; + return 0; + } else { + return ixgbe_mdio_write(netdev, prtad, devad, mii->reg_num, + mii->val_in); + } +} + +static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ +#ifdef HAVE_PTP_1588_CLOCK + struct ixgbe_adapter *adapter = netdev_priv(netdev); + +#endif + switch (cmd) { +#ifdef HAVE_PTP_1588_CLOCK +#ifdef SIOCGHWTSTAMP + case SIOCGHWTSTAMP: + return ixgbe_ptp_get_ts_config(adapter, ifr); +#endif + case SIOCSHWTSTAMP: + return ixgbe_ptp_set_ts_config(adapter, ifr); +#endif +#ifdef ETHTOOL_OPS_COMPAT + case SIOCETHTOOL: + return ethtool_ioctl(ifr); +#endif + case SIOCGMIIREG: + case SIOCSMIIREG: + return ixgbe_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling 'interrupt' - used by things like netconsole to send skbs @@ -8114,69 +9725,25 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev) static void ixgbe_netpoll(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - int i; /* if interface is down do nothing */ if (test_bit(__IXGBE_DOWN, &adapter->state)) return; - /* loop through and schedule all active queues */ - for (i = 0; i < adapter->num_q_vectors; i++) - ixgbe_msix_clean_rings(0, adapter->q_vector[i]); -} - -#endif -static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int i; - - rcu_read_lock(); - for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); - u64 bytes, packets; - unsigned int start; - - if (ring) { - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - packets = 
ring->stats.packets; - bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - stats->rx_packets += packets; - stats->rx_bytes += bytes; - } - } - - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); - u64 bytes, packets; - unsigned int start; - - if (ring) { - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - packets = ring->stats.packets; - bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - stats->tx_packets += packets; - stats->tx_bytes += bytes; + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + int i; + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->netpoll_rx = true; + ixgbe_msix_clean_rings(0, adapter->q_vector[i]); + adapter->q_vector[i]->netpoll_rx = false; } + } else { + ixgbe_intr(0, adapter); } - rcu_read_unlock(); - /* following stats updated by ixgbe_watchdog_task() */ - stats->multicast = netdev->stats.multicast; - stats->rx_errors = netdev->stats.rx_errors; - stats->rx_length_errors = netdev->stats.rx_length_errors; - stats->rx_crc_errors = netdev->stats.rx_crc_errors; - stats->rx_missed_errors = netdev->stats.rx_missed_errors; - return stats; } +#endif /* CONFIG_NET_POLL_CONTROLLER */ -#ifdef CONFIG_IXGBE_DCB -/** - * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. +/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. 
* @adapter: pointer to ixgbe_adapter * @tc: number of traffic classes currently enabled * @@ -8198,7 +9765,7 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); rsave = reg; - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT); /* If up2tc is out of bounds default to zero */ @@ -8218,14 +9785,15 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) * * Populate the netdev user priority to tc map */ -static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) +static void ixgbe_set_prio_tc_map(struct ixgbe_adapter __maybe_unused *adapter) { +#ifdef HAVE_DCBNL_IEEE struct net_device *dev = adapter->netdev; struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; struct ieee_ets *ets = adapter->ixgbe_ieee_ets; u8 prio; - for (prio = 0; prio < MAX_USER_PRIORITY; prio++) { + for (prio = 0; prio < IXGBE_DCB_MAX_USER_PRIORITY; prio++) { u8 tc = 0; if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) @@ -8235,11 +9803,30 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) netdev_set_prio_tc_map(dev, prio, tc); } +#endif } -#endif /* CONFIG_IXGBE_DCB */ +#ifdef NETIF_F_HW_TC +static int +__ixgbe_setup_tc(struct net_device *dev, __always_unused u32 handle, + __always_unused __be16 proto, struct tc_to_netdev *tc) +{ + if (tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + +#ifdef TC_MQPRIO_HW_OFFLOAD_MAX + tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return ixgbe_setup_tc(dev, tc->mqprio->num_tc); +#else + return ixgbe_setup_tc(dev, tc->tc); +#endif +} +#endif /* NETIF_F_HW_TC */ + /** - * ixgbe_setup_tc - configure net_device for multiple traffic classes + * ixgbe_setup_tc - routine to configure net_device for multiple traffic + * classes. 
* * @netdev: net device to configure * @tc: number of traffic classes to enable @@ -8248,19 +9835,15 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; - bool pools; /* Hardware supports up to 8 traffic classes */ if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) return -EINVAL; - if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS) + if (tc && hw->mac.type == ixgbe_mac_82598EB && + tc < IXGBE_DCB_MAX_TRAFFIC_CLASS) return -EINVAL; - pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); - if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS) - return -EBUSY; - /* Hardware has to reinitialize queues and interrupts to * match packet buffer alignment. Unfortunately, the * hardware is not flexible enough to do this dynamically. @@ -8272,7 +9855,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) ixgbe_clear_interrupt_scheme(adapter); -#ifdef CONFIG_IXGBE_DCB if (tc) { netdev_set_num_tc(dev, tc); ixgbe_set_prio_tc_map(adapter); @@ -8297,455 +9879,13 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) ixgbe_validate_rtr(adapter, tc); -#endif /* CONFIG_IXGBE_DCB */ ixgbe_init_interrupt_scheme(adapter); - if (netif_running(dev)) - return ixgbe_open(dev); - - return 0; -} - -static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, - struct tc_cls_u32_offload *cls) -{ - u32 hdl = cls->knode.handle; - u32 uhtid = TC_U32_USERHTID(cls->knode.handle); - u32 loc = cls->knode.handle & 0xfffff; - int err = 0, i, j; - struct ixgbe_jump_table *jump = NULL; - - if (loc > IXGBE_MAX_HW_ENTRIES) - return -EINVAL; - - if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE)) - return -EINVAL; - - /* Clear this filter in the link data it is associated with */ - if (uhtid != 0x800) { - jump = adapter->jump_tables[uhtid]; - if (!jump) - return -EINVAL; - if (!test_bit(loc - 1, jump->child_loc_map)) - return -EINVAL; - clear_bit(loc - 1, jump->child_loc_map); - } - 
- /* Check if the filter being deleted is a link */ - for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { - jump = adapter->jump_tables[i]; - if (jump && jump->link_hdl == hdl) { - /* Delete filters in the hardware in the child hash - * table associated with this link - */ - for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) { - if (!test_bit(j, jump->child_loc_map)) - continue; - spin_lock(&adapter->fdir_perfect_lock); - err = ixgbe_update_ethtool_fdir_entry(adapter, - NULL, - j + 1); - spin_unlock(&adapter->fdir_perfect_lock); - clear_bit(j, jump->child_loc_map); - } - /* Remove resources for this link */ - kfree(jump->input); - kfree(jump->mask); - kfree(jump); - adapter->jump_tables[i] = NULL; - return err; - } - } - - spin_lock(&adapter->fdir_perfect_lock); - err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc); - spin_unlock(&adapter->fdir_perfect_lock); - return err; -} - -static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, - __be16 protocol, - struct tc_cls_u32_offload *cls) -{ - u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); - - if (uhtid >= IXGBE_MAX_LINK_HANDLE) - return -EINVAL; - - /* This ixgbe devices do not support hash tables at the moment - * so abort when given hash tables. 
- */ - if (cls->hnode.divisor > 0) - return -EINVAL; + ixgbe_open(dev); - set_bit(uhtid - 1, &adapter->tables); return 0; } -static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, - struct tc_cls_u32_offload *cls) -{ - u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); - - if (uhtid >= IXGBE_MAX_LINK_HANDLE) - return -EINVAL; - - clear_bit(uhtid - 1, &adapter->tables); - return 0; -} - -#ifdef CONFIG_NET_CLS_ACT -static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, - u8 *queue, u64 *action) -{ - unsigned int num_vfs = adapter->num_vfs, vf; - struct net_device *upper; - struct list_head *iter; - - /* redirect to a SRIOV VF */ - for (vf = 0; vf < num_vfs; ++vf) { - upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev); - if (upper->ifindex == ifindex) { - if (adapter->num_rx_pools > 1) - *queue = vf * 2; - else - *queue = vf * adapter->num_rx_queues_per_pool; - - *action = vf + 1; - *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; - return 0; - } - } - - /* redirect to a offloaded macvlan netdev */ - netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { - if (netif_is_macvlan(upper)) { - struct macvlan_dev *dfwd = netdev_priv(upper); - struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; - - if (vadapter && vadapter->netdev->ifindex == ifindex) { - *queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; - *action = *queue; - return 0; - } - } - } - - return -EINVAL; -} - -static int parse_tc_actions(struct ixgbe_adapter *adapter, - struct tcf_exts *exts, u64 *action, u8 *queue) -{ - const struct tc_action *a; - LIST_HEAD(actions); - int err; - - if (tc_no_actions(exts)) - return -EINVAL; - - tcf_exts_to_list(exts, &actions); - list_for_each_entry(a, &actions, list) { - - /* Drop action */ - if (is_tcf_gact_shot(a)) { - *action = IXGBE_FDIR_DROP_QUEUE; - *queue = IXGBE_FDIR_DROP_QUEUE; - return 0; - } - - /* Redirect to a VF or a offloaded macvlan */ - if (is_tcf_mirred_redirect(a)) { - int ifindex = 
tcf_mirred_ifindex(a); - - err = handle_redirect_action(adapter, ifindex, queue, - action); - if (err == 0) - return err; - } - } - - return -EINVAL; -} -#else -static int parse_tc_actions(struct ixgbe_adapter *adapter, - struct tcf_exts *exts, u64 *action, u8 *queue) -{ - return -EINVAL; -} -#endif /* CONFIG_NET_CLS_ACT */ - -static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input, - union ixgbe_atr_input *mask, - struct tc_cls_u32_offload *cls, - struct ixgbe_mat_field *field_ptr, - struct ixgbe_nexthdr *nexthdr) -{ - int i, j, off; - __be32 val, m; - bool found_entry = false, found_jump_field = false; - - for (i = 0; i < cls->knode.sel->nkeys; i++) { - off = cls->knode.sel->keys[i].off; - val = cls->knode.sel->keys[i].val; - m = cls->knode.sel->keys[i].mask; - - for (j = 0; field_ptr[j].val; j++) { - if (field_ptr[j].off == off) { - field_ptr[j].val(input, mask, val, m); - input->filter.formatted.flow_type |= - field_ptr[j].type; - found_entry = true; - break; - } - } - if (nexthdr) { - if (nexthdr->off == cls->knode.sel->keys[i].off && - nexthdr->val == cls->knode.sel->keys[i].val && - nexthdr->mask == cls->knode.sel->keys[i].mask) - found_jump_field = true; - else - continue; - } - } - - if (nexthdr && !found_jump_field) - return -EINVAL; - - if (!found_entry) - return 0; - - mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | - IXGBE_ATR_L4TYPE_MASK; - - if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) - mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; - - return 0; -} - -static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, - __be16 protocol, - struct tc_cls_u32_offload *cls) -{ - u32 loc = cls->knode.handle & 0xfffff; - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_mat_field *field_ptr; - struct ixgbe_fdir_filter *input = NULL; - union ixgbe_atr_input *mask = NULL; - struct ixgbe_jump_table *jump = NULL; - int i, err = -EINVAL; - u8 queue; - u32 uhtid, link_uhtid; - - uhtid = 
TC_U32_USERHTID(cls->knode.handle); - link_uhtid = TC_U32_USERHTID(cls->knode.link_handle); - - /* At the moment cls_u32 jumps to network layer and skips past - * L2 headers. The canonical method to match L2 frames is to use - * negative values. However this is error prone at best but really - * just broken because there is no way to "know" what sort of hdr - * is in front of the network layer. Fix cls_u32 to support L2 - * headers when needed. - */ - if (protocol != htons(ETH_P_IP)) - return err; - - if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) { - e_err(drv, "Location out of range\n"); - return err; - } - - /* cls u32 is a graph starting at root node 0x800. The driver tracks - * links and also the fields used to advance the parser across each - * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map - * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h - * To add support for new nodes update ixgbe_model.h parse structures - * this function _should_ be generic try not to hardcode values here. - */ - if (uhtid == 0x800) { - field_ptr = (adapter->jump_tables[0])->mat; - } else { - if (uhtid >= IXGBE_MAX_LINK_HANDLE) - return err; - if (!adapter->jump_tables[uhtid]) - return err; - field_ptr = (adapter->jump_tables[uhtid])->mat; - } - - if (!field_ptr) - return err; - - /* At this point we know the field_ptr is valid and need to either - * build cls_u32 link or attach filter. Because adding a link to - * a handle that does not exist is invalid and the same for adding - * rules to handles that don't exist. - */ - - if (link_uhtid) { - struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; - - if (link_uhtid >= IXGBE_MAX_LINK_HANDLE) - return err; - - if (!test_bit(link_uhtid - 1, &adapter->tables)) - return err; - - /* Multiple filters as links to the same hash table are not - * supported. To add a new filter with the same next header - * but different match/jump conditions, create a new hash table - * and link to it. 
- */ - if (adapter->jump_tables[link_uhtid] && - (adapter->jump_tables[link_uhtid])->link_hdl) { - e_err(drv, "Link filter exists for link: %x\n", - link_uhtid); - return err; - } - - for (i = 0; nexthdr[i].jump; i++) { - if (nexthdr[i].o != cls->knode.sel->offoff || - nexthdr[i].s != cls->knode.sel->offshift || - nexthdr[i].m != cls->knode.sel->offmask) - return err; - - jump = kzalloc(sizeof(*jump), GFP_KERNEL); - if (!jump) - return -ENOMEM; - input = kzalloc(sizeof(*input), GFP_KERNEL); - if (!input) { - err = -ENOMEM; - goto free_jump; - } - mask = kzalloc(sizeof(*mask), GFP_KERNEL); - if (!mask) { - err = -ENOMEM; - goto free_input; - } - jump->input = input; - jump->mask = mask; - jump->link_hdl = cls->knode.handle; - - err = ixgbe_clsu32_build_input(input, mask, cls, - field_ptr, &nexthdr[i]); - if (!err) { - jump->mat = nexthdr[i].jump; - adapter->jump_tables[link_uhtid] = jump; - break; - } - } - return 0; - } - - input = kzalloc(sizeof(*input), GFP_KERNEL); - if (!input) - return -ENOMEM; - mask = kzalloc(sizeof(*mask), GFP_KERNEL); - if (!mask) { - err = -ENOMEM; - goto free_input; - } - - if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) { - if ((adapter->jump_tables[uhtid])->input) - memcpy(input, (adapter->jump_tables[uhtid])->input, - sizeof(*input)); - if ((adapter->jump_tables[uhtid])->mask) - memcpy(mask, (adapter->jump_tables[uhtid])->mask, - sizeof(*mask)); - - /* Lookup in all child hash tables if this location is already - * filled with a filter - */ - for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { - struct ixgbe_jump_table *link = adapter->jump_tables[i]; - - if (link && (test_bit(loc - 1, link->child_loc_map))) { - e_err(drv, "Filter exists in location: %x\n", - loc); - err = -EINVAL; - goto err_out; - } - } - } - err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL); - if (err) - goto err_out; - - err = parse_tc_actions(adapter, cls->knode.exts, &input->action, - &queue); - if (err < 0) - goto err_out; - - input->sw_idx = 
loc; - - spin_lock(&adapter->fdir_perfect_lock); - - if (hlist_empty(&adapter->fdir_filter_list)) { - memcpy(&adapter->fdir_mask, mask, sizeof(*mask)); - err = ixgbe_fdir_set_input_mask_82599(hw, mask); - if (err) - goto err_out_w_lock; - } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) { - err = -EINVAL; - goto err_out_w_lock; - } - - ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask); - err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, - input->sw_idx, queue); - if (!err) - ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); - spin_unlock(&adapter->fdir_perfect_lock); - - if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) - set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map); - - kfree(mask); - return err; -err_out_w_lock: - spin_unlock(&adapter->fdir_perfect_lock); -err_out: - kfree(mask); -free_input: - kfree(input); -free_jump: - kfree(jump); - return err; -} - -static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, - struct tc_to_netdev *tc) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - - if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && - tc->type == TC_SETUP_CLSU32) { - switch (tc->cls_u32->command) { - case TC_CLSU32_NEW_KNODE: - case TC_CLSU32_REPLACE_KNODE: - return ixgbe_configure_clsu32(adapter, - proto, tc->cls_u32); - case TC_CLSU32_DELETE_KNODE: - return ixgbe_delete_clsu32(adapter, tc->cls_u32); - case TC_CLSU32_NEW_HNODE: - case TC_CLSU32_REPLACE_HNODE: - return ixgbe_configure_clsu32_add_hnode(adapter, proto, - tc->cls_u32); - case TC_CLSU32_DELETE_HNODE: - return ixgbe_configure_clsu32_del_hnode(adapter, - tc->cls_u32); - default: - return -EINVAL; - } - } - - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; - - return ixgbe_setup_tc(dev, tc->tc); -} - #ifdef CONFIG_PCI_IOV void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) { @@ -8755,8 +9895,8 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) ixgbe_setup_tc(netdev, 
netdev_get_num_tc(netdev)); rtnl_unlock(); } - #endif + void ixgbe_do_reset(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -8767,11 +9907,25 @@ void ixgbe_do_reset(struct net_device *netdev) ixgbe_reset(adapter); } +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static u32 ixgbe_fix_features(struct net_device *netdev, u32 features) +#else static netdev_features_t ixgbe_fix_features(struct net_device *netdev, netdev_features_t features) +#endif { struct ixgbe_adapter *adapter = netdev_priv(netdev); +#if IS_ENABLED(CONFIG_DCB) + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) +#ifdef NETIF_F_HW_VLAN_CTAG_RX + features |= NETIF_F_HW_VLAN_CTAG_RX; +#else + features |= NETIF_F_HW_VLAN_RX; +#endif +#endif /* CONFIG_DCB */ + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ if (!(features & NETIF_F_RXCSUM)) features &= ~NETIF_F_LRO; @@ -8783,12 +9937,16 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev, return features; } +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static int ixgbe_set_features(struct net_device *netdev, u32 features) +#else static int ixgbe_set_features(struct net_device *netdev, netdev_features_t features) +#endif { struct ixgbe_adapter *adapter = netdev_priv(netdev); - netdev_features_t changed = netdev->features ^ features; bool need_reset = false; + netdev_features_t changed = netdev->features ^ features; /* Make sure RSC matches LRO, reset if change */ if (!(features & NETIF_F_LRO)) { @@ -8801,77 +9959,95 @@ static int ixgbe_set_features(struct net_device *netdev, adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; need_reset = true; - } else if ((changed ^ features) & NETIF_F_LRO) { + } else if (changed & NETIF_F_LRO) { e_info(probe, "rx-usecs set too low, " "disabling RSC\n"); } } /* - * Check if Flow Director n-tuple support or hw_tc support was - * enabled or disabled. If the state changed, we need to reset. 
+ * Check if Flow Director n-tuple support was enabled or disabled. If + * the state changed, we need to reset. */ - if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) { + switch (features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: /* turn off ATR, enable perfect filters and reset */ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) need_reset = true; adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - } else { + break; + default: /* turn off perfect filters, enable ATR and reset */ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) need_reset = true; adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - /* We cannot enable ATR if SR-IOV is enabled */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED || - /* We cannot enable ATR if we have 2 or more tcs */ - (netdev_get_num_tc(netdev) > 1) || - /* We cannot enable ATR if RSS is disabled */ - (adapter->ring_feature[RING_F_RSS].limit <= 1) || - /* A sample rate of 0 indicates ATR disabled */ - (!adapter->atr_sample_rate)) - ; /* do nothing not supported */ - else /* otherwise supported and set the flag */ - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - } + /* We cannot enable ATR if VMDq is enabled */ + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) + break; - if (changed & NETIF_F_RXALL) - need_reset = true; + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + break; + + /* We cannot enable ATR if RSS is disabled */ + if (adapter->ring_feature[RING_F_RSS].limit <= 1) + break; + + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + break; + + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + break; + } netdev->features = features; - if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { - if (features & NETIF_F_RXCSUM) { +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) + if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && + features 
& NETIF_F_RXCSUM) { + if (!need_reset) adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; - } else { - u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; + } else { + u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; - ixgbe_clear_udp_tunnel_port(adapter, port_mask); - } + ixgbe_clear_udp_tunnel_port(adapter, port_mask); } +#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ - if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) { - if (features & NETIF_F_RXCSUM) { +#ifdef HAVE_UDP_ENC_RX_OFFLOAD + if (adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE && + features & NETIF_F_RXCSUM) { + if (!need_reset) adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; - } else { - u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; + } else { + u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; - ixgbe_clear_udp_tunnel_port(adapter, port_mask); - } + ixgbe_clear_udp_tunnel_port(adapter, port_mask); } - +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ if (need_reset) ixgbe_do_reset(netdev); +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER)) ixgbe_set_rx_mode(netdev); - +#endif +#ifdef NETIF_F_HW_VLAN_FILTER + else if (changed & (NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER)) + ixgbe_set_rx_mode(netdev); +#endif return 0; + } +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef HAVE_UDP_ENC_RX_OFFLOAD /** * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports * @dev: The port's netdev @@ -8980,14 +10156,95 @@ static void ixgbe_del_udp_tunnel_port(struct net_device *dev, return; } - ixgbe_clear_udp_tunnel_port(adapter, port_mask); + ixgbe_clear_udp_tunnel_port(adapter, port_mask); + adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; +} +#elif defined(HAVE_VXLAN_RX_OFFLOAD) +/** + * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up + * @dev: The port's netdev + * @sa_family: Socket Family that VXLAN is notifiying us about + * @port: New UDP port number that VXLAN started 
listening to + * @type: Enumerated type specifying UDP tunnel type + */ +static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + + if (sa_family != AF_INET) + return; + + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE)) + return; + + if (adapter->vxlan_port == port) + return; + + if (adapter->vxlan_port) { + netdev_info(dev, + "Hit Max num of VXLAN ports, not adding port %d\n", + ntohs(port)); + return; + } + + adapter->vxlan_port = port; + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port)); +} + +/** + * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away + * @dev: The port's netdev + * @sa_family: Socket Family that VXLAN is notifying us about + * @port: UDP port number that VXLAN stopped listening to + * @type: Enumerated type specifying UDP tunnel type + */ +static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE)) + return; + + if (sa_family != AF_INET) + return; + + if (adapter->vxlan_port != port) { + netdev_info(dev, "Port %d was not found, not deleting\n", + ntohs(port)); + return; + } + + ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; } +#endif /* HAVE_VXLAN_RX_OFFLOAD */ +#ifdef HAVE_NDO_GSO_CHECK +static bool +ixgbe_gso_check(struct sk_buff *skb, __always_unused struct net_device *dev) +{ + return vxlan_gso_check(skb); +} +#endif /* HAVE_NDO_GSO_CHECK */ + +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, u16 vid, + const unsigned char *addr, +#ifdef HAVE_NDO_FDB_ADD_VID + u16 vid, +#endif + u16 flags) +#else +static int ixgbe_ndo_fdb_add(struct 
ndmsg *ndm, + struct net_device *dev, + unsigned char *addr, u16 flags) +#endif /* USE_CONST_DEV_UC_CHAR */ { /* guarantee we can provide a unique filter for the unicast address */ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { @@ -8998,85 +10255,26 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return -ENOMEM; } +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_NDO_FDB_ADD_VID return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +#else + return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); +#endif /* HAVE_NDO_FDB_ADD_VID */ +#else + return ndo_dflt_fdb_add(ndm, dev, addr, flags); +#endif /* USE_CONST_DEV_UC_CHAR */ } -/** - * ixgbe_configure_bridge_mode - set various bridge modes - * @adapter - the private structure - * @mode - requested bridge mode - * - * Configure some settings require for various bridge modes. - **/ -static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter, - __u16 mode) -{ - struct ixgbe_hw *hw = &adapter->hw; - unsigned int p, num_pools; - u32 vmdctl; - - switch (mode) { - case BRIDGE_MODE_VEPA: - /* disable Tx loopback, rely on switch hairpin mode */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0); - - /* must enable Rx switching replication to allow multicast - * packet reception on all VFs, and to enable source address - * pruning. - */ - vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); - vmdctl |= IXGBE_VT_CTL_REPLEN; - IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); - - /* enable Rx source address pruning. Note, this requires - * replication to be enabled or else it does nothing. 
- */ - num_pools = adapter->num_vfs + adapter->num_rx_pools; - for (p = 0; p < num_pools; p++) { - if (hw->mac.ops.set_source_address_pruning) - hw->mac.ops.set_source_address_pruning(hw, - true, - p); - } - break; - case BRIDGE_MODE_VEB: - /* enable Tx loopback for internal VF/PF communication */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, - IXGBE_PFDTXGSWC_VT_LBEN); - - /* disable Rx switching replication unless we have SR-IOV - * virtual functions - */ - vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); - if (!adapter->num_vfs) - vmdctl &= ~IXGBE_VT_CTL_REPLEN; - IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); - - /* disable Rx source address pruning, since we don't expect to - * be receiving external loopback of our transmitted frames. - */ - num_pools = adapter->num_vfs + adapter->num_rx_pools; - for (p = 0; p < num_pools; p++) { - if (hw->mac.ops.set_source_address_pruning) - hw->mac.ops.set_source_address_pruning(hw, - false, - p); - } - break; - default: - return -EINVAL; - } - - adapter->bridge_mode = mode; - - e_info(drv, "enabling bridge mode: %s\n", - mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); - - return 0; -} - +#ifdef HAVE_BRIDGE_ATTRIBS +#ifdef HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS static int ixgbe_ndo_bridge_setlink(struct net_device *dev, - struct nlmsghdr *nlh, u16 flags) + struct nlmsghdr *nlh, + __always_unused u16 flags) +#else +static int ixgbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +#endif /* HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS */ { struct ixgbe_adapter *adapter = netdev_priv(dev); struct nlattr *attr, *br_spec; @@ -9086,140 +10284,73 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, return -EOPNOTSUPP; br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); - if (!br_spec) - return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { - int status; __u16 mode; if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; - if (nla_len(attr) < sizeof(mode)) + mode = nla_get_u16(attr); + if (mode == BRIDGE_MODE_VEPA) { + adapter->flags |= IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else if (mode == BRIDGE_MODE_VEB) { + adapter->flags &= ~IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else { return -EINVAL; + } - mode = nla_get_u16(attr); - status = ixgbe_configure_bridge_mode(adapter, mode); - if (status) - return status; + adapter->bridge_mode = mode; - break; + /* re-configure settings related to bridge mode */ + ixgbe_configure_bridge_mode(adapter); + + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); } return 0; } +#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __maybe_unused filter_mask, + int nlflags) +#elif defined(HAVE_BRIDGE_FILTER) static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, - u32 filter_mask, int nlflags) + u32 __always_unused filter_mask) +#else +static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev) +#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ { struct ixgbe_adapter *adapter = netdev_priv(dev); + u16 mode; if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return 0; - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, - adapter->bridge_mode, 0, 0, nlflags, + mode = adapter->bridge_mode; +#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, filter_mask, NULL); +#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS) + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags); +#elif defined(HAVE_NDO_FDB_ADD_VID) || \ + defined NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); +#else + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); +#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ } +#endif /* HAVE_BRIDGE_ATTRIBS */ +#endif /* HAVE_FDB_OPS */ -static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) -{ - struct ixgbe_fwd_adapter *fwd_adapter = NULL; - struct ixgbe_adapter *adapter = netdev_priv(pdev); - int used_pools = adapter->num_vfs + adapter->num_rx_pools; - unsigned int limit; - int pool, err; - - /* Hardware has a limited number of available pools. Each VF, and the - * PF require a pool. Check to ensure we don't attempt to use more - * then the available number of pools. 
- */ - if (used_pools >= IXGBE_MAX_VF_FUNCTIONS) - return ERR_PTR(-EINVAL); - -#ifdef CONFIG_RPS - if (vdev->num_rx_queues != vdev->num_tx_queues) { - netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n", - vdev->name); - return ERR_PTR(-EINVAL); - } -#endif - /* Check for hardware restriction on number of rx/tx queues */ - if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES || - vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) { - netdev_info(pdev, - "%s: Supports RX/TX Queue counts 1,2, and 4\n", - pdev->name); - return ERR_PTR(-EINVAL); - } - - if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && - adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) || - (adapter->num_rx_pools > IXGBE_MAX_MACVLANS)) - return ERR_PTR(-EBUSY); - - fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL); - if (!fwd_adapter) - return ERR_PTR(-ENOMEM); - - pool = find_first_zero_bit(&adapter->fwd_bitmask, 32); - adapter->num_rx_pools++; - set_bit(pool, &adapter->fwd_bitmask); - limit = find_last_bit(&adapter->fwd_bitmask, 32); - - /* Enable VMDq flag so device will be set in VM mode */ - adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED; - adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; - adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues; - - /* Force reinit of ring allocation with VMDQ enabled */ - err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); - if (err) - goto fwd_add_err; - fwd_adapter->pool = pool; - fwd_adapter->real_adapter = adapter; - - if (netif_running(pdev)) { - err = ixgbe_fwd_ring_up(vdev, fwd_adapter); - if (err) - goto fwd_add_err; - netif_tx_start_all_queues(vdev); - } - - return fwd_adapter; -fwd_add_err: - /* unwind counter and free adapter struct */ - netdev_info(pdev, - "%s: dfwd hardware acceleration failed\n", vdev->name); - clear_bit(pool, &adapter->fwd_bitmask); - adapter->num_rx_pools--; - kfree(fwd_adapter); - return ERR_PTR(err); -} - -static void ixgbe_fwd_del(struct net_device *pdev, void *priv) -{ - 
struct ixgbe_fwd_adapter *fwd_adapter = priv; - struct ixgbe_adapter *adapter = fwd_adapter->real_adapter; - unsigned int limit; - - clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask); - adapter->num_rx_pools--; - - limit = find_last_bit(&adapter->fwd_bitmask, 32); - adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; - ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter); - ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); - netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", - fwd_adapter->pool, adapter->num_rx_pools, - fwd_adapter->rx_base_queue, - fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool, - adapter->fwd_bitmask); - kfree(fwd_adapter); -} - +#ifdef HAVE_NDO_FEATURES_CHECK +#define IXGBE_MAX_TUNNEL_HDR_LEN 80 +#ifdef NETIF_F_GSO_PARTIAL #define IXGBE_MAX_MAC_HDR_LEN 127 #define IXGBE_MAX_NETWORK_HDR_LEN 511 @@ -9253,97 +10384,207 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +#else +static netdev_features_t +ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + if (!skb->encapsulation) + return features; + + if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) > + IXGBE_MAX_TUNNEL_HDR_LEN)) + return features & ~NETIF_F_CSUM_MASK; + + return features; +} +#endif /* NETIF_F_GSO_PARTIAL */ +#endif /* HAVE_NDO_FEATURES_CHECK */ +#ifdef HAVE_NET_DEVICE_OPS static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, .ndo_start_xmit = ixgbe_xmit_frame, +#if IS_ENABLED(CONFIG_FCOE) .ndo_select_queue = ixgbe_select_queue, +#else +#ifndef HAVE_MQPRIO + .ndo_select_queue = __netdev_pick_tx, +#endif +#endif .ndo_set_rx_mode = ixgbe_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgbe_set_mac, .ndo_change_mtu = ixgbe_change_mtu, .ndo_tx_timeout = ixgbe_tx_timeout, - .ndo_set_tx_maxrate = ixgbe_tx_maxrate, +#if defined(NETIF_F_HW_VLAN_TX) || 
defined(NETIF_F_HW_VLAN_CTAG_TX) .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, +#endif .ndo_do_ioctl = ixgbe_ioctl, +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT +/* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the + * function get_ndo_ext to retrieve offsets for extended fields from with the + * net_device_ops struct and ndo_size is checked to determine whether or not + * the offset is valid. + */ + .ndo_size = sizeof(const struct net_device_ops), +#endif +#ifdef IFLA_VF_MAX .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN + .extended.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, +#else .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, +#else + .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +#if defined(HAVE_VF_SPOOFCHK_CONFIGURE) && IS_ENABLED(CONFIG_PCI_IOV) .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, +#endif +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, +#endif +#ifdef HAVE_NDO_SET_VF_TRUST +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + .extended.ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, +#else .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, +#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */ +#endif /* HAVE_NDO_SET_VF_TRUST */ .ndo_get_vf_config = ixgbe_ndo_get_vf_config, +#endif /* IFLA_VF_MAX */ +#ifdef HAVE_NDO_GET_STATS64 .ndo_get_stats64 = ixgbe_get_stats64, +#else + .ndo_get_stats = ixgbe_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ +#ifdef HAVE_SETUP_TC +#ifdef NETIF_F_HW_TC .ndo_setup_tc = __ixgbe_setup_tc, +#else + .ndo_setup_tc = ixgbe_setup_tc, +#endif /* NETIF_F_HW_TC */ +#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbe_netpoll, #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = ixgbe_low_latency_recv, -#endif -#ifdef IXGBE_FCOE +#ifndef 
HAVE_RHEL6_NET_DEVICE_EXTENDED +#ifdef HAVE_NDO_BUSY_POLL + .ndo_busy_poll = ixgbe_busy_poll_recv, +#endif /* HAVE_NDO_BUSY_POLL */ +#endif /* !HAVE_RHEL6_NET_DEVICE_EXTENDED */ +#if IS_ENABLED(CONFIG_FCOE) .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, +#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, +#endif .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, +#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE .ndo_fcoe_enable = ixgbe_fcoe_enable, .ndo_fcoe_disable = ixgbe_fcoe_disable, +#endif +#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, - .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo, -#endif /* IXGBE_FCOE */ - .ndo_set_features = ixgbe_set_features, - .ndo_fix_features = ixgbe_fix_features, +#endif +#endif /* CONFIG_FCOE */ +#ifdef HAVE_VLAN_RX_REGISTER + .ndo_vlan_rx_register = &ixgbe_vlan_mode, +#endif +#ifdef HAVE_FDB_OPS .ndo_fdb_add = ixgbe_ndo_fdb_add, +#ifndef USE_DEFAULT_FDB_DEL_DUMP + .ndo_fdb_del = ndo_dflt_fdb_del, + .ndo_fdb_dump = ndo_dflt_fdb_dump, +#endif +#ifdef HAVE_BRIDGE_ATTRIBS .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, - .ndo_dfwd_add_station = ixgbe_fwd_add, - .ndo_dfwd_del_station = ixgbe_fwd_del, +#endif /* HAVE_BRIDGE_ATTRIBS */ +#endif +#ifdef HAVE_UDP_ENC_RX_OFFLOAD +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL + .extended.ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, + .extended.ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, +#else .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, +#endif +#elif defined(HAVE_VXLAN_RX_OFFLOAD) + .ndo_add_vxlan_port = ixgbe_add_vxlan_port, + .ndo_del_vxlan_port = ixgbe_del_vxlan_port, +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ +#ifdef HAVE_NDO_GSO_CHECK + .ndo_gso_check = ixgbe_gso_check, +#endif /* HAVE_NDO_GSO_CHECK */ +#ifdef HAVE_NDO_FEATURES_CHECK .ndo_features_check = ixgbe_features_check, +#endif /* HAVE_NDO_FEATURES_CHECK */ +#ifdef 
HAVE_RHEL6_NET_DEVICE_OPS_EXT }; -/** - * ixgbe_enumerate_functions - Get the number of ports this device has - * @adapter: adapter structure - * - * This function enumerates the phsyical functions co-located on a single slot, - * in order to determine how many ports a device has. This is most useful in - * determining the required GT/s of PCIe bandwidth necessary for optimal - * performance. - **/ -static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) -{ - struct pci_dev *entry, *pdev = adapter->pdev; - int physfns = 0; - - /* Some cards can not use the generic count PCIe functions method, - * because they are behind a parent switch, so we hardcode these with - * the correct number of functions. - */ - if (ixgbe_pcie_from_parent(&adapter->hw)) - physfns = 4; - - list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) { - /* don't count virtual functions */ - if (entry->is_virtfn) - continue; - - /* When the devices on the bus don't all match our device ID, - * we can't reliably determine the correct number of - * functions. This can occur if a function has been direct - * attached to a virtual machine using VT-d, for example. In - * this case, simply return -1 to indicate this. 
- */ - if ((entry->vendor != pdev->vendor) || - (entry->device != pdev->device)) - return -1; +/* RHEL6 keeps these operations in a separate structure */ +static const struct net_device_ops_ext ixgbe_netdev_ops_ext = { + .size = sizeof(struct net_device_ops_ext), +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#ifdef HAVE_NDO_SET_FEATURES + .ndo_set_features = ixgbe_set_features, + .ndo_fix_features = ixgbe_fix_features, +#endif /* HAVE_NDO_SET_FEATURES */ +}; +#endif /* HAVE_NET_DEVICE_OPS */ + +void ixgbe_assign_netdev_ops(struct net_device *dev) +{ +#ifdef HAVE_NET_DEVICE_OPS + dev->netdev_ops = &ixgbe_netdev_ops; +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_ops_ext(dev, &ixgbe_netdev_ops_ext); +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#else /* HAVE_NET_DEVICE_OPS */ + dev->open = &ixgbe_open; + dev->stop = &ixgbe_close; + dev->hard_start_xmit = &ixgbe_xmit_frame; + dev->get_stats = &ixgbe_get_stats; +#ifdef HAVE_SET_RX_MODE + dev->set_rx_mode = &ixgbe_set_rx_mode; +#endif + dev->set_multicast_list = &ixgbe_set_rx_mode; + dev->set_mac_address = &ixgbe_set_mac; + dev->change_mtu = &ixgbe_change_mtu; + dev->do_ioctl = &ixgbe_ioctl; +#ifdef HAVE_TX_TIMEOUT + dev->tx_timeout = &ixgbe_tx_timeout; +#endif +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + dev->vlan_rx_register = &ixgbe_vlan_mode; + dev->vlan_rx_add_vid = &ixgbe_vlan_rx_add_vid; + dev->vlan_rx_kill_vid = &ixgbe_vlan_rx_kill_vid; +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = &ixgbe_netpoll; +#endif +#ifdef HAVE_NETDEV_SELECT_QUEUE +#if IS_ENABLED(CONFIG_FCOE) + dev->select_queue = &ixgbe_select_queue; +#else + dev->select_queue = &__netdev_pick_tx; +#endif +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* HAVE_NET_DEVICE_OPS */ - physfns++; - } +#ifdef HAVE_RHEL6_NET_DEVICE_EXTENDED +#ifdef HAVE_NDO_BUSY_POLL + netdev_extended(dev)->ndo_busy_poll = ixgbe_busy_poll_recv; +#endif /* HAVE_NDO_BUSY_POLL */ +#endif /* HAVE_RHEL6_NET_DEVICE_EXTENDED */ - return 
physfns; + ixgbe_set_ethtool_ops(dev); + dev->watchdog_timeo = 5 * HZ; } /** @@ -9386,6 +10627,7 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, /* only support first port */ if (hw->bus.func != 0) break; + /* fall through */ case IXGBE_SUBDEV_ID_82599_SP_560FLR: case IXGBE_SUBDEV_ID_82599_SFP: case IXGBE_SUBDEV_ID_82599_RNDC: @@ -9397,7 +10639,7 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, } break; case IXGBE_DEV_ID_82599EN_SFP: - /* Only these subdevices support WOL */ + /* Only these subdevices support WoL */ switch (subdevice_id) { case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1: return true; @@ -9417,6 +10659,84 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, return false; } +/** + * ixgbe_set_fw_version - Set FW version + * @adapter: the adapter private structure + * + * This function is used by probe and ethtool to determine the FW version to + * format to display. The FW version is taken from the EEPROM/NVM. + * + **/ +static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u16 eeprom_verh = 0, eeprom_verl = 0; + u16 offset = 0; + u32 etrack_id; + + /* Check for OEM Product Version block format */ + hw->eeprom.ops.read(hw, 0x1b, &offset); + + /* Make sure offset to OEM Product Version block is valid */ + if (!(offset == 0x0) && !(offset == 0xffff)) { + u16 mod_len = 0, cap = 0, prod_ver = 0, rel_num = 0; + u16 build, major, patch; + + /* Read product version block */ + hw->eeprom.ops.read(hw, offset, &mod_len); + hw->eeprom.ops.read(hw, offset + 0x1, &cap); + hw->eeprom.ops.read(hw, offset + 0x2, &prod_ver); + hw->eeprom.ops.read(hw, offset + 0x3, &rel_num); + + /* Only display OEM product version if valid block */ + if (mod_len == 0x3 && (cap & 0xf) == 0x0) { + major = prod_ver >> 8; + build = prod_ver & 0xff; + patch = rel_num; + + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + "%x.%x.%x", major, build, patch); + return; + 
} + } + + /* + * Save off EEPROM version number and Option Rom version which + * together make a unique identify for the eeprom + */ + hw->eeprom.ops.read(hw, 0x2e, &eeprom_verh); + hw->eeprom.ops.read(hw, 0x2d, &eeprom_verl); + etrack_id = (eeprom_verh << 16) | eeprom_verl; + + /* Check for SCSI block version format */ + hw->eeprom.ops.read(hw, 0x17, &offset); + + /* Make sure offset to SCSI block is valid */ + if (!(offset == 0x0) && !(offset == 0xffff)) { + u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0; + u16 build, major, patch; + + hw->eeprom.ops.read(hw, offset + 0x84, &eeprom_cfg_blkh); + hw->eeprom.ops.read(hw, offset + 0x83, &eeprom_cfg_blkl); + + /* Only display Option Rom if exist */ + if (eeprom_cfg_blkl && eeprom_cfg_blkh) { + major = eeprom_cfg_blkl >> 8; + build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8); + patch = eeprom_cfg_blkh & 0x00ff; + + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + "0x%08x, %d.%d.%d", etrack_id, major, build, + patch); + return; + } + } + + /* Set ETrack ID format */ + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + "0x%08x", etrack_id); +} + /** * ixgbe_probe - Device Initialization Routine * @pdev: PCI device information struct @@ -9428,82 +10748,125 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. 
**/ -static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +static int __devinit ixgbe_probe(struct pci_dev *pdev, + const struct pci_device_id __always_unused *ent) { struct net_device *netdev; struct ixgbe_adapter *adapter = NULL; - struct ixgbe_hw *hw; - const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; - int i, err, pci_using_dac, expected_gts; - unsigned int indices = MAX_TX_QUEUES; + struct ixgbe_hw *hw = NULL; + static int cards_found; + int err, pci_using_dac, expected_gts; + char *info_string, *i_s_var; u8 part_str[IXGBE_PBANUM_LENGTH]; + enum ixgbe_mac_type mac_type = ixgbe_mac_unknown; +#ifdef HAVE_TX_MQ + unsigned int indices = MAX_TX_QUEUES; +#endif /* HAVE_TX_MQ */ bool disable_dev = false; -#ifdef IXGBE_FCOE +#if IS_ENABLED(CONFIG_FCOE) u16 device_caps; #endif - u32 eec; - - /* Catch broken hardware that put the wrong VF device ID in - * the PCIe SR-IOV capability. - */ - if (pdev->is_virtfn) { - WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", - pci_name(pdev), pdev->vendor, pdev->device); - return -EINVAL; - } +#ifndef NETIF_F_GSO_PARTIAL +#ifdef HAVE_NDO_SET_FEATURES +#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT + netdev_features_t hw_features; +#else + u32 hw_features; +#endif +#endif +#endif /* NETIF_F_GSO_PARTIAL */ err = pci_enable_device_mem(pdev); if (err) return err; - if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + if (!dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, - "No usable DMA configuration, aborting\n"); - goto err_dma; + err = dma_set_coherent_mask(pci_dev_to_dev(pdev), + DMA_BIT_MASK(32)); + if (err) { + dev_err(pci_dev_to_dev(pdev), "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } } pci_using_dac 
= 0; } err = pci_request_mem_regions(pdev, ixgbe_driver_name); if (err) { - dev_err(&pdev->dev, + dev_err(pci_dev_to_dev(pdev), "pci_request_selected_regions failed 0x%x\n", err); goto err_pci_reg; } + /* + * The mac_type is needed before we have the adapter is set up + * so rather than maintain two devID -> MAC tables we dummy up + * an ixgbe_hw stuct and use ixgbe_set_mac_type. + */ + hw = vmalloc(sizeof(struct ixgbe_hw)); + if (!hw) { + pr_info("Unable to allocate memory for early mac " + "check\n"); + } else { + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + ixgbe_set_mac_type(hw); + mac_type = hw->mac.type; + vfree(hw); + } + + /* + * Workaround of Silicon errata on 82598. Disable LOs in the PCI switch + * port to which the 82598 is connected to prevent duplicate + * completions caused by LOs. We need the mac type so that we only + * do this on 82598 devices, ixgbe_set_mac_type does this for us if + * we set it's device ID. + */ + if (mac_type == ixgbe_mac_82598EB) + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); + pci_enable_pcie_error_reporting(pdev); pci_set_master(pdev); - pci_save_state(pdev); - if (ii->mac == ixgbe_mac_82598EB) { -#ifdef CONFIG_IXGBE_DCB - /* 8 TC w/ 4 queues per TC */ - indices = 4 * MAX_TRAFFIC_CLASS; -#else +#ifdef HAVE_TX_MQ + if (mac_type == ixgbe_mac_82598EB) { +#if IS_ENABLED(CONFIG_DCB) + indices = IXGBE_MAX_DCB_INDICES * 4; +#else /* CONFIG_DCB */ indices = IXGBE_MAX_RSS_INDICES; -#endif +#endif /* !CONFIG_DCB */ } netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); +#else /* HAVE_TX_MQ */ + netdev = alloc_etherdev(sizeof(struct ixgbe_adapter)); +#endif /* HAVE_TX_MQ */ if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; } - SET_NETDEV_DEV(netdev, &pdev->dev); + SET_MODULE_OWNER(netdev); + SET_NETDEV_DEV(netdev, pci_dev_to_dev(pdev)); adapter = netdev_priv(netdev); - +#ifdef HAVE_TX_MQ +#ifndef HAVE_NETDEV_SELECT_QUEUE + adapter->indices = indices; +#endif +#endif adapter->netdev = netdev; 
adapter->pdev = pdev; hw = &adapter->hw; hw->back = adapter; - adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); @@ -9513,42 +10876,13 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_ioremap; } - netdev->netdev_ops = &ixgbe_netdev_ops; - ixgbe_set_ethtool_ops(netdev); - netdev->watchdog_timeo = 5 * HZ; - strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); - - /* Setup hw api */ - hw->mac.ops = *ii->mac_ops; - hw->mac.type = ii->mac; - hw->mvals = ii->mvals; - if (ii->link_ops) - hw->link.ops = *ii->link_ops; - - /* EEPROM */ - hw->eeprom.ops = *ii->eeprom_ops; - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); - if (ixgbe_removed(hw->hw_addr)) { - err = -EIO; - goto err_ioremap; - } - /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ - if (!(eec & BIT(8))) - hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; + ixgbe_assign_netdev_ops(netdev); - /* PHY */ - hw->phy.ops = *ii->phy_ops; - hw->phy.sfp_type = ixgbe_sfp_type_unknown; - /* ixgbe_identify_phy_generic will set prtad and mmds properly */ - hw->phy.mdio.prtad = MDIO_PRTAD_NONE; - hw->phy.mdio.mmds = 0; - hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; - hw->phy.mdio.dev = netdev; - hw->phy.mdio.mdio_read = ixgbe_mdio_read; - hw->phy.mdio.mdio_write = ixgbe_mdio_write; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); - ii->get_invariants(hw); + adapter->bd_number = cards_found; + ixgbe_get_hw_control(adapter); /* setup the private structure */ err = ixgbe_sw_init(adapter); if (err) @@ -9564,7 +10898,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 
break; default: @@ -9572,8 +10906,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } /* - * If there is a fan on this device and it has failed log the - * failure. + * If we have a fan, this is as early we know, warn if we + * have had a failure. */ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); @@ -9581,18 +10915,23 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) e_crit(probe, "Fan has stopped, replace the adapter\n"); } - if (allow_unsupported_sfp) - hw->allow_unsupported_sfp = allow_unsupported_sfp; + /* + * check_options must be called before setup_link to set up + * hw->fc completely + */ + ixgbe_check_options(adapter); /* reset_hw fills in the perm_addr as well */ hw->phy.reset_if_overtemp = true; err = hw->mac.ops.reset_hw(hw); hw->phy.reset_if_overtemp = false; if (err == IXGBE_ERR_SFP_NOT_PRESENT) { - err = 0; + err = IXGBE_SUCCESS; } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { - e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n"); - e_dev_err("Reload the driver after installing a supported module.\n"); + e_dev_err("failed to load because an unsupported SFP+ or QSFP " + "module type was detected.\n"); + e_dev_err("Reload the driver after installing a supported " + "module.\n"); goto err_sw_init; } else if (err) { e_dev_err("HW Init failed: %d\n", err); @@ -9600,17 +10939,28 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } #ifdef CONFIG_PCI_IOV - /* SR-IOV not supported on the 82598 */ - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - goto skip_sriov; - /* Mailbox */ - ixgbe_init_mbx_params_pf(hw); - hw->mbx.ops = ii->mbx_ops; - pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT); - ixgbe_enable_sriov(adapter); -skip_sriov: +#if defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE) + if (adapter->max_vfs > 0) { + e_dev_warn("Enabling SR-IOV VFs using 
the max_vfs module parameter is deprecated.\n"); + e_dev_warn("Please use the pci sysfs interface instead. Ex:\n"); + e_dev_warn("echo '%d' > /sys/bus/pci/devices/%04x:%02x:%02x.%1x/sriov_numvfs\n", + adapter->max_vfs, + pci_domain_nr(pdev->bus), + pdev->bus->number, + PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn) + ); + } #endif + if (adapter->flags & IXGBE_FLAG_SRIOV_CAPABLE) { + pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT); + ixgbe_enable_sriov(adapter); + } + +#endif /* CONFIG_PCI_IOV */ + +#ifdef NETIF_F_GSO_PARTIAL netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | @@ -9618,13 +10968,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_RXCSUM | NETIF_F_HW_CSUM; -#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ - NETIF_F_GSO_GRE_CSUM | \ - NETIF_F_GSO_IPXIP4 | \ - NETIF_F_GSO_IPXIP6 | \ - NETIF_F_GSO_UDP_TUNNEL | \ - NETIF_F_GSO_UDP_TUNNEL_CSUM) - netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES; netdev->features |= NETIF_F_GSO_PARTIAL | IXGBE_GSO_PARTIAL_FEATURES; @@ -9640,69 +10983,196 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_RXALL | NETIF_F_HW_L2FW_DOFFLOAD; - if (hw->mac.type >= ixgbe_mac_82599EB) - netdev->hw_features |= NETIF_F_NTUPLE | - NETIF_F_HW_TC; + if (hw->mac.type >= ixgbe_mac_82599EB) + netdev->hw_features |= NETIF_F_NTUPLE | + NETIF_F_HW_TC; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->hw_enc_features |= netdev->vlan_features; + netdev->mpls_features |= NETIF_F_HW_CSUM; + + /* set this bit last since it cannot be part of vlan_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + + /* give us the option of enabling RSC/LRO later */ + if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) + 
netdev->hw_features |= NETIF_F_LRO; + +#else /* NETIF_F_GSO_PARTIAL */ + netdev->features |= NETIF_F_SG | + NETIF_F_IP_CSUM; + +#ifdef NETIF_F_IPV6_CSUM + netdev->features |= NETIF_F_IPV6_CSUM; +#endif + +#ifdef NETIF_F_HW_VLAN_CTAG_TX + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX; +#endif + +#ifdef NETIF_F_HW_VLAN_TX + netdev->features |= NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_FILTER | + NETIF_F_HW_VLAN_RX; +#endif + netdev->features |= ixgbe_tso_features(); +#ifdef NETIF_F_RXHASH + netdev->features |= NETIF_F_RXHASH; +#endif /* NETIF_F_RXHASH */ + netdev->features |= NETIF_F_RXCSUM; - if (pci_using_dac) - netdev->features |= NETIF_F_HIGHDMA; +#ifdef HAVE_NDO_SET_FEATURES + /* copy netdev features into list of user selectable features */ +#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT + hw_features = netdev->hw_features; +#else + hw_features = get_netdev_hw_features(netdev); +#endif + hw_features |= netdev->features; - netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; - netdev->hw_enc_features |= netdev->vlan_features; - netdev->mpls_features |= NETIF_F_HW_CSUM; + /* give us the option of enabling RSC/LRO later */ + if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) + hw_features |= NETIF_F_LRO; - /* set this bit last since it cannot be part of vlan_features */ - netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_TX; +#else +#ifdef NETIF_F_GRO + + /* this is only needed on kernels prior to 2.6.39 */ + netdev->features |= NETIF_F_GRO; +#endif /* NETIF_F_GRO */ +#endif /* HAVE_NDO_SET_FEATURES */ + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + netdev->features |= NETIF_F_SCTP_CSUM; +#ifdef HAVE_NDO_SET_FEATURES + hw_features |= NETIF_F_SCTP_CSUM | + NETIF_F_NTUPLE; +#endif + break; + default: + break; + } +#ifdef HAVE_NDO_SET_FEATURES 
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_hw_features(netdev, hw_features); +#else + netdev->hw_features = hw_features; +#endif +#endif +#ifdef HAVE_NETDEV_VLAN_FEATURES + netdev->vlan_features |= NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6; + +#endif /* HAVE_NETDEV_VLAN_FEATURES */ +#ifdef HAVE_ENCAP_CSUM_OFFLOAD + netdev->hw_enc_features |= NETIF_F_SG; +#endif /* HAVE_ENCAP_CSUM_OFFLOAD */ +#ifdef HAVE_VXLAN_RX_OFFLOAD + if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE) { + netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM; + } +#endif /* NETIF_F_GSO_PARTIAL */ + +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + if (netdev->features & NETIF_F_LRO) { + if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && + ((adapter->rx_itr_setting == 1) || + (adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR))) { + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + } else if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { + e_dev_info("InterruptThrottleRate set too high, " + "disabling RSC\n"); + } + } +#ifdef IFF_UNICAST_FLT netdev->priv_flags |= IFF_UNICAST_FLT; +#endif +#ifdef IFF_SUPP_NOFCS netdev->priv_flags |= IFF_SUPP_NOFCS; +#endif + +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU range: 68 - 9710 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); -#ifdef CONFIG_IXGBE_DCB - if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) - netdev->dcbnl_ops = &dcbnl_ops; #endif +#if IS_ENABLED(CONFIG_DCB) + if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) + netdev->dcbnl_ops = &ixgbe_dcbnl_ops; -#ifdef IXGBE_FCOE +#endif /* CONFIG_DCB */ +#if IS_ENABLED(CONFIG_FCOE) +#ifdef NETIF_F_FSO if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { unsigned int fcoe_l; - if (hw->mac.ops.get_device_caps) { - hw->mac.ops.get_device_caps(hw, &device_caps); - if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) - adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; + hw->mac.ops.get_device_caps(hw, &device_caps); + if 
(device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) { + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; + e_dev_info("FCoE offload feature is not available. " + "Disabling FCoE offload feature\n"); + } else { + netdev->features |= NETIF_F_FSO | + NETIF_F_FCOE_CRC; +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE + ixgbe_fcoe_ddp_enable(adapter); + adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; + netdev->features |= NETIF_F_FCOE_MTU; +#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ } - fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus()); adapter->ring_feature[RING_F_FCOE].limit = fcoe_l; - netdev->features |= NETIF_F_FSO | - NETIF_F_FCOE_CRC; - +#ifdef HAVE_NETDEV_VLAN_FEATURES netdev->vlan_features |= NETIF_F_FSO | NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU; +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + } +#endif /* NETIF_F_FSO */ +#endif /* CONFIG_FCOE */ + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; +#ifdef HAVE_NETDEV_VLAN_FEATURES + netdev->vlan_features |= NETIF_F_HIGHDMA; +#endif /* HAVE_NETDEV_VLAN_FEATURES */ } -#endif /* IXGBE_FCOE */ - - if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) - netdev->hw_features |= NETIF_F_LRO; - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) - netdev->features |= NETIF_F_LRO; /* make sure the EEPROM is good */ - if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { + if (hw->eeprom.ops.validate_checksum && + (hw->eeprom.ops.validate_checksum(hw, NULL) < 0)) { e_dev_err("The EEPROM Checksum Is Not Valid\n"); err = -EIO; goto err_sw_init; } - eth_platform_get_mac_address(&adapter->pdev->dev, - adapter->hw.mac.perm_addr); - memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); +#ifdef ETHTOOL_GPERMADDR + memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); +#endif if (!is_valid_ether_addr(netdev->dev_addr)) { e_dev_err("invalid MAC address\n"); @@ -9717,7 +11187,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) setup_timer(&adapter->service_timer, 
&ixgbe_service_timer, (unsigned long) adapter); - if (ixgbe_removed(hw->hw_addr)) { + if (IXGBE_REMOVED(hw->hw_addr)) { err = -EIO; goto err_sw_init; } @@ -9732,53 +11202,14 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* WOL not supported for all devices */ adapter->wol = 0; hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); - hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device, - pdev->subsystem_device); - if (hw->wol_enabled) + if (ixgbe_wol_supported(adapter, pdev->device, pdev->subsystem_device)) adapter->wol = IXGBE_WUFC_MAG; - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); - - /* save off EEPROM version number */ - hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh); - hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl); - - /* pick up the PCI bus settings for reporting later */ - if (ixgbe_pcie_from_parent(hw)) - ixgbe_get_parent_bus_info(adapter); - else - hw->mac.ops.get_bus_info(hw); - - /* calculate the expected PCIe bandwidth required for optimal - * performance. Note that some older parts will never have enough - * bandwidth due to being older generation PCIe parts. We clamp these - * parts to ensure no warning is displayed if it can't be fixed. 
- */ - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16); - break; - default: - expected_gts = ixgbe_enumerate_functions(adapter) * 10; - break; - } - - /* don't check link if we failed to enumerate functions */ - if (expected_gts > 0) - ixgbe_check_minimum_link(adapter, expected_gts); + hw->wol_enabled = !!(adapter->wol); - err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str)); - if (err) - strlcpy(part_str, "Unknown", sizeof(part_str)); - if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) - e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", - hw->mac.type, hw->phy.type, hw->phy.sfp_type, - part_str); - else - e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", - hw->mac.type, hw->phy.type, part_str); + device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); - e_dev_info("%pM\n", netdev->dev_addr); + ixgbe_set_fw_version(adapter); /* reset the hardware with the new settings */ err = hw->mac.ops.start_hw(hw); @@ -9790,9 +11221,21 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) "problems please contact your Intel or hardware " "representative who provided you with this " "hardware.\n"); + } else if (err == IXGBE_ERR_OVERTEMP) { + e_crit(drv, "%s\n", ixgbe_overheat_msg); + goto err_register; + } else if (err) { + e_dev_err("HW init failed\n"); + goto err_register; } - - /*2019/06/03, change OOB from eth2 to eth0, for pegatron fn-6524-dn-f, Peter5_Lin*/ + + /* pick up the PCI bus settings for reporting later */ + if (ixgbe_pcie_from_parent(hw)) + ixgbe_get_parent_bus_info(hw); + else + if (hw->mac.ops.get_bus_info) + hw->mac.ops.get_bus_info(hw); + if(!strcmp("0000:03:00.0", pci_name(pdev))) strcpy(netdev->name, "eth0"); else if(!strcmp("0000:03:00.1", pci_name(pdev))) @@ -9801,12 +11244,21 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) strcpy(netdev->name, "eth2"); else 
if(!strcmp("0000:02:00.1", pci_name(pdev))) strcpy(netdev->name, "eth3"); - + err = register_netdev(netdev); if (err) goto err_register; pci_set_drvdata(pdev, adapter); + adapter->netdev_registered = true; +#ifdef HAVE_PCI_ERS + /* + * call save state here in standalone driver because it relies on + * adapter struct to exist, and needs to call netdev_priv + */ + pci_save_state(pdev); + +#endif /* power down the optics for 82599 SFP+ fiber */ if (hw->mac.ops.disable_tx_laser) @@ -9814,38 +11266,134 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); + /* keep stopping all the transmit queues for older kernels */ + netif_tx_stop_all_queues(netdev); -#ifdef CONFIG_IXGBE_DCA - if (dca_add_requester(&pdev->dev) == 0) { - adapter->flags |= IXGBE_FLAG_DCA_ENABLED; - ixgbe_setup_dca(adapter); +#if IS_ENABLED(CONFIG_DCA) + if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) { + err = dca_add_requester(pci_dev_to_dev(pdev)); + switch (err) { + case IXGBE_SUCCESS: + adapter->flags |= IXGBE_FLAG_DCA_ENABLED; + ixgbe_setup_dca(adapter); + break; + /* -19 is returned from the kernel when no provider is found */ + case -19: + e_info(rx_err, "No DCA provider found. Please " + "start ioatdma for DCA functionality.\n"); + break; + default: + e_info(probe, "DCA registration failed: %d\n", err); + break; + } + } +#endif + + /* print all messages at the end so that we use our eth%d name */ + + /* calculate the expected PCIe bandwidth required for optimal + * performance. Note that some older parts will never have enough + * bandwidth due to being older generation PCIe parts. We clamp these + * parts to ensure that no warning is displayed, as this could confuse + * users otherwise. 
*/ + switch(hw->mac.type) { + case ixgbe_mac_82598EB: + expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16); + break; + default: + expected_gts = ixgbe_enumerate_functions(adapter) * 10; + break; } + + /* don't check link if we failed to enumerate functions */ + if (expected_gts > 0) + ixgbe_check_minimum_link(adapter, expected_gts); + + /* First try to read PBA as a string */ + err = ixgbe_read_pba_string(hw, part_str, IXGBE_PBANUM_LENGTH); + if (err) + strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH); + if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) + e_info(probe, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, hw->phy.sfp_type, part_str); + else + e_info(probe, "MAC: %d, PHY: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, part_str); + + e_dev_info("%02x:%02x:%02x:%02x:%02x:%02x\n", + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + netdev->dev_addr[4], netdev->dev_addr[5]); + +#define INFO_STRING_LEN 255 + info_string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); + if (!info_string) { + e_err(probe, "allocation for info string failed\n"); + goto no_info_string; + } + i_s_var = info_string; + i_s_var += sprintf(info_string, "Enabled Features: "); + i_s_var += sprintf(i_s_var, "RxQ: %d TxQ: %d ", + adapter->num_rx_queues, adapter->num_tx_queues); +#if IS_ENABLED(CONFIG_FCOE) + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + i_s_var += sprintf(i_s_var, "FCoE "); #endif + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) + i_s_var += sprintf(i_s_var, "FdirHash "); + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) + i_s_var += sprintf(i_s_var, "DCB "); + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + i_s_var += sprintf(i_s_var, "DCA "); + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + i_s_var += sprintf(i_s_var, "RSC "); + if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE) + i_s_var += sprintf(i_s_var, "vxlan_rx "); + + BUG_ON(i_s_var > (info_string + 
INFO_STRING_LEN)); + /* end features printing */ + e_info(probe, "%s\n", info_string); + kfree(info_string); +no_info_string: +#ifdef CONFIG_PCI_IOV if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs); + int i; for (i = 0; i < adapter->num_vfs; i++) ixgbe_vf_configuration(pdev, (i | 0x10000000)); } +#endif - /* firmware requires driver version to be 0xFFFFFFFF - * since os does not support feature - */ + /* Initialize the LED link active for LED blink support */ + if (hw->mac.ops.init_led_link_act) + hw->mac.ops.init_led_link_act(hw); + + /* firmware requires blank numerical version */ if (hw->mac.ops.set_fw_drv_ver) hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, sizeof(ixgbe_driver_version) - 1, ixgbe_driver_version); +#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) /* add san mac addr to netdev */ ixgbe_add_sanmac_netdev(netdev); - e_dev_info("%s\n", ixgbe_default_device_descr); +#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */ + e_info(probe, "Intel(R) 10 Gigabit Network Connection\n"); + cards_found++; -#ifdef CONFIG_IXGBE_HWMON +#ifdef IXGBE_SYSFS if (ixgbe_sysfs_init(adapter)) e_err(probe, "failed to allocate sysfs resources\n"); -#endif /* CONFIG_IXGBE_HWMON */ +#else +#ifdef IXGBE_PROCFS + if (ixgbe_procfs_init(adapter)) + e_err(probe, "failed to allocate procfs resources\n"); +#endif /* IXGBE_PROCFS */ +#endif /* IXGBE_SYSFS */ +#ifdef HAVE_IXGBE_DEBUG_FS ixgbe_dbg_adapter_init(adapter); +#endif /* HAVE_IXGBE_DEBUG_FS */ /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */ if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link) @@ -9853,17 +11401,26 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, true); + if (hw->mac.ops.setup_eee && + (adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) { + bool eee_enable = !!(adapter->flags2 & 
IXGBE_FLAG2_EEE_ENABLED); + + hw->mac.ops.setup_eee(hw, eee_enable); + } + return 0; err_register: - ixgbe_release_hw_control(adapter); ixgbe_clear_interrupt_scheme(adapter); err_sw_init: + ixgbe_release_hw_control(adapter); +#ifdef CONFIG_PCI_IOV ixgbe_disable_sriov(adapter); +#endif /* CONFIG_PCI_IOV */ adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; - iounmap(adapter->io_addr); - kfree(adapter->jump_tables[0]); kfree(adapter->mac_table); + kfree(adapter->rss_key); + iounmap(adapter->io_addr); err_ioremap: disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); @@ -9885,51 +11442,64 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * Hot-Plug event, or because the driver is going to be removed from * memory. **/ -static void ixgbe_remove(struct pci_dev *pdev) +static void __devexit ixgbe_remove(struct pci_dev *pdev) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev; bool disable_dev; - int i; /* if !adapter then we already cleaned up in probe */ if (!adapter) return; - netdev = adapter->netdev; + netdev = adapter->netdev; +#ifdef HAVE_IXGBE_DEBUG_FS ixgbe_dbg_adapter_exit(adapter); - set_bit(__IXGBE_REMOVING, &adapter->state); +#endif /*HAVE_IXGBE_DEBUG_FS */ + set_bit(__IXGBE_REMOVE, &adapter->state); cancel_work_sync(&adapter->service_task); - -#ifdef CONFIG_IXGBE_DCA +#if IS_ENABLED(CONFIG_DCA) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; - dca_remove_requester(&pdev->dev); + dca_remove_requester(pci_dev_to_dev(pdev)); IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, IXGBE_DCA_CTRL_DCA_DISABLE); } +#endif /* CONFIG_DCA */ -#endif -#ifdef CONFIG_IXGBE_HWMON +#ifdef IXGBE_SYSFS ixgbe_sysfs_exit(adapter); -#endif /* CONFIG_IXGBE_HWMON */ +#else +#ifdef IXGBE_PROCFS + ixgbe_procfs_exit(adapter); +#endif /* IXGBE_PROCFS */ +#endif /* IXGBE-SYSFS */ +#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) /* 
remove the added san mac */ ixgbe_del_sanmac_netdev(netdev); +#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */ + #ifdef CONFIG_PCI_IOV ixgbe_disable_sriov(adapter); -#endif - if (netdev->reg_state == NETREG_REGISTERED) +#endif /* CONFIG_PCI_IOV */ + if (adapter->netdev_registered) { unregister_netdev(netdev); + adapter->netdev_registered = false; + } +#if IS_ENABLED(CONFIG_FCOE) +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE + ixgbe_fcoe_ddp_disable(adapter); +#endif +#endif /* CONFIG_FCOE */ ixgbe_clear_interrupt_scheme(adapter); - ixgbe_release_hw_control(adapter); -#ifdef CONFIG_DCB +#ifdef HAVE_DCBNL_IEEE kfree(adapter->ixgbe_ieee_pfc); kfree(adapter->ixgbe_ieee_ets); @@ -9937,17 +11507,8 @@ static void ixgbe_remove(struct pci_dev *pdev) iounmap(adapter->io_addr); pci_release_mem_regions(pdev); - e_dev_info("complete\n"); - - for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) { - if (adapter->jump_tables[i]) { - kfree(adapter->jump_tables[i]->input); - kfree(adapter->jump_tables[i]->mask); - } - kfree(adapter->jump_tables[i]); - } - kfree(adapter->mac_table); + kfree(adapter->rss_key); disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); @@ -9955,8 +11516,70 @@ static void ixgbe_remove(struct pci_dev *pdev) if (disable_dev) pci_disable_device(pdev); + +} + +static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev) +{ + u16 value; + + pci_read_config_word(pdev, PCI_VENDOR_ID, &value); + if (value == IXGBE_FAILED_READ_CFG_WORD) { + ixgbe_remove_adapter(hw); + return true; + } + return false; +} + +u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg) +{ + struct ixgbe_adapter *adapter = hw->back; + u16 value; + + if (IXGBE_REMOVED(hw->hw_addr)) + return IXGBE_FAILED_READ_CFG_WORD; + pci_read_config_word(adapter->pdev, reg, &value); + if (value == IXGBE_FAILED_READ_CFG_WORD && + ixgbe_check_cfg_remove(hw, adapter->pdev)) + return IXGBE_FAILED_READ_CFG_WORD; + return value; +} + +#ifdef HAVE_PCI_ERS 
+#ifdef CONFIG_PCI_IOV +static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg) +{ + struct ixgbe_adapter *adapter = hw->back; + u32 value; + + if (IXGBE_REMOVED(hw->hw_addr)) + return IXGBE_FAILED_READ_CFG_DWORD; + pci_read_config_dword(adapter->pdev, reg, &value); + if (value == IXGBE_FAILED_READ_CFG_DWORD && + ixgbe_check_cfg_remove(hw, adapter->pdev)) + return IXGBE_FAILED_READ_CFG_DWORD; + return value; +} +#endif /* CONFIG_PCI_IOV */ +#endif /* HAVE_PCI_ERS */ + +void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value) +{ + struct ixgbe_adapter *adapter = hw->back; + + if (IXGBE_REMOVED(hw->hw_addr)) + return; + pci_write_config_word(adapter->pdev, reg, value); +} + +void ewarn(struct ixgbe_hw *hw, const char *st) +{ + struct ixgbe_adapter *adapter = hw->back; + + netif_warn(adapter, drv, adapter->netdev, "%s", st); } +#ifdef HAVE_PCI_ERS /** * ixgbe_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device @@ -9997,7 +11620,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4); dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8); dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12); - if (ixgbe_removed(hw->hw_addr)) + if (IXGBE_REMOVED(hw->hw_addr)) goto skip_bad_vf_detection; req_id = dw1 >> 16; @@ -10016,10 +11639,10 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, dw0, dw1, dw2, dw3); switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: - device_id = IXGBE_82599_VF_DEVICE_ID; + device_id = IXGBE_DEV_ID_82599_VF; break; case ixgbe_mac_X540: - device_id = IXGBE_X540_VF_DEVICE_ID; + device_id = IXGBE_DEV_ID_X540_VF; break; case ixgbe_mac_X550: device_id = IXGBE_DEV_ID_X550_VF; @@ -10027,7 +11650,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, case ixgbe_mac_X550EM_x: device_id = IXGBE_DEV_ID_X550EM_X_VF; break; - case ixgbe_mac_x550em_a: + case 
ixgbe_mac_X550EM_a: device_id = IXGBE_DEV_ID_X550EM_A_VF; break; default: @@ -10101,7 +11724,6 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); pci_ers_result_t result; - int err; if (pci_enable_device_mem(pdev)) { e_err(probe, "Cannot re-enable PCI device after reset.\n"); @@ -10112,21 +11734,22 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) adapter->hw.hw_addr = adapter->io_addr; pci_set_master(pdev); pci_restore_state(pdev); + /* + * After second error pci->state_saved is false, this + * resets it so EEH doesn't break. + */ pci_save_state(pdev); pci_wake_from_d3(pdev, false); - ixgbe_reset(adapter); + set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); + ixgbe_service_event_schedule(adapter); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); result = PCI_ERS_RESULT_RECOVERED; } - err = pci_cleanup_aer_uncorrect_error_status(pdev); - if (err) { - e_dev_err("pci_cleanup_aer_uncorrect_error_status " - "failed 0x%0x\n", err); - /* non-fatal, continue */ - } + pci_cleanup_aer_uncorrect_error_status(pdev); return result; } @@ -10159,26 +11782,83 @@ static void ixgbe_io_resume(struct pci_dev *pdev) rtnl_unlock(); } +#ifdef HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS static const struct pci_error_handlers ixgbe_err_handler = { +#else +static struct pci_error_handlers ixgbe_err_handler = { +#endif .error_detected = ixgbe_io_error_detected, .slot_reset = ixgbe_io_slot_reset, .resume = ixgbe_io_resume, }; +#endif /* HAVE_PCI_ERS */ + +struct net_device *ixgbe_hw_to_netdev(const struct ixgbe_hw *hw) +{ + return ((struct ixgbe_adapter *)hw->back)->netdev; +} +struct ixgbe_msg *ixgbe_hw_to_msg(const struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = + container_of(hw, struct ixgbe_adapter, hw); + return (struct ixgbe_msg *)&adapter->msg_enable; +} + +#ifdef HAVE_RHEL6_SRIOV_CONFIGURE +static struct pci_driver_rh ixgbe_driver_rh = { + .sriov_configure = ixgbe_pci_sriov_configure, 
+}; +#endif + +#ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT +static const struct dev_pm_ops ixgbe_pm_ops = { + .suspend = ixgbe_suspend, + .resume = ixgbe_resume, + .freeze = ixgbe_freeze, + .thaw = ixgbe_thaw, + .poweroff = ixgbe_suspend, + .restore = ixgbe_resume, +}; +#endif /* USE_LEGACY_PM_SUPPORT */ +#endif static struct pci_driver ixgbe_driver = { .name = ixgbe_driver_name, .id_table = ixgbe_pci_tbl, .probe = ixgbe_probe, - .remove = ixgbe_remove, + .remove = __devexit_p(ixgbe_remove), #ifdef CONFIG_PM +#ifndef USE_LEGACY_PM_SUPPORT + .driver = { + .pm = &ixgbe_pm_ops, + }, +#else .suspend = ixgbe_suspend, .resume = ixgbe_resume, +#endif /* USE_LEGACY_PM_SUPPORT */ #endif +#ifndef USE_REBOOT_NOTIFIER .shutdown = ixgbe_shutdown, +#endif +#if defined(HAVE_SRIOV_CONFIGURE) .sriov_configure = ixgbe_pci_sriov_configure, +#elif defined(HAVE_RHEL6_SRIOV_CONFIGURE) + .rh_reserved = &ixgbe_driver_rh, +#endif /* HAVE_SRIOV_CONFIGURE */ +#ifdef HAVE_PCI_ERS .err_handler = &ixgbe_err_handler +#endif }; +bool ixgbe_is_ixgbe(struct pci_dev *pcidev) +{ + if (pci_dev_driver(pcidev) != &ixgbe_driver) + return false; + else + return true; +} + /** * ixgbe_init_module - Driver Registration Routine * @@ -10197,20 +11877,32 @@ static int __init ixgbe_init_module(void) return -ENOMEM; } +#ifdef IXGBE_PROCFS + if (ixgbe_procfs_topdir_init()) + pr_info("Procfs failed to initialize topdir\n"); +#endif + +#ifdef HAVE_IXGBE_DEBUG_FS ixgbe_dbg_init(); +#endif /* HAVE_IXGBE_DEBUG_FS */ ret = pci_register_driver(&ixgbe_driver); if (ret) { destroy_workqueue(ixgbe_wq); +#ifdef HAVE_IXGBE_DEBUG_FS ixgbe_dbg_exit(); +#endif /* HAVE_IXGBE_DEBUG_FS */ +#ifdef IXGBE_PROCFS + ixgbe_procfs_topdir_exit(); +#endif return ret; - } +} +#if IS_ENABLED(CONFIG_DCA) -#ifdef CONFIG_IXGBE_DCA dca_register_notify(&dca_notifier); #endif - return 0; + return ret; } module_init(ixgbe_init_module); @@ -10223,21 +11915,22 @@ module_init(ixgbe_init_module); **/ static void __exit ixgbe_exit_module(void) { 
-#ifdef CONFIG_IXGBE_DCA +#if IS_ENABLED(CONFIG_DCA) dca_unregister_notify(&dca_notifier); #endif pci_unregister_driver(&ixgbe_driver); - +#ifdef IXGBE_PROCFS + ixgbe_procfs_topdir_exit(); +#endif + destroy_workqueue(ixgbe_wq); +#ifdef HAVE_IXGBE_DEBUG_FS ixgbe_dbg_exit(); - if (ixgbe_wq) { - destroy_workqueue(ixgbe_wq); - ixgbe_wq = NULL; - } +#endif /* HAVE_IXGBE_DEBUG_FS */ } -#ifdef CONFIG_IXGBE_DCA -static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, - void *p) +#if IS_ENABLED(CONFIG_DCA) +static int ixgbe_notify_dca(struct notifier_block __always_unused *nb, unsigned long event, + void __always_unused *p) { int ret_val; @@ -10246,9 +11939,8 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, return ret_val ? NOTIFY_BAD : NOTIFY_DONE; } - -#endif /* CONFIG_IXGBE_DCA */ - +#endif module_exit(ixgbe_exit_module); /* ixgbe_main.c */ + diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.c index a0cb84381cd0..ab3aa32489d5 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
- You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -26,9 +22,7 @@ *******************************************************************************/ -#include -#include -#include "ixgbe.h" +#include "ixgbe_type.h" #include "ixgbe_mbx.h" /** @@ -38,20 +32,23 @@ * @size: Length of buffer * @mbx_id: id of mailbox to read * - * returns SUCCESS if it successfully read message from buffer + * returns SUCCESS if it successfuly read message from buffer **/ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_read_mbx"); /* limit read to size of mailbox */ if (size > mbx->size) size = mbx->size; - if (!mbx->ops) - return IXGBE_ERR_MBX; + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); - return mbx->ops->read(hw, msg, size, mbx_id); + return ret_val; } /** @@ -66,14 +63,18 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_SUCCESS; - if (size > mbx->size) - return IXGBE_ERR_MBX; + DEBUGFUNC("ixgbe_write_mbx"); - if (!mbx->ops) - return IXGBE_ERR_MBX; + if (size > mbx->size) { + ret_val = IXGBE_ERR_MBX; + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "Invalid mailbox message size %d", size); + } else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); - return mbx->ops->write(hw, msg, size, mbx_id); + return ret_val; } /** @@ -86,11 +87,14 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val 
= IXGBE_ERR_MBX; - if (!mbx->ops) - return IXGBE_ERR_MBX; + DEBUGFUNC("ixgbe_check_for_msg"); - return mbx->ops->check_for_msg(hw, mbx_id); + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; } /** @@ -103,11 +107,14 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_check_for_ack"); - if (!mbx->ops) - return IXGBE_ERR_MBX; + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); - return mbx->ops->check_for_ack(hw, mbx_id); + return ret_val; } /** @@ -120,11 +127,14 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_check_for_rst"); - if (!mbx->ops) - return IXGBE_ERR_MBX; + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); - return mbx->ops->check_for_rst(hw, mbx_id); + return ret_val; } /** @@ -134,22 +144,29 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) * * returns SUCCESS if it successfully received a message notification **/ -static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +STATIC s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; - if (!countdown || !mbx->ops) - return IXGBE_ERR_MBX; + DEBUGFUNC("ixgbe_poll_for_msg"); - while (mbx->ops->check_for_msg(hw, mbx_id)) { + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { countdown--; if (!countdown) - return IXGBE_ERR_MBX; - udelay(mbx->usec_delay); + break; + usec_delay(mbx->usec_delay); } - return 0; + if (countdown == 0) + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Polling for VF%d mailbox message timedout", mbx_id); + +out: + return countdown 
? IXGBE_SUCCESS : IXGBE_ERR_MBX; } /** @@ -159,22 +176,29 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) * * returns SUCCESS if it successfully received a message acknowledgement **/ -static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +STATIC s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; - if (!countdown || !mbx->ops) - return IXGBE_ERR_MBX; + DEBUGFUNC("ixgbe_poll_for_ack"); + + if (!countdown || !mbx->ops.check_for_ack) + goto out; - while (mbx->ops->check_for_ack(hw, mbx_id)) { + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { countdown--; if (!countdown) - return IXGBE_ERR_MBX; - udelay(mbx->usec_delay); + break; + usec_delay(mbx->usec_delay); } - return 0; + if (countdown == 0) + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Polling for VF%d mailbox ack timedout", mbx_id); + +out: + return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX; } /** @@ -187,21 +211,23 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) * returns SUCCESS if it successfully received a message notification and * copied it into the receive buffer. 
**/ -static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, - u16 mbx_id) +s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_read_posted_mbx"); - if (!mbx->ops) - return IXGBE_ERR_MBX; + if (!mbx->ops.read) + goto out; ret_val = ixgbe_poll_for_msg(hw, mbx_id); - if (ret_val) - return ret_val; - /* if ack received read message */ - return mbx->ops->read(hw, msg, size, mbx_id); + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: + return ret_val; } /** @@ -214,35 +240,291 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, * returns SUCCESS if it successfully copied message into the buffer and * received an ack to that message within delay * timeout period **/ -static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, +s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_write_posted_mbx"); /* exit if either we can't write or there isn't a defined timeout */ - if (!mbx->ops || !mbx->timeout) - return IXGBE_ERR_MBX; + if (!mbx->ops.write || !mbx->timeout) + goto out; /* send msg */ - ret_val = mbx->ops->write(hw, msg, size, mbx_id); - if (ret_val) - return ret_val; + ret_val = mbx->ops.write(hw, msg, size, mbx_id); /* if msg sent wait until we receive an ack */ - return ixgbe_poll_for_ack(hw, mbx_id); + if (!ret_val) + ret_val = ixgbe_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +/** + * ixgbe_init_mbx_ops_generic - Initialize MB function pointers + * @hw: pointer to the HW structure + * + * Setups up the mailbox read and write message function pointers + **/ +void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = 
&hw->mbx; + + mbx->ops.read_posted = ixgbe_read_posted_mbx; + mbx->ops.write_posted = ixgbe_write_posted_mbx; +} + +/** + * ixgbe_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. + **/ +STATIC u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw) +{ + u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); + + v2p_mailbox |= hw->mbx.v2p_mailbox; + hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * ixgbe_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. + **/ +STATIC s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask) +{ + u32 v2p_mailbox = ixgbe_read_v2p_mailbox(hw); + s32 ret_val = IXGBE_ERR_MBX; + + if (v2p_mailbox & mask) + ret_val = IXGBE_SUCCESS; + + hw->mbx.v2p_mailbox &= ~mask; + + return ret_val; +} + +/** + * ixgbe_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +STATIC s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + s32 ret_val = IXGBE_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("ixgbe_check_for_msg_vf"); + + if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +STATIC s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + s32 ret_val = IXGBE_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + 
DEBUGFUNC("ixgbe_check_for_ack_vf"); + + if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * ixgbe_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns true if the PF has set the reset done bit or else false + **/ +STATIC s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + s32 ret_val = IXGBE_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("ixgbe_check_for_rst_vf"); + + if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | + IXGBE_VFMAILBOX_RSTI))) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * ixgbe_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +STATIC s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_obtain_mbx_lock_vf"); + + /* Take ownership of the buffer */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); + + /* reserve mailbox for vf use */ + if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) + ret_val = IXGBE_SUCCESS; + + return ret_val; +} + +/** + * ixgbe_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +STATIC s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + s32 ret_val; + u16 i; + + UNREFERENCED_1PARAMETER(mbx_id); + + DEBUGFUNC("ixgbe_write_mbx_vf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + ixgbe_check_for_msg_vf(hw, 0); + 
ixgbe_check_for_ack_vf(hw, 0); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ); + +out_no_write: + return ret_val; } -static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) +/** + * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfuly read message from buffer + **/ +STATIC s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + s32 ret_val = IXGBE_SUCCESS; + u16 i; + + DEBUGFUNC("ixgbe_read_mbx_vf"); + UNREFERENCED_1PARAMETER(mbx_id); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); + + /* Acknowledge receipt and release mailbox, then we're done */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * ixgbe_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for vf mailbox + */ +void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications */ + mbx->timeout = 0; + mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY; + + mbx->size = IXGBE_VFMAILBOX_SIZE; + + mbx->ops.read = ixgbe_read_mbx_vf; + 
mbx->ops.write = ixgbe_write_mbx_vf; + mbx->ops.read_posted = ixgbe_read_posted_mbx; + mbx->ops.write_posted = ixgbe_write_posted_mbx; + mbx->ops.check_for_msg = ixgbe_check_for_msg_vf; + mbx->ops.check_for_ack = ixgbe_check_for_ack_vf; + mbx->ops.check_for_rst = ixgbe_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} + +STATIC s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) { u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); + s32 ret_val = IXGBE_ERR_MBX; if (mbvficr & mask) { + ret_val = IXGBE_SUCCESS; IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); - return 0; } - return IXGBE_ERR_MBX; + return ret_val; } /** @@ -252,18 +534,21 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ -static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) +STATIC s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) { + s32 ret_val = IXGBE_ERR_MBX; s32 index = IXGBE_MBVFICR_INDEX(vf_number); u32 vf_bit = vf_number % 16; + DEBUGFUNC("ixgbe_check_for_msg_pf"); + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, index)) { + ret_val = IXGBE_SUCCESS; hw->mbx.stats.reqs++; - return 0; } - return IXGBE_ERR_MBX; + return ret_val; } /** @@ -273,18 +558,21 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ -static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) +STATIC s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) { + s32 ret_val = IXGBE_ERR_MBX; s32 index = IXGBE_MBVFICR_INDEX(vf_number); u32 vf_bit = vf_number % 16; + DEBUGFUNC("ixgbe_check_for_ack_pf"); + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, index)) { + ret_val = IXGBE_SUCCESS; hw->mbx.stats.acks++; - return 0; } - return 
IXGBE_ERR_MBX; + return ret_val; } /** @@ -294,33 +582,36 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ -static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) +STATIC s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) { u32 reg_offset = (vf_number < 32) ? 0 : 1; u32 vf_shift = vf_number % 32; u32 vflre = 0; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_check_for_rst_pf"); switch (hw->mac.type) { case ixgbe_mac_82599EB: vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); break; - case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: + case ixgbe_mac_X540: vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); break; default: break; } - if (vflre & BIT(vf_shift)) { - IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift)); + if (vflre & (1 << vf_shift)) { + ret_val = IXGBE_SUCCESS; + IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); hw->mbx.stats.rsts++; - return 0; } - return IXGBE_ERR_MBX; + return ret_val; } /** @@ -330,19 +621,26 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) * * return SUCCESS if we obtained the mailbox lock **/ -static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) +STATIC s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) { + s32 ret_val = IXGBE_ERR_MBX; u32 p2v_mailbox; + DEBUGFUNC("ixgbe_obtain_mbx_lock_pf"); + /* Take ownership of the buffer */ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU); /* reserve mailbox for vf use */ p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) - return 0; + ret_val = IXGBE_SUCCESS; + else + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Failed to obtain mailbox lock for VF%d", vf_number); + - return IXGBE_ERR_MBX; + return ret_val; } /** @@ -354,16 +652,18 @@ static s32 
ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) * * returns SUCCESS if it successfully copied message into the buffer **/ -static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, +STATIC s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 vf_number) { s32 ret_val; u16 i; + DEBUGFUNC("ixgbe_write_mbx_pf"); + /* lock the mailbox to prevent pf/vf race condition */ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); if (ret_val) - return ret_val; + goto out_no_write; /* flush msg and acks as we are overwriting the message buffer */ ixgbe_check_for_msg_pf(hw, vf_number); @@ -379,7 +679,9 @@ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, /* update stats */ hw->mbx.stats.msgs_tx++; - return 0; +out_no_write: + return ret_val; + } /** @@ -393,16 +695,18 @@ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, * memory buffer. The presumption is that the caller knows that there was * a message due to a VF request so no polling for message is needed. 
**/ -static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, +STATIC s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 vf_number) { s32 ret_val; u16 i; + DEBUGFUNC("ixgbe_read_mbx_pf"); + /* lock the mailbox to prevent pf/vf race condition */ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); if (ret_val) - return ret_val; + goto out_no_read; /* copy the message to the mailbox memory buffer */ for (i = 0; i < size; i++) @@ -414,10 +718,10 @@ static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, /* update stats */ hw->mbx.stats.msgs_rx++; - return 0; +out_no_read: + return ret_val; } -#ifdef CONFIG_PCI_IOV /** * ixgbe_init_mbx_params_pf - set initial values for pf mailbox * @hw: pointer to the HW structure @@ -431,30 +735,26 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) if (hw->mac.type != ixgbe_mac_82599EB && hw->mac.type != ixgbe_mac_X550 && hw->mac.type != ixgbe_mac_X550EM_x && - hw->mac.type != ixgbe_mac_x550em_a && + hw->mac.type != ixgbe_mac_X550EM_a && hw->mac.type != ixgbe_mac_X540) return; mbx->timeout = 0; mbx->usec_delay = 0; + mbx->size = IXGBE_VFMAILBOX_SIZE; + + mbx->ops.read = ixgbe_read_mbx_pf; + mbx->ops.write = ixgbe_write_mbx_pf; + mbx->ops.read_posted = ixgbe_read_posted_mbx; + mbx->ops.write_posted = ixgbe_write_posted_mbx; + mbx->ops.check_for_msg = ixgbe_check_for_msg_pf; + mbx->ops.check_for_ack = ixgbe_check_for_ack_pf; + mbx->ops.check_for_rst = ixgbe_check_for_rst_pf; + mbx->stats.msgs_tx = 0; mbx->stats.msgs_rx = 0; mbx->stats.reqs = 0; mbx->stats.acks = 0; mbx->stats.rsts = 0; - - mbx->size = IXGBE_VFMAILBOX_SIZE; } -#endif /* CONFIG_PCI_IOV */ - -const struct ixgbe_mbx_operations mbx_ops_generic = { - .read = ixgbe_read_mbx_pf, - .write = ixgbe_write_mbx_pf, - .read_posted = ixgbe_read_posted_mbx, - .write_posted = ixgbe_write_posted_mbx, - .check_for_msg = ixgbe_check_for_msg_pf, - .check_for_ack = ixgbe_check_for_ack_pf, - .check_for_rst = ixgbe_check_for_rst_pf, -}; - diff --git 
a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.h index 01c2667c0f92..b990c321209d 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.h +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
@@ -31,42 +27,52 @@ #include "ixgbe_type.h" -#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ -#define IXGBE_ERR_MBX -100 - -#define IXGBE_VFMAILBOX 0x002FC -#define IXGBE_VFMBMEM 0x00200 - -#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ -#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ -#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ - -#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ -#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ -#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ -#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ - +#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define IXGBE_ERR_MBX -100 + +#define IXGBE_VFMAILBOX 0x002FC +#define IXGBE_VFMBMEM 0x00200 + +/* Define mailbox register bits */ +#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ + +#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the 
mailbox buffer */ +#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ +#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ +#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ /* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the * PF. The reverse is true if it is IXGBE_PF_*. * Message ACK's are the value or'd with 0xF0000000 */ -#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - clear to send requests */ -#define IXGBE_VT_MSGINFO_SHIFT 16 -/* bits 23:16 are used for exra info for certain messages */ -#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) +#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with + * this are the ACK */ +#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with + * this are the NACK */ +#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + * clear to send requests */ +#define IXGBE_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) /* definitions to support mailbox API version negotiation */ /* - * Each element denotes a version of the API; existing numbers may not + * each element denotes a version of the API; existing numbers may not * change; any additions must go at the end */ enum ixgbe_pfvf_api_rev { @@ -74,15 +80,16 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + ixgbe_mbox_api_13, 
/* API version 1.3, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; /* mailbox API, legacy requests */ -#define IXGBE_VF_RESET 0x01 /* VF requests reset */ -#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ -#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ -#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define IXGBE_VF_RESET 0x01 /* VF requests reset */ +#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ /* mailbox API, version 1.0 VF requests */ #define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ @@ -92,37 +99,57 @@ enum ixgbe_pfvf_api_rev { /* mailbox API, version 1.1 VF requests */ #define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ +/* mailbox API, version 1.2 VF requests */ +#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c + +/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */ +enum ixgbevf_xcast_modes { + IXGBEVF_XCAST_MODE_NONE = 0, + IXGBEVF_XCAST_MODE_MULTI, + IXGBEVF_XCAST_MODE_ALLMULTI, + IXGBEVF_XCAST_MODE_PROMISC, +}; + /* GET_QUEUES return data indices within the mailbox */ #define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ #define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ #define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ #define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ -/* mailbox API, version 1.2 VF requests */ -#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ -#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ - -#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c - /* length of permanent address message returned from PF */ -#define IXGBE_VF_PERMADDR_MSG_LEN 4 +#define 
IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ -#define IXGBE_VF_MC_TYPE_WORD 3 +#define IXGBE_VF_MC_TYPE_WORD 3 + +#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ -#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ +/* mailbox API, version 2.0 VF requests */ +#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ +#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define IXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */ +#define IXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */ +#define IXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */ +#define IXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */ +#define IXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */ +#define IXGBE_VF_SET_MTU 0x0F /* set a specific MTU */ -#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ -#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ +/* mailbox API, version 2.0 PF requests */ +#define IXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */ + +#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); -#ifdef CONFIG_PCI_IOV +void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw); +void ixgbe_init_mbx_params_vf(struct ixgbe_hw *); void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); -#endif /* CONFIG_PCI_IOV */ - -extern const struct ixgbe_mbx_operations mbx_ops_generic; #endif /* _IXGBE_MBX_H_ */ diff --git 
a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_model.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_model.h deleted file mode 100644 index 538a1c5475b6..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_model.h +++ /dev/null @@ -1,121 +0,0 @@ -/******************************************************************************* - * - * Intel 10 Gigabit PCI Express Linux drive - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ - -#ifndef _IXGBE_MODEL_H_ -#define _IXGBE_MODEL_H_ - -#include "ixgbe.h" -#include "ixgbe_type.h" - -struct ixgbe_mat_field { - unsigned int off; - int (*val)(struct ixgbe_fdir_filter *input, - union ixgbe_atr_input *mask, - u32 val, u32 m); - unsigned int type; -}; - -struct ixgbe_jump_table { - struct ixgbe_mat_field *mat; - struct ixgbe_fdir_filter *input; - union ixgbe_atr_input *mask; - u32 link_hdl; - unsigned long child_loc_map[32]; -}; - -#define IXGBE_MAX_HW_ENTRIES 2045 - -static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input, - union ixgbe_atr_input *mask, - u32 val, u32 m) -{ - input->filter.formatted.src_ip[0] = val; - mask->formatted.src_ip[0] = m; - return 0; -} - -static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input, - union ixgbe_atr_input *mask, - u32 val, u32 m) -{ - input->filter.formatted.dst_ip[0] = val; - mask->formatted.dst_ip[0] = m; - return 0; -} - -static struct ixgbe_mat_field ixgbe_ipv4_fields[] = { - { .off = 12, .val = ixgbe_mat_prgm_sip, - .type = IXGBE_ATR_FLOW_TYPE_IPV4}, - { .off = 16, .val = ixgbe_mat_prgm_dip, - .type = IXGBE_ATR_FLOW_TYPE_IPV4}, - { .val = NULL } /* terminal node */ -}; - -static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input, - union ixgbe_atr_input *mask, - u32 val, u32 m) -{ - input->filter.formatted.src_port = val & 0xffff; - mask->formatted.src_port = m & 0xffff; - input->filter.formatted.dst_port = val >> 16; - mask->formatted.dst_port = m >> 16; - - return 0; -}; - -static struct ixgbe_mat_field ixgbe_tcp_fields[] = { - {.off = 0, .val = ixgbe_mat_prgm_ports, - .type = IXGBE_ATR_FLOW_TYPE_TCPV4}, - { .val = NULL } /* terminal node */ -}; - -static struct ixgbe_mat_field ixgbe_udp_fields[] = { - {.off = 0, .val = ixgbe_mat_prgm_ports, - .type = IXGBE_ATR_FLOW_TYPE_UDPV4}, - { .val = NULL } /* terminal node */ -}; - -struct 
ixgbe_nexthdr { - /* offset, shift, and mask of position to next header */ - unsigned int o; - u32 s; - u32 m; - /* match criteria to make this jump*/ - unsigned int off; - u32 val; - u32 mask; - /* location of jump to make */ - struct ixgbe_mat_field *jump; -}; - -static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = { - { .o = 0, .s = 6, .m = 0xf, - .off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields}, - { .o = 0, .s = 6, .m = 0xf, - .off = 8, .val = 0x1100, .mask = 0xff00, .jump = ixgbe_udp_fields}, - { .jump = NULL } /* terminal node */ -}; -#endif /* _IXGBE_MODEL_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep.h new file mode 100644 index 000000000000..2e40048edcb6 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep.h @@ -0,0 +1,200 @@ +/******************************************************************************* + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +/* glue for the OS independent part of ixgbe + * includes register access macros + */ + +#ifndef _IXGBE_OSDEP_H_ +#define _IXGBE_OSDEP_H_ + +#include +#include +#include +#include +#include +#include "kcompat.h" + +#define IXGBE_CPU_TO_BE16(_x) cpu_to_be16(_x) +#define IXGBE_BE16_TO_CPU(_x) be16_to_cpu(_x) +#define IXGBE_CPU_TO_BE32(_x) cpu_to_be32(_x) +#define IXGBE_BE32_TO_CPU(_x) be32_to_cpu(_x) + +#define msec_delay(_x) msleep(_x) + +#define usec_delay(_x) udelay(_x) + +#define STATIC static + +#define IOMEM __iomem + +#ifdef DBG +#define ASSERT(_x) BUG_ON(!(_x)) +#define DEBUGOUT(S) printk(KERN_DEBUG S) +#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT2(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT3(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT4(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT5(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT6(S, A...) printk(KERN_DEBUG S, ## A) +#else +#define ASSERT(_x) do {} while (0) +#define DEBUGOUT(S) do {} while (0) +#define DEBUGOUT1(S, A...) do {} while (0) +#define DEBUGOUT2(S, A...) do {} while (0) +#define DEBUGOUT3(S, A...) do {} while (0) +#define DEBUGOUT4(S, A...) do {} while (0) +#define DEBUGOUT5(S, A...) do {} while (0) +#define DEBUGOUT6(S, A...) do {} while (0) +#endif + +#define DEBUGFUNC(S) do {} while (0) + +#define IXGBE_SFP_DETECT_RETRIES 2 + +struct ixgbe_hw; +struct ixgbe_msg { + u16 msg_enable; +}; +struct net_device *ixgbe_hw_to_netdev(const struct ixgbe_hw *hw); +struct ixgbe_msg *ixgbe_hw_to_msg(const struct ixgbe_hw *hw); + +#define hw_dbg(hw, format, arg...) \ + netdev_dbg(ixgbe_hw_to_netdev(hw), format, ## arg) +#define hw_err(hw, format, arg...) \ + netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg) +#define e_dev_info(format, arg...) 
\ + dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_notice(format, arg...) \ + dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dbg(msglvl, format, arg...) \ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_crit(msglvl, format, arg...) \ + netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) + +#define IXGBE_DEAD_READ_RETRIES 10 +#define IXGBE_DEAD_READ_REG 0xdeadbeefU +#define IXGBE_FAILED_READ_REG 0xffffffffU +#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU +#define IXGBE_FAILED_READ_CFG_WORD 0xffffU +#define IXGBE_FAILED_READ_CFG_BYTE 0xffU + +#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \ + IXGBE_WRITE_REG((a), (reg) + ((offset) << 2), (value)) + +#define IXGBE_READ_REG(h, r) ixgbe_read_reg(h, r, false) +#define IXGBE_R32_Q(h, r) ixgbe_read_reg(h, r, true) + +#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \ + IXGBE_READ_REG((a), (reg) + ((offset) << 2))) + +#ifndef writeq +#define writeq(val, addr) do { writel((u32) (val), addr); \ + writel((u32) (val >> 32), (addr + 4)); \ + } while (0); +#endif + +#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) + +u32 ixgbe_read_reg(struct ixgbe_hw *, u32 reg, bool quiet); +extern u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg); +extern void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value); +extern void ewarn(struct ixgbe_hw *hw, const char *str); + +#define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg_word +#define 
IXGBE_WRITE_PCIE_WORD ixgbe_write_pci_cfg_word +#define IXGBE_EEPROM_GRANT_ATTEMPS 100 +#define IXGBE_HTONL(_i) htonl(_i) +#define IXGBE_NTOHL(_i) ntohl(_i) +#define IXGBE_NTOHS(_i) ntohs(_i) +#define IXGBE_CPU_TO_LE32(_i) cpu_to_le32(_i) +#define IXGBE_CPU_TO_LE16(_i) cpu_to_le16(_i) +#define IXGBE_LE32_TO_CPU(_i) le32_to_cpu(_i) +#define IXGBE_LE32_TO_CPUS(_i) le32_to_cpus(_i) +#define EWARN(H, W) ewarn(H, W) + +enum { + IXGBE_ERROR_SOFTWARE, + IXGBE_ERROR_POLLING, + IXGBE_ERROR_INVALID_STATE, + IXGBE_ERROR_UNSUPPORTED, + IXGBE_ERROR_ARGUMENT, + IXGBE_ERROR_CAUTION, +}; + +#define ERROR_REPORT(level, format, arg...) do { \ + switch (level) { \ + case IXGBE_ERROR_SOFTWARE: \ + case IXGBE_ERROR_CAUTION: \ + case IXGBE_ERROR_POLLING: \ + netif_warn(ixgbe_hw_to_msg(hw), drv, ixgbe_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + case IXGBE_ERROR_INVALID_STATE: \ + case IXGBE_ERROR_UNSUPPORTED: \ + case IXGBE_ERROR_ARGUMENT: \ + netif_err(ixgbe_hw_to_msg(hw), hw, ixgbe_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + default: \ + break; \ + } \ +} while (0) + +#define ERROR_REPORT1 ERROR_REPORT +#define ERROR_REPORT2 ERROR_REPORT +#define ERROR_REPORT3 ERROR_REPORT + +#define UNREFERENCED_XPARAMETER +#define UNREFERENCED_1PARAMETER(_p) do { \ + uninitialized_var(_p); \ +} while (0) +#define UNREFERENCED_2PARAMETER(_p, _q) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ +} while (0) +#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ + uninitialized_var(_r); \ +} while (0) +#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ + uninitialized_var(_r); \ + uninitialized_var(_s); \ +} while (0) + +#endif /* _IXGBE_OSDEP_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep2.h 
b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep2.h new file mode 100644 index 000000000000..549b35350611 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep2.h @@ -0,0 +1,68 @@ +/******************************************************************************* + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_OSDEP2_H_ +#define _IXGBE_OSDEP2_H_ + +static inline bool ixgbe_removed(void __iomem *addr) +{ + return unlikely(!addr); +} +#define IXGBE_REMOVED(a) ixgbe_removed(a) + +static inline void IXGBE_WRITE_REG(struct ixgbe_hw *hw, u32 reg, u32 value) +{ + u8 __iomem *reg_addr; + + reg_addr = ACCESS_ONCE(hw->hw_addr); + if (IXGBE_REMOVED(reg_addr)) + return; +#ifdef DBG + switch (reg) { + case IXGBE_EIMS: + case IXGBE_EIMC: + case IXGBE_EIAM: + case IXGBE_EIAC: + case IXGBE_EICR: + case IXGBE_EICS: + printk("%s: Reg - 0x%05X, value - 0x%08X\n", __func__, + reg, value); + default: + break; + } +#endif /* DBG */ + writel(value, reg_addr + reg); +} + +static inline void IXGBE_WRITE_REG64(struct ixgbe_hw *hw, u32 reg, u64 value) +{ + u8 __iomem *reg_addr; + + reg_addr = ACCESS_ONCE(hw->hw_addr); + if (IXGBE_REMOVED(reg_addr)) + return; + writeq(value, reg_addr + reg); +} + +#endif /* _IXGBE_OSDEP2_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_param.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_param.c new file mode 100644 index 000000000000..5efd0163ccb0 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_param.c @@ -0,0 +1,1256 @@ +/******************************************************************************* + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include + +#include "ixgbe.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define IXGBE_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define STRINGIFY(foo) #foo /* magic for getting defines into strings */ +#define XSTRINGIFY(bar) STRINGIFY(bar) + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define IXGBE_PARAM_INIT { [0 ... IXGBE_MAX_NIC] = OPTION_UNSET } +#ifndef module_param_array +/* Module Parameters are always initialized to -1, so that the driver + * can tell the difference between no user specified value or the + * user asking for the default value. + * The true default values are loaded in when ixgbe_check_options is called. + * + * This is a GCC extension to ANSI C. + * See the item "Labelled Elements in Initializers" in the section + * "Extensions to the C Language Family" of the GCC documentation. 
+ */ + +#define IXGBE_PARAM(X, desc) \ + static const int __devinitdata X[IXGBE_MAX_NIC+1] = IXGBE_PARAM_INIT; \ + MODULE_PARM(X, "1-" __MODULE_STRING(IXGBE_MAX_NIC) "i"); \ + MODULE_PARM_DESC(X, desc); +#else +#define IXGBE_PARAM(X, desc) \ + static int __devinitdata X[IXGBE_MAX_NIC+1] = IXGBE_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); +#endif + +IXGBE_PARAM(EEE, "Energy Efficient Ethernet (EEE) ,0=disabled, 1=enabled )" + "default EEE disable"); +/* IntMode (Interrupt Mode) + * + * Valid Range: 0-2 + * - 0 - Legacy Interrupt + * - 1 - MSI Interrupt + * - 2 - MSI-X Interrupt(s) + * + * Default Value: 2 + */ +IXGBE_PARAM(InterruptType, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default IntMode (deprecated)"); +IXGBE_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default 2"); +#define IXGBE_INT_LEGACY 0 +#define IXGBE_INT_MSI 1 +#define IXGBE_INT_MSIX 2 + +/* MQ - Multiple Queue enable/disable + * + * Valid Range: 0, 1 + * - 0 - disables MQ + * - 1 - enables MQ + * + * Default Value: 1 + */ + +IXGBE_PARAM(MQ, "Disable or enable Multiple Queues, default 1"); + +#if IS_ENABLED(CONFIG_DCA) +/* DCA - Direct Cache Access (DCA) Control + * + * This option allows the device to hint to DCA enabled processors + * which CPU should have its cache warmed with the data being + * transferred over PCIe. This can increase performance by reducing + * cache misses. 
ixgbe hardware supports DCA for: + * tx descriptor writeback + * rx descriptor writeback + * rx data + * rx data header only (in packet split mode) + * + * enabling option 2 can cause cache thrash in some tests, particularly + * if the CPU is completely utilized + * + * Valid Range: 0 - 2 + * - 0 - disables DCA + * - 1 - enables DCA + * - 2 - enables DCA with rx data included + * + * Default Value: 2 + */ + +#define IXGBE_MAX_DCA 2 + +IXGBE_PARAM(DCA, "Disable or enable Direct Cache Access, 0=disabled, " + "1=descriptor only, 2=descriptor and data"); +#endif /* CONFIG_DCA */ + +/* RSS - Receive-Side Scaling (RSS) Descriptor Queues + * + * Valid Range: 0-16 + * - 0 - enables RSS and sets the Desc. Q's to min(16, num_online_cpus()). + * - 1-16 - enables RSS and sets the Desc. Q's to the specified value. + * + * Default Value: 0 + */ + +IXGBE_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues, " + "default 0=number of cpus"); + +/* VMDQ - Virtual Machine Device Queues (VMDQ) + * + * Valid Range: 1-16 + * - 0/1 Disables VMDQ by allocating only a single queue. + * - 2-16 - enables VMDQ and sets the Desc. Q's to the specified value. 
+ * + * Default Value: 8 + */ + +#define IXGBE_DEFAULT_NUM_VMDQ 8 + +IXGBE_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0/1 = disable (1 queue) " + "2-16 enable (default=" XSTRINGIFY(IXGBE_DEFAULT_NUM_VMDQ) ")"); + +#ifdef CONFIG_PCI_IOV +/* max_vfs - SR I/O Virtualization + * + * Valid Range: 0-63 + * - 0 Disables SR-IOV + * - 1-63 - enables SR-IOV and sets the number of VFs enabled + * + * Default Value: 0 + */ + +#define MAX_SRIOV_VFS 63 + +IXGBE_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable (default), " + "1-" XSTRINGIFY(MAX_SRIOV_VFS) " = enable " + "this many VFs"); + +/* VEPA - Set internal bridge to VEPA mode + * + * Valid Range: 0-1 + * - 0 Set bridge to VEB mode + * - 1 Set bridge to VEPA mode + * + * Default Value: 0 + */ +/* + *Note: + *===== + * This provides ability to ensure VEPA mode on the internal bridge even if + * the kernel does not support the netdev bridge setting operations. +*/ +IXGBE_PARAM(VEPA, "VEPA Bridge Mode: 0 = VEB (default), 1 = VEPA"); +#endif + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 956-488281 (0=off, 1=dynamic) + * + * Default Value: 1 + */ +#define DEFAULT_ITR 1 +IXGBE_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, " + "(0,1,956-488281), default 1"); +#define MAX_ITR IXGBE_MAX_INT_RATE +#define MIN_ITR IXGBE_MIN_INT_RATE + +#ifndef IXGBE_NO_LLI + +/* LLIPort (Low Latency Interrupt TCP Port) + * + * Valid Range: 0 - 65535 + * + * Default Value: 0 (disabled) + */ +IXGBE_PARAM(LLIPort, "Low Latency Interrupt TCP Port (0-65535)"); + +#define DEFAULT_LLIPORT 0 +#define MAX_LLIPORT 0xFFFF +#define MIN_LLIPORT 0 + +/* LLIPush (Low Latency Interrupt on TCP Push flag) + * + * Valid Range: 0,1 + * + * Default Value: 0 (disabled) + */ +IXGBE_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1)"); + +#define DEFAULT_LLIPUSH 0 +#define MAX_LLIPUSH 1 +#define MIN_LLIPUSH 0 + +/* LLISize (Low Latency Interrupt on Packet Size) + * + * Valid Range: 0 - 1500 + * + * 
Default Value: 0 (disabled) + */ +IXGBE_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500)"); + +#define DEFAULT_LLISIZE 0 +#define MAX_LLISIZE 1500 +#define MIN_LLISIZE 0 + +/* LLIEType (Low Latency Interrupt Ethernet Type) + * + * Valid Range: 0 - 0x8fff + * + * Default Value: 0 (disabled) + */ +IXGBE_PARAM(LLIEType, "Low Latency Interrupt Ethernet Protocol Type"); + +#define DEFAULT_LLIETYPE 0 +#define MAX_LLIETYPE 0x8fff +#define MIN_LLIETYPE 0 + +/* LLIVLANP (Low Latency Interrupt on VLAN priority threshold) + * + * Valid Range: 0 - 7 + * + * Default Value: 0 (disabled) + */ +IXGBE_PARAM(LLIVLANP, "Low Latency Interrupt on VLAN priority threshold"); + +#define DEFAULT_LLIVLANP 0 +#define MAX_LLIVLANP 7 +#define MIN_LLIVLANP 0 + +#endif /* IXGBE_NO_LLI */ +#ifdef HAVE_TX_MQ +/* Flow Director packet buffer allocation level + * + * Valid Range: 1-3 + * 1 = 8k hash/2k perfect, + * 2 = 16k hash/4k perfect, + * 3 = 32k hash/8k perfect + * + * Default Value: 0 + */ +IXGBE_PARAM(FdirPballoc, "Flow Director packet buffer allocation level:\n" + "\t\t\t1 = 8k hash filters or 2k perfect filters\n" + "\t\t\t2 = 16k hash filters or 4k perfect filters\n" + "\t\t\t3 = 32k hash filters or 8k perfect filters"); + +#define IXGBE_DEFAULT_FDIR_PBALLOC IXGBE_FDIR_PBALLOC_64K + +/* Software ATR packet sample rate + * + * Valid Range: 0-255 0 = off, 1-255 = rate of Tx packet inspection + * + * Default Value: 20 + */ +IXGBE_PARAM(AtrSampleRate, "Software ATR Tx packet sample rate"); + +#define IXGBE_MAX_ATR_SAMPLE_RATE 255 +#define IXGBE_MIN_ATR_SAMPLE_RATE 1 +#define IXGBE_ATR_SAMPLE_RATE_OFF 0 +#define IXGBE_DEFAULT_ATR_SAMPLE_RATE 20 +#endif /* HAVE_TX_MQ */ + +#if IS_ENABLED(CONFIG_FCOE) +/* FCoE - Fibre Channel over Ethernet Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables FCoE Offload + * - 1 - enables FCoE Offload + * + * Default Value: 1 + */ +IXGBE_PARAM(FCoE, "Disable or enable FCoE Offload, default 1"); +#endif /* CONFIG_FCOE */ + +/* 
Enable/disable Malicious Driver Detection + * + * Valid Values: 0(off), 1(on) + * + * Default Value: 1 + */ +IXGBE_PARAM(MDD, "Malicious Driver Detection: (0,1), default 1 = on"); + +/* Enable/disable Large Receive Offload + * + * Valid Values: 0(off), 1(on) + * + * Default Value: 1 + */ +IXGBE_PARAM(LRO, "Large Receive Offload (0,1), default 0 = off"); + +/* Enable/disable support for untested SFP+ modules on 82599-based adapters + * + * Valid Values: 0(Disable), 1(Enable) + * + * Default Value: 0 + */ +IXGBE_PARAM(allow_unsupported_sfp, "Allow unsupported and untested " + "SFP+ modules on 82599 based adapters, default 0 = Disable"); + +/* Enable/disable support for DMA coalescing + * + * Valid Values: 0(off), 41 - 10000(on) + * + * Default Value: 0 + */ +IXGBE_PARAM(dmac_watchdog, + "DMA coalescing watchdog in microseconds (0,41-10000), default 0 = off"); + +/* Enable/disable support for VXLAN rx checksum offload + * + * Valid Values: 0(Disable), 1(Enable) + * + * Default Value: 1 on hardware that supports it + */ +IXGBE_PARAM(vxlan_rx, + "VXLAN receive checksum offload (0,1), default 1 = Enable"); + + +struct ixgbe_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + const char *msg; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct ixgbe_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +#ifndef IXGBE_NO_LLI +#ifdef module_param_array +/** + * helper function to determine LLI support + * + * LLI is only supported for 82599 and X540 + * LLIPush is not supported on 82599 + **/ +static bool __devinit ixgbe_lli_supported(struct ixgbe_adapter *adapter, + struct ixgbe_option *opt) +{ + struct ixgbe_hw *hw = &adapter->hw; + + if (hw->mac.type == ixgbe_mac_82599EB) { + + if (LLIPush[adapter->bd_number] > 0) + goto not_supp; + + return true; + } + + if (hw->mac.type == ixgbe_mac_X540) + return true; + 
+not_supp: + DPRINTK(PROBE, INFO, "%s not supported on this HW\n", opt->name); + return false; +} +#endif /* module_param_array */ +#endif /* IXGBE_NO_LLI */ + +static int __devinit ixgbe_validate_option(unsigned int *value, + struct ixgbe_option *opt) +{ + if (*value == OPTION_UNSET) { + printk(KERN_INFO "ixgbe: Invalid %s specified (%d), %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + printk(KERN_INFO "ixgbe: %s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + printk(KERN_INFO "ixgbe: %s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if ((*value >= opt->arg.r.min && *value <= opt->arg.r.max) || + *value == opt->def) { + if (opt->msg) + printk(KERN_INFO "ixgbe: %s set to %d, %s\n", + opt->name, *value, opt->msg); + else + printk(KERN_INFO "ixgbe: %s set to %d\n", + opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + + for (i = 0; i < opt->arg.l.nr; i++) { + const struct ixgbe_opt_list *ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + printk(KERN_INFO "%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + printk(KERN_INFO "ixgbe: Invalid %s specified (%d), %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +#define LIST_LEN(l) (sizeof(l) / sizeof(l[0])) + +/** + * ixgbe_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
+ **/ +void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) +{ + unsigned int mdd; + int bd = adapter->bd_number; + u32 *aflags = &adapter->flags; + struct ixgbe_ring_feature *feature = adapter->ring_feature; + unsigned int vmdq; + + if (bd >= IXGBE_MAX_NIC) { + printk(KERN_NOTICE + "Warning: no configuration for board #%d\n", bd); + printk(KERN_NOTICE "Using defaults for all values\n"); +#ifndef module_param_array + bd = IXGBE_MAX_NIC; +#endif + } + + { /* Interrupt Mode */ + unsigned int int_mode; + static struct ixgbe_option opt = { + .type = range_option, + .name = "Interrupt Mode", + .err = + "using default of " __MODULE_STRING(IXGBE_INT_MSIX), + .def = IXGBE_INT_MSIX, + .arg = { .r = { .min = IXGBE_INT_LEGACY, + .max = IXGBE_INT_MSIX} } + }; + +#ifdef module_param_array + if (num_IntMode > bd || num_InterruptType > bd) { +#endif + int_mode = IntMode[bd]; + if (int_mode == OPTION_UNSET) + int_mode = InterruptType[bd]; + ixgbe_validate_option(&int_mode, &opt); + switch (int_mode) { + case IXGBE_INT_MSIX: + if (!(*aflags & IXGBE_FLAG_MSIX_CAPABLE)) + printk(KERN_INFO + "Ignoring MSI-X setting; " + "support unavailable\n"); + break; + case IXGBE_INT_MSI: + if (!(*aflags & IXGBE_FLAG_MSI_CAPABLE)) { + printk(KERN_INFO + "Ignoring MSI setting; " + "support unavailable\n"); + } else { + *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; + } + break; + case IXGBE_INT_LEGACY: + default: + *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; + *aflags &= ~IXGBE_FLAG_MSI_CAPABLE; + break; + } +#ifdef module_param_array + } else { + /* default settings */ + if (*aflags & IXGBE_FLAG_MSIX_CAPABLE) { + *aflags |= IXGBE_FLAG_MSI_CAPABLE; + } else { + *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; + *aflags &= ~IXGBE_FLAG_MSI_CAPABLE; + } + } +#endif + } + { /* Multiple Queue Support */ + static struct ixgbe_option opt = { + .type = enable_option, + .name = "Multiple Queue Support", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + +#ifdef module_param_array + if (num_MQ > bd) { +#endif 
+ unsigned int mq = MQ[bd]; + ixgbe_validate_option(&mq, &opt); + if (mq) + *aflags |= IXGBE_FLAG_MQ_CAPABLE; + else + *aflags &= ~IXGBE_FLAG_MQ_CAPABLE; +#ifdef module_param_array + } else { + *aflags |= IXGBE_FLAG_MQ_CAPABLE; + } +#endif + /* Check Interoperability */ + if ((*aflags & IXGBE_FLAG_MQ_CAPABLE) && + !(*aflags & IXGBE_FLAG_MSIX_CAPABLE)) { + DPRINTK(PROBE, INFO, + "Multiple queues are not supported while MSI-X " + "is disabled. Disabling Multiple Queues.\n"); + *aflags &= ~IXGBE_FLAG_MQ_CAPABLE; + } + } +#if IS_ENABLED(CONFIG_DCA) + { /* Direct Cache Access (DCA) */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Direct Cache Access (DCA)", + .err = "defaulting to Enabled", + .def = IXGBE_MAX_DCA, + .arg = { .r = { .min = OPTION_DISABLED, + .max = IXGBE_MAX_DCA} } + }; + unsigned int dca = opt.def; + +#ifdef module_param_array + if (num_DCA > bd) { +#endif + dca = DCA[bd]; + ixgbe_validate_option(&dca, &opt); + if (!dca) + *aflags &= ~IXGBE_FLAG_DCA_CAPABLE; + + /* Check Interoperability */ + if (!(*aflags & IXGBE_FLAG_DCA_CAPABLE)) { + DPRINTK(PROBE, INFO, "DCA is disabled\n"); + *aflags &= ~IXGBE_FLAG_DCA_ENABLED; + } + + if (dca == IXGBE_MAX_DCA) { + DPRINTK(PROBE, INFO, + "DCA enabled for rx data\n"); + adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA; + } +#ifdef module_param_array + } else { + /* make sure to clear the capability flag if the + * option is disabled by default above */ + if (opt.def == OPTION_DISABLED) + *aflags &= ~IXGBE_FLAG_DCA_CAPABLE; + } +#endif + if (dca == IXGBE_MAX_DCA) + adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA; + } +#endif /* CONFIG_DCA */ + { /* Receive-Side Scaling (RSS) */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Receive-Side Scaling (RSS)", + .err = "using default.", + .def = 0, + .arg = { .r = { .min = 0, + .max = 16} } + }; + unsigned int rss = RSS[bd]; + /* adjust Max allowed RSS queues based on MAC type */ + opt.arg.r.max = ixgbe_max_rss_indices(adapter); 
+ +#ifdef module_param_array + if (num_RSS > bd) { +#endif + ixgbe_validate_option(&rss, &opt); + /* base it off num_online_cpus() with hardware limit */ + if (!rss) + rss = min_t(int, opt.arg.r.max, + num_online_cpus()); + else + feature[RING_F_FDIR].limit = rss; + + feature[RING_F_RSS].limit = rss; +#ifdef module_param_array + } else if (opt.def == 0) { + rss = min_t(int, ixgbe_max_rss_indices(adapter), + num_online_cpus()); + feature[RING_F_RSS].limit = rss; + } +#endif + /* Check Interoperability */ + if (rss > 1) { + if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "Multiqueue is disabled. " + "Limiting RSS.\n"); + feature[RING_F_RSS].limit = 1; + } + } + } + { /* Virtual Machine Device Queues (VMDQ) */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Virtual Machine Device Queues (VMDQ)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = IXGBE_MAX_VMDQ_INDICES + } } + }; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + /* 82598 only supports up to 16 pools */ + opt.arg.r.max = 16; + break; + default: + break; + } + +#ifdef module_param_array + if (num_VMDQ > bd) { +#endif + vmdq = VMDQ[bd]; + + ixgbe_validate_option(&vmdq, &opt); + + /* zero or one both mean disabled from our driver's + * perspective */ + if (vmdq > 1) { + *aflags |= IXGBE_FLAG_VMDQ_ENABLED; + } else + *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = vmdq; +#ifdef module_param_array + } else { + if (opt.def == OPTION_DISABLED) + *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; + else + *aflags |= IXGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = opt.def; + } +#endif + /* Check Interoperability */ + if (*aflags & IXGBE_FLAG_VMDQ_ENABLED) { + if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "VMDQ is not supported while multiple " + "queues are disabled. 
" + "Disabling VMDQ.\n"); + *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; + feature[RING_F_VMDQ].limit = 0; + } + } + } +#ifdef CONFIG_PCI_IOV + { /* Single Root I/O Virtualization (SR-IOV) */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "I/O Virtualization (IOV)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = MAX_SRIOV_VFS} } + }; + +#ifdef module_param_array + if (num_max_vfs > bd) { +#endif + unsigned int vfs = max_vfs[bd]; + if (ixgbe_validate_option(&vfs, &opt)) { + vfs = 0; + DPRINTK(PROBE, INFO, + "max_vfs out of range " + "Disabling SR-IOV.\n"); + } + + adapter->max_vfs = vfs; + + if (vfs) + *aflags |= IXGBE_FLAG_SRIOV_ENABLED; + else + *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; +#ifdef module_param_array + } else { + if (opt.def == OPTION_DISABLED) { + adapter->max_vfs = 0; + *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; + } else { + adapter->max_vfs = opt.def; + *aflags |= IXGBE_FLAG_SRIOV_ENABLED; + } + } +#endif + + /* Check Interoperability */ + if (*aflags & IXGBE_FLAG_SRIOV_ENABLED) { + if (!(*aflags & IXGBE_FLAG_SRIOV_CAPABLE)) { + DPRINTK(PROBE, INFO, + "IOV is not supported on this " + "hardware. Disabling IOV.\n"); + *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; + adapter->max_vfs = 0; + } else if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "IOV is not supported while multiple " + "queues are disabled. 
" + "Disabling IOV.\n"); + *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; + adapter->max_vfs = 0; + } + } + } + { /* VEPA Bridge Mode enable for SR-IOV mode */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "VEPA Bridge Mode Enable", + .err = "defaulting to disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = OPTION_ENABLED} } + }; + +#ifdef module_param_array + if (num_VEPA > bd) { +#endif + unsigned int vepa = VEPA[bd]; + ixgbe_validate_option(&vepa, &opt); + if (vepa) + adapter->flags |= + IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; +#ifdef module_param_array + } else { + if (opt.def == OPTION_ENABLED) + adapter->flags |= + IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } +#endif + } +#endif /* CONFIG_PCI_IOV */ + { /* Interrupt Throttling Rate */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of "__MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + +#ifdef module_param_array + if (num_InterruptThrottleRate > bd) { +#endif + u32 itr = InterruptThrottleRate[bd]; + switch (itr) { + case 0: + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + adapter->rx_itr_setting = 0; + break; + case 1: + DPRINTK(PROBE, INFO, "dynamic interrupt " + "throttling enabled\n"); + adapter->rx_itr_setting = 1; + break; + default: + ixgbe_validate_option(&itr, &opt); + /* the first bit is used as control */ + adapter->rx_itr_setting = (1000000/itr) << 2; + break; + } + adapter->tx_itr_setting = adapter->rx_itr_setting; +#ifdef module_param_array + } else { + adapter->rx_itr_setting = opt.def; + adapter->tx_itr_setting = opt.def; + } +#endif + } +#ifndef IXGBE_NO_LLI + { /* Low Latency Interrupt TCP Port*/ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt TCP Port", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIPORT), + .def = DEFAULT_LLIPORT, + .arg = 
{ .r = { .min = MIN_LLIPORT, + .max = MAX_LLIPORT } } + }; + +#ifdef module_param_array + if (num_LLIPort > bd && ixgbe_lli_supported(adapter, &opt)) { +#endif + adapter->lli_port = LLIPort[bd]; + if (adapter->lli_port) { + ixgbe_validate_option(&adapter->lli_port, &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_port = opt.def; + } +#endif + } + { /* Low Latency Interrupt on Packet Size */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Packet Size", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLISIZE), + .def = DEFAULT_LLISIZE, + .arg = { .r = { .min = MIN_LLISIZE, + .max = MAX_LLISIZE } } + }; + +#ifdef module_param_array + if (num_LLISize > bd && ixgbe_lli_supported(adapter, &opt)) { +#endif + adapter->lli_size = LLISize[bd]; + if (adapter->lli_size) { + ixgbe_validate_option(&adapter->lli_size, &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_size = opt.def; + } +#endif + } + { /*Low Latency Interrupt on TCP Push flag*/ + static struct ixgbe_option opt = { + .type = enable_option, + .name = "Low Latency Interrupt on TCP Push flag", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + +#ifdef module_param_array + if (num_LLIPush > bd && ixgbe_lli_supported(adapter, &opt)) { +#endif + unsigned int lli_push = LLIPush[bd]; + + ixgbe_validate_option(&lli_push, &opt); + if (lli_push) + *aflags |= IXGBE_FLAG_LLI_PUSH; + else + *aflags &= ~IXGBE_FLAG_LLI_PUSH; +#ifdef module_param_array + } else { + *aflags &= ~IXGBE_FLAG_LLI_PUSH; + } +#endif + } + { /* Low Latency Interrupt EtherType*/ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Ethernet Protocol " + "Type", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIETYPE), + .def = DEFAULT_LLIETYPE, + .arg = { .r = { .min = 
MIN_LLIETYPE, + .max = MAX_LLIETYPE } } + }; + +#ifdef module_param_array + if (num_LLIEType > bd && ixgbe_lli_supported(adapter, &opt)) { +#endif + adapter->lli_etype = LLIEType[bd]; + if (adapter->lli_etype) { + ixgbe_validate_option(&adapter->lli_etype, + &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_etype = opt.def; + } +#endif + } + { /* LLI VLAN Priority */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on VLAN priority " + "threshold", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIVLANP), + .def = DEFAULT_LLIVLANP, + .arg = { .r = { .min = MIN_LLIVLANP, + .max = MAX_LLIVLANP } } + }; + +#ifdef module_param_array + if (num_LLIVLANP > bd && ixgbe_lli_supported(adapter, &opt)) { +#endif + adapter->lli_vlan_pri = LLIVLANP[bd]; + if (adapter->lli_vlan_pri) { + ixgbe_validate_option(&adapter->lli_vlan_pri, + &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } +#ifdef module_param_array + } else { + adapter->lli_vlan_pri = opt.def; + } +#endif + } +#endif /* IXGBE_NO_LLI */ +#ifdef HAVE_TX_MQ + { /* Flow Director packet buffer allocation */ + unsigned int fdir_pballoc_mode; + static struct ixgbe_option opt = { + .type = range_option, + .name = "Flow Director packet buffer allocation", + .err = "using default of " + __MODULE_STRING(IXGBE_DEFAULT_FDIR_PBALLOC), + .def = IXGBE_DEFAULT_FDIR_PBALLOC, + .arg = {.r = {.min = IXGBE_FDIR_PBALLOC_64K, + .max = IXGBE_FDIR_PBALLOC_256K} } + }; + char pstring[10]; + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_NONE; + } else if (num_FdirPballoc > bd) { + fdir_pballoc_mode = FdirPballoc[bd]; + ixgbe_validate_option(&fdir_pballoc_mode, &opt); + switch (fdir_pballoc_mode) { + case IXGBE_FDIR_PBALLOC_256K: + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_256K; + sprintf(pstring, "256kB"); + break; + case 
IXGBE_FDIR_PBALLOC_128K: + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_128K; + sprintf(pstring, "128kB"); + break; + case IXGBE_FDIR_PBALLOC_64K: + default: + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; + sprintf(pstring, "64kB"); + break; + } + DPRINTK(PROBE, INFO, "Flow Director will be allocated " + "%s of packet buffer\n", pstring); + } else { + adapter->fdir_pballoc = opt.def; + } + + } + { /* Flow Director ATR Tx sample packet rate */ + static struct ixgbe_option opt = { + .type = range_option, + .name = "Software ATR Tx packet sample rate", + .err = "using default of " + __MODULE_STRING(IXGBE_DEFAULT_ATR_SAMPLE_RATE), + .def = IXGBE_DEFAULT_ATR_SAMPLE_RATE, + .arg = {.r = {.min = IXGBE_ATR_SAMPLE_RATE_OFF, + .max = IXGBE_MAX_ATR_SAMPLE_RATE} } + }; + static const char atr_string[] = + "ATR Tx Packet sample rate set to"; + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + adapter->atr_sample_rate = IXGBE_ATR_SAMPLE_RATE_OFF; + } else if (num_AtrSampleRate > bd) { + adapter->atr_sample_rate = AtrSampleRate[bd]; + + if (adapter->atr_sample_rate) { + ixgbe_validate_option(&adapter->atr_sample_rate, + &opt); + DPRINTK(PROBE, INFO, "%s %d\n", atr_string, + adapter->atr_sample_rate); + } + } else { + adapter->atr_sample_rate = opt.def; + } + } +#endif /* HAVE_TX_MQ */ +#if IS_ENABLED(CONFIG_FCOE) + { + *aflags &= ~IXGBE_FLAG_FCOE_CAPABLE; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_82599EB: { + struct ixgbe_option opt = { + .type = enable_option, + .name = "Enabled/Disable FCoE offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; +#ifdef module_param_array + if (num_FCoE > bd) { +#endif + unsigned int fcoe = FCoE[bd]; + + ixgbe_validate_option(&fcoe, &opt); + if (fcoe) + *aflags |= IXGBE_FLAG_FCOE_CAPABLE; +#ifdef module_param_array + } else { + if (opt.def == OPTION_ENABLED) + *aflags |= IXGBE_FLAG_FCOE_CAPABLE; + } +#endif + DPRINTK(PROBE, INFO, "FCoE Offload feature %sabled\n", + 
(*aflags & IXGBE_FLAG_FCOE_CAPABLE) ? + "en" : "dis"); + } + break; + default: + break; + } + } +#endif /* CONFIG_FCOE */ + { /* LRO - Set Large Receive Offload */ + struct ixgbe_option opt = { + .type = enable_option, + .name = "LRO - Large Receive Offload", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + struct net_device *netdev = adapter->netdev; + + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) + opt.def = OPTION_DISABLED; + +#ifdef module_param_array + if (num_LRO > bd) { +#endif + unsigned int lro = LRO[bd]; + ixgbe_validate_option(&lro, &opt); + if (lro) + netdev->features |= NETIF_F_LRO; + else + netdev->features &= ~NETIF_F_LRO; +#ifdef module_param_array + } else { + netdev->features &= ~NETIF_F_LRO; + } +#endif + if ((netdev->features & NETIF_F_LRO) && + !(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) { + DPRINTK(PROBE, INFO, + "RSC is not supported on this " + "hardware. Disabling RSC.\n"); + netdev->features &= ~NETIF_F_LRO; + } + } + { /* + * allow_unsupported_sfp - Enable/Disable support for unsupported + * and untested SFP+ modules. 
+ */ + struct ixgbe_option opt = { + .type = enable_option, + .name = "allow_unsupported_sfp", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; +#ifdef module_param_array + if (num_allow_unsupported_sfp > bd) { +#endif + unsigned int enable_unsupported_sfp = + allow_unsupported_sfp[bd]; + ixgbe_validate_option(&enable_unsupported_sfp, &opt); + if (enable_unsupported_sfp) { + adapter->hw.allow_unsupported_sfp = true; + } else { + adapter->hw.allow_unsupported_sfp = false; + } +#ifdef module_param_array + } else { + adapter->hw.allow_unsupported_sfp = false; + } +#endif + } + { /* DMA Coalescing */ + struct ixgbe_option opt = { + .type = range_option, + .name = "dmac_watchdog", + .err = "defaulting to 0 (disabled)", + .def = 0, + .arg = { .r = { .min = 41, .max = 10000 } }, + }; + const char *cmsg = "DMA coalescing not supported on this hardware"; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + if (adapter->rx_itr_setting || adapter->tx_itr_setting) + break; + opt.err = "interrupt throttling disabled also disables DMA coalescing"; + opt.arg.r.min = 0; + opt.arg.r.max = 0; + break; + default: + opt.err = cmsg; + opt.msg = cmsg; + opt.arg.r.min = 0; + opt.arg.r.max = 0; + } +#ifdef module_param_array + if (num_dmac_watchdog > bd) { +#endif + unsigned int dmac_wd = dmac_watchdog[bd]; + + ixgbe_validate_option(&dmac_wd, &opt); + adapter->hw.mac.dmac_config.watchdog_timer = dmac_wd; +#ifdef module_param_array + } else { + adapter->hw.mac.dmac_config.watchdog_timer = opt.def; + } +#endif + } + { /* VXLAN rx offload */ + struct ixgbe_option opt = { + .type = range_option, + .name = "vxlan_rx", + .err = "defaulting to 1 (enabled)", + .def = 1, + .arg = { .r = { .min = 0, .max = 1 } }, + }; + const char *cmsg = "VXLAN rx offload not supported on this hardware"; + const u32 flag = IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE; + + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { + opt.err = cmsg; + 
opt.msg = cmsg; + opt.def = 0; + opt.arg.r.max = 0; + } +#ifdef module_param_array + if (num_vxlan_rx > bd) { +#endif + unsigned int enable_vxlan_rx = vxlan_rx[bd]; + + ixgbe_validate_option(&enable_vxlan_rx, &opt); + if (enable_vxlan_rx) + adapter->flags |= flag; + else + adapter->flags &= ~flag; +#ifdef module_param_array + } else if (opt.def) { + adapter->flags |= flag; + } else { + adapter->flags &= ~flag; + } +#endif + } + + { /* MDD support */ + struct ixgbe_option opt = { + .type = enable_option, + .name = "Malicious Driver Detection", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED, + }; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: +#ifdef module_param_array + if (num_MDD > bd) { +#endif + mdd = MDD[bd]; + ixgbe_validate_option(&mdd, &opt); + + if (mdd){ + *aflags |= IXGBE_FLAG_MDD_ENABLED; + + } else{ + *aflags &= ~IXGBE_FLAG_MDD_ENABLED; + } +#ifdef module_param_array + } else { + *aflags |= IXGBE_FLAG_MDD_ENABLED; + } +#endif + break; + default: + *aflags &= ~IXGBE_FLAG_MDD_ENABLED; + break; + } + } + +} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.c index d914b4070f92..442f9a9c174a 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -26,37 +22,32 @@ *******************************************************************************/ -#include -#include -#include - -#include "ixgbe.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" #include "ixgbe_phy.h" -static void ixgbe_i2c_start(struct ixgbe_hw *hw); -static void ixgbe_i2c_stop(struct ixgbe_hw *hw); -static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); -static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); -static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); -static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); -static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); -static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); -static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); -static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); -static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); -static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); -static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); -static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); -static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); +STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw); +STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw); +STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); +STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); +STATIC s32 
ixgbe_get_i2c_ack(struct ixgbe_hw *hw); +STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); +STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); +STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); +STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); +STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); /** - * ixgbe_out_i2c_byte_ack - Send I2C byte with ack - * @hw: pointer to the hardware structure - * @byte: byte to send + * ixgbe_out_i2c_byte_ack - Send I2C byte with ack + * @hw: pointer to the hardware structure + * @byte: byte to send * - * Returns an error code on error. - **/ -static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) + * Returns an error code on error. + */ +STATIC s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) { s32 status; @@ -67,13 +58,13 @@ static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) } /** - * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack - * @hw: pointer to the hardware structure - * @byte: pointer to a u8 to receive the byte + * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack + * @hw: pointer to the hardware structure + * @byte: pointer to a u8 to receive the byte * - * Returns an error code on error. - **/ -static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) + * Returns an error code on error. + */ +STATIC s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) { s32 status; @@ -85,13 +76,13 @@ static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) } /** - * ixgbe_ones_comp_byte_add - Perform one's complement addition - * @add1: addend 1 - * @add2: addend 2 + * ixgbe_ones_comp_byte_add - Perform one's complement addition + * @add1 - addend 1 + * @add2 - addend 2 * - * Returns one's complement 8-bit sum. 
- **/ -static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) + * Returns one's complement 8-bit sum. + */ +STATIC u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) { u16 sum = add1 + add2; @@ -100,17 +91,17 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) } /** - * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to read from - * @reg: I2C device register to read from - * @val: pointer to location to receive read value - * @lock: true if to take and release semaphore + * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * @lock: true if to take and release semaphore * - * Returns an error code on error. + * Returns an error code on error. */ -s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, - u16 reg, u16 *val, bool lock) +s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, + u16 *val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; int max_retry = 3; @@ -121,7 +112,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u8 reg_high; u8 csum; - reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */ + reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); csum = ~csum; do { @@ -169,26 +160,26 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, hw->mac.ops.release_swfw_sync(hw, swfw_mask); retry++; if (retry < max_retry) - hw_dbg(hw, "I2C byte read combined error - Retry.\n"); + DEBUGOUT("I2C byte read combined error - Retrying.\n"); else - hw_dbg(hw, "I2C byte read combined error.\n"); + DEBUGOUT("I2C byte read combined error.\n"); } while (retry < max_retry); return IXGBE_ERR_I2C; } /** - * 
ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to write to - * @reg: I2C device register to write to - * @val: value to write - * @lock: true if to take and release semaphore + * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * @lock: true if to take and release semaphore * - * Returns an error code on error. + * Returns an error code on error. */ -s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, - u16 reg, u16 val, bool lock) +s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, + u16 val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; int max_retry = 1; @@ -196,7 +187,7 @@ s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u8 reg_high; u8 csum; - reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */ + reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); csum = ixgbe_ones_comp_byte_add(csum, val >> 8); csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF); @@ -234,44 +225,80 @@ s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, hw->mac.ops.release_swfw_sync(hw, swfw_mask); retry++; if (retry < max_retry) - hw_dbg(hw, "I2C byte write combined error - Retry.\n"); + DEBUGOUT("I2C byte write combined error - Retrying.\n"); else - hw_dbg(hw, "I2C byte write combined error.\n"); + DEBUGOUT("I2C byte write combined error.\n"); } while (retry < max_retry); return IXGBE_ERR_I2C; } /** - * ixgbe_probe_phy - Probe a single address for a PHY - * @hw: pointer to hardware structure - * @phy_addr: PHY address to probe + * ixgbe_init_phy_ops_generic - Inits PHY function ptrs + * @hw: pointer to the hardware structure * - * Returns true if PHY 
found + * Initialize the function pointers. **/ +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + + DEBUGFUNC("ixgbe_init_phy_ops_generic"); + + /* PHY */ + phy->ops.identify = ixgbe_identify_phy_generic; + phy->ops.reset = ixgbe_reset_phy_generic; + phy->ops.read_reg = ixgbe_read_phy_reg_generic; + phy->ops.write_reg = ixgbe_write_phy_reg_generic; + phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi; + phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi; + phy->ops.setup_link = ixgbe_setup_phy_link_generic; + phy->ops.setup_link_speed = ixgbe_setup_phy_link_speed_generic; + phy->ops.check_link = NULL; + phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic; + phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_generic; + phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_generic; + phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_generic; + phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_generic; + phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_generic; + phy->ops.i2c_bus_clear = ixgbe_i2c_bus_clear; + phy->ops.identify_sfp = ixgbe_identify_module_generic; + phy->sfp_type = ixgbe_sfp_type_unknown; + phy->ops.read_i2c_byte_unlocked = ixgbe_read_i2c_byte_generic_unlocked; + phy->ops.write_i2c_byte_unlocked = + ixgbe_write_i2c_byte_generic_unlocked; + phy->ops.check_overtemp = ixgbe_tn_check_overtemp; + return IXGBE_SUCCESS; +} + +/** + * ixgbe_probe_phy - Probe a single address for a PHY + * @hw: pointer to hardware structure + * @phy_addr: PHY address to probe + * + * Returns true if PHY found + */ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr) { u16 ext_ability = 0; - hw->phy.mdio.prtad = phy_addr; - if (mdio45_probe(&hw->phy.mdio, phy_addr) != 0) { + if (!ixgbe_validate_phy_addr(hw, phy_addr)) { + DEBUGOUT1("Unable to validate PHY address 0x%04X\n", + phy_addr); return false; - } + } - if (ixgbe_get_phy_id(hw)) { + if (ixgbe_get_phy_id(hw)) return false; - } hw->phy.type = 
ixgbe_get_phy_type_from_id(hw->phy.id); if (hw->phy.type == ixgbe_phy_unknown) { - hw->phy.ops.read_reg(hw, - MDIO_PMA_EXTABLE, - MDIO_MMD_PMAPMD, - &ext_ability); + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); if (ext_ability & - (MDIO_PMA_EXTABLE_10GBT | - MDIO_PMA_EXTABLE_1000BT)) + (IXGBE_MDIO_PHY_10GBASET_ABILITY | + IXGBE_MDIO_PHY_1000BASET_ABILITY)) hw->phy.type = ixgbe_phy_cu_unknown; else hw->phy.type = ixgbe_phy_generic; @@ -288,8 +315,10 @@ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr) **/ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) { - u32 phy_addr; - u32 status = IXGBE_ERR_PHY_ADDR_INVALID; + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u16 phy_addr; + + DEBUGFUNC("ixgbe_identify_phy_generic"); if (!hw->phy.phy_semaphore_mask) { if (hw->bus.lan_id) @@ -299,21 +328,21 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) } if (hw->phy.type != ixgbe_phy_unknown) - return 0; + return IXGBE_SUCCESS; if (hw->phy.nw_mng_if_sel) { phy_addr = (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; if (ixgbe_probe_phy(hw, phy_addr)) - return 0; + return IXGBE_SUCCESS; else return IXGBE_ERR_PHY_ADDR_INVALID; } for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { if (ixgbe_probe_phy(hw, phy_addr)) { - status = 0; + status = IXGBE_SUCCESS; break; } } @@ -322,8 +351,8 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) * be found and the code will take this path. Caller has to * decide if it is an error or not. */ - if (status) - hw->phy.mdio.prtad = MDIO_PRTAD_NONE; + if (status != IXGBE_SUCCESS) + hw->phy.addr = 0; return status; } @@ -334,59 +363,95 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) * * This function checks the MMNGC.MNG_VETO bit to see if there are * any constraints on link from manageability. 
For MAC's that don't - * have this bit just return false since the link can not be blocked + * have this bit just return faluse since the link can not be blocked * via this method. **/ -bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw) +s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw) { u32 mmngc; + DEBUGFUNC("ixgbe_check_reset_blocked"); + /* If we don't have this bit, it can't be blocking */ if (hw->mac.type == ixgbe_mac_82598EB) return false; mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC); if (mmngc & IXGBE_MMNGC_MNG_VETO) { - hw_dbg(hw, "MNG_VETO bit detected.\n"); + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, + "MNG_VETO bit detected.\n"); return true; } return false; } +/** + * ixgbe_validate_phy_addr - Determines phy address is valid + * @hw: pointer to hardware structure + * + **/ +bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr) +{ + u16 phy_id = 0; + bool valid = false; + + DEBUGFUNC("ixgbe_validate_phy_addr"); + + hw->phy.addr = phy_addr; + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id); + + if (phy_id != 0xFFFF && phy_id != 0x0) + valid = true; + + DEBUGOUT1("PHY ID HIGH is 0x%04X\n", phy_id); + + return valid; +} + /** * ixgbe_get_phy_id - Get the phy type * @hw: pointer to hardware structure * **/ -static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) +s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) { - s32 status; + u32 status; u16 phy_id_high = 0; u16 phy_id_low = 0; - status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, + DEBUGFUNC("ixgbe_get_phy_id"); + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id_high); - if (!status) { + if (status == IXGBE_SUCCESS) { hw->phy.id = (u32)(phy_id_high << 16); - status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD, + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id_low); hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); hw->phy.revision = 
(u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); } + DEBUGOUT2("PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X\n", + phy_id_high, phy_id_low); + return status; } /** * ixgbe_get_phy_type_from_id - Get the phy type - * @hw: pointer to hardware structure + * @phy_id: PHY ID information * **/ -static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) +enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) { enum ixgbe_phy_type phy_type; + DEBUGFUNC("ixgbe_get_phy_type_from_id"); + switch (phy_id) { case TN1010_PHY_ID: phy_type = ixgbe_phy_tn; @@ -406,11 +471,14 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) case X557_PHY_ID2: phy_type = ixgbe_phy_x550em_ext_t; break; + case IXGBE_M88E1500_E_PHY_ID: + case IXGBE_M88E1543_E_PHY_ID: + phy_type = ixgbe_phy_ext_1g_t; + break; default: phy_type = ixgbe_phy_unknown; break; } - return phy_type; } @@ -422,30 +490,32 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) { u32 i; u16 ctrl = 0; - s32 status = 0; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_reset_phy_generic"); if (hw->phy.type == ixgbe_phy_unknown) status = ixgbe_identify_phy_generic(hw); - if (status != 0 || hw->phy.type == ixgbe_phy_none) - return status; + if (status != IXGBE_SUCCESS || hw->phy.type == ixgbe_phy_none) + goto out; /* Don't reset PHY if it's shut down due to overtemp. */ if (!hw->phy.reset_if_overtemp && (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) - return 0; + goto out; /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) - return 0; + goto out; /* * Perform soft PHY reset to the PHY_XS. * This will cause a soft reset to the PHY */ - hw->phy.ops.write_reg(hw, MDIO_CTRL1, - MDIO_MMD_PHYXS, - MDIO_CTRL1_RESET); + hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + IXGBE_MDIO_PHY_XS_RESET); /* * Poll for reset bit to self-clear indicating reset is complete. 
@@ -453,131 +523,44 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) * 1.7 usec delay after the reset is complete. */ for (i = 0; i < 30; i++) { - msleep(100); + msec_delay(100); if (hw->phy.type == ixgbe_phy_x550em_ext_t) { status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_TX_VENDOR_ALARMS_3, - MDIO_MMD_PMAPMD, &ctrl); - if (status) + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &ctrl); + if (status != IXGBE_SUCCESS) return status; if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { - udelay(2); + usec_delay(2); break; } } else { - status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, - MDIO_MMD_PHYXS, &ctrl); - if (status) + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + &ctrl); + if (status != IXGBE_SUCCESS) return status; - if (!(ctrl & MDIO_CTRL1_RESET)) { - udelay(2); + if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) { + usec_delay(2); break; } } } - if (ctrl & MDIO_CTRL1_RESET) { - hw_dbg(hw, "PHY reset polling failed to complete.\n"); - return IXGBE_ERR_RESET_FAILED; - } - - return 0; -} - -/** - * ixgbe_read_phy_mdio - Reads a value from a specified PHY register without - * the SWFW lock. This Clasue 22 API is patched by Hilbert - * @hw: pointer to hardware structure - * @reg_addr: 32 bit address of PHY register to read - * @phy_data: Pointer to read data from PHY register - **/ -s32 ixgbe_read_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, - u16 *phy_data) -{ - u32 i, data, command; - - /* Setup and write the read command */ - command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | - IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC | - IXGBE_MSCA_MDI_COMMAND; - - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - - /* Check every 10 usec to see if the address cycle completed. 
- * The MDI Command bit will clear when the operation is - * complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); - - command = IXGBE_READ_REG(hw, IXGBE_MSCA); - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; - } - - - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY address command did not complete.\n"); - return IXGBE_ERR_PHY; + if (ctrl & IXGBE_MDIO_PHY_XS_RESET) { + status = IXGBE_ERR_RESET_FAILED; + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "PHY reset polling failed to complete.\n"); } - /* Read operation is complete. Get the data - * from MSRWD - */ - data = IXGBE_READ_REG(hw, IXGBE_MSRWD); - data >>= IXGBE_MSRWD_READ_DATA_SHIFT; - *phy_data = (u16)(data); - - return 0; +out: + return status; } -/** - * ixgbe_write_phy_reg_mdio - Writes a value to specified PHY register - * without SWFW lock. This Clause 22 API is patched by Hilbert - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 5 bit device type - * @phy_data: Data to write to the PHY register - **/ -s32 ixgbe_write_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data) -{ - u32 i, command; - - /* Put the data in the MDI single read and write data register*/ - IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); - - /* Setup and write the write command */ - command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | - IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | - IXGBE_MSCA_MDI_COMMAND; - - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - - /* - * Check every 10 usec to see if the address cycle completed. 
- * The MDI Command bit will clear when the operation is - * complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); - - command = IXGBE_READ_REG(hw, IXGBE_MSCA); - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; - } - - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY write cmd didn't complete\n"); - return IXGBE_ERR_PHY; - } - - return 0; -} /** * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without * the SWFW lock @@ -586,52 +569,56 @@ s32 ixgbe_write_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, * @phy_data: Pointer to read data from PHY register **/ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, - u16 *phy_data) + u16 *phy_data) { u32 i, data, command; /* Setup and write the address cycle command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - /* Check every 10 usec to see if the address cycle completed. + /* + * Check every 10 usec to see if the address cycle completed. 
* The MDI Command bit will clear when the operation is * complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); + usec_delay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; + break; } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY address command did not complete.\n"); + ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n"); + DEBUGOUT("PHY address command did not complete, returning IXGBE_ERR_PHY\n"); return IXGBE_ERR_PHY; } - /* Address cycle complete, setup and write the read + /* + * Address cycle complete, setup and write the read * command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - /* Check every 10 usec to see if the address cycle + /* + * Check every 10 usec to see if the address cycle * completed. The MDI Command bit will clear when the * operation is complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); + usec_delay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) @@ -639,18 +626,20 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY read command didn't complete\n"); + ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n"); + DEBUGOUT("PHY read command didn't complete, returning IXGBE_ERR_PHY\n"); return IXGBE_ERR_PHY; } - /* Read operation is complete. Get the data + /* + * Read operation is complete. 
Get the data * from MSRWD */ data = IXGBE_READ_REG(hw, IXGBE_MSRWD); data >>= IXGBE_MSRWD_READ_DATA_SHIFT; *phy_data = (u16)(data); - return 0; + return IXGBE_SUCCESS; } /** @@ -666,13 +655,14 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, s32 status; u32 gssr = hw->phy.phy_semaphore_mask; - if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { - status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type, - phy_data); - hw->mac.ops.release_swfw_sync(hw, gssr); - } else { + DEBUGFUNC("ixgbe_read_phy_reg_generic"); + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) return IXGBE_ERR_SWFW_SYNC; - } + + status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); + + hw->mac.ops.release_swfw_sync(hw, gssr); return status; } @@ -696,7 +686,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, /* Setup and write the address cycle command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); @@ -707,7 +697,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, * complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); + usec_delay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) @@ -715,7 +705,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY address cmd didn't complete\n"); + ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address cmd didn't complete\n"); return IXGBE_ERR_PHY; } @@ -725,17 +715,18 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (hw->phy.addr << 
IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - /* Check every 10 usec to see if the address cycle + /* + * Check every 10 usec to see if the address cycle * completed. The MDI Command bit will clear when the * operation is complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); + usec_delay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) @@ -743,11 +734,11 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY write cmd didn't complete\n"); + ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY write cmd didn't complete\n"); return IXGBE_ERR_PHY; } - return 0; + return IXGBE_SUCCESS; } /** @@ -764,44 +755,53 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, s32 status; u32 gssr = hw->phy.phy_semaphore_mask; - if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { - status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, + DEBUGFUNC("ixgbe_write_phy_reg_generic"); + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) { + status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, phy_data); hw->mac.ops.release_swfw_sync(hw, gssr); } else { - return IXGBE_ERR_SWFW_SYNC; + status = IXGBE_ERR_SWFW_SYNC; } return status; } /** - * ixgbe_setup_phy_link_generic - Set and restart autoneg + * ixgbe_setup_phy_link_generic - Set and restart auto-neg * @hw: pointer to hardware structure * - * Restart autonegotiation and PHY and waits for completion. + * Restart auto-negotiation and PHY and waits for completion. 
**/ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) { - s32 status = 0; + s32 status = IXGBE_SUCCESS; u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; bool autoneg = false; ixgbe_link_speed speed; + DEBUGFUNC("ixgbe_setup_phy_link_generic"); + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); /* Set or unset auto-negotiation 10G advertisement */ - hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg); + hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); - autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; + autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) && (speed & IXGBE_LINK_SPEED_10GB_FULL)) - autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; - hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg); + hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, - MDIO_MMD_AN, &autoneg_reg); + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); if (hw->mac.type == ixgbe_mac_X550) { /* Set or unset auto-negotiation 5G advertisement */ @@ -825,30 +825,36 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, - MDIO_MMD_AN, autoneg_reg); + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); /* Set or unset auto-negotiation 100M advertisement */ - hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg); + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); - autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF); + autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE | + IXGBE_MII_100BASE_T_ADVERTISE_HALF); if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) && (speed & 
IXGBE_LINK_SPEED_100_FULL)) - autoneg_reg |= ADVERTISE_100FULL; + autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; - hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg); + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); /* Blocked by MNG FW so don't reset PHY */ if (ixgbe_check_reset_blocked(hw)) - return 0; + return status; - /* Restart PHY autonegotiation and wait for completion */ - hw->phy.ops.read_reg(hw, MDIO_CTRL1, - MDIO_MMD_AN, &autoneg_reg); + /* Restart PHY auto-negotiation. */ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); - autoneg_reg |= MDIO_AN_CTRL1_RESTART; + autoneg_reg |= IXGBE_MII_RESTART; - hw->phy.ops.write_reg(hw, MDIO_CTRL1, - MDIO_MMD_AN, autoneg_reg); + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); return status; } @@ -862,7 +868,12 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { - /* Clear autoneg_advertised and set new values based on input link + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + DEBUGFUNC("ixgbe_setup_phy_link_speed_generic"); + + /* + * Clear autoneg_advertised and set new values based on input link * speed. */ hw->phy.autoneg_advertised = 0; @@ -886,34 +897,34 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL; /* Setup link based on the new speed settings */ - if (hw->phy.ops.setup_link) - hw->phy.ops.setup_link(hw); + ixgbe_setup_phy_link(hw); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_get_copper_speeds_supported - Get copper link speed from phy + * ixgbe_get_copper_speeds_supported - Get copper link speeds from phy * @hw: pointer to hardware structure * * Determines the supported link capabilities by reading the PHY auto * negotiation register. 
- */ + **/ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) { - u16 speed_ability; s32 status; + u16 speed_ability; - status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &speed_ability); if (status) return status; - if (speed_ability & MDIO_SPEED_10G) + if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL; - if (speed_ability & MDIO_PMA_SPEED_1000) + if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G) hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL; - if (speed_ability & MDIO_PMA_SPEED_100) + if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M) hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; switch (hw->mac.type) { @@ -922,27 +933,29 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; break; case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; break; default: break; } - return 0; + return status; } /** - * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: boolean auto-negotiation value - */ + * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + **/ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { - s32 status = 0; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic"); *autoneg = true; if (!hw->phy.speeds_supported) @@ -962,13 +975,15 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up) { - s32 
status; + s32 status = IXGBE_SUCCESS; u32 time_out; u32 max_time_out = 10; u16 phy_link = 0; u16 phy_speed = 0; u16 phy_data = 0; + DEBUGFUNC("ixgbe_check_phy_link_tnx"); + /* Initialize speed and link to default case */ *link_up = false; *speed = IXGBE_LINK_SPEED_10GB_FULL; @@ -979,15 +994,14 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, * be changed for other copper PHYs. */ for (time_out = 0; time_out < max_time_out; time_out++) { - udelay(10); + usec_delay(10); status = hw->phy.ops.read_reg(hw, - MDIO_STAT1, - MDIO_MMD_VEND1, - &phy_data); - phy_link = phy_data & - IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; + IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &phy_data); + phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; phy_speed = phy_data & - IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { *link_up = true; if (phy_speed == @@ -1001,41 +1015,41 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, } /** - * ixgbe_setup_phy_link_tnx - Set and restart autoneg + * ixgbe_setup_phy_link_tnx - Set and restart auto-neg * @hw: pointer to hardware structure * - * Restart autonegotiation and PHY and waits for completion. - * This function always returns success, this is nessary since - * it is called via a function pointer that could call other - * functions that could return an error. + * Restart auto-negotiation and PHY and waits for completion. 
**/ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) { + s32 status = IXGBE_SUCCESS; u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; bool autoneg = false; ixgbe_link_speed speed; + DEBUGFUNC("ixgbe_setup_phy_link_tnx"); + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); if (speed & IXGBE_LINK_SPEED_10GB_FULL) { /* Set or unset auto-negotiation 10G advertisement */ - hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, - MDIO_MMD_AN, + hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); - autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; + autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; - hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, - MDIO_MMD_AN, + hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); } if (speed & IXGBE_LINK_SPEED_1GB_FULL) { /* Set or unset auto-negotiation 1G advertisement */ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, - MDIO_MMD_AN, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; @@ -1043,39 +1057,77 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, - MDIO_MMD_AN, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); } if (speed & IXGBE_LINK_SPEED_100_FULL) { /* Set or unset auto-negotiation 100M advertisement */ - hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); - autoneg_reg &= ~(ADVERTISE_100FULL | - ADVERTISE_100HALF); + autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) - autoneg_reg |= ADVERTISE_100FULL; + autoneg_reg |= 
IXGBE_MII_100BASE_T_ADVERTISE; - hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); } /* Blocked by MNG FW so don't reset PHY */ if (ixgbe_check_reset_blocked(hw)) - return 0; + return status; + + /* Restart PHY auto-negotiation. */ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); - /* Restart PHY autonegotiation and wait for completion */ - hw->phy.ops.read_reg(hw, MDIO_CTRL1, - MDIO_MMD_AN, &autoneg_reg); + autoneg_reg |= IXGBE_MII_RESTART; - autoneg_reg |= MDIO_AN_CTRL1_RESTART; + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); - hw->phy.ops.write_reg(hw, MDIO_CTRL1, - MDIO_MMD_AN, autoneg_reg); - return 0; + return status; +} + +/** + * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status; + + DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx"); + + status = hw->phy.ops.read_reg(hw, TNX_FW_REV, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + firmware_version); + + return status; +} + +/** + * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status; + + DEBUGFUNC("ixgbe_get_phy_firmware_version_generic"); + + status = hw->phy.ops.read_reg(hw, AQ_FW_REV, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + firmware_version); + + return status; } /** @@ -1088,37 +1140,42 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) bool end_data = false; u16 list_offset, data_offset; u16 phy_data = 0; - s32 ret_val; + s32 ret_val = 
IXGBE_SUCCESS; u32 i; + DEBUGFUNC("ixgbe_reset_phy_nl"); + /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) - return 0; + goto out; - hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data); + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); /* reset the PHY and poll for completion */ - hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, - (phy_data | MDIO_CTRL1_RESET)); + hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + (phy_data | IXGBE_MDIO_PHY_XS_RESET)); for (i = 0; i < 100; i++) { - hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, - &phy_data); - if ((phy_data & MDIO_CTRL1_RESET) == 0) + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); + if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0) break; - usleep_range(10000, 20000); + msec_delay(10); } - if ((phy_data & MDIO_CTRL1_RESET) != 0) { - hw_dbg(hw, "PHY reset did not complete.\n"); - return IXGBE_ERR_PHY; + if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) { + DEBUGOUT("PHY reset did not complete.\n"); + ret_val = IXGBE_ERR_PHY; + goto out; } /* Get init offsets */ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, &data_offset); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); data_offset++; @@ -1135,52 +1192,57 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) switch (control) { case IXGBE_DELAY_NL: data_offset++; - hw_dbg(hw, "DELAY: %d MS\n", edata); - usleep_range(edata * 1000, edata * 2000); + DEBUGOUT1("DELAY: %d MS\n", edata); + msec_delay(edata); break; case IXGBE_DATA_NL: - hw_dbg(hw, "DATA:\n"); + DEBUGOUT("DATA:\n"); data_offset++; - ret_val = hw->eeprom.ops.read(hw, data_offset++, + ret_val = hw->eeprom.ops.read(hw, data_offset, &phy_offset); if (ret_val) goto err_eeprom; + data_offset++; for (i = 0; i < edata; i++) { ret_val = hw->eeprom.ops.read(hw, 
data_offset, &eword); if (ret_val) goto err_eeprom; hw->phy.ops.write_reg(hw, phy_offset, - MDIO_MMD_PMAPMD, eword); - hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, - phy_offset); + IXGBE_TWINAX_DEV, eword); + DEBUGOUT2("Wrote %4.4x to %4.4x\n", eword, + phy_offset); data_offset++; phy_offset++; } break; case IXGBE_CONTROL_NL: data_offset++; - hw_dbg(hw, "CONTROL:\n"); + DEBUGOUT("CONTROL:\n"); if (edata == IXGBE_CONTROL_EOL_NL) { - hw_dbg(hw, "EOL\n"); + DEBUGOUT("EOL\n"); end_data = true; } else if (edata == IXGBE_CONTROL_SOL_NL) { - hw_dbg(hw, "SOL\n"); + DEBUGOUT("SOL\n"); } else { - hw_dbg(hw, "Bad control value\n"); - return IXGBE_ERR_PHY; + DEBUGOUT("Bad control value\n"); + ret_val = IXGBE_ERR_PHY; + goto out; } break; default: - hw_dbg(hw, "Bad control type\n"); - return IXGBE_ERR_PHY; + DEBUGOUT("Bad control type\n"); + ret_val = IXGBE_ERR_PHY; + goto out; } } +out: return ret_val; err_eeprom: - hw_err(hw, "eeprom read at offset %d failed\n", data_offset); + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", data_offset); return IXGBE_ERR_PHY; } @@ -1192,17 +1254,26 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) **/ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) { + s32 status = IXGBE_ERR_SFP_NOT_PRESENT; + + DEBUGFUNC("ixgbe_identify_module_generic"); + switch (hw->mac.ops.get_media_type(hw)) { case ixgbe_media_type_fiber: - return ixgbe_identify_sfp_module_generic(hw); + status = ixgbe_identify_sfp_module_generic(hw); + break; + case ixgbe_media_type_fiber_qsfp: - return ixgbe_identify_qsfp_module_generic(hw); + status = ixgbe_identify_qsfp_module_generic(hw); + break; + default: hw->phy.sfp_type = ixgbe_sfp_type_not_present; - return IXGBE_ERR_SFP_NOT_PRESENT; + status = IXGBE_ERR_SFP_NOT_PRESENT; + break; } - return IXGBE_ERR_SFP_NOT_PRESENT; + return status; } /** @@ -1213,8 +1284,7 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) **/ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) { - struct 
ixgbe_adapter *adapter = hw->back; - s32 status; + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; u32 vendor_oui = 0; enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; u8 identifier = 0; @@ -1225,241 +1295,258 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) u8 cable_spec = 0; u16 enforce_sfp = 0; + DEBUGFUNC("ixgbe_identify_sfp_module_generic"); + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { hw->phy.sfp_type = ixgbe_sfp_type_not_present; - return IXGBE_ERR_SFP_NOT_PRESENT; + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; } - /* LAN ID is needed for sfp_type determination */ + /* LAN ID is needed for I2C access */ hw->mac.ops.set_lan_id(hw); status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, &identifier); - if (status) + if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; - } - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_1GBE_COMP_CODES, - &comp_codes_1g); - - if (status) - goto err_read_i2c_eeprom; - - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_10GBE_COMP_CODES, - &comp_codes_10g); - - if (status) - goto err_read_i2c_eeprom; - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_CABLE_TECHNOLOGY, - &cable_tech); - - if (status) - goto err_read_i2c_eeprom; - - /* ID Module - * ========= - * 0 SFP_DA_CU - * 1 SFP_SR - * 2 SFP_LR - * 3 SFP_DA_CORE0 - 82599-specific - * 4 SFP_DA_CORE1 - 82599-specific - * 5 SFP_SR/LR_CORE0 - 82599-specific - * 6 SFP_SR/LR_CORE1 - 82599-specific - * 7 SFP_act_lmt_DA_CORE0 - 82599-specific - * 8 SFP_act_lmt_DA_CORE1 - 82599-specific - * 9 SFP_1g_cu_CORE0 - 82599-specific - * 10 SFP_1g_cu_CORE1 - 82599-specific - * 11 SFP_1g_sx_CORE0 - 82599-specific - * 12 SFP_1g_sx_CORE1 - 82599-specific - */ - if (hw->mac.type == ixgbe_mac_82598EB) { - if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) - hw->phy.sfp_type = ixgbe_sfp_type_da_cu; - else if 
(comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) - hw->phy.sfp_type = ixgbe_sfp_type_sr; - else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) - hw->phy.sfp_type = ixgbe_sfp_type_lr; - else - hw->phy.sfp_type = ixgbe_sfp_type_unknown; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; } else { - if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_da_cu_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_da_cu_core1; - } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { - hw->phy.ops.read_i2c_eeprom( - hw, IXGBE_SFF_CABLE_SPEC_COMP, - &cable_spec); - if (cable_spec & - IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_da_act_lmt_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_da_act_lmt_core1; - } else { - hw->phy.sfp_type = - ixgbe_sfp_type_unknown; - } - } else if (comp_codes_10g & - (IXGBE_SFF_10GBASESR_CAPABLE | - IXGBE_SFF_10GBASELR_CAPABLE)) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_srlr_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_srlr_core1; - } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_1g_cu_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_1g_cu_core1; - } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_1g_sx_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_1g_sx_core1; - } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_1g_lx_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_1g_lx_core1; - } else { - hw->phy.sfp_type = ixgbe_sfp_type_unknown; - } - } - - if (hw->phy.sfp_type != stored_sfp_type) - hw->phy.sfp_setup_needed = true; - - /* Determine if the SFP+ PHY is dual speed or not. 
*/ - hw->phy.multispeed_fiber = false; - if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && - (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || - ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && - (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) - hw->phy.multispeed_fiber = true; - - /* Determine PHY vendor */ - if (hw->phy.type != ixgbe_phy_nl) { - hw->phy.id = identifier; status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_VENDOR_OUI_BYTE0, - &oui_bytes[0]); + IXGBE_SFF_1GBE_COMP_CODES, + &comp_codes_1g); - if (status != 0) + if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_VENDOR_OUI_BYTE1, - &oui_bytes[1]); + IXGBE_SFF_10GBE_COMP_CODES, + &comp_codes_10g); - if (status != 0) + if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_VENDOR_OUI_BYTE2, - &oui_bytes[2]); + IXGBE_SFF_CABLE_TECHNOLOGY, + &cable_tech); - if (status != 0) + if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; - vendor_oui = - ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | - (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | - (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); - - switch (vendor_oui) { - case IXGBE_SFF_VENDOR_OUI_TYCO: + /* ID Module + * ========= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CORE0 - 82599-specific + * 4 SFP_DA_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific + * 7 SFP_act_lmt_DA_CORE0 - 82599-specific + * 8 SFP_act_lmt_DA_CORE1 - 82599-specific + * 9 SFP_1g_cu_CORE0 - 82599-specific + * 10 SFP_1g_cu_CORE1 - 82599-specific + * 11 SFP_1g_sx_CORE0 - 82599-specific + * 12 SFP_1g_sx_CORE1 - 82599-specific + */ + if (hw->mac.type == ixgbe_mac_82598EB) { if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) - hw->phy.type = - ixgbe_phy_sfp_passive_tyco; - break; - case IXGBE_SFF_VENDOR_OUI_FTL: - if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) - hw->phy.type = 
ixgbe_phy_sfp_ftl_active; - else - hw->phy.type = ixgbe_phy_sfp_ftl; - break; - case IXGBE_SFF_VENDOR_OUI_AVAGO: - hw->phy.type = ixgbe_phy_sfp_avago; - break; - case IXGBE_SFF_VENDOR_OUI_INTEL: - hw->phy.type = ixgbe_phy_sfp_intel; - break; - default: - if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) - hw->phy.type = - ixgbe_phy_sfp_passive_unknown; - else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) - hw->phy.type = - ixgbe_phy_sfp_active_unknown; + hw->phy.sfp_type = ixgbe_sfp_type_da_cu; + else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + hw->phy.sfp_type = ixgbe_sfp_type_sr; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + hw->phy.sfp_type = ixgbe_sfp_type_lr; else - hw->phy.type = ixgbe_phy_sfp_unknown; - break; + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + } else { + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_cu_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_cu_core1; + } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { + hw->phy.ops.read_i2c_eeprom( + hw, IXGBE_SFF_CABLE_SPEC_COMP, + &cable_spec); + if (cable_spec & + IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core1; + } else { + hw->phy.sfp_type = + ixgbe_sfp_type_unknown; + } + } else if (comp_codes_10g & + (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_srlr_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_sx_core0; + else + hw->phy.sfp_type = + 
ixgbe_sfp_type_1g_sx_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_lx_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_lx_core1; + } else { + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + } } - } - /* Allow any DA cable vendor */ - if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | - IXGBE_SFF_DA_ACTIVE_CABLE)) - return 0; + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = true; + + /* Determine if the SFP+ PHY is dual speed or not. */ + hw->phy.multispeed_fiber = false; + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + + /* Determine PHY vendor */ + if (hw->phy.type != ixgbe_phy_nl) { + hw->phy.id = identifier; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + switch (vendor_oui) { + case IXGBE_SFF_VENDOR_OUI_TYCO: + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_passive_tyco; + break; + case IXGBE_SFF_VENDOR_OUI_FTL: + if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = ixgbe_phy_sfp_ftl_active; + else + hw->phy.type = ixgbe_phy_sfp_ftl; + break; + case IXGBE_SFF_VENDOR_OUI_AVAGO: + hw->phy.type = ixgbe_phy_sfp_avago; + break; + case 
IXGBE_SFF_VENDOR_OUI_INTEL: + hw->phy.type = ixgbe_phy_sfp_intel; + break; + default: + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_passive_unknown; + else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_active_unknown; + else + hw->phy.type = ixgbe_phy_sfp_unknown; + break; + } + } - /* Verify supported 1G SFP modules */ - if (comp_codes_10g == 0 && - !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { - hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; - } + /* Allow any DA cable vendor */ + if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | + IXGBE_SFF_DA_ACTIVE_CABLE)) { + status = IXGBE_SUCCESS; + goto out; + } - /* Anything else 82598-based is supported */ - if (hw->mac.type == ixgbe_mac_82598EB) - return 0; + /* Verify supported 1G SFP modules */ + if (comp_codes_10g == 0 && + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } - hw->mac.ops.get_device_caps(hw, &enforce_sfp); - if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && - !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { - /* Make sure 
we're a supported PHY type */ - if (hw->phy.type == ixgbe_phy_sfp_intel) - return 0; - if (hw->allow_unsupported_sfp) { - e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); - return 0; + /* Anything else 82598-based is supported */ + if (hw->mac.type == ixgbe_mac_82598EB) { + status = IXGBE_SUCCESS; + goto out; + } + + ixgbe_get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_sfp_intel) { + status = IXGBE_SUCCESS; + } else { + if (hw->allow_unsupported_sfp == true) { + EWARN(hw, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. 
Intel Corporation is not responsible for any harm caused by using untested modules.\n"); + status = IXGBE_SUCCESS; + } else { + DEBUGOUT("SFP+ module not supported\n"); + hw->phy.type = + ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } + } + } else { + status = IXGBE_SUCCESS; } - hw_dbg(hw, "SFP+ module not supported\n"); - hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; } - return 0; + +out: + return status; err_read_i2c_eeprom: hw->phy.sfp_type = ixgbe_sfp_type_not_present; @@ -1471,15 +1558,76 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) } /** - * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules - * @hw: pointer to hardware structure + * ixgbe_get_supported_phy_sfp_layer_generic - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current SFP. + */ +u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw) +{ + u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u8 comp_codes_10g = 0; + u8 comp_codes_1g = 0; + + DEBUGFUNC("ixgbe_get_supported_phy_sfp_layer_generic"); + + hw->phy.ops.identify_sfp(hw); + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return physical_layer; + + switch (hw->phy.type) { + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + case ixgbe_phy_qsfp_passive_unknown: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case ixgbe_phy_sfp_ftl_active: + case ixgbe_phy_sfp_active_unknown: + case ixgbe_phy_qsfp_active_unknown: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; + break; + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); + if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + physical_layer = 
IXGBE_PHYSICAL_LAYER_10GBASE_SR; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; + else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX; + break; + case ixgbe_phy_qsfp_intel: + case ixgbe_phy_qsfp_unknown: + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g); + if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + break; + } + + return physical_layer; +} + +/** + * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules + * @hw: pointer to hardware structure * - * Searches for and identifies the QSFP module and assigns appropriate PHY type + * Searches for and identifies the QSFP module and assigns appropriate PHY type **/ -static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) +s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) { - struct ixgbe_adapter *adapter = hw->back; - s32 status; + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; u32 vendor_oui = 0; enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; u8 identifier = 0; @@ -1492,23 +1640,27 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) u8 device_tech = 0; bool active_cable = false; + DEBUGFUNC("ixgbe_identify_qsfp_module_generic"); + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) { hw->phy.sfp_type = ixgbe_sfp_type_not_present; - return IXGBE_ERR_SFP_NOT_PRESENT; + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; } - /* LAN ID is needed for sfp_type determination */ + /* LAN ID is needed for I2C access */ hw->mac.ops.set_lan_id(hw); status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, &identifier); - if (status != 0) 
+ if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) { hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; } hw->phy.id = identifier; @@ -1516,13 +1668,13 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g); - if (status != 0) + if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP, &comp_codes_1g); - if (status != 0) + if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) { @@ -1543,8 +1695,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) if (!active_cable) { /* check for active DA cables that pre-date - * SFF-8436 v3.6 - */ + * SFF-8436 v3.6 */ hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_CONNECTOR, &connector); @@ -1576,7 +1727,8 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) } else { /* unsupported module type */ hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; } } @@ -1586,61 +1738,68 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) /* Determine if the QSFP+ PHY is dual speed or not. 
*/ hw->phy.multispeed_fiber = false; if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && - (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || - ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && - (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) hw->phy.multispeed_fiber = true; /* Determine PHY vendor for optical modules */ if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | - IXGBE_SFF_10GBASELR_CAPABLE)) { + IXGBE_SFF_10GBASELR_CAPABLE)) { status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0, - &oui_bytes[0]); + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0, + &oui_bytes[0]); - if (status != 0) + if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1, - &oui_bytes[1]); + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1, + &oui_bytes[1]); - if (status != 0) + if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2, - &oui_bytes[2]); + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2, + &oui_bytes[2]); - if (status != 0) + if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; vendor_oui = - ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | - (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | - (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL) hw->phy.type = ixgbe_phy_qsfp_intel; else hw->phy.type = ixgbe_phy_qsfp_unknown; - hw->mac.ops.get_device_caps(hw, &enforce_sfp); + ixgbe_get_device_caps(hw, &enforce_sfp); if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { /* Make sure we're a supported PHY type */ - if (hw->phy.type == ixgbe_phy_qsfp_intel) - return 0; - if 
(hw->allow_unsupported_sfp) { - e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); - return 0; + if (hw->phy.type == ixgbe_phy_qsfp_intel) { + status = IXGBE_SUCCESS; + } else { + if (hw->allow_unsupported_sfp == true) { + EWARN(hw, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); + status = IXGBE_SUCCESS; + } else { + DEBUGOUT("QSFP module not supported\n"); + hw->phy.type = + ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } } - hw_dbg(hw, "QSFP module not supported\n"); - hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + status = IXGBE_SUCCESS; } - return 0; } - return 0; + +out: + return status; err_read_i2c_eeprom: hw->phy.sfp_type = ixgbe_sfp_type_not_present; @@ -1666,6 +1825,8 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 sfp_id; u16 sfp_type = hw->phy.sfp_type; + DEBUGFUNC("ixgbe_get_sfp_init_sequence_offsets"); + if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) return IXGBE_ERR_SFP_NOT_SUPPORTED; @@ -1693,8 +1854,9 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, /* Read offset to PHY init contents */ if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) { - hw_err(hw, "eeprom read at %d failed\n", - IXGBE_PHY_INIT_OFFSET_NL); + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + IXGBE_PHY_INIT_OFFSET_NL); return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; } @@ -1717,7 +1879,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw 
*hw, if (hw->eeprom.ops.read(hw, *list_offset, data_offset)) goto err_phy; if ((!*data_offset) || (*data_offset == 0xFFFF)) { - hw_dbg(hw, "SFP+ module not supported\n"); + DEBUGOUT("SFP+ module not supported\n"); return IXGBE_ERR_SFP_NOT_SUPPORTED; } else { break; @@ -1730,14 +1892,15 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, } if (sfp_id == IXGBE_PHY_INIT_END_NL) { - hw_dbg(hw, "No matching SFP+ module found\n"); + DEBUGOUT("No matching SFP+ module found\n"); return IXGBE_ERR_SFP_NOT_SUPPORTED; } - return 0; + return IXGBE_SUCCESS; err_phy: - hw_err(hw, "eeprom read at offset %d failed\n", *list_offset); + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", *list_offset); return IXGBE_ERR_PHY; } @@ -1752,6 +1915,8 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) { + DEBUGFUNC("ixgbe_read_i2c_eeprom_generic"); + return hw->phy.ops.read_i2c_byte(hw, byte_offset, IXGBE_I2C_EEPROM_DEV_ADDR, eeprom_data); @@ -1765,8 +1930,8 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, * * Performs byte read operation to SFP module's SFF-8472 data over I2C **/ -s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 *sff8472_data) +STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) { return hw->phy.ops.read_i2c_byte(hw, byte_offset, IXGBE_I2C_EEPROM_DEV_ADDR2, @@ -1784,6 +1949,8 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data) { + DEBUGFUNC("ixgbe_write_i2c_eeprom_generic"); + return hw->phy.ops.write_i2c_byte(hw, byte_offset, IXGBE_I2C_EEPROM_DEV_ADDR, eeprom_data); @@ -1795,7 +1962,7 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, * @offset: eeprom offset to be read * @addr: I2C address to be read */ 
-static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) +STATIC bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) { if (addr == IXGBE_I2C_EEPROM_DEV_ADDR && offset == IXGBE_SFF_IDENTIFIER && @@ -1813,23 +1980,24 @@ static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) * * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. - */ -static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, + **/ +STATIC s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data, bool lock) { s32 status; u32 max_retry = 10; u32 retry = 0; u32 swfw_mask = hw->phy.phy_semaphore_mask; - bool nack = true; + bool nack = 1; + *data = 0; + + DEBUGFUNC("ixgbe_read_i2c_byte_generic"); if (hw->mac.type >= ixgbe_mac_X550) max_retry = 3; if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) max_retry = IXGBE_SFP_DETECT_RETRIES; - *data = 0; - do { if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; @@ -1838,56 +2006,56 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, /* Device Address and write indication */ status = ixgbe_clock_out_i2c_byte(hw, dev_addr); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_clock_out_i2c_byte(hw, byte_offset); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; ixgbe_i2c_start(hw); /* Device Address and read indication */ status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1)); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_clock_in_i2c_byte(hw, data); - if (status != 0) + if (status != IXGBE_SUCCESS) goto 
fail; status = ixgbe_clock_out_i2c_bit(hw, nack); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; ixgbe_i2c_stop(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); - return 0; + return IXGBE_SUCCESS; fail: ixgbe_i2c_bus_clear(hw); if (lock) { hw->mac.ops.release_swfw_sync(hw, swfw_mask); - msleep(100); + msec_delay(100); } retry++; if (retry < max_retry) - hw_dbg(hw, "I2C byte read error - Retrying.\n"); + DEBUGOUT("I2C byte read error - Retrying.\n"); else - hw_dbg(hw, "I2C byte read error.\n"); + DEBUGOUT("I2C byte read error.\n"); } while (retry < max_retry); @@ -1902,7 +2070,7 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, * * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. - */ + **/ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { @@ -1918,7 +2086,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, * * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. - */ + **/ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { @@ -1935,8 +2103,8 @@ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, * * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. 
- */ -static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, + **/ +STATIC s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data, bool lock) { s32 status; @@ -1944,48 +2112,51 @@ static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u32 retry = 0; u32 swfw_mask = hw->phy.phy_semaphore_mask; - if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + DEBUGFUNC("ixgbe_write_i2c_byte_generic"); + + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != + IXGBE_SUCCESS) return IXGBE_ERR_SWFW_SYNC; do { ixgbe_i2c_start(hw); status = ixgbe_clock_out_i2c_byte(hw, dev_addr); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_clock_out_i2c_byte(hw, byte_offset); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_clock_out_i2c_byte(hw, data); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); - if (status != 0) + if (status != IXGBE_SUCCESS) goto fail; ixgbe_i2c_stop(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); - return 0; + return IXGBE_SUCCESS; fail: ixgbe_i2c_bus_clear(hw); retry++; if (retry < max_retry) - hw_dbg(hw, "I2C byte write error - Retrying.\n"); + DEBUGOUT("I2C byte write error - Retrying.\n"); else - hw_dbg(hw, "I2C byte write error.\n"); + DEBUGOUT("I2C byte write error.\n"); } while (retry < max_retry); if (lock) @@ -2002,7 +2173,7 @@ static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, * * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. 
- */ + **/ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { @@ -2018,7 +2189,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, * * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. - */ + **/ s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { @@ -2033,28 +2204,30 @@ s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, * Sets I2C start condition (High -> Low on SDA while SCL is High) * Set bit-bang mode on X550 hardware. **/ -static void ixgbe_i2c_start(struct ixgbe_hw *hw) +STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + + DEBUGFUNC("ixgbe_i2c_start"); - i2cctl |= IXGBE_I2C_BB_EN(hw); + i2cctl |= IXGBE_I2C_BB_EN_BY_MAC(hw); /* Start condition must begin with data and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 1); ixgbe_raise_i2c_clk(hw, &i2cctl); /* Setup time for start condition (4.7us) */ - udelay(IXGBE_I2C_T_SU_STA); + usec_delay(IXGBE_I2C_T_SU_STA); ixgbe_set_i2c_data(hw, &i2cctl, 0); /* Hold time for start condition (4us) */ - udelay(IXGBE_I2C_T_HD_STA); + usec_delay(IXGBE_I2C_T_HD_STA); ixgbe_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us */ - udelay(IXGBE_I2C_T_LOW); + usec_delay(IXGBE_I2C_T_LOW); } @@ -2066,29 +2239,31 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw) * Disables bit-bang mode and negates data output enable on X550 * hardware. 
**/ -static void ixgbe_i2c_stop(struct ixgbe_hw *hw) +STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); - u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); - u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw); - u32 bb_en_bit = IXGBE_I2C_BB_EN(hw); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); + u32 bb_en_bit = IXGBE_I2C_BB_EN_BY_MAC(hw); + + DEBUGFUNC("ixgbe_i2c_stop"); /* Stop condition must begin with data low and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 0); ixgbe_raise_i2c_clk(hw, &i2cctl); /* Setup time for stop condition (4us) */ - udelay(IXGBE_I2C_T_SU_STO); + usec_delay(IXGBE_I2C_T_SU_STO); ixgbe_set_i2c_data(hw, &i2cctl, 1); /* bus free time between stop and start (4.7us)*/ - udelay(IXGBE_I2C_T_BUF); + usec_delay(IXGBE_I2C_T_BUF); if (bb_en_bit || data_oe_bit || clk_oe_bit) { i2cctl &= ~bb_en_bit; i2cctl |= data_oe_bit | clk_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); } } @@ -2100,10 +2275,12 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) * * Clocks in one byte data via I2C data/clock **/ -static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) +STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) { s32 i; - bool bit = false; + bool bit = 0; + + DEBUGFUNC("ixgbe_clock_in_i2c_byte"); *data = 0; for (i = 7; i >= 0; i--) { @@ -2111,7 +2288,7 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) *data |= bit << i; } - return 0; + return IXGBE_SUCCESS; } /** @@ -2121,26 +2298,28 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) * * Clocks out one byte data via I2C data/clock **/ -static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) +STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) { - s32 status; + s32 status = 
IXGBE_SUCCESS; s32 i; u32 i2cctl; - bool bit = false; + bool bit; + + DEBUGFUNC("ixgbe_clock_out_i2c_byte"); for (i = 7; i >= 0; i--) { bit = (data >> i) & 0x1; status = ixgbe_clock_out_i2c_bit(hw, bit); - if (status != 0) + if (status != IXGBE_SUCCESS) break; } /* Release SDA line (set high) */ - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); - i2cctl |= IXGBE_I2C_DATA_OUT(hw); - i2cctl |= IXGBE_I2C_DATA_OE_N_EN(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + i2cctl |= IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); return status; @@ -2152,46 +2331,48 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) * * Clocks in/out one bit via I2C data/clock **/ -static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) +STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) { - u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); - s32 status = 0; + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + s32 status = IXGBE_SUCCESS; u32 i = 0; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); u32 timeout = 10; - bool ack = true; + bool ack = 1; + + DEBUGFUNC("ixgbe_get_i2c_ack"); if (data_oe_bit) { - i2cctl |= IXGBE_I2C_DATA_OUT(hw); + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); i2cctl |= data_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); } ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ - udelay(IXGBE_I2C_T_HIGH); + usec_delay(IXGBE_I2C_T_HIGH); /* Poll for ACK. 
Note that ACK in I2C spec is * transition from 1 to 0 */ for (i = 0; i < timeout; i++) { - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ack = ixgbe_get_i2c_data(hw, &i2cctl); - udelay(1); - if (ack == 0) + usec_delay(1); + if (!ack) break; } - if (ack == 1) { - hw_dbg(hw, "I2C ack was not received.\n"); + if (ack) { + DEBUGOUT("I2C ack was not received.\n"); status = IXGBE_ERR_I2C; } ixgbe_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us */ - udelay(IXGBE_I2C_T_LOW); + usec_delay(IXGBE_I2C_T_LOW); return status; } @@ -2203,31 +2384,33 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) * * Clocks in one bit via I2C data/clock **/ -static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) +STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); - u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + + DEBUGFUNC("ixgbe_clock_in_i2c_bit"); if (data_oe_bit) { - i2cctl |= IXGBE_I2C_DATA_OUT(hw); + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); i2cctl |= data_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); } ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ - udelay(IXGBE_I2C_T_HIGH); + usec_delay(IXGBE_I2C_T_HIGH); - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); *data = ixgbe_get_i2c_data(hw, &i2cctl); ixgbe_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us */ - udelay(IXGBE_I2C_T_LOW); + usec_delay(IXGBE_I2C_T_LOW); - return 0; + return IXGBE_SUCCESS; } /** @@ -2237,31 +2420,35 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) * * Clocks out one bit via I2C data/clock **/ -static s32 
ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) +STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) { s32 status; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + + DEBUGFUNC("ixgbe_clock_out_i2c_bit"); status = ixgbe_set_i2c_data(hw, &i2cctl, data); - if (status == 0) { + if (status == IXGBE_SUCCESS) { ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ - udelay(IXGBE_I2C_T_HIGH); + usec_delay(IXGBE_I2C_T_HIGH); ixgbe_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us. * This also takes care of the data hold time. */ - udelay(IXGBE_I2C_T_LOW); + usec_delay(IXGBE_I2C_T_LOW); } else { - hw_dbg(hw, "I2C data was not set to %X\n", data); - return IXGBE_ERR_I2C; + status = IXGBE_ERR_I2C; + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "I2C data was not set to %X\n", data); } - return 0; + return status; } + /** * ixgbe_raise_i2c_clk - Raises the I2C SCL clock * @hw: pointer to hardware structure @@ -2270,27 +2457,30 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) * Raises the I2C clock line '0'->'1' * Negates the I2C clock output enable on X550 hardware. 
**/ -static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { - u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw); + u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); u32 i = 0; u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT; u32 i2cctl_r = 0; + DEBUGFUNC("ixgbe_raise_i2c_clk"); + if (clk_oe_bit) { *i2cctl |= clk_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); } for (i = 0; i < timeout; i++) { - *i2cctl |= IXGBE_I2C_CLK_OUT(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw); + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL rise time (1000ns) */ - udelay(IXGBE_I2C_T_RISE); + usec_delay(IXGBE_I2C_T_RISE); - i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); - if (i2cctl_r & IXGBE_I2C_CLK_IN(hw)) + i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw)) break; } } @@ -2303,17 +2493,18 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) * Lowers the I2C clock line '1'->'0' * Asserts the I2C clock output enable on X550 hardware. **/ -static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { + DEBUGFUNC("ixgbe_lower_i2c_clk"); - *i2cctl &= ~IXGBE_I2C_CLK_OUT(hw); - *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN(hw); + *i2cctl &= ~(IXGBE_I2C_CLK_OUT_BY_MAC(hw)); + *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL fall time (300ns) */ - udelay(IXGBE_I2C_T_FALL); + usec_delay(IXGBE_I2C_T_FALL); } /** @@ -2325,38 +2516,43 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) * Sets the I2C data bit * Asserts the I2C data output enable on X550 hardware. 
**/ -static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) +STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) { - u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_set_i2c_data"); if (data) - *i2cctl |= IXGBE_I2C_DATA_OUT(hw); + *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); else - *i2cctl &= ~IXGBE_I2C_DATA_OUT(hw); + *i2cctl &= ~(IXGBE_I2C_DATA_OUT_BY_MAC(hw)); *i2cctl &= ~data_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ - udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); + usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); if (!data) /* Can't verify data in this case */ - return 0; + return IXGBE_SUCCESS; if (data_oe_bit) { *i2cctl |= data_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); } /* Verify data was set correctly */ - *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); if (data != ixgbe_get_i2c_data(hw, i2cctl)) { - hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); - return IXGBE_ERR_I2C; + status = IXGBE_ERR_I2C; + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "Error - I2C data was not set to %X.\n", + data); } - return 0; + return status; } /** @@ -2367,20 +2563,26 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) * Returns the I2C data bit value * Negates the I2C data output enable on X550 hardware. 
**/ -static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) +STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) { - u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + bool data; + + DEBUGFUNC("ixgbe_get_i2c_data"); if (data_oe_bit) { *i2cctl |= data_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); - udelay(IXGBE_I2C_T_FALL); + usec_delay(IXGBE_I2C_T_FALL); } - if (*i2cctl & IXGBE_I2C_DATA_IN(hw)) - return true; - return false; + if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw)) + data = 1; + else + data = 0; + + return data; } /** @@ -2390,13 +2592,15 @@ static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) * Clears the I2C bus by sending nine clock pulses. * Used when data line is stuck low. **/ -static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) +void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) { u32 i2cctl; u32 i; + DEBUGFUNC("ixgbe_i2c_bus_clear"); + ixgbe_i2c_start(hw); - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ixgbe_set_i2c_data(hw, &i2cctl, 1); @@ -2404,12 +2608,12 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) ixgbe_raise_i2c_clk(hw, &i2cctl); /* Min high period of clock is 4us */ - udelay(IXGBE_I2C_T_HIGH); + usec_delay(IXGBE_I2C_T_HIGH); ixgbe_lower_i2c_clk(hw, &i2cctl); /* Min low period of clock is 4.7us*/ - udelay(IXGBE_I2C_T_LOW); + usec_delay(IXGBE_I2C_T_LOW); } ixgbe_i2c_start(hw); @@ -2426,38 +2630,43 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) **/ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) { + s32 status = IXGBE_SUCCESS; u16 phy_data = 0; + DEBUGFUNC("ixgbe_tn_check_overtemp"); + if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) - return 0; + goto out; /* Check that the LASI temp alarm status was triggered */ hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, - MDIO_MMD_PMAPMD, &phy_data); + 
IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data); if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) - return 0; + goto out; - return IXGBE_ERR_OVERTEMP; + status = IXGBE_ERR_OVERTEMP; + ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Device over temperature"); +out: + return status; } -/** ixgbe_set_copper_phy_power - Control power for copper phy - * @hw: pointer to hardware structure - * @on: true for on, false for off - **/ +/** + * ixgbe_set_copper_phy_power - Control power for copper phy + * @hw: pointer to hardware structure + * @on: true for on, false for off + */ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) { u32 status; u16 reg; - /* Bail if we don't have copper phy */ - if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) - return 0; - if (!on && ixgbe_mng_present(hw)) return 0; - status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, ®); + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); if (status) return status; @@ -2469,6 +2678,8 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; } - status = hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, reg); + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); return status; } diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.h index e9f94ee42c9f..445394ff9a32 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.h +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.h @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux 
driver - Copyright(c) 1999 - 2016 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -30,8 +26,9 @@ #define _IXGBE_PHY_H_ #include "ixgbe_type.h" -#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 -#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 +#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 +#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 +#define IXGBE_I2C_EEPROM_BANK_LEN 0xFF /* EEPROM byte offsets */ #define IXGBE_SFF_IDENTIFIER 0x0 @@ -58,109 +55,111 @@ #define IXGBE_SFF_QSFP_DEVICE_TECH 0x93 /* Bitmasks */ -#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 -#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 #define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 -#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 -#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 -#define IXGBE_SFF_1GBASET_CAPABLE 0x8 -#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 -#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 -#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 -#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 -#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 -#define IXGBE_SFF_ADDRESSING_MODE 0x4 -#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 -#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 +#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 +#define IXGBE_SFF_1GBASET_CAPABLE 0x8 +#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 +#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define 
IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 +#define IXGBE_SFF_ADDRESSING_MODE 0x4 +#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 #define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 #define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 -#define IXGBE_I2C_EEPROM_READ_MASK 0x100 -#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 +#define IXGBE_I2C_EEPROM_READ_MASK 0x100 +#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 #define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 -#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 -#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 +#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 +#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 -#define IXGBE_CS4227 0xBE /* CS4227 address */ -#define IXGBE_CS4227_GLOBAL_ID_LSB 0 -#define IXGBE_CS4227_GLOBAL_ID_MSB 1 -#define IXGBE_CS4227_SCRATCH 2 -#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F -#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */ -#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */ -#define IXGBE_CS4227_RESET_PENDING 0x1357 -#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 -#define IXGBE_CS4227_RETRIES 15 -#define IXGBE_CS4227_EFUSE_STATUS 0x0181 -#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to set speed */ -#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to set EDC */ -#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to set speed */ -#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ -#define IXGBE_CS4227_EEPROM_STATUS 0x5001 -#define IXGBE_CS4227_EEPROM_LOAD_OK 0x0001 -#define IXGBE_CS4227_SPEED_1G 0x8000 -#define IXGBE_CS4227_SPEED_10G 0 -#define IXGBE_CS4227_EDC_MODE_CX1 0x0002 -#define IXGBE_CS4227_EDC_MODE_SR 0x0004 -#define IXGBE_CS4227_EDC_MODE_DIAG 0x0008 -#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */ -#define IXGBE_CS4227_RESET_DELAY 500 /* milliseconds */ -#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ -#define IXGBE_PE 0xE0 /* 
Port expander addr */ -#define IXGBE_PE_OUTPUT 1 /* Output reg offset */ -#define IXGBE_PE_CONFIG 3 /* Config reg offset */ -#define IXGBE_PE_BIT1 BIT(1) + +#define IXGBE_CS4227 0xBE /* CS4227 address */ +#define IXGBE_CS4227_GLOBAL_ID_LSB 0 +#define IXGBE_CS4227_GLOBAL_ID_MSB 1 +#define IXGBE_CS4227_SCRATCH 2 +#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5 +#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F +#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */ +#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */ +#define IXGBE_CS4227_RESET_PENDING 0x1357 +#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 +#define IXGBE_CS4227_RETRIES 15 +#define IXGBE_CS4227_EFUSE_STATUS 0x0181 +#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */ +#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */ +#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */ +#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ +#define IXGBE_CS4227_EEPROM_STATUS 0x5001 +#define IXGBE_CS4227_EEPROM_LOAD_OK 0x0001 +#define IXGBE_CS4227_SPEED_1G 0x8000 +#define IXGBE_CS4227_SPEED_10G 0 +#define IXGBE_CS4227_EDC_MODE_CX1 0x0002 +#define IXGBE_CS4227_EDC_MODE_SR 0x0004 +#define IXGBE_CS4227_EDC_MODE_DIAG 0x0008 +#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */ +#define IXGBE_CS4227_RESET_DELAY 450 /* milliseconds */ +#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ +#define IXGBE_PE 0xE0 /* Port expander address */ +#define IXGBE_PE_OUTPUT 1 /* Output register offset */ +#define IXGBE_PE_CONFIG 3 /* Config register offset */ +#define IXGBE_PE_BIT1 (1 << 1) /* Flow control defines */ -#define IXGBE_TAF_SYM_PAUSE 0x400 -#define IXGBE_TAF_ASM_PAUSE 0x800 +#define IXGBE_TAF_SYM_PAUSE 0x400 +#define IXGBE_TAF_ASM_PAUSE 0x800 /* Bit-shift macros */ -#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 -#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 -#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 +#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define 
IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 /* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ -#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 -#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 -#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 -#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 +#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 +#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 +#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 /* I2C SDA and SCL timing parameters for standard mode */ -#define IXGBE_I2C_T_HD_STA 4 -#define IXGBE_I2C_T_LOW 5 -#define IXGBE_I2C_T_HIGH 4 -#define IXGBE_I2C_T_SU_STA 5 -#define IXGBE_I2C_T_HD_DATA 5 -#define IXGBE_I2C_T_SU_DATA 1 -#define IXGBE_I2C_T_RISE 1 -#define IXGBE_I2C_T_FALL 1 -#define IXGBE_I2C_T_SU_STO 4 -#define IXGBE_I2C_T_BUF 5 - -#define IXGBE_SFP_DETECT_RETRIES 2 - -#define IXGBE_TN_LASI_STATUS_REG 0x9005 -#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 - -/* SFP+ SFF-8472 Compliance code */ -#define IXGBE_SFF_SFF_8472_UNSUP 0x00 - +#define IXGBE_I2C_T_HD_STA 4 +#define IXGBE_I2C_T_LOW 5 +#define IXGBE_I2C_T_HIGH 4 +#define IXGBE_I2C_T_SU_STA 5 +#define IXGBE_I2C_T_HD_DATA 5 +#define IXGBE_I2C_T_SU_DATA 1 +#define IXGBE_I2C_T_RISE 1 +#define IXGBE_I2C_T_FALL 1 +#define IXGBE_I2C_T_SU_STO 4 +#define IXGBE_I2C_T_BUF 5 + +#ifndef IXGBE_SFP_DETECT_RETRIES +#define IXGBE_SFP_DETECT_RETRIES 10 + +#endif /* IXGBE_SFP_DETECT_RETRIES */ +#define IXGBE_TN_LASI_STATUS_REG 0x9005 +#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 + +/* SFP+ SFF-8472 Compliance */ +#define IXGBE_SFF_SFF_8472_UNSUP 0x00 + +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); +bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); +enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); +s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); +s32 
ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data); +s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data); s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data); s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data); -s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data); -s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data); -#if 1 //by hilbert -s32 ixgbe_read_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data); -s32 ixgbe_write_phy_reg_mdio(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data); -#endif s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, @@ -168,18 +167,24 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg); -bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw); +s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw); /* PHY specific */ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up); s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version); +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version); s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); +u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw); +s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw 
*hw, u16 *list_offset, u16 *data_offset); @@ -194,10 +199,9 @@ s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data); s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); -s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 *sff8472_data); s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); +void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val, bool lock); s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_procfs.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_procfs.c new file mode 100644 index 000000000000..54f0940a0ac8 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_procfs.c @@ -0,0 +1,938 @@ +/******************************************************************************* + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "ixgbe_common.h" +#include "ixgbe_type.h" + +#ifdef IXGBE_PROCFS +#ifndef IXGBE_SYSFS + +#include +#include +#include +#include +#include + +static struct proc_dir_entry *ixgbe_top_dir = NULL; + +static struct net_device_stats *procfs_get_stats(struct net_device *netdev) +{ +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct ixgbe_adapter *adapter; +#endif + if (netdev == NULL) + return NULL; + +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + /* only return the current stats */ + return &netdev->stats; +#else + adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +} + +bool ixgbe_thermal_present(struct ixgbe_adapter *adapter) +{ + s32 status; + if (adapter == NULL) + return false; + status = ixgbe_init_thermal_sensor_thresh_generic(&(adapter->hw)); + if (status != IXGBE_SUCCESS) + return false; + + return true; +} + +static int ixgbe_fwbanner(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%s\n", adapter->eeprom_id); +} + +static int ixgbe_porttype(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + return snprintf(page, count, "%d\n", + test_bit(__IXGBE_DOWN, &adapter->state)); +} + +static int ixgbe_portspeed(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = 
(struct ixgbe_adapter *)data; + int speed = 0; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_100_FULL: + speed = 1; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + speed = 10; + break; + case IXGBE_LINK_SPEED_10GB_FULL: + speed = 100; + break; + } + return snprintf(page, count, "%d\n", speed); +} + +static int ixgbe_wqlflag(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->wol); +} + +static int ixgbe_xflowctl(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct ixgbe_hw *hw; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "%d\n", hw->fc.current_mode); +} + +static int ixgbe_rxdrops(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_dropped); +} + +static int ixgbe_rxerrors(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, 
"error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", net_stats->rx_errors); +} + +static int ixgbe_rxupacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_TPR)); +} + +static int ixgbe_rxmpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_MPRC)); +} + +static int ixgbe_rxbpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_BPRC)); +} + +static int ixgbe_txupacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_TPT)); +} + +static int ixgbe_txmpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int 
__always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_MPTC)); +} + +static int ixgbe_txbpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_BPTC)); +} + +static int ixgbe_txerrors(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_errors); +} + +static int ixgbe_txdrops(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_dropped); +} + +static int ixgbe_rxframes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter 
*)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_packets); +} + +static int ixgbe_rxbytes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_bytes); +} + +static int ixgbe_txframes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_packets); +} + +static int ixgbe_txbytes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_bytes); +} + +static int ixgbe_linkstat(char *page, char __always_unused 
**start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + int bitmask = 0; + u32 link_speed; + bool link_up = false; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + bitmask |= 1; + + if (hw->mac.ops.check_link) + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + else + /* always assume link is up, if no check link function */ + link_up = true; + if (link_up) + bitmask |= 2; + + if (adapter->old_lsc != adapter->lsc_int) { + bitmask |= 4; + adapter->old_lsc = adapter->lsc_int; + } + + return snprintf(page, count, "0x%X\n", bitmask); +} + +static int ixgbe_funcid(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct ixgbe_hw *hw; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "0x%X\n", hw->bus.func); +} + +static int ixgbe_funcvers(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "%s\n", ixgbe_driver_version); +} + +static int ixgbe_macburn(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", + (unsigned int)hw->mac.perm_addr[0], + (unsigned int)hw->mac.perm_addr[1], + (unsigned int)hw->mac.perm_addr[2], + (unsigned int)hw->mac.perm_addr[3], + (unsigned int)hw->mac.perm_addr[4], + 
(unsigned int)hw->mac.perm_addr[5]); +} + +static int ixgbe_macadmn(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", + (unsigned int)hw->mac.addr[0], + (unsigned int)hw->mac.addr[1], + (unsigned int)hw->mac.addr[2], + (unsigned int)hw->mac.addr[3], + (unsigned int)hw->mac.addr[4], + (unsigned int)hw->mac.addr[5]); +} + +static int ixgbe_maclla1(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct ixgbe_hw *hw; + int rc; + u16 eeprom_buff[6]; + u16 first_word = 0x37; + const u16 word_count = ARRAY_SIZE(eeprom_buff); + + if (!adapter) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + + rc = hw->eeprom.ops.read_buffer(hw, first_word, 1, &first_word); + if (rc != 0) + return snprintf(page, count, "error: reading pointer to the EEPROM\n"); + + if (first_word != 0x0000 && first_word != 0xFFFF) { + rc = hw->eeprom.ops.read_buffer(hw, first_word, word_count, + eeprom_buff); + if (rc != 0) + return snprintf(page, count, "error: reading buffer\n"); + } else { + memset(eeprom_buff, 0, sizeof(eeprom_buff)); + } + + switch (hw->bus.func) { + case 0: + return snprintf(page, count, "0x%04X%04X%04X\n", + eeprom_buff[0], + eeprom_buff[1], + eeprom_buff[2]); + case 1: + return snprintf(page, count, "0x%04X%04X%04X\n", + eeprom_buff[3], + eeprom_buff[4], + eeprom_buff[5]); + } + return snprintf(page, count, "unexpected port %d\n", hw->bus.func); +} + +static int ixgbe_mtusize(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void 
*data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%d\n", netdev->mtu); +} + +static int ixgbe_featflag(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int bitmask = 0; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + if (adapter->netdev->features & NETIF_F_RXCSUM) + bitmask |= 1; + return snprintf(page, count, "%d\n", bitmask); +} + +static int ixgbe_lsominct(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "%d\n", 1); +} + +static int ixgbe_prommode(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%d\n", + netdev->flags & IFF_PROMISC); +} + +static int ixgbe_txdscqsz(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", 
adapter->tx_ring[0]->count); +} + +static int ixgbe_rxdscqsz(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->rx_ring[0]->count); +} + +static int ixgbe_rxqavg(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int index; + int diff = 0; + u16 ntc; + u16 ntu; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + for (index = 0; index < adapter->num_rx_queues; index++) { + ntc = adapter->rx_ring[index]->next_to_clean; + ntu = adapter->rx_ring[index]->next_to_use; + + if (ntc >= ntu) + diff += (ntc - ntu); + else + diff += (adapter->rx_ring[index]->count - ntu + ntc); + } + if (adapter->num_rx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_rx_queues); + return snprintf(page, count, "%d\n", diff/adapter->num_rx_queues); +} + +static int ixgbe_txqavg(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int index; + int diff = 0; + u16 ntc; + u16 ntu; + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + for (index = 0; index < adapter->num_tx_queues; index++) { + ntc = adapter->tx_ring[index]->next_to_clean; + ntu = adapter->tx_ring[index]->next_to_use; + + if (ntc >= ntu) + diff += (ntc - ntu); + else + diff += (adapter->tx_ring[index]->count - ntu + ntc); + } + if (adapter->num_tx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_tx_queues); + return snprintf(page, 
count, "%d\n", + diff/adapter->num_tx_queues); +} + +static int ixgbe_iovotype(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "2\n"); +} + +static int ixgbe_funcnbr(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->num_vfs); +} + +static int ixgbe_pciebnbr(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->pdev->bus->number); +} + +static int ixgbe_therm_location(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_therm_proc_data *therm_data = + (struct ixgbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", therm_data->sensor_data->location); +} + + +static int ixgbe_therm_maxopthresh(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ixgbe_therm_proc_data *therm_data = + (struct ixgbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", + therm_data->sensor_data->max_op_thresh); +} + + +static int ixgbe_therm_cautionthresh(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct 
ixgbe_therm_proc_data *therm_data = + (struct ixgbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", + therm_data->sensor_data->caution_thresh); +} + +static int ixgbe_therm_temp(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + s32 status; + struct ixgbe_therm_proc_data *therm_data = + (struct ixgbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + status = ixgbe_get_thermal_sensor_data_generic(therm_data->hw); + if (status != IXGBE_SUCCESS) + snprintf(page, count, "error: status %d returned\n", status); + + return snprintf(page, count, "%d\n", therm_data->sensor_data->temp); +} + + +struct ixgbe_proc_type { + char name[32]; + int (*read)(char*, char**, off_t, int, int*, void*); +}; + +struct ixgbe_proc_type ixgbe_proc_entries[] = { + {"fwbanner", &ixgbe_fwbanner}, + {"porttype", &ixgbe_porttype}, + {"portspeed", &ixgbe_portspeed}, + {"wqlflag", &ixgbe_wqlflag}, + {"xflowctl", &ixgbe_xflowctl}, + {"rxdrops", &ixgbe_rxdrops}, + {"rxerrors", &ixgbe_rxerrors}, + {"rxupacks", &ixgbe_rxupacks}, + {"rxmpacks", &ixgbe_rxmpacks}, + {"rxbpacks", &ixgbe_rxbpacks}, + {"txdrops", &ixgbe_txdrops}, + {"txerrors", &ixgbe_txerrors}, + {"txupacks", &ixgbe_txupacks}, + {"txmpacks", &ixgbe_txmpacks}, + {"txbpacks", &ixgbe_txbpacks}, + {"rxframes", &ixgbe_rxframes}, + {"rxbytes", &ixgbe_rxbytes}, + {"txframes", &ixgbe_txframes}, + {"txbytes", &ixgbe_txbytes}, + {"linkstat", &ixgbe_linkstat}, + {"funcid", &ixgbe_funcid}, + {"funcvers", &ixgbe_funcvers}, + {"macburn", &ixgbe_macburn}, + {"macadmn", &ixgbe_macadmn}, + {"maclla1", &ixgbe_maclla1}, + {"mtusize", &ixgbe_mtusize}, + {"featflag", &ixgbe_featflag}, + {"lsominct", &ixgbe_lsominct}, + {"prommode", &ixgbe_prommode}, + {"txdscqsz", &ixgbe_txdscqsz}, + {"rxdscqsz", &ixgbe_rxdscqsz}, + 
{"txqavg", &ixgbe_txqavg}, + {"rxqavg", &ixgbe_rxqavg}, + {"iovotype", &ixgbe_iovotype}, + {"funcnbr", &ixgbe_funcnbr}, + {"pciebnbr", &ixgbe_pciebnbr}, + {"", NULL} +}; + +struct ixgbe_proc_type ixgbe_internal_entries[] = { + {"location", &ixgbe_therm_location}, + {"temp", &ixgbe_therm_temp}, + {"cautionthresh", &ixgbe_therm_cautionthresh}, + {"maxopthresh", &ixgbe_therm_maxopthresh}, + {"", NULL} +}; + +void ixgbe_del_proc_entries(struct ixgbe_adapter *adapter) +{ + int index; + int i; + char buf[16]; /* much larger than the sensor number will ever be */ + + if (ixgbe_top_dir == NULL) + return; + + for (i = 0; i < IXGBE_MAX_SENSORS; i++) { + if (adapter->therm_dir[i] == NULL) + continue; + + for (index = 0; ; index++) { + if (ixgbe_internal_entries[index].read == NULL) + break; + + remove_proc_entry(ixgbe_internal_entries[index].name, + adapter->therm_dir[i]); + } + snprintf(buf, sizeof(buf), "sensor_%d", i); + remove_proc_entry(buf, adapter->info_dir); + } + + if (adapter->info_dir != NULL) { + for (index = 0; ; index++) { + if (ixgbe_proc_entries[index].read == NULL) + break; + remove_proc_entry(ixgbe_proc_entries[index].name, + adapter->info_dir); + } + remove_proc_entry("info", adapter->eth_dir); + } + + if (adapter->eth_dir != NULL) + remove_proc_entry(pci_name(adapter->pdev), ixgbe_top_dir); +} + +/* called from ixgbe_main.c */ +void ixgbe_procfs_exit(struct ixgbe_adapter *adapter) +{ + ixgbe_del_proc_entries(adapter); +} + +int ixgbe_procfs_topdir_init() +{ + ixgbe_top_dir = proc_mkdir("driver/ixgbe", NULL); + if (ixgbe_top_dir == NULL) + return -ENOMEM; + + return 0; +} + +void ixgbe_procfs_topdir_exit() +{ + remove_proc_entry("driver/ixgbe", NULL); +} + +/* called from ixgbe_main.c */ +int ixgbe_procfs_init(struct ixgbe_adapter *adapter) +{ + int rc = 0; + int index; + int i; + char buf[16]; /* much larger than the sensor number will ever be */ + + adapter->eth_dir = NULL; + adapter->info_dir = NULL; + for (i = 0; i < IXGBE_MAX_SENSORS; i++) + 
adapter->therm_dir[i] = NULL; + + if (ixgbe_top_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + + adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), ixgbe_top_dir); + if (adapter->eth_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + + adapter->info_dir = proc_mkdir("info", adapter->eth_dir); + if (adapter->info_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + for (index = 0; ; index++) { + if (ixgbe_proc_entries[index].read == NULL) + break; + if (!(create_proc_read_entry(ixgbe_proc_entries[index].name, + 0444, + adapter->info_dir, + ixgbe_proc_entries[index].read, + adapter))) { + + rc = -ENOMEM; + goto fail; + } + } + if (ixgbe_thermal_present(adapter) == false) + goto exit; + + for (i = 0; i < IXGBE_MAX_SENSORS; i++) { + + if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == + 0) + continue; + + snprintf(buf, sizeof(buf), "sensor_%d", i); + adapter->therm_dir[i] = proc_mkdir(buf, adapter->info_dir); + if (adapter->therm_dir[i] == NULL) { + rc = -ENOMEM; + goto fail; + } + for (index = 0; ; index++) { + if (ixgbe_internal_entries[index].read == NULL) + break; + /* + * therm_data struct contains pointer the read func + * will be needing + */ + adapter->therm_data[i].hw = &adapter->hw; + adapter->therm_data[i].sensor_data = + &adapter->hw.mac.thermal_sensor_data.sensor[i]; + + if (!(create_proc_read_entry( + ixgbe_internal_entries[index].name, + 0444, + adapter->therm_dir[i], + ixgbe_internal_entries[index].read, + &adapter->therm_data[i]))) { + rc = -ENOMEM; + goto fail; + } + } + } + goto exit; + +fail: + ixgbe_del_proc_entries(adapter); +exit: + return rc; +} + +#endif /* !IXGBE_SYSFS */ +#endif /* IXGBE_PROCFS */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ptp.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ptp.c index a92277683a64..0fe14217d403 100644 --- 
a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ptp.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ptp.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -25,9 +21,9 @@ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ + #include "ixgbe.h" #include -#include /* * The 82599 and the X540 do not have true 64bit nanosecond scale @@ -96,7 +92,7 @@ #define IXGBE_INCPER_SHIFT_82599 24 #define IXGBE_OVERFLOW_PERIOD (HZ * 30) -#define IXGBE_PTP_TX_TIMEOUT (HZ * 15) +#define IXGBE_PTP_TX_TIMEOUT (HZ) /* half of a one second clock period, for use with PPS signal. We have to use * this instead of something pre-defined like IXGBE_PTP_PPS_HALF_SECOND, in @@ -114,7 +110,7 @@ * high bit representing whether the adjustent is positive or negative. Every * clock cycle, the X550 will add 12.5 ns + TIMINCA which can result in a range * of 12 to 13 nanoseconds adjustment. 
Unlike the 82599 and X540 devices, the - * X550's clock for purposes of SYSTIME generation is constant and not dependent + * X550's clock for purposes of SYSTIME generation is constant and not dependant * on the link speed. * * SYSTIMEH SYSTIMEL SYSTIMER @@ -165,8 +161,8 @@ #define MAX_TIMADJ 0x7FFFFFFF /** - * ixgbe_ptp_setup_sdp_x540 - * @hw: the hardware private structure + * ixgbe_ptp_setup_sdp_X540 + * @adapter: the adapter private structure * * this function enables or disables the clock out feature on SDP0 for * the X540 device. It will create a 1second periodic output that can @@ -176,7 +172,7 @@ * aligns the start of the PPS signal to that value. The shift is * necessary because it can change based on the link speed. */ -static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter) +static void ixgbe_ptp_setup_sdp_X540(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int shift = adapter->hw_cc.shift; @@ -192,24 +188,27 @@ static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter) esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - /* enable the SDP0 pin as output, and connected to the + /* + * enable the SDP0 pin as output, and connected to the * native function for Timesync (ClockOut) */ esdp |= IXGBE_ESDP_SDP0_DIR | IXGBE_ESDP_SDP0_NATIVE; - /* enable the Clock Out feature on SDP0, and allow + /* + * enable the Clock Out feature on SDP0, and allow * interrupts to occur when the pin changes */ tsauxc = IXGBE_TSAUXC_EN_CLK | - IXGBE_TSAUXC_SYNCLK | - IXGBE_TSAUXC_SDP0_INT; + IXGBE_TSAUXC_SYNCLK | + IXGBE_TSAUXC_SDP0_INT; - /* clock period (or pulse length) */ + /* set to half clock period */ clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift); clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32); - /* Account for the cyclecounter wrap-around value by + /* + * Account for the cyclecounter wrap-around value by * using the converted ns value of the current time to * check for when the next aligned second would occur. 
*/ @@ -245,10 +244,10 @@ static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter) * result of SYSTIME is 32bits of "billions of cycles" and 32 bits of * "cycles", rather than seconds and nanoseconds. */ -static cycle_t ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc) +static u64 ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc) { struct ixgbe_adapter *adapter = - container_of(hw_cc, struct ixgbe_adapter, hw_cc); + container_of(hw_cc, struct ixgbe_adapter, hw_cc); struct ixgbe_hw *hw = &adapter->hw; struct timespec64 ts; @@ -276,16 +275,16 @@ static cycle_t ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc) /** * ixgbe_ptp_read_82599 - read raw cycle counter (to be used by time counter) - * @cc: the cyclecounter structure + * @hw_cc: the cyclecounter structure * * this function reads the cyclecounter registers and is called by the * cyclecounter structure used to construct a ns counter from the * arbitrary fixed point registers */ -static cycle_t ixgbe_ptp_read_82599(const struct cyclecounter *cc) +static u64 ixgbe_ptp_read_82599(const struct cyclecounter *hw_cc) { struct ixgbe_adapter *adapter = - container_of(cc, struct ixgbe_adapter, hw_cc); + container_of(hw_cc, struct ixgbe_adapter, hw_cc); struct ixgbe_hw *hw = &adapter->hw; u64 stamp = 0; @@ -333,7 +332,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter, */ case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: /* Upper 32 bits represent billions of cycles, lower 32 bits * represent cycles. 
However, we use timespec64_to_ns for the * correct math even though the units haven't been corrected @@ -396,7 +395,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) if (incval > 0x00FFFFFFULL) e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, - BIT(IXGBE_INCPER_SHIFT_82599) | + (1 << IXGBE_INCPER_SHIFT_82599) | ((u32)incval & 0x00FFFFFFUL)); break; default: @@ -417,7 +416,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) static int ixgbe_ptp_adjfreq_X550(struct ptp_clock_info *ptp, s32 ppb) { struct ixgbe_adapter *adapter = - container_of(ptp, struct ixgbe_adapter, ptp_caps); + container_of(ptp, struct ixgbe_adapter, ptp_caps); struct ixgbe_hw *hw = &adapter->hw; int neg_adj = 0; u64 rate = IXGBE_X550_BASE_PERIOD; @@ -444,13 +443,14 @@ static int ixgbe_ptp_adjfreq_X550(struct ptp_clock_info *ptp, s32 ppb) } /** - * ixgbe_ptp_adjtime + * ixgbe_ptp_adjtime_timecounter * @ptp: the ptp clock structure * @delta: offset to adjust the cycle counter by * * adjust the timer by resetting the timecounter structure. */ -static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +static int ixgbe_ptp_adjtime_timecounter(struct ptp_clock_info *ptp, + s64 delta) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); @@ -467,14 +467,14 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) } /** - * ixgbe_ptp_gettime + * ixgbe_ptp_gettime64_timecounter * @ptp: the ptp clock structure - * @ts: timespec structure to hold the current time value + * @ts: timespec64 structure to hold the current time value * * read the timecounter and return the correct value on ns, - * after converting it into a struct timespec. + * after converting it into a struct timespec64. 
*/ -static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +static int ixgbe_ptp_gettime64_timecounter(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); @@ -491,20 +491,22 @@ static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) } /** - * ixgbe_ptp_settime + * ixgbe_ptp_settime64_timecounter * @ptp: the ptp clock structure - * @ts: the timespec containing the new time for the cycle counter + * @ts: the timespec64 containing the new time for the cycle counter * * reset the timecounter to use a new base value instead of the kernel * wall timer value. */ -static int ixgbe_ptp_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) +static int ixgbe_ptp_settime64_timecounter(struct ptp_clock_info *ptp, + const struct timespec64 *ts) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); + u64 ns; unsigned long flags; - u64 ns = timespec64_to_ns(ts); + + ns = timespec64_to_ns(ts); /* reset the timecounter */ spin_lock_irqsave(&adapter->tmreg_lock, flags); @@ -516,6 +518,31 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp, return 0; } +#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64 +static int ixgbe_ptp_gettime_timecounter(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct timespec64 ts64; + int err; + + err = ixgbe_ptp_gettime64_timecounter(ptp, &ts64); + if (err) + return err; + + *ts = timespec64_to_timespec(ts64); + + return 0; +} + +static int ixgbe_ptp_settime_timecounter(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct timespec64 ts64; + + ts64 = timespec_to_timespec64(*ts); + return ixgbe_ptp_settime64_timecounter(ptp, &ts64); +} +#endif + /** * ixgbe_ptp_feature_enable * @ptp: the ptp clock structure @@ -537,21 +564,23 @@ static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp, * event when the clock SDP triggers. 
Clear mask when PPS is * disabled */ - if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp) - return -ENOTSUPP; + if (rq->type == PTP_CLK_REQ_PPS && adapter->ptp_setup_sdp) { + if (on) + adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; + else + adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; - if (on) - adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; - else - adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; + adapter->ptp_setup_sdp(adapter); + return 0; + } - adapter->ptp_setup_sdp(adapter); - return 0; + return -ENOTSUPP; } /** * ixgbe_ptp_check_pps_event * @adapter: the private adapter structure + * @eicr: the interrupt cause register value * * This function is called by the interrupt routine when checking for * interrupts. It will check and handle a pps event. @@ -585,16 +614,18 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter) * * this watchdog task periodically reads the timecounter * in order to prevent missing when the system time registers wrap - * around. This needs to be run approximately twice a minute. + * around. This needs to be run approximately twice a minute for the fastest + * overflowing hardware. We run it for all hardware since it shouldn't have a + * large impact. 
*/ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) { bool timeout = time_is_before_jiffies(adapter->last_overflow_check + - IXGBE_OVERFLOW_PERIOD); + IXGBE_OVERFLOW_PERIOD); struct timespec64 ts; if (timeout) { - ixgbe_ptp_gettime(&adapter->ptp_caps, &ts); + ixgbe_ptp_gettime64_timecounter(&adapter->ptp_caps, &ts); adapter->last_overflow_check = jiffies; } } @@ -611,8 +642,8 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); struct ixgbe_ring *rx_ring; + u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); unsigned long rx_event; int n; @@ -633,12 +664,12 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) } /* only need to read the high RXSTMP register to clear the lock */ - if (time_is_before_jiffies(rx_event + 5 * HZ)) { + if (time_is_before_jiffies(rx_event + 5*HZ)) { IXGBE_READ_REG(hw, IXGBE_RXSTMPH); adapter->last_rx_ptp_check = jiffies; adapter->rx_hwtstamp_cleared++; - e_warn(drv, "clearing RX Timestamp hang\n"); + e_warn(drv, "clearing RX Timestamp hang"); } } @@ -662,6 +693,33 @@ static void ixgbe_ptp_clear_tx_timestamp(struct ixgbe_adapter *adapter) clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); } +/** + * ixgbe_ptp_tx_hang - detect error case where Tx timestamp never finishes + * @adapter: private network adapter structure + */ +void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter) +{ + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + IXGBE_PTP_TX_TIMEOUT); + + if (!adapter->ptp_tx_skb) + return; + + if (!test_bit(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state)) + return; + + /* If we haven't received a timestamp within the timeout, it is + * reasonable to assume that it will never occur, so we can unlock the + * timestamp bit when this occurs. 
+ */ + if (timeout) { + cancel_work_sync(&adapter->ptp_tx_work); + ixgbe_ptp_clear_tx_timestamp(adapter); + adapter->tx_hwtstamp_timeouts++; + e_warn(drv, "clearing Tx timestamp hang\n"); + } +} + /** * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp * @adapter: the private adapter struct @@ -672,17 +730,26 @@ static void ixgbe_ptp_clear_tx_timestamp(struct ixgbe_adapter *adapter) */ static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter) { + struct sk_buff *skb = adapter->ptp_tx_skb; struct ixgbe_hw *hw = &adapter->hw; struct skb_shared_hwtstamps shhwtstamps; u64 regval = 0; regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32; - ixgbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval); - skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); - ixgbe_ptp_clear_tx_timestamp(adapter); + /* Handle cleanup of the ptp_tx_skb ourselves, and unlock the state + * bit prior to notifying the stack via skb_tstamp_tx(). This prevents + * well behaved applications from attempting to timestamp again prior + * to the lock bit being clear. + */ + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); + + /* Notify the stack and then free the skb after we've unlocked */ + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); } /** @@ -690,7 +757,7 @@ static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter) * @work: pointer to the work struct * * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware - * timestamp has been taken for the current skb. It is necessary, because the + * timestamp has been taken for the current skb. It is necesary, because the * descriptor's "done" bit does not correlate with the timestamp event. 
*/ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) @@ -715,12 +782,13 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) return; } + /* check timeout last in case timestamp event just occurred */ if (timeout) { ixgbe_ptp_clear_tx_timestamp(adapter); adapter->tx_hwtstamp_timeouts++; - e_warn(drv, "clearing Tx Timestamp hang\n"); + e_warn(drv, "clearing Tx Timestamp hang"); } else { - /* reschedule to keep checking if it's not available yet */ + /* reschedule to keep checking until we timeout */ schedule_work(&adapter->ptp_tx_work); } } @@ -740,8 +808,7 @@ void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *q_vector, __le64 regval; /* copy the bits out of the skb, and then trim the skb length */ - skb_copy_bits(skb, skb->len - IXGBE_TS_HDR_LEN, ®val, - IXGBE_TS_HDR_LEN); + skb_copy_bits(skb, skb->len - IXGBE_TS_HDR_LEN, ®val, IXGBE_TS_HDR_LEN); __pskb_trim(skb, skb->len - IXGBE_TS_HDR_LEN); /* The timestamp is recorded in little endian format, and is stored at @@ -778,10 +845,10 @@ void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector, adapter = q_vector->adapter; hw = &adapter->hw; - /* Read the tsyncrxctl register afterwards in order to prevent taking an + /* + * Read the tsyncrxctl register afterwards in order to prevent taking an * I/O hit on every packet. */ - tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) return; @@ -792,12 +859,21 @@ void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector, ixgbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); } +/** + * ixgbe_ptp_get_ts_config - get current hardware timestamping configuration + * @adapter: pointer to adapter structure + * @ifreq: ioctl data + * + * This function returns the current timestamping settings. Rather than + * attempt to deconstruct registers to fill in the values, simply keep a copy + * of the old settings around, and return a copy when requested. 
+ */ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) { struct hwtstamp_config *config = &adapter->tstamp_config; - return copy_to_user(ifr->ifr_data, config, - sizeof(*config)) ? -EFAULT : 0; + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; } /** @@ -826,7 +902,7 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) * mode, if required to support the specifically requested mode. */ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, - struct hwtstamp_config *config) + struct hwtstamp_config *config) { struct ixgbe_hw *hw = &adapter->hw; u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; @@ -858,14 +934,14 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -879,10 +955,13 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; is_l2 = true; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: +#ifdef HAVE_HWTSTAMP_FILTER_NTP_ALL + case 
HWTSTAMP_FILTER_NTP_ALL: +#endif /* HAVE_HWTSTAMP_FILTER_NTP_ALL */ case HWTSTAMP_FILTER_ALL: /* The X550 controller is capable of timestamping all packets, * which allows it to accept any filter. @@ -895,11 +974,10 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, } /* fall through */ default: - /* - * register RXMTRL must be set in order to do V1 packets, + /* register RXMTRL must be set in order to do V1 packets, * therefore it is not possible to time stamp both V1 Sync and - * Delay_Req messages and hardware does not support - * timestamping all packets => return error + * Delay_Req messages unless hardware supports timestamping all + * packets => return error */ adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); @@ -922,11 +1000,10 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, switch (hw->mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: /* enable timestamping all packets only if at least some * packets were requested. 
Otherwise, play nice and disable - * timestamping - */ + * timestamping */ if (config->rx_filter == HWTSTAMP_FILTER_NONE) break; @@ -968,7 +1045,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, IXGBE_WRITE_FLUSH(hw); - /* clear TX/RX time stamp registers, just to be sure */ + /* clear TX/RX timestamp state, just to be sure */ ixgbe_ptp_clear_tx_timestamp(adapter); IXGBE_READ_REG(hw, IXGBE_RXSTMPH); @@ -1035,6 +1112,8 @@ static void ixgbe_ptp_link_speed_adjust(struct ixgbe_adapter *adapter, *incval = IXGBE_INCVAL_10GB; break; } + + return; } /** @@ -1050,11 +1129,10 @@ static void ixgbe_ptp_link_speed_adjust(struct ixgbe_adapter *adapter, void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - struct cyclecounter cc; unsigned long flags; + struct cyclecounter cc; u32 incval = 0; - u32 tsauxc = 0; - u32 fuse0 = 0; + u32 tsauxc = 0, fuse0 = 0; /* For some of the boards below this mask is technically incorrect. * The timestamp mask overflows at approximately 61bits. However the @@ -1085,7 +1163,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) cc.shift = 2; } /* fallthrough */ - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: case ixgbe_mac_X550: cc.read = ixgbe_ptp_read_X550; @@ -1114,7 +1192,8 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) incval >>= IXGBE_INCVAL_SHIFT_82599; cc.shift -= IXGBE_INCVAL_SHIFT_82599; IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, - BIT(IXGBE_INCPER_SHIFT_82599) | incval); + (1 << IXGBE_INCPER_SHIFT_82599) | + incval); break; default: /* other devices aren't supported */ @@ -1135,13 +1214,13 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) * ixgbe_ptp_reset * @adapter: the ixgbe private board structure * - * When the MAC resets, all the hardware bits for timesync are reset. This - * function is used to re-enable the device for PTP based on current settings. 
- * We do lose the current clock time, so just reset the cyclecounter to the - * system real clock time. + * When the MAC resets, all of the hardware configuration for timesync is + * reset. This function should be called to re-enable the device for PTP, + * using the last known settings. However, we do lose the current clock time, + * so we fallback to resetting it based on the kernel's realtime clock. * - * This function will maintain hwtstamp_config settings, and resets the SDP - * output if it was enabled. + * This function will maintain the hwtstamp_config settings, and it retriggers + * the SDP output if it's enabled. */ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) { @@ -1164,7 +1243,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) adapter->last_overflow_check = jiffies; - /* Now that the shift has been calculated and the systime + /* + * Now that the shift has been calculated and the systime * registers reset, (re-)enable the Clock out feature */ if (adapter->ptp_setup_sdp) @@ -1175,12 +1255,13 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) * ixgbe_ptp_create_clock * @adapter: the ixgbe private adapter structure * - * This function performs setup of the user entry point function table and - * initializes the PTP clock device, which is used to access the clock-like + * This functino performs setup of the user entry point function table and + * initalizes the PTP clock device used by userspace to access the clock-like * features of the PTP core. It will be called by ixgbe_ptp_init, and may - * reuse a previously initialized clock (such as during a suspend/resume + * re-use a previously initialized clock (such as during a suspend/resume * cycle). 
*/ + static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -1202,11 +1283,16 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.n_per_out = 0; adapter->ptp_caps.pps = 1; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; - adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; - adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; - adapter->ptp_caps.settime64 = ixgbe_ptp_settime; + adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime_timecounter; +#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime64_timecounter; + adapter->ptp_caps.settime64 = ixgbe_ptp_settime64_timecounter; +#else + adapter->ptp_caps.gettime = ixgbe_ptp_gettime_timecounter; + adapter->ptp_caps.settime = ixgbe_ptp_settime_timecounter; +#endif adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; - adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_x540; + adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_X540; break; case ixgbe_mac_82599EB: snprintf(adapter->ptp_caps.name, @@ -1219,14 +1305,19 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.n_per_out = 0; adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; - adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; - adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; - adapter->ptp_caps.settime64 = ixgbe_ptp_settime; + adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime_timecounter; +#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime64_timecounter; + adapter->ptp_caps.settime64 = ixgbe_ptp_settime64_timecounter; +#else + adapter->ptp_caps.gettime = ixgbe_ptp_gettime_timecounter; + adapter->ptp_caps.settime = ixgbe_ptp_settime_timecounter; +#endif adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; break; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: + case ixgbe_mac_X550EM_a: snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); 
adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 30000000; @@ -1235,9 +1326,14 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.n_per_out = 0; adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550; - adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; - adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; - adapter->ptp_caps.settime64 = ixgbe_ptp_settime; + adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime_timecounter; +#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime64_timecounter; + adapter->ptp_caps.settime64 = ixgbe_ptp_settime64_timecounter; +#else + adapter->ptp_caps.gettime = ixgbe_ptp_gettime_timecounter; + adapter->ptp_caps.settime = ixgbe_ptp_settime_timecounter; +#endif adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; adapter->ptp_setup_sdp = NULL; break; @@ -1248,7 +1344,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) } adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, - &adapter->pdev->dev); + pci_dev_to_dev(adapter->pdev)); if (IS_ERR(adapter->ptp_clock)) { err = PTR_ERR(adapter->ptp_clock); adapter->ptp_clock = NULL; @@ -1257,9 +1353,9 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) } else if (adapter->ptp_clock) e_dev_info("registered PHC device on %s\n", netdev->name); - /* set default timestamp mode to disabled here. We do this in - * create_clock instead of init, because we don't want to override the - * previous settings during a resume cycle. + /* Set the default timestamp mode to disabled here. We do this in + * create_clock instead of initialization, because we don't want to + * override the previous settings during a suspend/resume cycle. 
*/ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; @@ -1271,26 +1367,25 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) * ixgbe_ptp_init * @adapter: the ixgbe private adapter structure * - * This function performs the required steps for enabling PTP - * support. If PTP support has already been loaded it simply calls the + * This function performs the required steps for enabling ptp + * support. If ptp support has already been loaded it simply calls the * cyclecounter init routine and exits. */ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) { - /* initialize the spin lock first since we can't control when a user - * will call the entry functions once we have initialized the clock - * device + /* initialize the spin lock first, since the user might call the clock + * functions any time after we've initialized the ptp clock device. */ spin_lock_init(&adapter->tmreg_lock); - /* obtain a PTP device, or re-use an existing device */ + /* obtain a ptp clock device, or re-use an existing device */ if (ixgbe_ptp_create_clock(adapter)) return; - /* we have a clock so we can initialize work now */ + /* we have a clock, so we can intialize work for timestamps now */ INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work); - /* reset the PTP related hardware bits */ + /* reset the ptp related hardware bits */ ixgbe_ptp_reset(adapter); /* enter the IXGBE_PTP_RUNNING state */ @@ -1300,15 +1395,15 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) } /** - * ixgbe_ptp_suspend - stop PTP work items - * @ adapter: pointer to adapter struct + * ixgbe_ptp_suspend - stop ptp work items + * @adapter: pointer to adapter struct * - * this function suspends PTP activity, and prevents more PTP work from being - * generated, but does not destroy the PTP clock device. + * This function suspends ptp activity, and prevents more work from being + * generated, but does not destroy the clock device. 
*/ void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter) { - /* Leave the IXGBE_PTP_RUNNING state. */ + /* leave the IXGBE_PTP_RUNNING STATE */ if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state)) return; @@ -1316,24 +1411,23 @@ void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter) if (adapter->ptp_setup_sdp) adapter->ptp_setup_sdp(adapter); - /* ensure that we cancel any pending PTP Tx work item in progress */ cancel_work_sync(&adapter->ptp_tx_work); ixgbe_ptp_clear_tx_timestamp(adapter); } /** - * ixgbe_ptp_stop - close the PTP device + * ixgbe_ptp_stop - destroy the ptp_clock device * @adapter: pointer to adapter struct * - * completely destroy the PTP device, should only be called when the device is - * being fully closed. + * Completely destroy the ptp_clock device, and disable all PTP related + * features. Intended to be run when the device is being closed. */ void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) { - /* first, suspend PTP activity */ + /* first, suspend ptp activity */ ixgbe_ptp_suspend(adapter); - /* disable the PTP clock device */ + /* now destroy the ptp clock device */ if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); adapter->ptp_clock = NULL; diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.c index 7e5d9850e4b2..4b996de9d2b4 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. 
+ Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -26,6 +22,7 @@ *******************************************************************************/ + #include #include #include @@ -36,98 +33,123 @@ #include #include #include -#include -#ifdef NETIF_F_HW_VLAN_CTAG_TX -#include -#endif #include "ixgbe.h" #include "ixgbe_type.h" #include "ixgbe_sriov.h" #ifdef CONFIG_PCI_IOV -static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) +static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, + unsigned int num_vfs) { struct ixgbe_hw *hw = &adapter->hw; - int num_vf_macvlans, i; struct vf_macvlans *mv_list; - - adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; - e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs); - - /* Enable VMDq flag so device will be set in VM mode */ - adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED; - if (!adapter->ring_feature[RING_F_VMDQ].limit) - adapter->ring_feature[RING_F_VMDQ].limit = 1; - adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs; + int num_vf_macvlans, i; num_vf_macvlans = hw->mac.num_rar_entries - - (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); + (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs); + if (!num_vf_macvlans) + return; - adapter->mv_list = mv_list = kcalloc(num_vf_macvlans, - sizeof(struct vf_macvlans), - GFP_KERNEL); + mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans), + GFP_KERNEL); if (mv_list) 
{ /* Initialize list of VF macvlans */ INIT_LIST_HEAD(&adapter->vf_mvs.l); for (i = 0; i < num_vf_macvlans; i++) { - mv_list->vf = -1; - mv_list->free = true; - list_add(&mv_list->l, &adapter->vf_mvs.l); - mv_list++; + mv_list[i].vf = -1; + mv_list[i].free = true; + list_add(&mv_list[i].l, &adapter->vf_mvs.l); } + adapter->mv_list = mv_list; } +} + +static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, + unsigned int num_vfs) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED; + if (!adapter->ring_feature[RING_F_VMDQ].limit) + adapter->ring_feature[RING_F_VMDQ].limit = 1; + + /* Allocate memory for per VF control structures */ + adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), + GFP_KERNEL); + if (!adapter->vfinfo) + return -ENOMEM; /* Initialize default switching mode VEB */ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); - adapter->bridge_mode = BRIDGE_MODE_VEB; - /* If call to enable VFs succeeded then allocate memory - * for per VF control structures. 
+ /* set adapter->num_vfs only after allocating vfinfo to avoid + * NULL pointer issues when accessing adapter->vfinfo */ - adapter->vfinfo = - kcalloc(adapter->num_vfs, - sizeof(struct vf_data_storage), GFP_KERNEL); - if (adapter->vfinfo) { - /* limit trafffic classes based on VFs enabled */ - if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && - (adapter->num_vfs < 16)) { - adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; - adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; - } else if (adapter->num_vfs < 32) { - adapter->dcb_cfg.num_tcs.pg_tcs = 4; - adapter->dcb_cfg.num_tcs.pfc_tcs = 4; - } else { - adapter->dcb_cfg.num_tcs.pg_tcs = 1; - adapter->dcb_cfg.num_tcs.pfc_tcs = 1; - } + adapter->num_vfs = num_vfs; - /* Disable RSC when in SR-IOV mode */ - adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | - IXGBE_FLAG2_RSC_ENABLED); + ixgbe_alloc_vf_macvlans(adapter, num_vfs); - for (i = 0; i < adapter->num_vfs; i++) { - /* enable spoof checking for all VFs */ - adapter->vfinfo[i].spoofchk_enabled = true; + adapter->ring_feature[RING_F_VMDQ].offset = num_vfs; - /* We support VF RSS querying only for 82599 and x540 - * devices at the moment. These devices share RSS - * indirection table and RSS hash key with PF therefore - * we want to disable the querying by default. 
- */ - adapter->vfinfo[i].rss_query_enabled = 0; + /* enable L2 switch and replication */ + adapter->flags |= IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE | + IXGBE_FLAG_SRIOV_REPLICATION_ENABLE; - /* Untrust all VFs */ - adapter->vfinfo[i].trusted = false; + /* limit traffic classes based on VFs enabled */ + if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && + (adapter->num_vfs < 16)) { + adapter->dcb_cfg.num_tcs.pg_tcs = + IXGBE_DCB_MAX_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = + IXGBE_DCB_MAX_TRAFFIC_CLASS; + } else if (adapter->num_vfs < 32) { + adapter->dcb_cfg.num_tcs.pg_tcs = 4; + adapter->dcb_cfg.num_tcs.pfc_tcs = 4; + } else { + adapter->dcb_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + } + adapter->dcb_cfg.vt_mode = true; - /* set the default xcast mode */ - adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE; - } +#ifdef IXGBE_DISABLE_VF_MQ + /* We do not support RSS w/ SR-IOV */ + adapter->ring_feature[RING_F_RSS].limit = 1; +#endif - return 0; + /* Disable RSC when in SR-IOV mode */ + adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | + IXGBE_FLAG2_RSC_ENABLED); + + for (i = 0; i < adapter->num_vfs; i++) { + /* enable spoof checking for all VFs */ + adapter->vfinfo[i].spoofchk_enabled = true; + +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN + /* We support VF RSS querying only for 82599 and x540 + * devices at the moment. These devices share RSS + * indirection table and RSS hash key with PF therefore + * we want to disable the querying by default. 
+ */ + adapter->vfinfo[i].rss_query_enabled = 0; + +#endif + /* Untrust all VFs */ + adapter->vfinfo[i].trusted = false; + + /* set the default xcast mode */ + adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE; } - return -ENOMEM; + e_dev_info("SR-IOV enabled with %d VFs\n", num_vfs); + if (hw->mac.type < ixgbe_mac_X550) + e_dev_info("configure port vlans to keep your VFs secure\n"); + + return 0; } /** @@ -168,9 +190,10 @@ static void ixgbe_get_vfs(struct ixgbe_adapter *adapter) void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) { int pre_existing_vfs = 0; + unsigned int num_vfs; pre_existing_vfs = pci_num_vf(adapter->pdev); - if (!pre_existing_vfs && !adapter->num_vfs) + if (!pre_existing_vfs && !adapter->max_vfs) return; /* If there are pre-existing VFs then we have to force @@ -180,7 +203,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) * have been created via the new PCI SR-IOV sysfs interface. */ if (pre_existing_vfs) { - adapter->num_vfs = pre_existing_vfs; + num_vfs = pre_existing_vfs; dev_warn(&adapter->pdev->dev, "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n"); } else { @@ -189,20 +212,20 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) * The 82599 supports up to 64 VFs per physical function * but this implementation limits allocation to 63 so that * basic networking resources are still available to the - * physical function. If the user requests greater than + * physical function. If the user requests greater thn * 63 VFs then it is an error - reset to default of zero. 
*/ - adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT); + num_vfs = min_t(unsigned int, adapter->max_vfs, + IXGBE_MAX_VFS_DRV_LIMIT); - err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + err = pci_enable_sriov(adapter->pdev, num_vfs); if (err) { e_err(probe, "Failed to enable PCI sriov: %d\n", err); - adapter->num_vfs = 0; return; } } - if (!__ixgbe_enable_sriov(adapter)) { + if (!__ixgbe_enable_sriov(adapter, num_vfs)) { ixgbe_get_vfs(adapter); return; } @@ -210,19 +233,17 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) /* If we have gotten to this point then there is no memory available * to manage the VF devices - print message and bail. */ - e_err(probe, "Unable to allocate memory for VF Data Storage - " - "SRIOV disabled\n"); + e_err(probe, "Unable to allocate memory for VF Data Storage - SRIOV disabled\n"); ixgbe_disable_sriov(adapter); } -#endif /* #ifdef CONFIG_PCI_IOV */ +#endif /* CONFIG_PCI_IOV */ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { unsigned int num_vfs = adapter->num_vfs, vf; struct ixgbe_hw *hw = &adapter->hw; u32 gpie; u32 vmdctl; - int rss; /* set num VFs to 0 to prevent access to vfinfo */ adapter->num_vfs = 0; @@ -249,6 +270,11 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return 0; + /* Turn off malicious driver detection */ + if ((hw->mac.ops.disable_mdd) && + (!(adapter->flags & IXGBE_FLAG_MDD_ENABLED))) + hw->mac.ops.disable_mdd(hw); + #ifdef CONFIG_PCI_IOV /* * If our VFs are assigned we cannot shut down SR-IOV @@ -276,55 +302,78 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) IXGBE_WRITE_FLUSH(hw); /* Disable VMDq flag so device will be set in VM mode */ - if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; - adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; - rss = min_t(int, ixgbe_max_rss_indices(adapter), - 
num_online_cpus()); - } else { - rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus()); - } adapter->ring_feature[RING_F_VMDQ].offset = 0; - adapter->ring_feature[RING_F_RSS].limit = rss; /* take a breather then clean up driver data */ msleep(100); + + adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; return 0; } -static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +static int ixgbe_pci_sriov_enable(struct pci_dev __maybe_unused *dev, int __maybe_unused num_vfs) { #ifdef CONFIG_PCI_IOV struct ixgbe_adapter *adapter = pci_get_drvdata(dev); int err = 0; + u8 num_tc; int i; int pre_existing_vfs = pci_num_vf(dev); + if (!(adapter->flags & IXGBE_FLAG_SRIOV_CAPABLE)) { + e_dev_warn("SRIOV not supported on this device\n"); + return -EOPNOTSUPP; + } + + if (adapter->num_vfs == num_vfs) + return -EINVAL; + if (pre_existing_vfs && pre_existing_vfs != num_vfs) err = ixgbe_disable_sriov(adapter); else if (pre_existing_vfs && pre_existing_vfs == num_vfs) - return num_vfs; + goto out; if (err) - return err; - - /* While the SR-IOV capability structure reports total VFs to be 64, - * we have to limit the actual number allocated based on two factors. - * First, we reserve some transmit/receive resources for the PF. - * Second, VMDQ also uses the same pools that SR-IOV does. We need to - * account for this, so that we don't accidentally allocate more VFs - * than we have available pools. The PCI bus driver already checks for - * other values out of range. + goto err_out; + + /* While the SR-IOV capability structure reports total VFs to be + * 64 we limit the actual number that can be allocated as below + * so that some transmit/receive resources can be reserved to the + * PF. The PCI bus driver already checks for other values out of + * range. 
+ * Num_TCs MAX_VFs + * 1 63 + * <=4 31 + * >4 15 */ - if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VF_FUNCTIONS) - return -EPERM; + num_tc = netdev_get_num_tc(adapter->netdev); - adapter->num_vfs = num_vfs; + if (num_tc > 4) { + if (num_vfs > IXGBE_MAX_VFS_8TC) { + e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC); + err = -EPERM; + goto err_out; + } + } else if ((num_tc > 1) && (num_tc <= 4)) { + if (num_vfs > IXGBE_MAX_VFS_4TC) { + e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC); + err = -EPERM; + goto err_out; + } + } else { + if (num_vfs > IXGBE_MAX_VFS_1TC) { + e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC); + err = -EPERM; + goto err_out; + } + } - err = __ixgbe_enable_sriov(adapter); + err = __ixgbe_enable_sriov(adapter, num_vfs); if (err) - return err; + goto err_out; for (i = 0; i < adapter->num_vfs; i++) ixgbe_vf_configuration(dev, (i | 0x10000000)); @@ -335,14 +384,17 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) err = pci_enable_sriov(dev, num_vfs); if (err) { e_dev_warn("Failed to enable PCI sriov: %d\n", err); - return err; + goto err_out; } ixgbe_get_vfs(adapter); +out: return num_vfs; -#else - return 0; + +err_out: + return err; #endif + return 0; } static int ixgbe_pci_sriov_disable(struct pci_dev *dev) @@ -353,6 +405,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev) u32 current_flags = adapter->flags; #endif + if (!adapter->num_vfs && !pci_num_vf(dev)) + return -EINVAL; + err = ixgbe_disable_sriov(adapter); /* Only reinit if no error and state changed */ @@ -389,26 +444,22 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, /* only so many hash values supported */ entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); - /* - * salt away the number of 
multi cast addresses assigned + /* salt away the number of multi cast addresses assigned * to this VF for later use to restore when the PF multi cast * list changes */ vfinfo->num_vf_mc_hashes = entries; - /* - * VFs are limited to using the MTA hash table for their multicast - * addresses - */ - for (i = 0; i < entries; i++) { + /* VFs are limited to using the MTA hash table for their multicast + * addresses */ + for (i = 0; i < entries; i++) vfinfo->vf_mc_hashes[i] = hash_list[i]; - } for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); - mta_reg |= BIT(vector_bit); + mta_reg |= (1 << vector_bit); IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); } vmolr |= IXGBE_VMOLR_ROMPE; @@ -435,10 +486,9 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); - mta_reg |= BIT(vector_bit); + mta_reg |= (1 << vector_bit); IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); } - if (vfinfo->num_vf_mc_hashes) vmolr |= IXGBE_VMOLR_ROMPE; else @@ -449,14 +499,14 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) /* Restore any VF macvlans */ ixgbe_full_sync_mac_table(adapter); } -#endif +#endif /* CONFIG_PCI_IOV */ -static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, - u32 vf) +int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; int err; +#ifndef HAVE_VLAN_RX_REGISTER /* If VLAN overlaps with one the PF is currently monitoring make * sure that we are able to allocate a VLVF entry. 
This may be * redundant but it guarantees PF will maintain visibility to @@ -467,8 +517,10 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, if (err) return err; } +#endif err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false); +#ifndef HAVE_VLAN_RX_REGISTER if (add && !err) return err; @@ -480,14 +532,13 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, if (test_bit(vid, adapter->active_vlans) || (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) ixgbe_update_pf_promisc_vlvf(adapter, vid); +#endif return err; } - -static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) +static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; - int max_frame = msgbuf[1]; u32 max_frs; /* @@ -498,30 +549,31 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) * account before we can enable the VF for receive */ if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + struct net_device *dev = adapter->netdev; int pf_max_frame = dev->mtu + ETH_HLEN; u32 reg_offset, vf_shift, vfre; s32 err = 0; -#ifdef CONFIG_FCOE +#if IS_ENABLED(CONFIG_FCOE) if (dev->features & NETIF_F_FCOE_MTU) pf_max_frame = max_t(int, pf_max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); - #endif /* CONFIG_FCOE */ + switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: - /* - * Version 1.1 supports jumbo frames on VFs if PF has + case ixgbe_mbox_api_13: + /* Version 1.1 supports jumbo frames on VFs if PF has * jumbo frames enabled which means legacy VFs are * disabled */ if (pf_max_frame > ETH_FRAME_LEN) break; + /* fall through */ default: - /* - * If the PF or VF are running w/ jumbo frames enabled + /* If the PF or VF are running w/ jumbo frames enabled * we need to shut down the VF Rx path as we cannot * support jumbo frames on legacy VFs */ @@ -538,9 +590,9 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 
*msgbuf, u32 vf) /* enable or disable receive depending on error */ vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); if (err) - vfre &= ~BIT(vf_shift); + vfre &= ~(1 << vf_shift); else - vfre |= BIT(vf_shift); + vfre |= 1 << vf_shift; IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre); if (err) { @@ -549,12 +601,6 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) } } - /* MTU < 68 is an error and causes problems on some kernels */ - if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) { - e_err(drv, "VF max_frame %d out of range\n", max_frame); - return -EINVAL; - } - /* pull current max frame size from hardware */ max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); max_frs &= IXGBE_MHADD_MFS_MASK; @@ -570,10 +616,10 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) return 0; } -static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) +void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) { u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); - vmolr |= IXGBE_VMOLR_BAM; + vmolr |= IXGBE_VMOLR_BAM; if (aupe) vmolr |= IXGBE_VMOLR_AUPE; else @@ -581,6 +627,15 @@ static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); } +static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, + u16 vid, u16 qos, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT; + + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir); +} + static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; @@ -594,7 +649,7 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) u32 vlvfb_mask, pool_mask, i; /* create mask for VF and other pools */ - pool_mask = ~BIT(VMDQ_P(0) % 32); + pool_mask = (u32)~BIT(VMDQ_P(0) % 32); vlvfb_mask = BIT(vf % 32); /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */ @@ -612,7 +667,7 @@ static void 
ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) /* clear our bit from vlvfb */ vlvfb ^= vlvfb_mask; - /* create 64b mask to chedk to see if we should clear VLVF */ + /* create 64b mask to check to see if we should clear VLVF */ bits[word % 2] = vlvfb; bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1)); @@ -649,63 +704,12 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) } } -static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; - u8 num_tcs = netdev_get_num_tc(adapter->netdev); - - /* remove VLAN filters beloning to this VF */ - ixgbe_clear_vf_vlans(adapter, vf); - - /* add back PF assigned VLAN or VLAN 0 */ - ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); - - /* reset offloads to defaults */ - ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); - - /* set outgoing tags for VFs */ - if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { - ixgbe_clear_vmvir(adapter, vf); - } else { - if (vfinfo->pf_qos || !num_tcs) - ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, - vfinfo->pf_qos, vf); - else - ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, - adapter->default_up, vf); - - if (vfinfo->spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - } - - /* reset multicast table array for vf */ - adapter->vfinfo[vf].num_vf_mc_hashes = 0; - - /* Flush and reset the mta with the new values */ - ixgbe_set_rx_mode(adapter->netdev); - - ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); - - /* reset VF api back to unknown */ - adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; -} - -static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, - int vf, unsigned char *mac_addr) -{ - ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); - memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); - ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); 
- - return 0; -} - static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, int vf, int index, unsigned char *mac_addr) { struct list_head *pos; struct vf_macvlans *entry; + s32 retval = 0; if (index <= 1) { list_for_each(pos, &adapter->vf_mvs.l) { @@ -746,28 +750,89 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, if (!entry || !entry->free) return -ENOSPC; - entry->free = false; - entry->is_macvlan = true; - entry->vf = vf; - memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); + if (retval >= 0) { + entry->free = false; + entry->is_macvlan = true; + entry->vf = vf; + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + } - ixgbe_add_mac_filter(adapter, mac_addr, vf); + return retval; +} - return 0; +static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + u8 num_tcs = netdev_get_num_tc(adapter->netdev); + + /* remove VLAN filters belonging to this VF */ + ixgbe_clear_vf_vlans(adapter, vf); + + /* add back PF assigned VLAN or VLAN 0 */ + ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); + + /* reset offloads to defaults */ + ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); + + /* set outgoing tags for VFs */ + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { + ixgbe_clear_vmvir(adapter, vf); + } else { + if (vfinfo->pf_qos || !num_tcs) + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + vfinfo->pf_qos, vf); + else + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + adapter->default_up, vf); + } + + /* reset multicast table array for vf */ + adapter->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + ixgbe_set_rx_mode(adapter->netdev); + + ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + ixgbe_set_vf_macvlan(adapter, vf, 0, NULL); + + /* reset VF api back to unknown */ + adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; +} 
+ +int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, + int vf, unsigned char *mac_addr) +{ + s32 retval = 0; + + ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); + if (retval >= 0) + memcpy(adapter->vfinfo[vf].vf_mac_addresses, + mac_addr, ETH_ALEN); + else + memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN); + + return retval; } +#ifdef CONFIG_PCI_IOV int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) { + unsigned char vf_mac_addr[6]; struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); unsigned int vfn = (event_mask & 0x3f); - bool enable = ((event_mask & 0x10000000U) != 0); - if (enable) - eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses); + if (enable) { + memset(vf_mac_addr, 0, ETH_ALEN); + memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); + } return 0; } +#endif /* CONFIG_PCI_IOV */ static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf, u32 qde) @@ -775,25 +840,25 @@ static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf, struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + u32 reg; int i; for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) { - u32 reg; - /* flush previous write */ IXGBE_WRITE_FLUSH(hw); - /* indicate to hardware that we want to set drop enable */ - reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE; + /* drop enable should always be set in SRIOV mode*/ + reg = IXGBE_QDE_WRITE | qde; reg |= i << IXGBE_QDE_IDX_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); } + } static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) { - struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; u32 reg, 
reg_offset, vf_shift; u32 msgbuf[4] = {0, 0, 0, 0}; @@ -815,7 +880,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* enable transmit for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); - reg |= BIT(vf_shift); + reg |= 1 << vf_shift; IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); /* force drop enable for all VF Rx queues */ @@ -823,7 +888,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* enable receive for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); - reg |= BIT(vf_shift); + reg |= 1 << vf_shift; /* * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. * For more info take a look at ixgbe_set_vf_lpe @@ -832,23 +897,22 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) struct net_device *dev = adapter->netdev; int pf_max_frame = dev->mtu + ETH_HLEN; -#ifdef CONFIG_FCOE +#if IS_ENABLED(CONFIG_FCOE) if (dev->features & NETIF_F_FCOE_MTU) pf_max_frame = max_t(int, pf_max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); - #endif /* CONFIG_FCOE */ + if (pf_max_frame > ETH_FRAME_LEN) - reg &= ~BIT(vf_shift); + reg &= ~(1 << vf_shift); } IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); /* enable VF mailbox for further messages */ adapter->vfinfo[vf].clear_to_send = true; - /* Enable counting of spoofed packets in the SSVPC register */ reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); - reg |= BIT(vf_shift); + reg |= (1 << vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); /* @@ -862,12 +926,12 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* reply to reset with ack and vf mac address */ msgbuf[0] = IXGBE_VF_RESET; - if (!is_zero_ether_addr(vf_mac)) { + if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) { msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; memcpy(addr, vf_mac, ETH_ALEN); } else { msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; - dev_warn(&adapter->pdev->dev, + dev_warn(pci_dev_to_dev(adapter->pdev), "VF %d has no MAC address assigned, you 
may have to assign one manually\n", vf); } @@ -893,14 +957,15 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, } if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && - !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) { + memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, + ETH_ALEN)) { + u8 *pm = adapter->vfinfo[vf].vf_mac_addresses; e_warn(drv, - "VF %d attempted to override administratively set MAC address\n" - "Reload the VF driver to resume operations\n", - vf); + "VF %d attempted to set a new MAC address but it already has an administratively set MAC address %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", + vf, pm[0], pm[1], pm[2], pm[3], pm[4], pm[5]); + e_warn(drv, "Check the VF driver and if it is not using the correct MAC address you may need to reload the VF driver\n"); return -1; } - return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; } @@ -910,6 +975,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); u8 tcs = netdev_get_num_tc(adapter->netdev); + int err = 0; if (adapter->vfinfo[vf].pf_vlan || tcs) { e_warn(drv, @@ -923,7 +989,63 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, if (!vid && !add) return 0; - return ixgbe_set_vf_vlan(adapter, add, vid, vf); + err = ixgbe_set_vf_vlan(adapter, add, vid, vf); + + if (err) + return err; + +#ifdef HAVE_VLAN_RX_REGISTER + /* in case of promiscuous mode any VLAN filter set for a VF must + * also have the PF pool added to it. + */ + if (add && adapter->netdev->flags & IFF_PROMISC) { + err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + if (err) + return err; + } + +#ifdef CONFIG_PCI_IOV + /* Go through all the checks to see if the VLAN filter should + * be wiped completely. 
+ */ + if (!add && adapter->netdev->flags & IFF_PROMISC) { + struct ixgbe_hw *hw = &adapter->hw; + u32 bits, vlvf; + s32 reg_ndx; + + reg_ndx = ixgbe_find_vlvf_entry(hw, vid); + if (reg_ndx < 0) + goto out; + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx)); + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + if (VMDQ_P(0) < 32) { + bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); + bits &= ~(1 << VMDQ_P(0)); + bits |= IXGBE_READ_REG(hw, + IXGBE_VLVFB(reg_ndx * 2) + 1); + } else { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB(reg_ndx * 2) + 1); + bits &= ~(1 << (VMDQ_P(0) - 32)); + bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); + } + + /* If the filter was removed then ensure PF pool bit + * is cleared if the PF only added itself to the pool + * because the PF is in promiscuous mode. + */ + if ((vlvf & VLAN_VID_MASK) == vid && !bits) + err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + } + +out: +#endif /* CONFIG_PCI_IOV */ +#else /* HAVE_VLAN_RX_REGISTER */ + return 0; +#endif /* HAVE_VLAN_RX_REGISTER */ + return err; } static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, @@ -934,7 +1056,8 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, IXGBE_VT_MSGINFO_SHIFT; int err; - if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && + index > 0) { e_warn(drv, "VF %d requested MACVLAN filter but is administratively denied\n", vf); @@ -978,6 +1101,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, case ixgbe_mbox_api_10: case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: adapter->vfinfo[vf].vf_api = api; return 0; default: @@ -1002,6 +1126,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, case ixgbe_mbox_api_20: case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: break; default: return -1; @@ -1029,6 +1154,7 @@ static int 
ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, return 0; } +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { u32 i, j; @@ -1041,8 +1167,13 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) return -EPERM; /* verify the PF is supporting the correct API */ - if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: + break; + default: return -EOPNOTSUPP; + } /* This mailbox command is supported (required) only for 82599 and x540 * VFs which support up to 4 RSS queues. Therefore we will compress the @@ -1068,24 +1199,35 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, return -EPERM; /* verify the PF is supporting the correct API */ - if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: + break; + default: return -EOPNOTSUPP; + } - memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key)); + memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE); return 0; } +#endif /* HAVE_NDO_SET_VF_RSS_QUERY_EN */ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; int xcast_mode = msgbuf[1]; - u32 vmolr, disable, enable; + u32 vmolr, fctrl, disable, enable; /* verify the PF is supporting the correct APIs */ switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_12: + /* promisc introduced in 1.3 version */ + if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) + return -EOPNOTSUPP; + /* Fall threw */ + case ixgbe_mbox_api_13: break; default: return -EOPNOTSUPP; @@ -1101,17 +1243,34 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, switch (xcast_mode) { case IXGBEVF_XCAST_MODE_NONE: - disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; + disable = IXGBE_VMOLR_BAM | 
IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; enable = 0; break; case IXGBEVF_XCAST_MODE_MULTI: - disable = IXGBE_VMOLR_MPE; + disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE; break; case IXGBEVF_XCAST_MODE_ALLMULTI: - disable = 0; + disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; break; + case IXGBEVF_XCAST_MODE_PROMISC: + if (hw->mac.type <= ixgbe_mac_82599EB) + return -EOPNOTSUPP; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + if (!(fctrl & IXGBE_FCTRL_UPE)) { + /* VF promisc requires PF in promisc */ + e_warn(drv, + "Enabling VF promisc requires PF in promisc\n"); + return -EPERM; + } + + disable = 0; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; + break; default: return -EOPNOTSUPP; } @@ -1145,7 +1304,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) /* this is a message we already processed, do nothing */ if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) - return 0; + return retval; /* flush the ack before we write any messages back */ IXGBE_WRITE_FLUSH(hw); @@ -1157,10 +1316,11 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) * until the vf completes a virtual function reset it should not be * allowed to start any configuration. 
*/ + if (!adapter->vfinfo[vf].clear_to_send) { msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; ixgbe_write_mbx(hw, msgbuf, 1, vf); - return 0; + return retval; } switch ((msgbuf[0] & 0xFFFF)) { @@ -1174,7 +1334,11 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf); break; case IXGBE_VF_SET_LPE: - retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf); + if (msgbuf[1] > IXGBE_MAX_JUMBO_FRAME_SIZE) { + e_err(drv, "VF max_frame %d out of range\n", msgbuf[1]); + return -EINVAL; + } + retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf); break; case IXGBE_VF_SET_MACVLAN: retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf); @@ -1185,12 +1349,14 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_GET_QUEUES: retval = ixgbe_get_vf_queues(adapter, msgbuf, vf); break; +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN case IXGBE_VF_GET_RETA: retval = ixgbe_get_vf_reta(adapter, msgbuf, vf); break; case IXGBE_VF_GET_RSS_KEY: retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); break; +#endif /* HAVE_NDO_SET_VF_RSS_QUERY_EN */ case IXGBE_VF_UPDATE_XCAST_MODE: retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); break; @@ -1223,11 +1389,58 @@ static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) ixgbe_write_mbx(hw, &msg, 1, vf); } +#define Q_BITMAP_DEPTH 2 +static void ixgbe_check_mdd_event(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vf_bitmap[Q_BITMAP_DEPTH] = { 0 }; + u32 j, i; + u32 ping; + + if (!hw->mac.ops.mdd_event) + return; + + /* Did we have a malicious event */ + hw->mac.ops.mdd_event(hw, vf_bitmap); + + /* Log any blocked queues and release lock */ + for (i = 0; i < Q_BITMAP_DEPTH; i++) { + for (j = 0; j < 32 && vf_bitmap[i]; j++) { + u32 vf; + + if (!(vf_bitmap[i] & (1 << j))) + continue; + + /* The VF that malicious event occurred on */ + vf = j + (i * 32); + + dev_warn(pci_dev_to_dev(adapter->pdev), + "Malicious event on VF %d 
tx:%x rx:%x\n", vf, + IXGBE_READ_REG(hw, IXGBE_LVMMC_TX), + IXGBE_READ_REG(hw, IXGBE_LVMMC_RX)); + + /* restart the vf */ + if (hw->mac.ops.restore_mdd_vf) { + hw->mac.ops.restore_mdd_vf(hw, vf); + + /* get the VF to rebuild its queues */ + adapter->vfinfo[vf].clear_to_send = 0; + ping = IXGBE_PF_CONTROL_MSG | + IXGBE_VT_MSGTYPE_CTS; + ixgbe_write_mbx(hw, &ping, 1, vf); + } + } + } +} + void ixgbe_msg_task(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 vf; + if (adapter->flags & IXGBE_FLAG_MDD_ENABLED && adapter->vfinfo) + ixgbe_check_mdd_event(adapter); + for (vf = 0; vf < adapter->num_vfs; vf++) { /* process any reset requests */ if (!ixgbe_check_for_rst(hw, vf)) @@ -1255,6 +1468,7 @@ void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); } +#ifdef HAVE_NDO_SET_VF_TRUST static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) { struct ixgbe_hw *hw = &adapter->hw; @@ -1266,6 +1480,7 @@ static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) ixgbe_write_mbx(hw, &ping, 1, vf); } +#endif void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -1280,26 +1495,82 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) } } +#ifdef HAVE_NDO_SET_VF_TRUST +int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* nothing to do */ + if (adapter->vfinfo[vf].trusted == setting) + return 0; + + adapter->vfinfo[vf].trusted = setting; + + /* reset VF to reconfigure features */ + adapter->vfinfo[vf].clear_to_send = false; + ixgbe_ping_vf(adapter, vf); + + e_info(drv, "VF %u is %strusted\n", vf, setting ? 
"" : "not "); + + return 0; +} + +#endif +#ifdef IFLA_VF_MAX int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) + s32 retval = 0; + + if (vf >= adapter->num_vfs) return -EINVAL; - adapter->vfinfo[vf].pf_set_mac = true; - dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); - dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" - " change effective."); - if (test_bit(__IXGBE_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," - " but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, "Bring the PF device up before" - " attempting to use the VF device.\n"); + + if (is_valid_ether_addr(mac)) { + dev_info(pci_dev_to_dev(adapter->pdev), "setting MAC %pM on VF %d\n", + mac, vf); + dev_info(pci_dev_to_dev(adapter->pdev), "Reload the VF driver to make this change effective.\n"); + + retval = ixgbe_set_vf_mac(adapter, vf, mac); + if (retval >= 0) { + /* pf_set_mac is used in ESX5.1 and base driver but not in ESX5.5 */ + adapter->vfinfo[vf].pf_set_mac = true; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) { + dev_warn(pci_dev_to_dev(adapter->pdev), "The VF MAC address has been set, but the PF device is not up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), "Bring the PF device up before attempting to use the VF device.\n"); + } + } else { + dev_warn(pci_dev_to_dev(adapter->pdev), "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n"); + } + } else if (is_zero_ether_addr(mac)) { + unsigned char *vf_mac_addr = + adapter->vfinfo[vf].vf_mac_addresses; + + /* nothing to do */ + if (is_zero_ether_addr(vf_mac_addr)) + return 0; + + dev_info(pci_dev_to_dev(adapter->pdev), "removing MAC on VF %d\n", + vf); + + retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf); + if (retval >= 0) { + adapter->vfinfo[vf].pf_set_mac = false; + 
memcpy(vf_mac_addr, mac, ETH_ALEN); + } else { + dev_warn(pci_dev_to_dev(adapter->pdev), "Could NOT remove the VF MAC address.\n"); + } + } else { + retval = -EINVAL; } - return ixgbe_set_vf_mac(adapter, vf, mac); + return retval; } -static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, - u16 vlan, u8 qos) +static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, + int vf, u16 vlan, u8 qos) { struct ixgbe_hw *hw = &adapter->hw; int err; @@ -1318,16 +1589,13 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, if (hw->mac.type >= ixgbe_mac_X550) ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE | IXGBE_QDE_HIDE_VLAN); - adapter->vfinfo[vf].pf_vlan = vlan; adapter->vfinfo[vf].pf_qos = qos; - dev_info(&adapter->pdev->dev, + dev_info(pci_dev_to_dev(adapter->pdev), "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); if (test_bit(__IXGBE_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, - "The VF VLAN has been set, but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, - "Bring the PF device up before attempting to use the VF device.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), "Bring the PF device up before attempting to use the VF device.\n"); } out: @@ -1349,25 +1617,33 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) /* disable hide VLAN on X550 */ if (hw->mac.type >= ixgbe_mac_X550) ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); - adapter->vfinfo[vf].pf_vlan = 0; adapter->vfinfo[vf].pf_qos = 0; return err; } +#ifdef IFLA_VF_MAX +#ifdef IFLA_VF_VLAN_INFO_MAX int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) +#else +int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) +#endif { int err = 0; struct ixgbe_adapter *adapter = netdev_priv(netdev); - if ((vf >= adapter->num_vfs) || (vlan > 4095) || 
(qos > 7)) + /* VLAN IDs accepted range 0-4094 */ + if ((vf >= adapter->num_vfs) || (vlan > VLAN_VID_MASK-1) || (qos > 7)) return -EINVAL; +#ifdef IFLA_VF_VLAN_INFO_MAX if (vlan_proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT; +#endif if (vlan || qos) { - /* Check if there is already a port VLAN set, if so + /* + * Check if there is already a port VLAN set, if so * we have to delete the old one first before we * can set the new one. The usage model had * previously assumed the user would delete the @@ -1379,15 +1655,16 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, if (err) goto out; err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos); + } else { err = ixgbe_disable_port_vlan(adapter, vf); } - out: return err; } +#endif /* IFLA_VF_MAX */ -int ixgbe_link_mbps(struct ixgbe_adapter *adapter) +static int ixgbe_link_mbps(struct ixgbe_adapter *adapter) { switch (adapter->link_speed) { case IXGBE_LINK_SPEED_100_FULL: @@ -1463,7 +1740,7 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) { adapter->vf_rate_link_speed = 0; - dev_info(&adapter->pdev->dev, + dev_info(pci_dev_to_dev(adapter->pdev), "Link speed has been changed. 
VF Transmit rate is disabled\n"); } @@ -1475,8 +1752,14 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) } } -int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, + int vf, + int __always_unused min_tx_rate, int max_tx_rate) +#else +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate) +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ { struct ixgbe_adapter *adapter = netdev_priv(netdev); int link_speed; @@ -1494,9 +1777,6 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, if (link_speed != 10000) return -EINVAL; - if (min_tx_rate) - return -EINVAL; - /* rate limit cannot be less than 10Mbs or greater than link speed */ if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed))) return -EINVAL; @@ -1543,10 +1823,10 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf); } - return 0; } +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, bool setting) { @@ -1567,28 +1847,7 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, return 0; } -int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - if (vf >= adapter->num_vfs) - return -EINVAL; - - /* nothing to do */ - if (adapter->vfinfo[vf].trusted == setting) - return 0; - - adapter->vfinfo[vf].trusted = setting; - - /* reset VF to reconfigure features */ - adapter->vfinfo[vf].clear_to_send = false; - ixgbe_ping_vf(adapter, vf); - - e_info(drv, "VF %u is %strusted\n", vf, setting ? 
"" : "not "); - - return 0; -} - +#endif int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { @@ -1597,12 +1856,26 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev, return -EINVAL; ivi->vf = vf; memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate; ivi->min_tx_rate = 0; +#else + ivi->tx_rate = adapter->vfinfo[vf].tx_rate; +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + ivi->vlan = adapter->vfinfo[vf].pf_vlan; ivi->qos = adapter->vfinfo[vf].pf_qos; +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; +#endif +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; +#endif +#ifdef HAVE_NDO_SET_VF_TRUST ivi->trusted = adapter->vfinfo[vf].trusted; +#endif return 0; } +#endif /* IFLA_VF_MAX */ + diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.h index 0c7977d27b71..5d080ba60b02 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.h +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.h @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
- You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -26,48 +22,71 @@ *******************************************************************************/ + #ifndef _IXGBE_SRIOV_H_ #define _IXGBE_SRIOV_H_ -/* ixgbe driver limit the max number of VFs could be enabled to - * 63 (IXGBE_MAX_VF_FUNCTIONS - 1) +/* ixgbe driver limit the max number of VFs could be enabled to + * 63 (IXGBE_MAX_VF_FUNCTIONS - 1) */ #define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1) +#define IXGBE_MAX_VFS_1TC IXGBE_MAX_VFS_DRV_LIMIT +#define IXGBE_MAX_VFS_4TC 31 +#define IXGBE_MAX_VFS_8TC 15 -#ifdef CONFIG_PCI_IOV void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); -#endif +int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf); +void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe); void ixgbe_msg_task(struct ixgbe_adapter *adapter); -int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); +int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, + int vf, unsigned char *mac_addr); void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); +#ifdef IFLA_VF_MAX int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); +#ifdef IFLA_VF_VLAN_INFO_MAX int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, - u8 qos, __be16 vlan_proto); -int ixgbe_link_mbps(struct ixgbe_adapter *adapter); + u8 qos, __be16 vlan_proto); +#else +int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, + u8 qos); +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate); -int ixgbe_ndo_set_vf_spoofchk(struct net_device 
*netdev, int vf, bool setting); +#else +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, bool setting); +#endif +#ifdef HAVE_NDO_SET_VF_TRUST int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); +#endif int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); -void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); +#endif /* IFLA_VF_MAX */ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter); #ifdef CONFIG_PCI_IOV +int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); void ixgbe_enable_sriov(struct ixgbe_adapter *adapter); +int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); #endif int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +#ifdef IFLA_VF_MAX +void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); +#endif /* IFLA_VF_MAX */ +void ixgbe_dump_registers(struct ixgbe_adapter *adapter); -static inline void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, - u16 vid, u16 qos, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT; - - IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir); -} - +/* + * These are defined in ixgbe_type.h on behalf of the VF driver + * but we need them here unwrapped for the PF driver. 
+ */ +#define IXGBE_DEV_ID_82599_VF 0x10ED +#define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 #endif /* _IXGBE_SRIOV_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sysfs.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sysfs.c index ef6df3d6437e..5d30be5e891f 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sysfs.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sysfs.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
@@ -30,16 +26,22 @@ #include "ixgbe_common.h" #include "ixgbe_type.h" +#ifdef IXGBE_SYSFS + #include #include #include #include #include #include +#include +#ifdef IXGBE_HWMON #include +#endif +#ifdef IXGBE_HWMON /* hwmon callback functions */ -static ssize_t ixgbe_hwmon_show_location(struct device *dev, +static ssize_t ixgbe_hwmon_show_location(struct device __always_unused *dev, struct device_attribute *attr, char *buf) { @@ -49,7 +51,7 @@ static ssize_t ixgbe_hwmon_show_location(struct device *dev, ixgbe_attr->sensor->location); } -static ssize_t ixgbe_hwmon_show_temp(struct device *dev, +static ssize_t ixgbe_hwmon_show_temp(struct device __always_unused *dev, struct device_attribute *attr, char *buf) { @@ -68,7 +70,7 @@ static ssize_t ixgbe_hwmon_show_temp(struct device *dev, return sprintf(buf, "%u\n", value); } -static ssize_t ixgbe_hwmon_show_cautionthresh(struct device *dev, +static ssize_t ixgbe_hwmon_show_cautionthresh(struct device __always_unused *dev, struct device_attribute *attr, char *buf) { @@ -82,7 +84,7 @@ static ssize_t ixgbe_hwmon_show_cautionthresh(struct device *dev, return sprintf(buf, "%u\n", value); } -static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev, +static ssize_t ixgbe_hwmon_show_maxopthresh(struct device __always_unused *dev, struct device_attribute *attr, char *buf) { @@ -112,29 +114,29 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter, unsigned int n_attr; struct hwmon_attr *ixgbe_attr; - n_attr = adapter->ixgbe_hwmon_buff->n_hwmon; - ixgbe_attr = &adapter->ixgbe_hwmon_buff->hwmon_list[n_attr]; + n_attr = adapter->ixgbe_hwmon_buff.n_hwmon; + ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr]; switch (type) { case IXGBE_HWMON_TYPE_LOC: ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location; snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), - "temp%u_label", offset + 1); + "temp%u_label", offset); break; case IXGBE_HWMON_TYPE_TEMP: ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp; 
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), - "temp%u_input", offset + 1); + "temp%u_input", offset); break; case IXGBE_HWMON_TYPE_CAUTION: ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh; snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), - "temp%u_max", offset + 1); + "temp%u_max", offset); break; case IXGBE_HWMON_TYPE_MAX: ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh; snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), - "temp%u_crit", offset + 1); + "temp%u_crit", offset); break; default: rc = -EPERM; @@ -148,17 +150,35 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter, ixgbe_attr->dev_attr.store = NULL; ixgbe_attr->dev_attr.attr.mode = S_IRUGO; ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name; - sysfs_attr_init(&ixgbe_attr->dev_attr.attr); - adapter->ixgbe_hwmon_buff->attrs[n_attr] = &ixgbe_attr->dev_attr.attr; + rc = device_create_file(pci_dev_to_dev(adapter->pdev), + &ixgbe_attr->dev_attr); - ++adapter->ixgbe_hwmon_buff->n_hwmon; + if (rc == 0) + ++adapter->ixgbe_hwmon_buff.n_hwmon; - return 0; + return rc; } +#endif /* IXGBE_HWMON */ -static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter) +static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter __maybe_unused *adapter) { +#ifdef IXGBE_HWMON + int i; + + if (adapter == NULL) + return; + + for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) { + device_remove_file(pci_dev_to_dev(adapter->pdev), + &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr); + } + + kfree(adapter->ixgbe_hwmon_buff.hwmon_list); + + if (adapter->ixgbe_hwmon_buff.device) + hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device); +#endif /* IXGBE_HWMON */ } /* called from ixgbe_main.c */ @@ -170,27 +190,43 @@ void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter) /* called from ixgbe_main.c */ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) { - struct hwmon_buff *ixgbe_hwmon; - struct device *hwmon_dev; - unsigned int i; int rc = 0; +#ifdef IXGBE_HWMON + struct 
hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff; + unsigned int i; + int n_attrs; +#endif /* IXGBE_HWMON */ + if (adapter == NULL) + goto err; + +#ifdef IXGBE_HWMON /* If this method isn't defined we don't support thermals */ if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) { - goto exit; + goto no_thermal; } /* Don't create thermal hwmon interface if no sensors present */ if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)) - goto exit; - - ixgbe_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*ixgbe_hwmon), - GFP_KERNEL); - if (ixgbe_hwmon == NULL) { + goto no_thermal; + + /* + * Allocation space for max attributs + * max num sensors * values (loc, temp, max, caution) + */ + n_attrs = IXGBE_MAX_SENSORS * 4; + ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), + GFP_KERNEL); + if (!ixgbe_hwmon->hwmon_list) { rc = -ENOMEM; - goto exit; + goto err; + } + + ixgbe_hwmon->device = hwmon_device_register(pci_dev_to_dev(adapter->pdev)); + if (IS_ERR(ixgbe_hwmon->device)) { + rc = PTR_ERR(ixgbe_hwmon->device); + goto err; } - adapter->ixgbe_hwmon_buff = ixgbe_hwmon; for (i = 0; i < IXGBE_MAX_SENSORS; i++) { /* @@ -202,29 +238,20 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) /* Bail if any hwmon attr struct fails to initialize */ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION); + rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC); + rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP); + rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX); if (rc) - goto exit; - rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC); - if (rc) - goto exit; - rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP); - if (rc) - goto exit; - rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX); - if (rc) - goto exit; + goto err; } - ixgbe_hwmon->groups[0] = &ixgbe_hwmon->group; - ixgbe_hwmon->group.attrs = ixgbe_hwmon->attrs; +no_thermal: +#endif /* IXGBE_HWMON */ + goto exit; 
- hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev, - "ixgbe", - ixgbe_hwmon, - ixgbe_hwmon->groups); - if (IS_ERR(hwmon_dev)) - rc = PTR_ERR(hwmon_dev); +err: + ixgbe_sysfs_del_adapter(adapter); exit: return rc; } - +#endif /* IXGBE_SYSFS */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_type.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_type.h index 531990b2f2fb..b5a5365d28c5 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_type.h +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_type.h @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -29,189 +25,245 @@ #ifndef _IXGBE_TYPE_H_ #define _IXGBE_TYPE_H_ -#include -#include -#include +/* + * The following is a brief description of the error categories used by the + * ERROR_REPORT* macros. + * + * - IXGBE_ERROR_INVALID_STATE + * This category is for errors which represent a serious failure state that is + * unexpected, and could be potentially harmful to device operation. 
It should + * not be used for errors relating to issues that can be worked around or + * ignored. + * + * - IXGBE_ERROR_POLLING + * This category is for errors related to polling/timeout issues and should be + * used in any case where the timeout occured, or a failure to obtain a lock, or + * failure to receive data within the time limit. + * + * - IXGBE_ERROR_CAUTION + * This category should be used for reporting issues that may be the cause of + * other errors, such as temperature warnings. It should indicate an event which + * could be serious, but hasn't necessarily caused problems yet. + * + * - IXGBE_ERROR_SOFTWARE + * This category is intended for errors due to software state preventing + * something. The category is not intended for errors due to bad arguments, or + * due to unsupported features. It should be used when a state occurs which + * prevents action but is not a serious issue. + * + * - IXGBE_ERROR_ARGUMENT + * This category is for when a bad or invalid argument is passed. It should be + * used whenever a function is called and error checking has detected the + * argument is wrong or incorrect. + * + * - IXGBE_ERROR_UNSUPPORTED + * This category is for errors which are due to unsupported circumstances or + * configuration issues. It should not be used when the issue is due to an + * invalid argument, but for when something has occurred that is unsupported + * (Ex: Flow control autonegotiation or an unsupported SFP+ module.) 
+ */ + +#include "ixgbe_osdep.h" + +/* Override this by setting IOMEM in your ixgbe_osdep.h header */ +#ifndef IOMEM +#define IOMEM +#endif + +/* Vendor ID */ +#define IXGBE_INTEL_VENDOR_ID 0x8086 /* Device IDs */ -#define IXGBE_DEV_ID_82598 0x10B6 -#define IXGBE_DEV_ID_82598_BX 0x1508 -#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 -#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 -#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB -#define IXGBE_DEV_ID_82598AT 0x10C8 -#define IXGBE_DEV_ID_82598AT2 0x150B -#define IXGBE_DEV_ID_82598EB_CX4 0x10DD -#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC -#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 -#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 -#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 -#define IXGBE_DEV_ID_82599_KX4 0x10F7 -#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 -#define IXGBE_DEV_ID_82599_KR 0x1517 -#define IXGBE_DEV_ID_82599_T3_LOM 0x151C -#define IXGBE_DEV_ID_82599_CX4 0x10F9 -#define IXGBE_DEV_ID_82599_SFP 0x10FB -#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a -#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 -#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 -#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071 -#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 -#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 -#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B +#define IXGBE_DEV_ID_82598 0x10B6 +#define IXGBE_DEV_ID_82598_BX 0x1508 +#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 +#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 +#define IXGBE_DEV_ID_82598AT 0x10C8 +#define IXGBE_DEV_ID_82598AT2 0x150B +#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB +#define IXGBE_DEV_ID_82598EB_CX4 0x10DD +#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC +#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 +#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 +#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 +#define IXGBE_DEV_ID_82599_KX4 0x10F7 +#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 +#define IXGBE_DEV_ID_82599_KR 0x1517 +#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 +#define 
IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C +#define IXGBE_DEV_ID_82599_CX4 0x10F9 +#define IXGBE_DEV_ID_82599_SFP 0x10FB +#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 +#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071 +#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 +#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 +#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 +#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B #define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159 #define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D #define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008 #define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976 #define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE -#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 -#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 -#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D -#define IXGBE_DEV_ID_82599EN_SFP 0x1557 -#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001 -#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC -#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 -#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C -#define IXGBE_DEV_ID_82599_LS 0x154F -#define IXGBE_DEV_ID_X540T 0x1528 -#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A -#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 -#define IXGBE_DEV_ID_X540T1 0x1560 - -#define IXGBE_DEV_ID_X550T 0x1563 -#define IXGBE_DEV_ID_X550T1 0x15D1 -#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA -#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB -#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC -#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD -#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE -#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0 -#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 -#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 -#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 -#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6 -#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 -#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 -#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE -#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 -#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 - -/* VF Device IDs */ -#define IXGBE_DEV_ID_82599_VF 0x10ED -#define 
IXGBE_DEV_ID_X540_VF 0x1515 -#define IXGBE_DEV_ID_X550_VF 0x1565 -#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 -#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 - -#define IXGBE_CAT(r, m) IXGBE_##r##_##m - -#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, IDX)]) +#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A +#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 +#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 +#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D +#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A +#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 +#define IXGBE_DEV_ID_82599EN_SFP 0x1557 +#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001 +#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC +#define IXGBE_DEV_ID_82599_T3_LOM 0x151C +#define IXGBE_DEV_ID_82599_LS 0x154F +#define IXGBE_DEV_ID_X540T 0x1528 +#define IXGBE_DEV_ID_X540T1 0x1560 +#define IXGBE_DEV_ID_X550T 0x1563 +#define IXGBE_DEV_ID_X550T1 0x15D1 +/* Placeholder value, pending official value. */ +#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 +#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 +#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 +#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6 +#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 +#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 +#define IXGBE_DEV_ID_X550EM_A_QSFP 0x15CA +#define IXGBE_DEV_ID_X550EM_A_QSFP_N 0x15CC +#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE +#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 +#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 +#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA +#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB +#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC +#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD +#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE +#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0 + +#define IXGBE_CAT(r,m) IXGBE_##r##m + +#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, _IDX)]) /* General Registers */ -#define IXGBE_CTRL 0x00000 -#define IXGBE_STATUS 0x00008 -#define IXGBE_CTRL_EXT 0x00018 -#define IXGBE_ESDP 0x00020 -#define IXGBE_EODSDP 0x00028 - -#define IXGBE_I2CCTL_8259X 0x00028 -#define 
IXGBE_I2CCTL_X540 IXGBE_I2CCTL_8259X +#define IXGBE_CTRL 0x00000 +#define IXGBE_STATUS 0x00008 +#define IXGBE_CTRL_EXT 0x00018 +#define IXGBE_ESDP 0x00020 +#define IXGBE_EODSDP 0x00028 +#define IXGBE_I2CCTL_82599 0x00028 +#define IXGBE_I2CCTL IXGBE_I2CCTL_82599 +#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_82599 #define IXGBE_I2CCTL_X550 0x15F5C #define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550 #define IXGBE_I2CCTL_X550EM_a IXGBE_I2CCTL_X550 -#define IXGBE_I2CCTL(_hw) IXGBE_BY_MAC((_hw), I2CCTL) - -#define IXGBE_LEDCTL 0x00200 -#define IXGBE_FRTIMER 0x00048 -#define IXGBE_TCPTIMER 0x0004C -#define IXGBE_CORESPARE 0x00600 -#define IXGBE_EXVET 0x05078 +#define IXGBE_I2CCTL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2CCTL) +#define IXGBE_PHY_GPIO 0x00028 +#define IXGBE_MAC_GPIO 0x00030 +#define IXGBE_PHYINT_STATUS0 0x00100 +#define IXGBE_PHYINT_STATUS1 0x00104 +#define IXGBE_PHYINT_STATUS2 0x00108 +#define IXGBE_LEDCTL 0x00200 +#define IXGBE_FRTIMER 0x00048 +#define IXGBE_TCPTIMER 0x0004C +#define IXGBE_CORESPARE 0x00600 +#define IXGBE_EXVET 0x05078 /* NVM Registers */ -#define IXGBE_EEC_8259X 0x10010 -#define IXGBE_EEC_X540 IXGBE_EEC_8259X -#define IXGBE_EEC_X550 IXGBE_EEC_8259X -#define IXGBE_EEC_X550EM_x IXGBE_EEC_8259X +#define IXGBE_EEC 0x10010 +#define IXGBE_EEC_X540 IXGBE_EEC +#define IXGBE_EEC_X550 IXGBE_EEC +#define IXGBE_EEC_X550EM_x IXGBE_EEC #define IXGBE_EEC_X550EM_a 0x15FF8 -#define IXGBE_EEC(_hw) IXGBE_BY_MAC((_hw), EEC) -#define IXGBE_EERD 0x10014 -#define IXGBE_EEWR 0x10018 -#define IXGBE_FLA_8259X 0x1001C -#define IXGBE_FLA_X540 IXGBE_FLA_8259X -#define IXGBE_FLA_X550 IXGBE_FLA_8259X -#define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X +#define IXGBE_EEC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EEC) + +#define IXGBE_EERD 0x10014 +#define IXGBE_EEWR 0x10018 + +#define IXGBE_FLA 0x1001C +#define IXGBE_FLA_X540 IXGBE_FLA +#define IXGBE_FLA_X550 IXGBE_FLA +#define IXGBE_FLA_X550EM_x IXGBE_FLA #define IXGBE_FLA_X550EM_a 0x15F68 -#define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA) -#define 
IXGBE_EEMNGCTL 0x10110 -#define IXGBE_EEMNGDATA 0x10114 -#define IXGBE_FLMNGCTL 0x10118 -#define IXGBE_FLMNGDATA 0x1011C -#define IXGBE_FLMNGCNT 0x10120 -#define IXGBE_FLOP 0x1013C -#define IXGBE_GRC_8259X 0x10200 -#define IXGBE_GRC_X540 IXGBE_GRC_8259X -#define IXGBE_GRC_X550 IXGBE_GRC_8259X -#define IXGBE_GRC_X550EM_x IXGBE_GRC_8259X +#define IXGBE_FLA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FLA) + +#define IXGBE_EEMNGCTL 0x10110 +#define IXGBE_EEMNGDATA 0x10114 +#define IXGBE_FLMNGCTL 0x10118 +#define IXGBE_FLMNGDATA 0x1011C +#define IXGBE_FLMNGCNT 0x10120 +#define IXGBE_FLOP 0x1013C + +#define IXGBE_GRC 0x10200 +#define IXGBE_GRC_X540 IXGBE_GRC +#define IXGBE_GRC_X550 IXGBE_GRC +#define IXGBE_GRC_X550EM_x IXGBE_GRC #define IXGBE_GRC_X550EM_a 0x15F64 -#define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC) +#define IXGBE_GRC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), GRC) + +#define IXGBE_SRAMREL 0x10210 +#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550EM_a 0x15F6C +#define IXGBE_SRAMREL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SRAMREL) + +#define IXGBE_PHYDBG 0x10218 /* General Receive Control */ -#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ -#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ +#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ +#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ -#define IXGBE_VPDDIAG0 0x10204 -#define IXGBE_VPDDIAG1 0x10208 +#define IXGBE_VPDDIAG0 0x10204 +#define IXGBE_VPDDIAG1 0x10208 /* I2CCTL Bit Masks */ -#define IXGBE_I2C_CLK_IN_8259X 0x00000001 -#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN_8259X +#define IXGBE_I2C_CLK_IN 0x00000001 +#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN #define IXGBE_I2C_CLK_IN_X550 0x00004000 #define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550 #define IXGBE_I2C_CLK_IN_X550EM_a IXGBE_I2C_CLK_IN_X550 -#define IXGBE_I2C_CLK_IN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN) +#define 
IXGBE_I2C_CLK_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN) -#define IXGBE_I2C_CLK_OUT_8259X 0x00000002 -#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT_8259X +#define IXGBE_I2C_CLK_OUT 0x00000002 +#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT #define IXGBE_I2C_CLK_OUT_X550 0x00000200 #define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550 #define IXGBE_I2C_CLK_OUT_X550EM_a IXGBE_I2C_CLK_OUT_X550 -#define IXGBE_I2C_CLK_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT) +#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT) -#define IXGBE_I2C_DATA_IN_8259X 0x00000004 -#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN_8259X +#define IXGBE_I2C_DATA_IN 0x00000004 +#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN #define IXGBE_I2C_DATA_IN_X550 0x00001000 #define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550 #define IXGBE_I2C_DATA_IN_X550EM_a IXGBE_I2C_DATA_IN_X550 -#define IXGBE_I2C_DATA_IN(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN) +#define IXGBE_I2C_DATA_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN) -#define IXGBE_I2C_DATA_OUT_8259X 0x00000008 -#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT_8259X +#define IXGBE_I2C_DATA_OUT 0x00000008 +#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT #define IXGBE_I2C_DATA_OUT_X550 0x00000400 #define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550 #define IXGBE_I2C_DATA_OUT_X550EM_a IXGBE_I2C_DATA_OUT_X550 -#define IXGBE_I2C_DATA_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT) +#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT) -#define IXGBE_I2C_DATA_OE_N_EN_8259X 0 -#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN_8259X +#define IXGBE_I2C_DATA_OE_N_EN 0 +#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN #define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800 #define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550 #define IXGBE_I2C_DATA_OE_N_EN_X550EM_a IXGBE_I2C_DATA_OE_N_EN_X550 -#define IXGBE_I2C_DATA_OE_N_EN(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN) +#define 
IXGBE_I2C_DATA_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN) -#define IXGBE_I2C_BB_EN_8259X 0 -#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN_8259X +#define IXGBE_I2C_BB_EN 0 +#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN #define IXGBE_I2C_BB_EN_X550 0x00000100 #define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550 #define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550 -#define IXGBE_I2C_BB_EN(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN) -#define IXGBE_I2C_CLK_OE_N_EN_8259X 0 -#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN_8259X +#define IXGBE_I2C_BB_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN) + +#define IXGBE_I2C_CLK_OE_N_EN 0 +#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN #define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000 #define IXGBE_I2C_CLK_OE_N_EN_X550EM_x IXGBE_I2C_CLK_OE_N_EN_X550 #define IXGBE_I2C_CLK_OE_N_EN_X550EM_a IXGBE_I2C_CLK_OE_N_EN_X550 -#define IXGBE_I2C_CLK_OE_N_EN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN) - +#define IXGBE_I2C_CLK_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN) #define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500 #define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 @@ -235,163 +287,211 @@ struct ixgbe_thermal_sensor_data { struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS]; }; +#define NVM_OROM_OFFSET 0x17 +#define NVM_OROM_BLK_LOW 0x83 +#define NVM_OROM_BLK_HI 0x84 +#define NVM_OROM_PATCH_MASK 0xFF +#define NVM_OROM_SHIFT 8 + +#define NVM_VER_MASK 0x00FF /* version mask */ +#define NVM_VER_SHIFT 8 /* version bit shift */ +#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */ +#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */ +#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */ +#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */ +#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */ +#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */ +#define NVM_ETK_OFF_LOW 0x2D /* version low 
order word */ +#define NVM_ETK_OFF_HI 0x2E /* version high order word */ +#define NVM_ETK_SHIFT 16 /* high version word shift */ +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETK_VALID 0x8000 +#define NVM_INVALID_PTR 0xFFFF +#define NVM_VER_SIZE 32 /* version sting size */ + +struct ixgbe_nvm_version { + u32 etk_id; + u8 nvm_major; + u16 nvm_minor; + u8 nvm_id; + + bool oem_valid; + u8 oem_major; + u8 oem_minor; + u16 oem_release; + + bool or_valid; + u8 or_major; + u16 or_build; + u8 or_patch; + +}; + /* Interrupt Registers */ -#define IXGBE_EICR 0x00800 -#define IXGBE_EICS 0x00808 -#define IXGBE_EIMS 0x00880 -#define IXGBE_EIMC 0x00888 -#define IXGBE_EIAC 0x00810 -#define IXGBE_EIAM 0x00890 -#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4) -#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4) -#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4) -#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4) +#define IXGBE_EICR 0x00800 +#define IXGBE_EICS 0x00808 +#define IXGBE_EIMS 0x00880 +#define IXGBE_EIMC 0x00888 +#define IXGBE_EIAC 0x00810 +#define IXGBE_EIAM 0x00890 +#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4) +#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4) +#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4) +#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4) +/* 82599 EITR is only 12 bits, with the lower 3 always zero */ /* * 82598 EITR is 16 bits but set the limits based on the max - * supported by all ixgbe hardware. 82599 EITR is only 12 bits, - * with the lower 3 always zero. + * supported by all ixgbe hardware */ -#define IXGBE_MAX_INT_RATE 488281 -#define IXGBE_MIN_INT_RATE 956 -#define IXGBE_MAX_EITR 0x00000FF8 -#define IXGBE_MIN_EITR 8 -#define IXGBE_EITR(_i) (((_i) <= 23) ? 
(0x00820 + ((_i) * 4)) : \ - (0x012300 + (((_i) - 24) * 4))) -#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 -#define IXGBE_EITR_LLI_MOD 0x00008000 -#define IXGBE_EITR_CNT_WDIS 0x80000000 -#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ -#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */ -#define IXGBE_EITRSEL 0x00894 -#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ -#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ -#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4))) -#define IXGBE_GPIE 0x00898 +#define IXGBE_MAX_INT_RATE 488281 +#define IXGBE_MIN_INT_RATE 956 +#define IXGBE_MAX_EITR 0x00000FF8 +#define IXGBE_MIN_EITR 8 +#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \ + (0x012300 + (((_i) - 24) * 4))) +#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 +#define IXGBE_EITR_LLI_MOD 0x00008000 +#define IXGBE_EITR_CNT_WDIS 0x80000000 +#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ +#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */ +#define IXGBE_EITRSEL 0x00894 +#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ +#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ +#define IXGBE_PBACL(_i) (((_i) == 0) ? 
(0x11068) : (0x110C0 + ((_i) * 4))) +#define IXGBE_GPIE 0x00898 /* Flow Control Registers */ -#define IXGBE_FCADBUL 0x03210 -#define IXGBE_FCADBUH 0x03214 -#define IXGBE_FCAMACL 0x04328 -#define IXGBE_FCAMACH 0x0432C -#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_PFCTOP 0x03008 -#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ -#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ -#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ -#define IXGBE_FCRTV 0x032A0 -#define IXGBE_FCCFG 0x03D00 -#define IXGBE_TFCS 0x0CE00 +#define IXGBE_FCADBUL 0x03210 +#define IXGBE_FCADBUH 0x03214 +#define IXGBE_FCAMACL 0x04328 +#define IXGBE_FCAMACH 0x0432C +#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_PFCTOP 0x03008 +#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTV 0x032A0 +#define IXGBE_FCCFG 0x03D00 +#define IXGBE_TFCS 0x0CE00 /* Receive DMA Registers */ -#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ +#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ (0x0D000 + (((_i) - 64) * 0x40))) -#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ +#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ (0x0D004 + (((_i) - 64) * 0x40))) -#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ +#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ (0x0D008 + (((_i) - 64) * 0x40))) -#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ +#define IXGBE_RDH(_i) (((_i) < 64) ? 
(0x01010 + ((_i) * 0x40)) : \ (0x0D010 + (((_i) - 64) * 0x40))) -#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ +#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ (0x0D018 + (((_i) - 64) * 0x40))) -#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ - (0x0D028 + (((_i) - 64) * 0x40))) -#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ - (0x0D02C + (((_i) - 64) * 0x40))) -#define IXGBE_RSCDBU 0x03028 -#define IXGBE_RDDCC 0x02F20 -#define IXGBE_RXMEMWRAP 0x03190 -#define IXGBE_STARCTRL 0x03024 +#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ + (0x0D028 + (((_i) - 64) * 0x40))) +#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ + (0x0D02C + (((_i) - 64) * 0x40))) +#define IXGBE_RSCDBU 0x03028 +#define IXGBE_RDDCC 0x02F20 +#define IXGBE_RXMEMWRAP 0x03190 +#define IXGBE_STARCTRL 0x03024 /* * Split and Replication Receive Control Registers * 00-15 : 0x02100 + n*4 * 16-64 : 0x01014 + n*0x40 * 64-127: 0x0D014 + (n-64)*0x40 */ -#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ - (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ - (0x0D014 + (((_i) - 64) * 0x40)))) +#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ + (0x0D014 + (((_i) - 64) * 0x40)))) /* * Rx DCA Control Register: * 00-15 : 0x02200 + n*4 * 16-64 : 0x0100C + n*0x40 * 64-127: 0x0D00C + (n-64)*0x40 */ -#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ +#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ (((_i) < 64) ? 
(0x0100C + ((_i) * 0x40)) : \ (0x0D00C + (((_i) - 64) * 0x40)))) -#define IXGBE_RDRXCTL 0x02F00 -#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) - /* 8 of these 0x03C00 - 0x03C1C */ -#define IXGBE_RXCTRL 0x03000 -#define IXGBE_DROPEN 0x03D04 -#define IXGBE_RXPBSIZE_SHIFT 10 +#define IXGBE_RDRXCTL 0x02F00 +/* 8 of these 0x03C00 - 0x03C1C */ +#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) +#define IXGBE_RXCTRL 0x03000 +#define IXGBE_DROPEN 0x03D04 +#define IXGBE_RXPBSIZE_SHIFT 10 +#define IXGBE_RXPBSIZE_MASK 0x000FFC00 /* Receive Registers */ -#define IXGBE_RXCSUM 0x05000 -#define IXGBE_RFCTL 0x05008 -#define IXGBE_DRECCCTL 0x02F08 -#define IXGBE_DRECCCTL_DISABLE 0 +#define IXGBE_RXCSUM 0x05000 +#define IXGBE_RFCTL 0x05008 +#define IXGBE_DRECCCTL 0x02F08 +#define IXGBE_DRECCCTL_DISABLE 0 +#define IXGBE_DRECCCTL2 0x02F8C + /* Multicast Table Array - 128 entries */ -#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) -#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ - (0x0A200 + ((_i) * 8))) -#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ - (0x0A204 + ((_i) * 8))) -#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) -#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) +#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) +#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x0A200 + ((_i) * 8))) +#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x0A204 + ((_i) * 8))) +#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) +#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) /* Packet split receive type */ -#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \ - (0x0EA00 + ((_i) * 4))) +#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? 
(0x05480 + ((_i) * 4)) : \ + (0x0EA00 + ((_i) * 4))) /* array of 4096 1-bit vlan filters */ -#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) +#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) /*array of 4096 4-bit vlan vmdq indices */ -#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) -#define IXGBE_FCTRL 0x05080 -#define IXGBE_VLNCTRL 0x05088 -#define IXGBE_MCSTCTRL 0x05090 -#define IXGBE_MRQC 0x05818 -#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */ -#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */ -#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */ -#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */ -#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */ -#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */ -#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */ -#define IXGBE_RQTC 0x0EC70 -#define IXGBE_MTQC 0x08120 -#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ -#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ -#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ -#define IXGBE_PFFLPL 0x050B0 -#define IXGBE_PFFLPH 0x050B4 -#define IXGBE_VT_CTL 0x051B0 -#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ -#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */ -#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */ -#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */ -#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) -#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) -#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) -#define IXGBE_QDE 0x2F04 -#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */ -#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ -#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) -#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) -#define 
IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) -#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) -#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */ -#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */ -#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ -#define IXGBE_RXFECCERR0 0x051B8 -#define IXGBE_LLITHRESH 0x0EC90 -#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_IMIRVP 0x05AC0 -#define IXGBE_VMD_CTL 0x0581C -#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ -#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */ -#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ +#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) +#define IXGBE_FCTRL 0x05080 +#define IXGBE_VLNCTRL 0x05088 +#define IXGBE_MCSTCTRL 0x05090 +#define IXGBE_MRQC 0x05818 +#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */ +#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */ +#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. 
Addr Queue Filter */ +#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */ +#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */ +#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */ +#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */ +#define IXGBE_RQTC 0x0EC70 +#define IXGBE_MTQC 0x08120 +#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ +#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_PFFLPL 0x050B0 +#define IXGBE_PFFLPH 0x050B4 +#define IXGBE_VT_CTL 0x051B0 +#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ +/* 64 Mailboxes, 16 DW each */ +#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) +#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */ +#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */ +#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) +#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) +#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) +#define IXGBE_QDE 0x2F04 +#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */ +#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ +#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) +#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) +#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) +#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) +#define IXGBE_LVMMC_RX 0x2FA8 +#define IXGBE_LVMMC_TX 0x8108 +#define IXGBE_LMVM_RX 0x2FA4 +#define IXGBE_LMVM_TX 0x8124 +#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */ +#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */ +#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ +#define IXGBE_RXFECCERR0 0x051B8 +#define IXGBE_LLITHRESH 0x0EC90 +#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIRVP 0x05AC0 
+#define IXGBE_VMD_CTL 0x0581C +#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ +#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */ +#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ /* Registers for setting up RSS on X550 with SRIOV * _p - pool number (0..63) @@ -402,960 +502,1086 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40)) /* Flow Director registers */ -#define IXGBE_FDIRCTRL 0x0EE00 -#define IXGBE_FDIRHKEY 0x0EE68 -#define IXGBE_FDIRSKEY 0x0EE6C -#define IXGBE_FDIRDIP4M 0x0EE3C -#define IXGBE_FDIRSIP4M 0x0EE40 -#define IXGBE_FDIRTCPM 0x0EE44 -#define IXGBE_FDIRUDPM 0x0EE48 +#define IXGBE_FDIRCTRL 0x0EE00 +#define IXGBE_FDIRHKEY 0x0EE68 +#define IXGBE_FDIRSKEY 0x0EE6C +#define IXGBE_FDIRDIP4M 0x0EE3C +#define IXGBE_FDIRSIP4M 0x0EE40 +#define IXGBE_FDIRTCPM 0x0EE44 +#define IXGBE_FDIRUDPM 0x0EE48 #define IXGBE_FDIRSCTPM 0x0EE78 -#define IXGBE_FDIRIP6M 0x0EE74 -#define IXGBE_FDIRM 0x0EE70 +#define IXGBE_FDIRIP6M 0x0EE74 +#define IXGBE_FDIRM 0x0EE70 /* Flow Director Stats registers */ -#define IXGBE_FDIRFREE 0x0EE38 -#define IXGBE_FDIRLEN 0x0EE4C -#define IXGBE_FDIRUSTAT 0x0EE50 -#define IXGBE_FDIRFSTAT 0x0EE54 -#define IXGBE_FDIRMATCH 0x0EE58 -#define IXGBE_FDIRMISS 0x0EE5C +#define IXGBE_FDIRFREE 0x0EE38 +#define IXGBE_FDIRLEN 0x0EE4C +#define IXGBE_FDIRUSTAT 0x0EE50 +#define IXGBE_FDIRFSTAT 0x0EE54 +#define IXGBE_FDIRMATCH 0x0EE58 +#define IXGBE_FDIRMISS 0x0EE5C /* Flow Director Programming registers */ #define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */ -#define IXGBE_FDIRIPSA 0x0EE18 -#define IXGBE_FDIRIPDA 0x0EE1C -#define IXGBE_FDIRPORT 0x0EE20 -#define IXGBE_FDIRVLAN 0x0EE24 -#define IXGBE_FDIRHASH 0x0EE28 -#define IXGBE_FDIRCMD 0x0EE2C +#define IXGBE_FDIRIPSA 0x0EE18 +#define IXGBE_FDIRIPDA 0x0EE1C +#define IXGBE_FDIRPORT 0x0EE20 +#define IXGBE_FDIRVLAN 0x0EE24 +#define IXGBE_FDIRHASH 0x0EE28 +#define 
IXGBE_FDIRCMD 0x0EE2C /* Transmit DMA registers */ -#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/ -#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) -#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) -#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) -#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40)) -#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) -#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) -#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) -#define IXGBE_DTXCTL 0x07E00 - -#define IXGBE_DMATXCTL 0x04A80 -#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */ -#define IXGBE_PFDTXGSWC 0x08220 -#define IXGBE_DTXMXSZRQ 0x08100 -#define IXGBE_DTXTCPFLGL 0x04A88 -#define IXGBE_DTXTCPFLGH 0x04A8C -#define IXGBE_LBDRPEN 0x0CA00 -#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */ - -#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ -#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ -#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ -#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */ -#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */ -#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ - -#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ +#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of them (0-31)*/ +#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) +#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) +#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) +#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40)) +#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) +#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) +#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) +#define IXGBE_DTXCTL 0x07E00 + +#define IXGBE_DMATXCTL 0x04A80 +#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */ +#define IXGBE_PFDTXGSWC 0x08220 +#define IXGBE_DTXMXSZRQ 0x08100 +#define IXGBE_DTXTCPFLGL 0x04A88 +#define IXGBE_DTXTCPFLGH 0x04A8C +#define IXGBE_LBDRPEN 0x0CA00 
+#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */ + +#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ +#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ +#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ +#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */ +#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */ +#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ + +#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ /* Anti-spoofing defines */ -#define IXGBE_SPOOF_MACAS_MASK 0xFF -#define IXGBE_SPOOF_VLANAS_MASK 0xFF00 -#define IXGBE_SPOOF_VLANAS_SHIFT 8 +#define IXGBE_SPOOF_MACAS_MASK 0xFF +#define IXGBE_SPOOF_VLANAS_MASK 0xFF00 +#define IXGBE_SPOOF_VLANAS_SHIFT 8 #define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000 #define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16 -#define IXGBE_PFVFSPOOF_REG_COUNT 8 - -#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ +#define IXGBE_PFVFSPOOF_REG_COUNT 8 +/* 16 of these (0-15) */ +#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* Tx DCA Control register : 128 of these (0-127) */ -#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) -#define IXGBE_TIPG 0x0CB00 -#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ -#define IXGBE_MNGTXMAP 0x0CD10 -#define IXGBE_TIPG_FIBER_DEFAULT 3 -#define IXGBE_TXPBSIZE_SHIFT 10 +#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) +#define IXGBE_TIPG 0x0CB00 +#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_MNGTXMAP 0x0CD10 +#define IXGBE_TIPG_FIBER_DEFAULT 3 +#define IXGBE_TXPBSIZE_SHIFT 10 /* Wake up registers */ -#define IXGBE_WUC 0x05800 -#define IXGBE_WUFC 0x05808 -#define IXGBE_WUS 0x05810 -#define IXGBE_IPAV 0x05838 -#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ -#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ - -#define IXGBE_WUPL 0x05900 -#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ +#define IXGBE_WUC 0x05800 
+#define IXGBE_WUFC 0x05808 +#define IXGBE_WUS 0x05810 +#define IXGBE_IPAV 0x05838 +#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ +#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ + +#define IXGBE_WUPL 0x05900 +#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ +#define IXGBE_PROXYS 0x05F60 /* Proxying Status Register */ +#define IXGBE_PROXYFC 0x05F64 /* Proxying Filter Control Register */ #define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */ -#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */ -#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host - * Filter Table */ /* masks for accessing VXLAN and GENEVE UDP ports */ -#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */ -#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */ -#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */ +#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */ +#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */ +#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */ + +#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16 + +#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */ +/* Ext Flexible Host Filter Table */ +#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) +#define IXGBE_FHFT_EXT_X550(_n) (0x09600 + ((_n) * 0x100)) -#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16 +/* Four Flexible Filters are supported */ +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 -#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 -#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 +/* Six Flexible Filters are supported */ +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6 6 +/* Eight Flexible Filters are supported */ +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_8 8 +#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 /* Each Flexible Filter is at most 128 (0x80) bytes in length */ -#define 
IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128 -#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ -#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ +#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128 +#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ +#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ /* Definitions for power management and wakeup registers */ /* Wake Up Control */ -#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ -#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ -#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */ +#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ +#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */ /* Wake Up Filter Control */ -#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ -#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ -#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ -#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ -#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ -#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ -#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ -#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ -#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */ - -#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ -#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ -#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ -#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ -#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ -#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ -#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ -#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask 
for 4 flex filters */ -#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */ -#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */ -#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */ + +#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ +#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ +#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ +#define IXGBE_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flex filters */ +#define IXGBE_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flex filters */ +#define IXGBE_WUFC_FW_RST_WK 0x80000000 /* Ena wake on FW reset assertion */ +/* Mask for Ext. 
flex filters */ +#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 +#define IXGBE_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */ +#define IXGBE_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */ +#define IXGBE_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask all 8 flex filters */ +#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ /* Wake Up Status */ -#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC -#define IXGBE_WUS_MAG IXGBE_WUFC_MAG -#define IXGBE_WUS_EX IXGBE_WUFC_EX -#define IXGBE_WUS_MC IXGBE_WUFC_MC -#define IXGBE_WUS_BC IXGBE_WUFC_BC -#define IXGBE_WUS_ARP IXGBE_WUFC_ARP -#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4 -#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6 -#define IXGBE_WUS_MNG IXGBE_WUFC_MNG -#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0 -#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1 -#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2 -#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3 -#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4 -#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5 -#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS - -/* Wake Up Packet Length */ -#define IXGBE_WUPL_LENGTH_MASK 0xFFFF +#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC +#define IXGBE_WUS_MAG IXGBE_WUFC_MAG +#define IXGBE_WUS_EX IXGBE_WUFC_EX +#define IXGBE_WUS_MC IXGBE_WUFC_MC +#define IXGBE_WUS_BC IXGBE_WUFC_BC +#define IXGBE_WUS_ARP IXGBE_WUFC_ARP +#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4 +#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6 +#define IXGBE_WUS_MNG IXGBE_WUFC_MNG +#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0 +#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1 +#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2 +#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3 +#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4 +#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5 +#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS +#define IXGBE_WUS_FW_RST_WK IXGBE_WUFC_FW_RST_WK +/* Proxy Status */ +#define IXGBE_PROXYS_EX 0x00000004 /* Exact packet received */ +#define IXGBE_PROXYS_ARP_DIR 0x00000020 /* ARP w/filter match received */ +#define IXGBE_PROXYS_NS 0x00000200 /* IPV6 NS received */ 
+#define IXGBE_PROXYS_NS_DIR 0x00000400 /* IPV6 NS w/DA match received */ +#define IXGBE_PROXYS_ARP 0x00000800 /* ARP request packet received */ +#define IXGBE_PROXYS_MLD 0x00001000 /* IPv6 MLD packet received */ + +/* Proxying Filter Control */ +#define IXGBE_PROXYFC_ENABLE 0x00000001 /* Port Proxying Enable */ +#define IXGBE_PROXYFC_EX 0x00000004 /* Directed Exact Proxy Enable */ +#define IXGBE_PROXYFC_ARP_DIR 0x00000020 /* Directed ARP Proxy Enable */ +#define IXGBE_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ +#define IXGBE_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Enable */ +#define IXGBE_PROXYFC_MLD 0x00000800 /* IPv6 MLD Proxy Enable */ +#define IXGBE_PROXYFC_NO_TCO 0x00008000 /* Ignore TCO packets */ + +#define IXGBE_WUPL_LENGTH_MASK 0xFFFF /* DCB registers */ -#define MAX_TRAFFIC_CLASS 8 -#define X540_TRAFFIC_CLASS 4 -#define DEF_TRAFFIC_CLASS 1 -#define IXGBE_RMCS 0x03D00 -#define IXGBE_DPMCS 0x07F40 -#define IXGBE_PDPMCS 0x0CD00 -#define IXGBE_RUPPBMR 0x050A0 -#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ -#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ -#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_DCB_MAX_TRAFFIC_CLASS 8 +#define IXGBE_RMCS 0x03D00 +#define IXGBE_DPMCS 0x07F40 +#define IXGBE_PDPMCS 0x0CD00 +#define IXGBE_RUPPBMR 0x050A0 +#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define 
IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ + +/* Power Management */ +/* DMA Coalescing configuration */ +struct ixgbe_dmac_config { + u16 watchdog_timer; /* usec units */ + bool fcoe_en; + u32 link_speed; + u8 fcoe_tc; + u8 num_tcs; +}; + +/* + * DMA Coalescing threshold Rx PB TC[n] value in Kilobyte by link speed. + * DMACRXT = 10Gbps = 10,000 bits / usec = 1250 bytes / usec 70 * 1250 == + * 87500 bytes [85KB] + */ +#define IXGBE_DMACRXT_10G 0x55 +#define IXGBE_DMACRXT_1G 0x09 +#define IXGBE_DMACRXT_100M 0x01 + +/* DMA Coalescing registers */ +#define IXGBE_DMCMNGTH 0x15F20 /* Management Threshold */ +#define IXGBE_DMACR 0x02400 /* Control register */ +#define IXGBE_DMCTH(_i) (0x03300 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_DMCTLX 0x02404 /* Time to Lx request */ +/* DMA Coalescing register fields */ +#define IXGBE_DMCMNGTH_DMCMNGTH_MASK 0x000FFFF0 /* Mng Threshold mask */ +#define IXGBE_DMCMNGTH_DMCMNGTH_SHIFT 4 /* Management Threshold shift */ +#define IXGBE_DMACR_DMACWT_MASK 0x0000FFFF /* Watchdog Timer mask */ +#define IXGBE_DMACR_HIGH_PRI_TC_MASK 0x00FF0000 +#define IXGBE_DMACR_HIGH_PRI_TC_SHIFT 16 +#define IXGBE_DMACR_EN_MNG_IND 0x10000000 /* Enable Mng Indications */ +#define IXGBE_DMACR_LX_COAL_IND 0x40000000 /* Lx Coalescing indicate */ +#define IXGBE_DMACR_DMAC_EN 0x80000000 /* DMA Coalescing Enable */ +#define IXGBE_DMCTH_DMACRXT_MASK 0x000001FF /* Receive Threshold mask */ +#define IXGBE_DMCTLX_TTLX_MASK 0x00000FFF /* Time to Lx request mask */ + +/* EEE registers */ +#define IXGBE_EEER 0x043A0 /* EEE register */ +#define IXGBE_EEE_STAT 0x04398 /* EEE Status */ +#define IXGBE_EEE_SU 0x04380 /* EEE Set up */ +#define IXGBE_EEE_SU_TEEE_DLY_SHIFT 26 +#define IXGBE_TLPIC 0x041F4 /* EEE Tx LPI count */ +#define IXGBE_RLPIC 0x041F8 /* EEE Rx LPI count */ + +/* EEE register fields */ +#define IXGBE_EEER_TX_LPI_EN 0x00010000 /* Enable EEE LPI TX path */ +#define IXGBE_EEER_RX_LPI_EN 0x00020000 /* Enable EEE LPI RX path */ +#define 
IXGBE_EEE_STAT_NEG 0x20000000 /* EEE support neg on link */ +#define IXGBE_EEE_RX_LPI_STATUS 0x40000000 /* RX Link in LPI status */ +#define IXGBE_EEE_TX_LPI_STATUS 0x80000000 /* TX Link in LPI status */ /* Security Control Registers */ -#define IXGBE_SECTXCTRL 0x08800 -#define IXGBE_SECTXSTAT 0x08804 -#define IXGBE_SECTXBUFFAF 0x08808 -#define IXGBE_SECTXMINIFG 0x08810 -#define IXGBE_SECRXCTRL 0x08D00 -#define IXGBE_SECRXSTAT 0x08D04 +#define IXGBE_SECTXCTRL 0x08800 +#define IXGBE_SECTXSTAT 0x08804 +#define IXGBE_SECTXBUFFAF 0x08808 +#define IXGBE_SECTXMINIFG 0x08810 +#define IXGBE_SECRXCTRL 0x08D00 +#define IXGBE_SECRXSTAT 0x08D04 /* Security Bit Fields and Masks */ -#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001 -#define IXGBE_SECTXCTRL_TX_DIS 0x00000002 -#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 +#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001 +#define IXGBE_SECTXCTRL_TX_DIS 0x00000002 +#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 -#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 -#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002 +#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 +#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002 -#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 -#define IXGBE_SECRXCTRL_RX_DIS 0x00000002 +#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 +#define IXGBE_SECRXCTRL_RX_DIS 0x00000002 -#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 -#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002 +#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 +#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002 /* LinkSec (MacSec) Registers */ -#define IXGBE_LSECTXCAP 0x08A00 -#define IXGBE_LSECRXCAP 0x08F00 -#define IXGBE_LSECTXCTRL 0x08A04 -#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */ -#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */ -#define IXGBE_LSECTXSA 0x08A10 -#define IXGBE_LSECTXPN0 0x08A14 -#define IXGBE_LSECTXPN1 0x08A18 -#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */ -#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */ -#define 
IXGBE_LSECRXCTRL 0x08F04 -#define IXGBE_LSECRXSCL 0x08F08 -#define IXGBE_LSECRXSCH 0x08F0C -#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */ -#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */ -#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m)))) -#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */ -#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */ -#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */ -#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */ -#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */ -#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */ -#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */ -#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */ -#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */ -#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */ -#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */ -#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */ -#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */ -#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */ -#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */ -#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */ -#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */ -#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */ -#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */ +#define IXGBE_LSECTXCAP 0x08A00 +#define IXGBE_LSECRXCAP 0x08F00 +#define IXGBE_LSECTXCTRL 0x08A04 +#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */ +#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */ +#define IXGBE_LSECTXSA 0x08A10 +#define IXGBE_LSECTXPN0 0x08A14 +#define IXGBE_LSECTXPN1 0x08A18 +#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECRXCTRL 0x08F04 +#define IXGBE_LSECRXSCL 0x08F08 +#define IXGBE_LSECRXSCH 0x08F0C +#define 
IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m)))) +#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */ +#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */ +#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */ +#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */ +#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */ +#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */ +#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */ +#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */ +#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */ +#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */ +#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */ +#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */ +#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */ +#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */ +#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */ +#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */ +#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */ +#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */ +#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */ /* LinkSec (MacSec) Bit Fields and Masks */ -#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000 -#define IXGBE_LSECTXCAP_SUM_SHIFT 16 -#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000 -#define IXGBE_LSECRXCAP_SUM_SHIFT 16 - -#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003 -#define IXGBE_LSECTXCTRL_DISABLE 0x0 -#define IXGBE_LSECTXCTRL_AUTH 0x1 -#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2 -#define IXGBE_LSECTXCTRL_AISCI 0x00000020 -#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 -#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8 - -#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C -#define IXGBE_LSECRXCTRL_EN_SHIFT 2 -#define IXGBE_LSECRXCTRL_DISABLE 0x0 -#define 
IXGBE_LSECRXCTRL_CHECK 0x1 -#define IXGBE_LSECRXCTRL_STRICT 0x2 -#define IXGBE_LSECRXCTRL_DROP 0x3 -#define IXGBE_LSECRXCTRL_PLSH 0x00000040 -#define IXGBE_LSECRXCTRL_RP 0x00000080 -#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33 +#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000 +#define IXGBE_LSECTXCAP_SUM_SHIFT 16 +#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000 +#define IXGBE_LSECRXCAP_SUM_SHIFT 16 + +#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003 +#define IXGBE_LSECTXCTRL_DISABLE 0x0 +#define IXGBE_LSECTXCTRL_AUTH 0x1 +#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2 +#define IXGBE_LSECTXCTRL_AISCI 0x00000020 +#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C +#define IXGBE_LSECRXCTRL_EN_SHIFT 2 +#define IXGBE_LSECRXCTRL_DISABLE 0x0 +#define IXGBE_LSECRXCTRL_CHECK 0x1 +#define IXGBE_LSECRXCTRL_STRICT 0x2 +#define IXGBE_LSECRXCTRL_DROP 0x3 +#define IXGBE_LSECRXCTRL_PLSH 0x00000040 +#define IXGBE_LSECRXCTRL_RP 0x00000080 +#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33 /* IpSec Registers */ -#define IXGBE_IPSTXIDX 0x08900 -#define IXGBE_IPSTXSALT 0x08904 -#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */ -#define IXGBE_IPSRXIDX 0x08E00 -#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */ -#define IXGBE_IPSRXSPI 0x08E14 -#define IXGBE_IPSRXIPIDX 0x08E18 -#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */ -#define IXGBE_IPSRXSALT 0x08E2C -#define IXGBE_IPSRXMOD 0x08E30 - -#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 +#define IXGBE_IPSTXIDX 0x08900 +#define IXGBE_IPSTXSALT 0x08904 +#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXIDX 0x08E00 +#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSPI 0x08E14 +#define IXGBE_IPSRXIPIDX 0x08E18 +#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSALT 
0x08E2C +#define IXGBE_IPSRXMOD 0x08E30 + +#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 /* DCB registers */ -#define IXGBE_RTRPCS 0x02430 -#define IXGBE_RTTDCS 0x04900 -#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ -#define IXGBE_RTTPCS 0x0CD00 -#define IXGBE_RTRUP2TC 0x03020 -#define IXGBE_RTTUP2TC 0x0C800 -#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */ -#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RTTDQSEL 0x04904 -#define IXGBE_RTTDT1C 0x04908 -#define IXGBE_RTTDT1S 0x0490C -#define IXGBE_RTTQCNCR 0x08B00 -#define IXGBE_RTTQCNTG 0x04A90 -#define IXGBE_RTTBCNRD 0x0498C -#define IXGBE_RTTQCNRR 0x0498C -#define IXGBE_RTTDTECC 0x04990 -#define IXGBE_RTTDTECC_NO_BCN 0x00000100 -#define IXGBE_RTTBCNRC 0x04984 -#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 +#define IXGBE_RTRPCS 0x02430 +#define IXGBE_RTTDCS 0x04900 +#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_RTTPCS 0x0CD00 +#define IXGBE_RTRUP2TC 0x03020 +#define IXGBE_RTTUP2TC 0x0C800 +#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDQSEL 0x04904 +#define IXGBE_RTTDT1C 
0x04908 +#define IXGBE_RTTDT1S 0x0490C +#define IXGBE_RTTDTECC 0x04990 +#define IXGBE_RTTDTECC_NO_BCN 0x00000100 + +#define IXGBE_RTTBCNRC 0x04984 +#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 #define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF #define IXGBE_RTTBCNRC_RF_INT_SHIFT 14 -#define IXGBE_RTTBCNRC_RF_INT_MASK \ +#define IXGBE_RTTBCNRC_RF_INT_MASK \ (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) -#define IXGBE_RTTBCNRM 0x04980 -#define IXGBE_RTTQCNRM 0x04980 +#define IXGBE_RTTBCNRM 0x04980 +/* FCoE DMA Context Registers */ /* FCoE Direct DMA Context */ #define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10)) -/* FCoE DMA Context Registers */ -#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ -#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */ -#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ -#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ -#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */ -#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4)) -#define IXGBE_FCBUFF_VALID BIT(0) /* DMA Context Valid */ -#define IXGBE_FCBUFF_BUFFSIZE (3u << 3) /* User Buffer Size */ -#define IXGBE_FCBUFF_WRCONTX BIT(7) /* 0: Initiator, 1: Target */ -#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ -#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ -#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 -#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 -#define IXGBE_FCBUFF_OFFSET_SHIFT 16 -#define IXGBE_FCDMARW_WE BIT(14) /* Write enable */ -#define IXGBE_FCDMARW_RE BIT(15) /* Read enable */ -#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ -#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ -#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 - +#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ +#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. 
PTR High */ +#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ +#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ +#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */ +#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */ +#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */ +#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ +#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ +#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 +#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 +#define IXGBE_FCBUFF_OFFSET_SHIFT 16 +#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */ +#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */ +#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ +#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ +#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 /* FCoE SOF/EOF */ -#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */ -#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ -#define IXGBE_REOFF 0x05158 /* Rx FC EOF */ -#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ +#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */ +#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ +#define IXGBE_REOFF 0x05158 /* Rx FC EOF */ +#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ +/* FCoE Filter Context Registers */ +#define IXGBE_FCD_ID 0x05114 /* FCoE D_ID */ +#define IXGBE_FCSMAC 0x0510C /* FCoE Source MAC */ +#define IXGBE_FCFLTRW_SMAC_HIGH_SHIFT 16 /* FCoE Direct Filter Context */ #define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10)) #define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4)) -/* FCoE Filter Context Registers */ -#define IXGBE_FCFLT 0x05108 /* FC FLT Context */ -#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ -#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ -#define IXGBE_FCFLT_VALID BIT(0) /* Filter Context Valid */ -#define IXGBE_FCFLT_FIRST BIT(1) /* Filter First */ -#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ -#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ -#define 
IXGBE_FCFLTRW_RVALDT BIT(13) /* Fast Re-Validation */ -#define IXGBE_FCFLTRW_WE BIT(14) /* Write Enable */ -#define IXGBE_FCFLTRW_RE BIT(15) /* Read Enable */ +#define IXGBE_FCFLT 0x05108 /* FC FLT Context */ +#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ +#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ +#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */ +#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */ +#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ +#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ +#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */ +#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */ +#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */ /* FCoE Receive Control */ -#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ -#define IXGBE_FCRXCTRL_FCOELLI BIT(0) /* Low latency interrupt */ -#define IXGBE_FCRXCTRL_SAVBAD BIT(1) /* Save Bad Frames */ -#define IXGBE_FCRXCTRL_FRSTRDH BIT(2) /* EN 1st Read Header */ -#define IXGBE_FCRXCTRL_LASTSEQH BIT(3) /* EN Last Header in Seq */ -#define IXGBE_FCRXCTRL_ALLH BIT(4) /* EN All Headers */ -#define IXGBE_FCRXCTRL_FRSTSEQH BIT(5) /* EN 1st Seq. Header */ -#define IXGBE_FCRXCTRL_ICRC BIT(6) /* Ignore Bad FC CRC */ -#define IXGBE_FCRXCTRL_FCCRCBO BIT(7) /* FC CRC Byte Ordering */ -#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ -#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 +#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ +#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */ +#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */ +#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */ +#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */ +#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */ +#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. 
Header */ +#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */ +#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */ +#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ +#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 /* FCoE Redirection */ -#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */ -#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */ -#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */ -#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ -#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ -#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ +#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */ +#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */ +#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */ +#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ +#define IXGBE_FCRETASEL_ENA 0x2 /* FCoE FCRETASEL bit */ +#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ +#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ #define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */ /* Higher 7 bits for the queue index */ #define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000 #define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16 /* Stats registers */ -#define IXGBE_CRCERRS 0x04000 -#define IXGBE_ILLERRC 0x04004 -#define IXGBE_ERRBC 0x04008 -#define IXGBE_MSPDC 0x04010 -#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ -#define IXGBE_MLFC 0x04034 -#define IXGBE_MRFC 0x04038 -#define IXGBE_RLEC 0x04040 -#define IXGBE_LXONTXC 0x03F60 -#define IXGBE_LXONRXC 0x0CF60 -#define IXGBE_LXOFFTXC 0x03F68 -#define IXGBE_LXOFFRXC 0x0CF68 -#define IXGBE_LXONRXCNT 0x041A4 -#define IXGBE_LXOFFRXCNT 0x041A8 -#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */ -#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */ -#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */ 
-#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ -#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ -#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ -#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ -#define IXGBE_PRC64 0x0405C -#define IXGBE_PRC127 0x04060 -#define IXGBE_PRC255 0x04064 -#define IXGBE_PRC511 0x04068 -#define IXGBE_PRC1023 0x0406C -#define IXGBE_PRC1522 0x04070 -#define IXGBE_GPRC 0x04074 -#define IXGBE_BPRC 0x04078 -#define IXGBE_MPRC 0x0407C -#define IXGBE_GPTC 0x04080 -#define IXGBE_GORCL 0x04088 -#define IXGBE_GORCH 0x0408C -#define IXGBE_GOTCL 0x04090 -#define IXGBE_GOTCH 0x04094 -#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ -#define IXGBE_RUC 0x040A4 -#define IXGBE_RFC 0x040A8 -#define IXGBE_ROC 0x040AC -#define IXGBE_RJC 0x040B0 -#define IXGBE_MNGPRC 0x040B4 -#define IXGBE_MNGPDC 0x040B8 -#define IXGBE_MNGPTC 0x0CF90 -#define IXGBE_TORL 0x040C0 -#define IXGBE_TORH 0x040C4 -#define IXGBE_TPR 0x040D0 -#define IXGBE_TPT 0x040D4 -#define IXGBE_PTC64 0x040D8 -#define IXGBE_PTC127 0x040DC -#define IXGBE_PTC255 0x040E0 -#define IXGBE_PTC511 0x040E4 -#define IXGBE_PTC1023 0x040E8 -#define IXGBE_PTC1522 0x040EC -#define IXGBE_MPTC 0x040F0 -#define IXGBE_BPTC 0x040F4 -#define IXGBE_XEC 0x04120 -#define IXGBE_SSVPC 0x08780 - -#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) -#define IXGBE_TQSMR(_i) (((_i) <= 7) ? 
(0x07300 + ((_i) * 4)) : \ +#define IXGBE_CRCERRS 0x04000 +#define IXGBE_ILLERRC 0x04004 +#define IXGBE_ERRBC 0x04008 +#define IXGBE_MSPDC 0x04010 +#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ +#define IXGBE_MLFC 0x04034 +#define IXGBE_MRFC 0x04038 +#define IXGBE_RLEC 0x04040 +#define IXGBE_LXONTXC 0x03F60 +#define IXGBE_LXONRXC 0x0CF60 +#define IXGBE_LXOFFTXC 0x03F68 +#define IXGBE_LXOFFRXC 0x0CF68 +#define IXGBE_LXONRXCNT 0x041A4 +#define IXGBE_LXOFFRXCNT 0x041A8 +#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ +#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ +#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ +#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ +#define IXGBE_PRC64 0x0405C +#define IXGBE_PRC127 0x04060 +#define IXGBE_PRC255 0x04064 +#define IXGBE_PRC511 0x04068 +#define IXGBE_PRC1023 0x0406C +#define IXGBE_PRC1522 0x04070 +#define IXGBE_GPRC 0x04074 +#define IXGBE_BPRC 0x04078 +#define IXGBE_MPRC 0x0407C +#define IXGBE_GPTC 0x04080 +#define IXGBE_GORCL 0x04088 +#define IXGBE_GORCH 0x0408C +#define IXGBE_GOTCL 0x04090 +#define IXGBE_GOTCH 0x04094 +#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ +#define IXGBE_RUC 0x040A4 +#define IXGBE_RFC 0x040A8 +#define IXGBE_ROC 0x040AC +#define IXGBE_RJC 0x040B0 +#define IXGBE_MNGPRC 0x040B4 +#define IXGBE_MNGPDC 0x040B8 +#define IXGBE_MNGPTC 0x0CF90 +#define IXGBE_TORL 0x040C0 +#define IXGBE_TORH 0x040C4 +#define IXGBE_TPR 0x040D0 +#define IXGBE_TPT 0x040D4 +#define IXGBE_PTC64 0x040D8 +#define IXGBE_PTC127 0x040DC +#define IXGBE_PTC255 0x040E0 +#define IXGBE_PTC511 0x040E4 +#define IXGBE_PTC1023 0x040E8 +#define IXGBE_PTC1522 0x040EC +#define 
IXGBE_MPTC 0x040F0 +#define IXGBE_BPTC 0x040F4 +#define IXGBE_XEC 0x04120 +#define IXGBE_SSVPC 0x08780 + +#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) +#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \ (0x08600 + ((_i) * 4))) -#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) - -#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ -#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ -#define IXGBE_FCCRC 0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */ -#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */ -#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */ -#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */ -#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ -#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ -#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ -#define IXGBE_O2BGPTC 0x041C4 -#define IXGBE_O2BSPC 0x087B0 -#define IXGBE_B2OSPC 0x041C0 -#define IXGBE_B2OGPRC 0x02F90 -#define IXGBE_PCRC8ECL 0x0E810 -#define IXGBE_PCRC8ECH 0x0E811 -#define IXGBE_PCRC8ECH_MASK 0x1F -#define IXGBE_LDPCECL 0x0E820 -#define IXGBE_LDPCECH 0x0E821 - -/* MII clause 22/28 definitions */ -#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 - -#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register */ -#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */ - -#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */ - 
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */ -#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s F Duplex */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */ - -#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */ -#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ -#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ -#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ -#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/ -#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ -#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ -#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400 -#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800 -#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ -#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ -#define IXGBE_MII_RESTART 0x200 -#define IXGBE_MII_AUTONEG_COMPLETE 0x20 -#define IXGBE_MII_AUTONEG_LINK_UP 0x04 -#define IXGBE_MII_AUTONEG_REG 0x0 +#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) + +#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these 
*/ +#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_FCCRC 0x05118 /* Num of Good Eth CRC w/ Bad FC CRC */ +#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */ +#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */ +#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */ +#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ +#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ +#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ +#define IXGBE_FCCRC_CNT_MASK 0x0000FFFF /* CRC_CNT: bit 0 - 15 */ +#define IXGBE_FCLAST_CNT_MASK 0x0000FFFF /* Last_CNT: bit 0 - 15 */ +#define IXGBE_O2BGPTC 0x041C4 +#define IXGBE_O2BSPC 0x087B0 +#define IXGBE_B2OSPC 0x041C0 +#define IXGBE_B2OGPRC 0x02F90 +#define IXGBE_BUPRC 0x04180 +#define IXGBE_BMPRC 0x04184 +#define IXGBE_BBPRC 0x04188 +#define IXGBE_BUPTC 0x0418C +#define IXGBE_BMPTC 0x04190 +#define IXGBE_BBPTC 0x04194 +#define IXGBE_BCRCERRS 0x04198 +#define IXGBE_BXONRXC 0x0419C +#define IXGBE_BXOFFRXC 0x041E0 +#define IXGBE_BXONTXC 0x041E4 +#define IXGBE_BXOFFTXC 0x041E8 /* Management */ -#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_MANC 0x05820 -#define IXGBE_MFVAL 0x05824 -#define IXGBE_MANC2H 0x05860 -#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_MIPAF 0x058B0 -#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ -#define IXGBE_MMAH(_i) 
(0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ -#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ -#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ -#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_LSWFW 0x15014 +#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MANC 0x05820 +#define IXGBE_MFVAL 0x05824 +#define IXGBE_MANC2H 0x05860 +#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MIPAF 0x058B0 +#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ +#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_LSWFW 0x15F14 +#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */ +#define IXGBE_BMCIPVAL 0x05060 +#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001 +#define IXGBE_BMCIP_IPADDR_VALID 0x00000002 /* Management Bit Fields and Masks */ +#define IXGBE_MANC_MPROXYE 0x40000000 /* Management Proxy Enable */ #define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */ +#define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Ena BMC2OS and OS2BMC traffic */ +#define IXGBE_MANC_EN_BMC2OS_SHIFT 28 /* Firmware Semaphore Register */ #define IXGBE_FWSM_MODE_MASK 0xE +#define IXGBE_FWSM_TS_ENABLED 0x1 #define IXGBE_FWSM_FW_MODE_PT 0x4 /* ARC Subsystem registers */ -#define IXGBE_HICR 0x15F00 -#define IXGBE_FWSTS 0x15F0C -#define IXGBE_HSMC0R 0x15F04 -#define IXGBE_HSMC1R 0x15F08 -#define IXGBE_SWSR 0x15F10 -#define IXGBE_HFDR 0x15FE8 -#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */ - -#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */ +#define IXGBE_HICR 0x15F00 +#define IXGBE_FWSTS 0x15F0C +#define IXGBE_HSMC0R 0x15F04 +#define IXGBE_HSMC1R 
0x15F08 +#define IXGBE_SWSR 0x15F10 +#define IXGBE_HFDR 0x15FE8 +#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */ + +#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */ /* Driver sets this bit when done to put command in RAM */ -#define IXGBE_HICR_C 0x02 -#define IXGBE_HICR_SV 0x04 /* Status Validity */ -#define IXGBE_HICR_FW_RESET_ENABLE 0x40 -#define IXGBE_HICR_FW_RESET 0x80 +#define IXGBE_HICR_C 0x02 +#define IXGBE_HICR_SV 0x04 /* Status Validity */ +#define IXGBE_HICR_FW_RESET_ENABLE 0x40 +#define IXGBE_HICR_FW_RESET 0x80 /* PCI-E registers */ -#define IXGBE_GCR 0x11000 -#define IXGBE_GTV 0x11004 -#define IXGBE_FUNCTAG 0x11008 -#define IXGBE_GLT 0x1100C -#define IXGBE_GSCL_1 0x11010 -#define IXGBE_GSCL_2 0x11014 -#define IXGBE_GSCL_3 0x11018 -#define IXGBE_GSCL_4 0x1101C -#define IXGBE_GSCN_0 0x11020 -#define IXGBE_GSCN_1 0x11024 -#define IXGBE_GSCN_2 0x11028 -#define IXGBE_GSCN_3 0x1102C -#define IXGBE_FACTPS_8259X 0x10150 -#define IXGBE_FACTPS_X540 IXGBE_FACTPS_8259X -#define IXGBE_FACTPS_X550 IXGBE_FACTPS_8259X -#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS_8259X +#define IXGBE_GCR 0x11000 +#define IXGBE_GTV 0x11004 +#define IXGBE_FUNCTAG 0x11008 +#define IXGBE_GLT 0x1100C +#define IXGBE_PCIEPIPEADR 0x11004 +#define IXGBE_PCIEPIPEDAT 0x11008 +#define IXGBE_GSCL_1 0x11010 +#define IXGBE_GSCL_2 0x11014 +#define IXGBE_GSCL_1_X540 IXGBE_GSCL_1 +#define IXGBE_GSCL_2_X540 IXGBE_GSCL_2 +#define IXGBE_GSCL_3 0x11018 +#define IXGBE_GSCL_4 0x1101C +#define IXGBE_GSCN_0 0x11020 +#define IXGBE_GSCN_1 0x11024 +#define IXGBE_GSCN_2 0x11028 +#define IXGBE_GSCN_3 0x1102C +#define IXGBE_GSCN_0_X540 IXGBE_GSCN_0 +#define IXGBE_GSCN_1_X540 IXGBE_GSCN_1 +#define IXGBE_GSCN_2_X540 IXGBE_GSCN_2 +#define IXGBE_GSCN_3_X540 IXGBE_GSCN_3 +#define IXGBE_FACTPS 0x10150 +#define IXGBE_FACTPS_X540 IXGBE_FACTPS +#define IXGBE_GSCL_1_X550 0x11800 +#define IXGBE_GSCL_2_X550 0x11804 +#define IXGBE_GSCL_1_X550EM_x IXGBE_GSCL_1_X550 +#define IXGBE_GSCL_2_X550EM_x IXGBE_GSCL_2_X550 +#define 
IXGBE_GSCN_0_X550 0x11820 +#define IXGBE_GSCN_1_X550 0x11824 +#define IXGBE_GSCN_2_X550 0x11828 +#define IXGBE_GSCN_3_X550 0x1182C +#define IXGBE_GSCN_0_X550EM_x IXGBE_GSCN_0_X550 +#define IXGBE_GSCN_1_X550EM_x IXGBE_GSCN_1_X550 +#define IXGBE_GSCN_2_X550EM_x IXGBE_GSCN_2_X550 +#define IXGBE_GSCN_3_X550EM_x IXGBE_GSCN_3_X550 +#define IXGBE_FACTPS_X550 IXGBE_FACTPS +#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS +#define IXGBE_GSCL_1_X550EM_a IXGBE_GSCL_1_X550 +#define IXGBE_GSCL_2_X550EM_a IXGBE_GSCL_2_X550 +#define IXGBE_GSCN_0_X550EM_a IXGBE_GSCN_0_X550 +#define IXGBE_GSCN_1_X550EM_a IXGBE_GSCN_1_X550 +#define IXGBE_GSCN_2_X550EM_a IXGBE_GSCN_2_X550 +#define IXGBE_GSCN_3_X550EM_a IXGBE_GSCN_3_X550 #define IXGBE_FACTPS_X550EM_a 0x15FEC -#define IXGBE_FACTPS(_hw) IXGBE_BY_MAC((_hw), FACTPS) +#define IXGBE_FACTPS_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FACTPS) -#define IXGBE_PCIEANACTL 0x11040 -#define IXGBE_SWSM_8259X 0x10140 -#define IXGBE_SWSM_X540 IXGBE_SWSM_8259X -#define IXGBE_SWSM_X550 IXGBE_SWSM_8259X -#define IXGBE_SWSM_X550EM_x IXGBE_SWSM_8259X +#define IXGBE_PCIEANACTL 0x11040 +#define IXGBE_SWSM 0x10140 +#define IXGBE_SWSM_X540 IXGBE_SWSM +#define IXGBE_SWSM_X550 IXGBE_SWSM +#define IXGBE_SWSM_X550EM_x IXGBE_SWSM #define IXGBE_SWSM_X550EM_a 0x15F70 -#define IXGBE_SWSM(_hw) IXGBE_BY_MAC((_hw), SWSM) -#define IXGBE_FWSM_8259X 0x10148 -#define IXGBE_FWSM_X540 IXGBE_FWSM_8259X -#define IXGBE_FWSM_X550 IXGBE_FWSM_8259X -#define IXGBE_FWSM_X550EM_x IXGBE_FWSM_8259X +#define IXGBE_SWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWSM) + +#define IXGBE_FWSM 0x10148 +#define IXGBE_FWSM_X540 IXGBE_FWSM +#define IXGBE_FWSM_X550 IXGBE_FWSM +#define IXGBE_FWSM_X550EM_x IXGBE_FWSM #define IXGBE_FWSM_X550EM_a 0x15F74 -#define IXGBE_FWSM(_hw) IXGBE_BY_MAC((_hw), FWSM) -#define IXGBE_GSSR 0x10160 -#define IXGBE_MREVID 0x11064 -#define IXGBE_DCA_ID 0x11070 -#define IXGBE_DCA_CTRL 0x11074 -#define IXGBE_SWFW_SYNC_8259X IXGBE_GSSR -#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC_8259X -#define 
IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC_8259X -#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC_8259X +#define IXGBE_FWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FWSM) + +#define IXGBE_SWFW_SYNC IXGBE_GSSR +#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC +#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC +#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC #define IXGBE_SWFW_SYNC_X550EM_a 0x15F78 -#define IXGBE_SWFW_SYNC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC) - -/* PCIe registers 82599-specific */ -#define IXGBE_GCR_EXT 0x11050 -#define IXGBE_GSCL_5_82599 0x11030 -#define IXGBE_GSCL_6_82599 0x11034 -#define IXGBE_GSCL_7_82599 0x11038 -#define IXGBE_GSCL_8_82599 0x1103C -#define IXGBE_PHYADR_82599 0x11040 -#define IXGBE_PHYDAT_82599 0x11044 -#define IXGBE_PHYCTL_82599 0x11048 -#define IXGBE_PBACLR_82599 0x11068 - -#define IXGBE_CIAA_8259X 0x11088 -#define IXGBE_CIAA_X540 IXGBE_CIAA_8259X +#define IXGBE_SWFW_SYNC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC) + +#define IXGBE_GSSR 0x10160 +#define IXGBE_MREVID 0x11064 +#define IXGBE_DCA_ID 0x11070 +#define IXGBE_DCA_CTRL 0x11074 + +/* PCI-E registers 82599-Specific */ +#define IXGBE_GCR_EXT 0x11050 +#define IXGBE_GSCL_5_82599 0x11030 +#define IXGBE_GSCL_6_82599 0x11034 +#define IXGBE_GSCL_7_82599 0x11038 +#define IXGBE_GSCL_8_82599 0x1103C +#define IXGBE_GSCL_5_X540 IXGBE_GSCL_5_82599 +#define IXGBE_GSCL_6_X540 IXGBE_GSCL_6_82599 +#define IXGBE_GSCL_7_X540 IXGBE_GSCL_7_82599 +#define IXGBE_GSCL_8_X540 IXGBE_GSCL_8_82599 +#define IXGBE_PHYADR_82599 0x11040 +#define IXGBE_PHYDAT_82599 0x11044 +#define IXGBE_PHYCTL_82599 0x11048 +#define IXGBE_PBACLR_82599 0x11068 +#define IXGBE_CIAA 0x11088 +#define IXGBE_CIAD 0x1108C +#define IXGBE_CIAA_82599 IXGBE_CIAA +#define IXGBE_CIAD_82599 IXGBE_CIAD +#define IXGBE_CIAA_X540 IXGBE_CIAA +#define IXGBE_CIAD_X540 IXGBE_CIAD +#define IXGBE_GSCL_5_X550 0x11810 +#define IXGBE_GSCL_6_X550 0x11814 +#define IXGBE_GSCL_7_X550 0x11818 +#define IXGBE_GSCL_8_X550 0x1181C +#define IXGBE_GSCL_5_X550EM_x IXGBE_GSCL_5_X550 
+#define IXGBE_GSCL_6_X550EM_x IXGBE_GSCL_6_X550 +#define IXGBE_GSCL_7_X550EM_x IXGBE_GSCL_7_X550 +#define IXGBE_GSCL_8_X550EM_x IXGBE_GSCL_8_X550 #define IXGBE_CIAA_X550 0x11508 -#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550 -#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550 -#define IXGBE_CIAA(_hw) IXGBE_BY_MAC((_hw), CIAA) - -#define IXGBE_CIAD_8259X 0x1108C -#define IXGBE_CIAD_X540 IXGBE_CIAD_8259X #define IXGBE_CIAD_X550 0x11510 +#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550 #define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550 +#define IXGBE_GSCL_5_X550EM_a IXGBE_GSCL_5_X550 +#define IXGBE_GSCL_6_X550EM_a IXGBE_GSCL_6_X550 +#define IXGBE_GSCL_7_X550EM_a IXGBE_GSCL_7_X550 +#define IXGBE_GSCL_8_X550EM_a IXGBE_GSCL_8_X550 +#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550 #define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550 -#define IXGBE_CIAD(_hw) IXGBE_BY_MAC((_hw), CIAD) - -#define IXGBE_PICAUSE 0x110B0 -#define IXGBE_PIENA 0x110B8 -#define IXGBE_CDQ_MBR_82599 0x110B4 -#define IXGBE_PCIESPARE 0x110BC -#define IXGBE_MISC_REG_82599 0x110F0 -#define IXGBE_ECC_CTRL_0_82599 0x11100 -#define IXGBE_ECC_CTRL_1_82599 0x11104 -#define IXGBE_ECC_STATUS_82599 0x110E0 -#define IXGBE_BAR_CTRL_82599 0x110F4 +#define IXGBE_CIAA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAA) +#define IXGBE_CIAD_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAD) +#define IXGBE_PICAUSE 0x110B0 +#define IXGBE_PIENA 0x110B8 +#define IXGBE_CDQ_MBR_82599 0x110B4 +#define IXGBE_PCIESPARE 0x110BC +#define IXGBE_MISC_REG_82599 0x110F0 +#define IXGBE_ECC_CTRL_0_82599 0x11100 +#define IXGBE_ECC_CTRL_1_82599 0x11104 +#define IXGBE_ECC_STATUS_82599 0x110E0 +#define IXGBE_BAR_CTRL_82599 0x110F4 /* PCI Express Control */ -#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 -#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 -#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 -#define IXGBE_GCR_CAP_VER2 0x00040000 - -#define IXGBE_GCR_EXT_MSIX_EN 0x80000000 -#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000 -#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001 -#define 
IXGBE_GCR_EXT_VT_MODE_32 0x00000002 -#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 -#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ +#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 +#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define IXGBE_GCR_CAP_VER2 0x00040000 + +#define IXGBE_GCR_EXT_MSIX_EN 0x80000000 +#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000 +#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001 +#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 +#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 +#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ IXGBE_GCR_EXT_VT_MODE_64) - +#define IXGBE_GCR_EXT_VT_MODE_MASK 0x00000003 /* Time Sync Registers */ -#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ -#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ -#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */ -#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */ -#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */ -#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */ -#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */ -#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */ -#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ -#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ -#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ -#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */ -#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ -#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ -#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ -#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */ -#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */ -#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ -#define IXGBE_TRGTTIML1 0x08C2C /* 
Target Time Register 1 Low - RW */ -#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ -#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */ -#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */ -#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ -#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ -#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ -#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ -#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ -#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ -#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */ +#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ +#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ +#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */ +#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */ +#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */ +#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */ +#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */ +#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */ +#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ +#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ +#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ +#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */ +#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ +#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ +#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ +#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */ +#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */ +#define 
IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ +#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ +#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ +#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */ +#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */ +#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ +#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ +#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ +#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ +#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ +#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ +#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */ +#define IXGBE_TSICR 0x08C60 /* TimeSync Interrupt Cause Register - WO */ +#define IXGBE_TSSDP 0x0003C /* TimeSync SDP Configuration Register - RW */ /* Diagnostic Registers */ -#define IXGBE_RDSTATCTL 0x02C20 -#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ -#define IXGBE_RDHMPN 0x02F08 -#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) -#define IXGBE_RDPROBE 0x02F20 -#define IXGBE_RDMAM 0x02F30 -#define IXGBE_RDMAD 0x02F34 -#define IXGBE_TDSTATCTL 0x07C20 -#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ -#define IXGBE_TDHMPN 0x07F08 -#define IXGBE_TDHMPN2 0x082FC -#define IXGBE_TXDESCIC 0x082CC -#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) -#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) -#define IXGBE_TDPROBE 0x07F20 -#define IXGBE_TXBUFCTRL 0x0C600 -#define IXGBE_TXBUFDATA(_i) (0x0C610 + ((_i) * 4)) /* 4 of these (0-3) */ -#define IXGBE_RXBUFCTRL 0x03600 -#define IXGBE_RXBUFDATA(_i) (0x03610 + ((_i) * 4)) /* 4 of these (0-3) */ -#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ -#define IXGBE_RFVAL 0x050A4 -#define 
IXGBE_MDFTC1 0x042B8 -#define IXGBE_MDFTC2 0x042C0 -#define IXGBE_MDFTFIFO1 0x042C4 -#define IXGBE_MDFTFIFO2 0x042C8 -#define IXGBE_MDFTS 0x042CC -#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ -#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ -#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ -#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/ -#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ -#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ -#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ -#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ -#define IXGBE_PCIEECCCTL 0x1106C -#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/ -#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/ -#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/ -#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/ -#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/ -#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/ -#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/ -#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/ -#define IXGBE_PCIEECCCTL0 0x11100 -#define IXGBE_PCIEECCCTL1 0x11104 -#define IXGBE_RXDBUECC 0x03F70 -#define IXGBE_TXDBUECC 0x0CF70 -#define IXGBE_RXDBUEST 0x03F74 -#define IXGBE_TXDBUEST 0x0CF74 -#define IXGBE_PBTXECC 0x0C300 -#define IXGBE_PBRXECC 0x03300 -#define IXGBE_GHECCR 0x110B0 +#define IXGBE_RDSTATCTL 0x02C20 +#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ +#define IXGBE_RDHMPN 0x02F08 +#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) +#define IXGBE_RDPROBE 0x02F20 +#define IXGBE_RDMAM 0x02F30 +#define IXGBE_RDMAD 0x02F34 
+#define IXGBE_TDHMPN 0x07F08 +#define IXGBE_TDHMPN2 0x082FC +#define IXGBE_TXDESCIC 0x082CC +#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) +#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) +#define IXGBE_TDPROBE 0x07F20 +#define IXGBE_TXBUFCTRL 0x0C600 +#define IXGBE_TXBUFDATA0 0x0C610 +#define IXGBE_TXBUFDATA1 0x0C614 +#define IXGBE_TXBUFDATA2 0x0C618 +#define IXGBE_TXBUFDATA3 0x0C61C +#define IXGBE_RXBUFCTRL 0x03600 +#define IXGBE_RXBUFDATA0 0x03610 +#define IXGBE_RXBUFDATA1 0x03614 +#define IXGBE_RXBUFDATA2 0x03618 +#define IXGBE_RXBUFDATA3 0x0361C +#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_RFVAL 0x050A4 +#define IXGBE_MDFTC1 0x042B8 +#define IXGBE_MDFTC2 0x042C0 +#define IXGBE_MDFTFIFO1 0x042C4 +#define IXGBE_MDFTFIFO2 0x042C8 +#define IXGBE_MDFTS 0x042CC +#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ +#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ +#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ +#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/ +#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ +#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ +#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ +#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ +#define IXGBE_PCIEECCCTL 0x1106C +#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/ +#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/ +#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/ +#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/ +#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/ +#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/ +#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 
of these C140-C14C*/ +#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/ +#define IXGBE_PCIEECCCTL0 0x11100 +#define IXGBE_PCIEECCCTL1 0x11104 +#define IXGBE_RXDBUECC 0x03F70 +#define IXGBE_TXDBUECC 0x0CF70 +#define IXGBE_RXDBUEST 0x03F74 +#define IXGBE_TXDBUEST 0x0CF74 +#define IXGBE_PBTXECC 0x0C300 +#define IXGBE_PBRXECC 0x03300 +#define IXGBE_GHECCR 0x110B0 /* MAC Registers */ -#define IXGBE_PCS1GCFIG 0x04200 -#define IXGBE_PCS1GLCTL 0x04208 -#define IXGBE_PCS1GLSTA 0x0420C -#define IXGBE_PCS1GDBG0 0x04210 -#define IXGBE_PCS1GDBG1 0x04214 -#define IXGBE_PCS1GANA 0x04218 -#define IXGBE_PCS1GANLP 0x0421C -#define IXGBE_PCS1GANNP 0x04220 -#define IXGBE_PCS1GANLPNP 0x04224 -#define IXGBE_HLREG0 0x04240 -#define IXGBE_HLREG1 0x04244 -#define IXGBE_PAP 0x04248 -#define IXGBE_MACA 0x0424C -#define IXGBE_APAE 0x04250 -#define IXGBE_ARD 0x04254 -#define IXGBE_AIS 0x04258 -#define IXGBE_MSCA 0x0425C -#define IXGBE_MSRWD 0x04260 -#define IXGBE_MLADD 0x04264 -#define IXGBE_MHADD 0x04268 -#define IXGBE_MAXFRS 0x04268 -#define IXGBE_TREG 0x0426C -#define IXGBE_PCSS1 0x04288 -#define IXGBE_PCSS2 0x0428C -#define IXGBE_XPCSS 0x04290 -#define IXGBE_MFLCN 0x04294 -#define IXGBE_SERDESC 0x04298 -#define IXGBE_MAC_SGMII_BUSY 0x04298 -#define IXGBE_MACS 0x0429C -#define IXGBE_AUTOC 0x042A0 -#define IXGBE_LINKS 0x042A4 -#define IXGBE_LINKS2 0x04324 -#define IXGBE_AUTOC2 0x042A8 -#define IXGBE_AUTOC3 0x042AC -#define IXGBE_ANLP1 0x042B0 -#define IXGBE_ANLP2 0x042B4 -#define IXGBE_MACC 0x04330 -#define IXGBE_ATLASCTL 0x04800 -#define IXGBE_MMNGC 0x042D0 -#define IXGBE_ANLPNP1 0x042D4 -#define IXGBE_ANLPNP2 0x042D8 -#define IXGBE_KRPCSFC 0x042E0 -#define IXGBE_KRPCSS 0x042E4 -#define IXGBE_FECS1 0x042E8 -#define IXGBE_FECS2 0x042EC -#define IXGBE_SMADARCTL 0x14F10 -#define IXGBE_MPVC 0x04318 -#define IXGBE_SGMIIC 0x04314 +#define IXGBE_PCS1GCFIG 0x04200 +#define IXGBE_PCS1GLCTL 0x04208 +#define IXGBE_PCS1GLSTA 0x0420C +#define IXGBE_PCS1GDBG0 0x04210 +#define 
IXGBE_PCS1GDBG1 0x04214 +#define IXGBE_PCS1GANA 0x04218 +#define IXGBE_PCS1GANLP 0x0421C +#define IXGBE_PCS1GANNP 0x04220 +#define IXGBE_PCS1GANLPNP 0x04224 +#define IXGBE_HLREG0 0x04240 +#define IXGBE_HLREG1 0x04244 +#define IXGBE_PAP 0x04248 +#define IXGBE_MACA 0x0424C +#define IXGBE_APAE 0x04250 +#define IXGBE_ARD 0x04254 +#define IXGBE_AIS 0x04258 +#define IXGBE_MSCA 0x0425C +#define IXGBE_MSRWD 0x04260 +#define IXGBE_MLADD 0x04264 +#define IXGBE_MHADD 0x04268 +#define IXGBE_MAXFRS 0x04268 +#define IXGBE_TREG 0x0426C +#define IXGBE_PCSS1 0x04288 +#define IXGBE_PCSS2 0x0428C +#define IXGBE_XPCSS 0x04290 +#define IXGBE_MFLCN 0x04294 +#define IXGBE_SERDESC 0x04298 +#define IXGBE_MAC_SGMII_BUSY 0x04298 +#define IXGBE_MACS 0x0429C +#define IXGBE_AUTOC 0x042A0 +#define IXGBE_LINKS 0x042A4 +#define IXGBE_LINKS2 0x04324 +#define IXGBE_AUTOC2 0x042A8 +#define IXGBE_AUTOC3 0x042AC +#define IXGBE_ANLP1 0x042B0 +#define IXGBE_ANLP2 0x042B4 +#define IXGBE_MACC 0x04330 +#define IXGBE_ATLASCTL 0x04800 +#define IXGBE_MMNGC 0x042D0 +#define IXGBE_ANLPNP1 0x042D4 +#define IXGBE_ANLPNP2 0x042D8 +#define IXGBE_KRPCSFC 0x042E0 +#define IXGBE_KRPCSS 0x042E4 +#define IXGBE_FECS1 0x042E8 +#define IXGBE_FECS2 0x042EC +#define IXGBE_SMADARCTL 0x14F10 +#define IXGBE_MPVC 0x04318 +#define IXGBE_SGMIIC 0x04314 /* Statistics Registers */ -#define IXGBE_RXNFGPC 0x041B0 -#define IXGBE_RXNFGBCL 0x041B4 -#define IXGBE_RXNFGBCH 0x041B8 -#define IXGBE_RXDGPC 0x02F50 -#define IXGBE_RXDGBCL 0x02F54 -#define IXGBE_RXDGBCH 0x02F58 -#define IXGBE_RXDDGPC 0x02F5C -#define IXGBE_RXDDGBCL 0x02F60 -#define IXGBE_RXDDGBCH 0x02F64 -#define IXGBE_RXLPBKGPC 0x02F68 -#define IXGBE_RXLPBKGBCL 0x02F6C -#define IXGBE_RXLPBKGBCH 0x02F70 -#define IXGBE_RXDLPBKGPC 0x02F74 -#define IXGBE_RXDLPBKGBCL 0x02F78 -#define IXGBE_RXDLPBKGBCH 0x02F7C -#define IXGBE_TXDGPC 0x087A0 -#define IXGBE_TXDGBCL 0x087A4 -#define IXGBE_TXDGBCH 0x087A8 - -#define IXGBE_RXDSTATCTRL 0x02F40 +#define IXGBE_RXNFGPC 0x041B0 +#define 
IXGBE_RXNFGBCL 0x041B4 +#define IXGBE_RXNFGBCH 0x041B8 +#define IXGBE_RXDGPC 0x02F50 +#define IXGBE_RXDGBCL 0x02F54 +#define IXGBE_RXDGBCH 0x02F58 +#define IXGBE_RXDDGPC 0x02F5C +#define IXGBE_RXDDGBCL 0x02F60 +#define IXGBE_RXDDGBCH 0x02F64 +#define IXGBE_RXLPBKGPC 0x02F68 +#define IXGBE_RXLPBKGBCL 0x02F6C +#define IXGBE_RXLPBKGBCH 0x02F70 +#define IXGBE_RXDLPBKGPC 0x02F74 +#define IXGBE_RXDLPBKGBCL 0x02F78 +#define IXGBE_RXDLPBKGBCH 0x02F7C +#define IXGBE_TXDGPC 0x087A0 +#define IXGBE_TXDGBCL 0x087A4 +#define IXGBE_TXDGBCH 0x087A8 + +#define IXGBE_RXDSTATCTRL 0x02F40 /* Copper Pond 2 link timeout */ #define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 /* Omer CORECTL */ -#define IXGBE_CORECTL 0x014F00 +#define IXGBE_CORECTL 0x014F00 /* BARCTRL */ -#define IXGBE_BARCTRL 0x110F4 -#define IXGBE_BARCTRL_FLSIZE 0x0700 -#define IXGBE_BARCTRL_FLSIZE_SHIFT 8 -#define IXGBE_BARCTRL_CSRSIZE 0x2000 +#define IXGBE_BARCTRL 0x110F4 +#define IXGBE_BARCTRL_FLSIZE 0x0700 +#define IXGBE_BARCTRL_FLSIZE_SHIFT 8 +#define IXGBE_BARCTRL_CSRSIZE 0x2000 /* RSCCTL Bit Masks */ -#define IXGBE_RSCCTL_RSCEN 0x01 -#define IXGBE_RSCCTL_MAXDESC_1 0x00 -#define IXGBE_RSCCTL_MAXDESC_4 0x04 -#define IXGBE_RSCCTL_MAXDESC_8 0x08 -#define IXGBE_RSCCTL_MAXDESC_16 0x0C +#define IXGBE_RSCCTL_RSCEN 0x01 +#define IXGBE_RSCCTL_MAXDESC_1 0x00 +#define IXGBE_RSCCTL_MAXDESC_4 0x04 +#define IXGBE_RSCCTL_MAXDESC_8 0x08 +#define IXGBE_RSCCTL_MAXDESC_16 0x0C +#define IXGBE_RSCCTL_TS_DIS 0x02 /* RSCDBU Bit Masks */ -#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F -#define IXGBE_RSCDBU_RSCACKDIS 0x00000080 +#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F +#define IXGBE_RSCDBU_RSCACKDIS 0x00000080 /* RDRXCTL Bit Masks */ -#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */ -#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ -#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad small packet */ -#define IXGBE_RDRXCTL_MVMEN 0x00000020 -#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ 
-#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ -#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */ -#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */ -#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */ -#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */ -#define IXGBE_RDRXCTL_MBINTEN 0x10000000 -#define IXGBE_RDRXCTL_MDP_EN 0x20000000 +#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min THLD Size */ +#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ +#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad Small Packet */ +#define IXGBE_RDRXCTL_MVMEN 0x00000020 +#define IXGBE_RDRXCTL_RSC_PUSH_DIS 0x00000020 +#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ +#define IXGBE_RDRXCTL_RSC_PUSH 0x00000080 +#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ +#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */ +#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI*/ +#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC ena */ +#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC ena */ +#define IXGBE_RDRXCTL_MBINTEN 0x10000000 +#define IXGBE_RDRXCTL_MDP_EN 0x20000000 /* RQTC Bit Masks and Shifts */ -#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) -#define IXGBE_RQTC_TC0_MASK (0x7 << 0) -#define IXGBE_RQTC_TC1_MASK (0x7 << 4) -#define IXGBE_RQTC_TC2_MASK (0x7 << 8) -#define IXGBE_RQTC_TC3_MASK (0x7 << 12) -#define IXGBE_RQTC_TC4_MASK (0x7 << 16) -#define IXGBE_RQTC_TC5_MASK (0x7 << 20) -#define IXGBE_RQTC_TC6_MASK (0x7 << 24) -#define IXGBE_RQTC_TC7_MASK (0x7 << 28) +#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) +#define IXGBE_RQTC_TC0_MASK (0x7 << 0) +#define IXGBE_RQTC_TC1_MASK (0x7 << 4) +#define IXGBE_RQTC_TC2_MASK (0x7 << 8) +#define IXGBE_RQTC_TC3_MASK (0x7 << 12) +#define IXGBE_RQTC_TC4_MASK (0x7 << 16) +#define IXGBE_RQTC_TC5_MASK (0x7 << 20) +#define 
IXGBE_RQTC_TC6_MASK (0x7 << 24) +#define IXGBE_RQTC_TC7_MASK (0x7 << 28) /* PSRTYPE.RQPL Bit masks and shift */ -#define IXGBE_PSRTYPE_RQPL_MASK 0x7 -#define IXGBE_PSRTYPE_RQPL_SHIFT 29 +#define IXGBE_PSRTYPE_RQPL_MASK 0x7 +#define IXGBE_PSRTYPE_RQPL_SHIFT 29 /* CTRL Bit Masks */ -#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ -#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */ -#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ -#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST) +#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ +#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */ +#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ +#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST) /* FACTPS */ -#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageblility Clock Gated */ -#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ +#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageblility Clock Gated */ +#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ /* MHADD Bit Masks */ -#define IXGBE_MHADD_MFS_MASK 0xFFFF0000 -#define IXGBE_MHADD_MFS_SHIFT 16 +#define IXGBE_MHADD_MFS_MASK 0xFFFF0000 +#define IXGBE_MHADD_MFS_SHIFT 16 /* Extended Device Control */ -#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */ -#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */ -#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ -#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */ +#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */ +#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ /* Direct Cache Access (DCA) definitions */ -#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ -#define 
IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ - -#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ -#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ - -#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ -#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */ -#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */ -#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */ -#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */ -#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */ -#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */ -#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */ -#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */ - -#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ -#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ -#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ -#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */ -#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */ -#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */ -#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */ -#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ +#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */ +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */ +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* 
Rx Desc header ena */ +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */ +#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */ +#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ +#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ + +#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ +#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ /* MSCA Bit Masks */ -#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Address (new protocol) */ -#define IXGBE_MSCA_NP_ADDR_SHIFT 0 -#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Device Type (new protocol) */ -#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old protocol */ -#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */ -#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/ -#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */ -#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ -#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ -#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */ -#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (read) */ -#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (read, auto inc)*/ -#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ -#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ -#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */ -#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old protocol) */ -#define 
IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */ -#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */ +#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Addr (new prot) */ +#define IXGBE_MSCA_NP_ADDR_SHIFT 0 +#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Dev Type (new prot) */ +#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old prot */ +#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */ +#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/ +#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */ +#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ +#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ +#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (wr) */ +#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (rd) */ +#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (rd auto inc)*/ +#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ +#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ +#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new prot) */ +#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old prot) */ +#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */ +#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress ena */ /* MSRWD bit masks */ -#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF -#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 -#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 -#define IXGBE_MSRWD_READ_DATA_SHIFT 16 +#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF +#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 +#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 +#define IXGBE_MSRWD_READ_DATA_SHIFT 16 /* Atlas registers */ -#define IXGBE_ATLAS_PDN_LPBK 0x24 -#define IXGBE_ATLAS_PDN_10G 0xB -#define IXGBE_ATLAS_PDN_1G 0xC -#define IXGBE_ATLAS_PDN_AN 0xD +#define IXGBE_ATLAS_PDN_LPBK 0x24 +#define IXGBE_ATLAS_PDN_10G 0xB +#define IXGBE_ATLAS_PDN_1G 0xC +#define IXGBE_ATLAS_PDN_AN 0xD /* Atlas bit masks */ -#define 
IXGBE_ATLASCTL_WRITE_CMD 0x00010000 -#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10 -#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0 -#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 -#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 +#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000 +#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10 +#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 /* Omer bit masks */ -#define IXGBE_CORECTL_WRITE_CMD 0x00010000 - -/* MDIO definitions */ +#define IXGBE_CORECTL_WRITE_CMD 0x00010000 +/* Device Type definitions for new protocol MDIO commands */ #define IXGBE_MDIO_ZERO_DEV_TYPE 0x0 #define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 -#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 +#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 #define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 #define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ #define IXGBE_TWINAX_DEV 1 -#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ +#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */ -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Ctrl Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 #define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ #define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 
/* AUTO_NEG Status Reg */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */ #define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */ #define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ #define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */ #define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */ - -#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */ +#define IXGBE_AUTO_NEG_10GBASE_EEE_ADVT 0x8 /* AUTO NEG EEE 10GBaseT Advt */ +#define IXGBE_AUTO_NEG_1000BASE_EEE_ADVT 0x4 /* AUTO NEG EEE 1000BaseT Advt */ +#define IXGBE_AUTO_NEG_100BASE_EEE_ADVT 0x2 /* AUTO NEG EEE 100BaseT Advt */ +#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ +#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ +#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ +#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ +#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ +#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ +#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ +#define IXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */ +#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */ +#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */ +#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ +#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */ +#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */ #define IXGBE_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG Rx LP Status Reg */ -#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT */ -#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */ -#define 
IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */ +#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT Cap */ +#define IXGBE_AUTO_NEG_LP_10GBASE_CAP 0x0800 /* AUTO NEG Rx LP 10GBaseT Cap */ +#define IXGBE_AUTO_NEG_10GBASET_STAT 0x0021 /* AUTO NEG 10G BaseT Stat */ + +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */ +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */ #define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */ -#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */ +#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */ #define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK 0xFF00 /* int std mask */ #define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG 0xFC00 /* chip std int flag */ #define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */ @@ -1363,251 +1589,300 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */ #define IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT 0x0010 /* device fault */ #define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */ -#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* global fault msg */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* Global Fault Message */ #define IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP 0x8007 /* high temp failure */ #define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */ -/* autoneg vendor alarm int enable */ -#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 +#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 /* autoneg vendor alarm int enable */ #define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */ #define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */ #define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */ #define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */ -#define IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN 0x0010 /*int dev fault enable */ - +#define 
IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN 0x0010 /* int dev fault enable */ +#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ -#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Stat Reg */ -#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */ -#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */ -#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Tx Dis Reg */ -#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Tx Dis */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ +#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */ +#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */ +#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Transmit Dis Reg */ +#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Transmit Dis */ + +#define IXGBE_PCRC8ECL 0x0E810 /* PCR CRC-8 Error Count Lo */ +#define IXGBE_PCRC8ECH 0x0E811 /* PCR CRC-8 Error Count Hi */ +#define IXGBE_PCRC8ECH_MASK 0x1F +#define IXGBE_LDPCECL 0x0E820 /* PCR Uncorrected Error Count Lo */ +#define IXGBE_LDPCECH 0x0E821 /* PCR Uncorrected Error Count Hi */ /* MII clause 22/28 definitions */ +#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 + +#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register*/ +#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */ + +#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */ + +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */ +#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */ 
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */ + +#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */ #define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ -#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ -#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ -#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ -#define IXGBE_MII_AUTONEG_REG 0x0 +#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ +#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400 +#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800 +#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ +#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ +#define IXGBE_MII_RESTART 0x200 +#define IXGBE_MII_AUTONEG_COMPLETE 0x20 +#define IXGBE_MII_AUTONEG_LINK_UP 0x04 +#define IXGBE_MII_AUTONEG_REG 0x0 -#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 -#define IXGBE_MAX_PHY_ADDR 32 +#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 +#define IXGBE_MAX_PHY_ADDR 32 /* PHY IDs*/ -#define TN1010_PHY_ID 0x00A19410 -#define TNX_FW_REV 0xB -#define 
X540_PHY_ID 0x01540200 +#define TN1010_PHY_ID 0x00A19410 +#define TNX_FW_REV 0xB +#define X540_PHY_ID 0x01540200 #define X550_PHY_ID2 0x01540223 #define X550_PHY_ID3 0x01540221 -#define X557_PHY_ID 0x01540240 +#define X557_PHY_ID 0x01540240 #define X557_PHY_ID2 0x01540250 -#define QT2022_PHY_ID 0x0043A400 -#define ATH_PHY_ID 0x03429050 -#define AQ_FW_REV 0x20 +#define AQ_FW_REV 0x20 +#define QT2022_PHY_ID 0x0043A400 +#define ATH_PHY_ID 0x03429050 /* PHY Types */ -#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 +#define IXGBE_M88E1500_E_PHY_ID 0x01410DD0 +#define IXGBE_M88E1543_E_PHY_ID 0x01410EA0 /* Special PHY Init Routine */ -#define IXGBE_PHY_INIT_OFFSET_NL 0x002B -#define IXGBE_PHY_INIT_END_NL 0xFFFF -#define IXGBE_CONTROL_MASK_NL 0xF000 -#define IXGBE_DATA_MASK_NL 0x0FFF -#define IXGBE_CONTROL_SHIFT_NL 12 -#define IXGBE_DELAY_NL 0 -#define IXGBE_DATA_NL 1 -#define IXGBE_CONTROL_NL 0x000F -#define IXGBE_CONTROL_EOL_NL 0x0FFF -#define IXGBE_CONTROL_SOL_NL 0x0000 +#define IXGBE_PHY_INIT_OFFSET_NL 0x002B +#define IXGBE_PHY_INIT_END_NL 0xFFFF +#define IXGBE_CONTROL_MASK_NL 0xF000 +#define IXGBE_DATA_MASK_NL 0x0FFF +#define IXGBE_CONTROL_SHIFT_NL 12 +#define IXGBE_DELAY_NL 0 +#define IXGBE_DATA_NL 1 +#define IXGBE_CONTROL_NL 0x000F +#define IXGBE_CONTROL_EOL_NL 0x0FFF +#define IXGBE_CONTROL_SOL_NL 0x0000 /* General purpose Interrupt Enable */ -#define IXGBE_SDP0_GPIEN_8259X 0x00000001 /* SDP0 */ -#define IXGBE_SDP1_GPIEN_8259X 0x00000002 /* SDP1 */ -#define IXGBE_SDP2_GPIEN_8259X 0x00000004 /* SDP2 */ -#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */ -#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */ -#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */ -#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540 -#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540 -#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ +#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ +#define 
IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */ +#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */ +#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */ +#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */ +#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540 #define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540 #define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540 #define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540 #define IXGBE_SDP0_GPIEN_X550EM_a IXGBE_SDP0_GPIEN_X540 #define IXGBE_SDP1_GPIEN_X550EM_a IXGBE_SDP1_GPIEN_X540 #define IXGBE_SDP2_GPIEN_X550EM_a IXGBE_SDP2_GPIEN_X540 -#define IXGBE_SDP0_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN) -#define IXGBE_SDP1_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN) -#define IXGBE_SDP2_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN) - -#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ -#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ -#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ -#define IXGBE_GPIE_EIAME 0x40000000 -#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 -#define IXGBE_GPIE_RSC_DELAY_SHIFT 11 -#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ -#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ -#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ -#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */ +#define IXGBE_SDP0_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN) +#define IXGBE_SDP1_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN) +#define IXGBE_SDP2_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN) + +#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ +#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ +#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ +#define IXGBE_GPIE_EIAME 0x40000000 +#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 
+#define IXGBE_GPIE_RSC_DELAY_SHIFT 11 +#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ +#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ +#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ +#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */ /* Packet Buffer Initialization */ -#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */ -#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ -#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ -#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ -#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ -#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ -#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer*/ -#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer*/ - -#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ +#define IXGBE_MAX_PACKET_BUFFERS 8 + +#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */ +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ +#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ +#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer */ +#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer */ + +#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ #define IXGBE_MAX_PB 8 /* Packet buffer allocation strategies */ enum { - PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ + PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ #define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL - PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ + PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ #define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED }; /* Transmit Flow Control status */ 
-#define IXGBE_TFCS_TXOFF 0x00000001 -#define IXGBE_TFCS_TXOFF0 0x00000100 -#define IXGBE_TFCS_TXOFF1 0x00000200 -#define IXGBE_TFCS_TXOFF2 0x00000400 -#define IXGBE_TFCS_TXOFF3 0x00000800 -#define IXGBE_TFCS_TXOFF4 0x00001000 -#define IXGBE_TFCS_TXOFF5 0x00002000 -#define IXGBE_TFCS_TXOFF6 0x00004000 -#define IXGBE_TFCS_TXOFF7 0x00008000 +#define IXGBE_TFCS_TXOFF 0x00000001 +#define IXGBE_TFCS_TXOFF0 0x00000100 +#define IXGBE_TFCS_TXOFF1 0x00000200 +#define IXGBE_TFCS_TXOFF2 0x00000400 +#define IXGBE_TFCS_TXOFF3 0x00000800 +#define IXGBE_TFCS_TXOFF4 0x00001000 +#define IXGBE_TFCS_TXOFF5 0x00002000 +#define IXGBE_TFCS_TXOFF6 0x00004000 +#define IXGBE_TFCS_TXOFF7 0x00008000 /* TCP Timer */ -#define IXGBE_TCPTIMER_KS 0x00000100 -#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200 -#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400 -#define IXGBE_TCPTIMER_LOOP 0x00000800 -#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF +#define IXGBE_TCPTIMER_KS 0x00000100 +#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200 +#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400 +#define IXGBE_TCPTIMER_LOOP 0x00000800 +#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF /* HLREG0 Bit Masks */ -#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */ -#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */ -#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */ -#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */ -#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */ -#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */ -#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */ -#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */ -#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */ -#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */ -#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */ -#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */ -#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */ -#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */ -#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 
*/ +#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */ +#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */ +#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */ +#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */ +#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */ +#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */ +#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */ +#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */ +#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */ +#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */ +#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */ +#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */ +#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */ +#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */ +#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */ /* VMD_CTL bitmasks */ -#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001 -#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 +#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001 +#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 /* VT_CTL bitmasks */ -#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */ -#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */ -#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */ -#define IXGBE_VT_CTL_POOL_SHIFT 7 -#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) +#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */ +#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */ +#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */ +#define IXGBE_VT_CTL_POOL_SHIFT 7 +#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) /* VMOLR bitmasks */ #define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */ #define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */ -#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ -#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ -#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC 
tbl */ -#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */ -#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */ +#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ +#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ +#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ +#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */ +#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */ /* VFRE bitmask */ -#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF +#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF -#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ +#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ /* RDHMPN and TDHMPN bitmasks */ -#define IXGBE_RDHMPN_RDICADDR 0x007FF800 -#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 -#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 -#define IXGBE_TDHMPN_TDICADDR 0x003FF800 -#define IXGBE_TDHMPN_TDICRDREQ 0x00800000 -#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 - -#define IXGBE_RDMAM_MEM_SEL_SHIFT 13 -#define IXGBE_RDMAM_DWORD_SHIFT 9 -#define IXGBE_RDMAM_DESC_COMP_FIFO 1 -#define IXGBE_RDMAM_DFC_CMD_FIFO 2 -#define IXGBE_RDMAM_TCN_STATUS_RAM 4 -#define IXGBE_RDMAM_WB_COLL_FIFO 5 -#define IXGBE_RDMAM_QSC_CNT_RAM 6 -#define IXGBE_RDMAM_QSC_QUEUE_CNT 8 -#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA -#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135 -#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4 -#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48 -#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7 -#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256 -#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9 -#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8 -#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4 -#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64 -#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4 -#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32 -#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4 -#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128 -#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8 - -#define IXGBE_TXDESCIC_READY 0x80000000 +#define 
IXGBE_RDHMPN_RDICADDR 0x007FF800 +#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 +#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 +#define IXGBE_TDHMPN_TDICADDR 0x003FF800 +#define IXGBE_TDHMPN_TDICRDREQ 0x00800000 +#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 + +#define IXGBE_RDMAM_MEM_SEL_SHIFT 13 +#define IXGBE_RDMAM_DWORD_SHIFT 9 +#define IXGBE_RDMAM_DESC_COMP_FIFO 1 +#define IXGBE_RDMAM_DFC_CMD_FIFO 2 +#define IXGBE_RDMAM_RSC_HEADER_ADDR 3 +#define IXGBE_RDMAM_TCN_STATUS_RAM 4 +#define IXGBE_RDMAM_WB_COLL_FIFO 5 +#define IXGBE_RDMAM_QSC_CNT_RAM 6 +#define IXGBE_RDMAM_QSC_FCOE_RAM 7 +#define IXGBE_RDMAM_QSC_QUEUE_CNT 8 +#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA +#define IXGBE_RDMAM_QSC_RSC_RAM 0xB +#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135 +#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4 +#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48 +#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7 +#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE 32 +#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT 4 +#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256 +#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9 +#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8 +#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4 +#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64 +#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4 +#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE 512 +#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT 5 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8 +#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE 32 +#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT 8 + +#define IXGBE_TXDESCIC_READY 0x80000000 /* Receive Checksum Control */ -#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ -#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ +#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ /* FCRTL Bit Masks */ -#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */ -#define 
IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */ +#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */ +#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */ /* PAP bit masks*/ -#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ +#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ /* RMCS Bit Masks */ -#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */ +#define IXGBE_RMCS_RRM 0x00000002 /* Rx Recycle Mode enable */ /* Receive Arbitration Control: 0 Round Robin, 1 DFP */ -#define IXGBE_RMCS_RAC 0x00000004 -#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ -#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */ -#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */ -#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ +#define IXGBE_RMCS_RAC 0x00000004 +/* Deficit Fixed Prio ena */ +#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC +#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */ +#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */ +#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ /* FCCFG Bit Masks */ -#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */ -#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */ +#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */ +#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */ /* Interrupt register bitmasks */ /* Extended Interrupt Cause Read */ -#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ -#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */ -#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */ -#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */ -#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ -#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ -#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ -#define 
IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ -#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ -#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */ -#define IXGBE_EICR_GPI_SDP0_8259X 0x01000000 /* Gen Purpose INT on SDP0 */ -#define IXGBE_EICR_GPI_SDP1_8259X 0x02000000 /* Gen Purpose INT on SDP1 */ -#define IXGBE_EICR_GPI_SDP2_8259X 0x04000000 /* Gen Purpose INT on SDP2 */ -#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 -#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 -#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 +#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ +#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */ +#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */ +#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */ +#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ +#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ +#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ +#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ +#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ +#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */ +#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ +#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ +#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ +#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 /* Gen Purpose Interrupt on SDP1 */ +#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 /* Gen Purpose Interrupt on SDP2 */ #define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540 #define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540 #define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540 @@ -1617,361 +1892,373 @@ enum { #define IXGBE_EICR_GPI_SDP0_X550EM_a IXGBE_EICR_GPI_SDP0_X540 #define 
IXGBE_EICR_GPI_SDP1_X550EM_a IXGBE_EICR_GPI_SDP1_X540 #define IXGBE_EICR_GPI_SDP2_X550EM_a IXGBE_EICR_GPI_SDP2_X540 -#define IXGBE_EICR_GPI_SDP0(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0) -#define IXGBE_EICR_GPI_SDP1(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1) -#define IXGBE_EICR_GPI_SDP2(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2) +#define IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0) +#define IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1) +#define IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2) -#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ -#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ -#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ -#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ -#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ +#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ +#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ /* Extended Interrupt Cause Set */ -#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ -#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */ -#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ -#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ -#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ -#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ -#define IXGBE_EICS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) -#define IXGBE_EICS_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw) -#define IXGBE_EICS_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw) -#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ -#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler 
Err */ -#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ -#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ -#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ +#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */ +#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EICS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EICS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EICS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) +#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ +#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ /* Extended Interrupt Mask Set */ -#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ -#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ -#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ -#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ -#define 
IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ -#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermel Sensor Event */ -#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ -#define IXGBE_EIMS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) -#define IXGBE_EIMS_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw) -#define IXGBE_EIMS_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw) -#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ -#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ -#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ -#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ -#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ +#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */ +#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EIMS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EIMS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) +#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ +#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* 
TCP Timer */ +#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ /* Extended Interrupt Mask Clear */ -#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ -#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ -#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ -#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ -#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ -#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ -#define IXGBE_EIMC_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) -#define IXGBE_EIMC_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw) -#define IXGBE_EIMC_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw) -#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ -#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ -#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ -#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ -#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ +#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error 
*/ +#define IXGBE_EIMC_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EIMC_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EIMC_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) +#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ +#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ #define IXGBE_EIMS_ENABLE_MASK ( \ - IXGBE_EIMS_RTX_QUEUE | \ - IXGBE_EIMS_LSC | \ - IXGBE_EIMS_TCP_TIMER | \ + IXGBE_EIMS_RTX_QUEUE | \ + IXGBE_EIMS_LSC | \ + IXGBE_EIMS_TCP_TIMER | \ IXGBE_EIMS_OTHER) /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ -#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ -#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ -#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ -#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ -#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ -#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ -#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ -#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ -#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ -#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ -#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */ -#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */ -#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ -#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ -#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ -#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ -#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ -#define 
IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass check of control bits */ -#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */ -#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */ -#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ -#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */ -#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */ - -#define IXGBE_MAX_FTQF_FILTERS 128 -#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003 -#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000 -#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001 -#define IXGBE_FTQF_PROTOCOL_SCTP 2 -#define IXGBE_FTQF_PRIORITY_MASK 0x00000007 -#define IXGBE_FTQF_PRIORITY_SHIFT 2 -#define IXGBE_FTQF_POOL_MASK 0x0000003F -#define IXGBE_FTQF_POOL_SHIFT 8 -#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F -#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 -#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E -#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D -#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B -#define IXGBE_FTQF_DEST_PORT_MASK 0x17 -#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F -#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 -#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 +#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ +#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIR_CTRL_URG_82599 
0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass chk of ctrl bits */ +#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */ +#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */ +#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ +#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */ +#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */ + +#define IXGBE_MAX_FTQF_FILTERS 128 +#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003 +#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000 +#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001 +#define IXGBE_FTQF_PROTOCOL_SCTP 2 +#define IXGBE_FTQF_PRIORITY_MASK 0x00000007 +#define IXGBE_FTQF_PRIORITY_SHIFT 2 +#define IXGBE_FTQF_POOL_MASK 0x0000003F +#define IXGBE_FTQF_POOL_SHIFT 8 +#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F +#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 +#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E +#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D +#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B +#define IXGBE_FTQF_DEST_PORT_MASK 0x17 +#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F +#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 +#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 /* Interrupt clear mask */ -#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF +#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF /* Interrupt Vector Allocation Registers */ -#define IXGBE_IVAR_REG_NUM 25 -#define IXGBE_IVAR_REG_NUM_82599 64 -#define IXGBE_IVAR_TXRX_ENTRY 96 -#define IXGBE_IVAR_RX_ENTRY 64 -#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) -#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) -#define 
IXGBE_IVAR_TX_ENTRY 32 +#define IXGBE_IVAR_REG_NUM 25 +#define IXGBE_IVAR_REG_NUM_82599 64 +#define IXGBE_IVAR_TXRX_ENTRY 96 +#define IXGBE_IVAR_RX_ENTRY 64 +#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) +#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) +#define IXGBE_IVAR_TX_ENTRY 32 -#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ -#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ +#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ +#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ -#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) +#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) -#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ /* ETYPE Queue Filter/Select Bit Masks */ -#define IXGBE_MAX_ETQF_FILTERS 8 -#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ -#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ -#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */ -#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ -#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ -#define IXGBE_ETQF_POOL_ENABLE BIT(26) /* bit 26 */ +#define IXGBE_MAX_ETQF_FILTERS 8 +#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ +#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ +#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */ +#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ +#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ +#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ #define IXGBE_ETQF_POOL_SHIFT 20 -#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ -#define IXGBE_ETQS_RX_QUEUE_SHIFT 16 -#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */ -#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */ +#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ +#define IXGBE_ETQS_RX_QUEUE_SHIFT 16 +#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */ +#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */ /* * ETQF filter list: one static filter per filter consumer. 
This is - * to avoid filter collisions later. Add new filters - * here!! + * to avoid filter collisions later. Add new filters + * here!! * * Current filters: - * EAPOL 802.1x (0x888e): Filter 0 - * FCoE (0x8906): Filter 2 - * 1588 (0x88f7): Filter 3 - * FIP (0x8914): Filter 4 - * LLDP (0x88CC): Filter 5 - * LACP (0x8809): Filter 6 - * FC (0x8808): Filter 7 + * EAPOL 802.1x (0x888e): Filter 0 + * FCoE (0x8906): Filter 2 + * 1588 (0x88f7): Filter 3 + * FIP (0x8914): Filter 4 + * LLDP (0x88CC): Filter 5 + * LACP (0x8809): Filter 6 + * FC (0x8808): Filter 7 */ -#define IXGBE_ETQF_FILTER_EAPOL 0 -#define IXGBE_ETQF_FILTER_FCOE 2 -#define IXGBE_ETQF_FILTER_1588 3 -#define IXGBE_ETQF_FILTER_FIP 4 -#define IXGBE_ETQF_FILTER_LLDP 5 -#define IXGBE_ETQF_FILTER_LACP 6 -#define IXGBE_ETQF_FILTER_FC 7 - +#define IXGBE_ETQF_FILTER_EAPOL 0 +#define IXGBE_ETQF_FILTER_FCOE 2 +#define IXGBE_ETQF_FILTER_1588 3 +#define IXGBE_ETQF_FILTER_FIP 4 +#define IXGBE_ETQF_FILTER_LLDP 5 +#define IXGBE_ETQF_FILTER_LACP 6 +#define IXGBE_ETQF_FILTER_FC 7 /* VLAN Control Bit Masks */ -#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ -#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ -#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ -#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ -#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ +#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ +#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ +#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ +#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ +#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ /* VLAN pool filtering masks */ -#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ -#define IXGBE_VLVF_ENTRIES 64 -#define IXGBE_VLVF_VLANID_MASK 0x00000FFF - +#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ +#define IXGBE_VLVF_ENTRIES 64 +#define IXGBE_VLVF_VLANID_MASK 0x00000FFF /* Per VF Port VLAN insertion rules */ -#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ 
-#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ +#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ -#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ +#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ /* STATUS Bit Masks */ -#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ -#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/ -#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */ +#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ +#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/ +#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Ena Status */ -#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ -#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ +#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ +#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ /* ESDP Bit Masks */ -#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */ -#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */ -#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */ -#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */ -#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ -#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ -#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ -#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ -#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */ -#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ -#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ -#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */ -#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */ +#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */ +#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */ +#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */ +#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value 
*/ +#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ +#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ +#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ +#define IXGBE_ESDP_SDP7 0x00000080 /* SDP7 Data Value */ +#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ +#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */ +#define IXGBE_ESDP_SDP2_DIR 0x00000400 /* SDP1 IO direction */ +#define IXGBE_ESDP_SDP3_DIR 0x00000800 /* SDP3 IO direction */ +#define IXGBE_ESDP_SDP4_DIR 0x00001000 /* SDP4 IO direction */ +#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ +#define IXGBE_ESDP_SDP6_DIR 0x00004000 /* SDP6 IO direction */ +#define IXGBE_ESDP_SDP7_DIR 0x00008000 /* SDP7 IO direction */ +#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 IO mode */ +#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */ + /* LEDCTL Bit Masks */ -#define IXGBE_LED_IVRT_BASE 0x00000040 -#define IXGBE_LED_BLINK_BASE 0x00000080 -#define IXGBE_LED_MODE_MASK_BASE 0x0000000F -#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) -#define IXGBE_LED_MODE_SHIFT(_i) (8 * (_i)) -#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) -#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) -#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) -#define IXGBE_X557_LED_MANUAL_SET_MASK BIT(8) +#define IXGBE_LED_IVRT_BASE 0x00000040 +#define IXGBE_LED_BLINK_BASE 0x00000080 +#define IXGBE_LED_MODE_MASK_BASE 0x0000000F +#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) +#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i)) +#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) +#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) +#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) +#define IXGBE_X557_LED_MANUAL_SET_MASK (1 << 8) #define IXGBE_X557_MAX_LED_INDEX 3 #define IXGBE_X557_LED_PROVISIONING 0xC430 /* LED modes */ -#define 
IXGBE_LED_LINK_UP 0x0 -#define IXGBE_LED_LINK_10G 0x1 -#define IXGBE_LED_MAC 0x2 -#define IXGBE_LED_FILTER 0x3 -#define IXGBE_LED_LINK_ACTIVE 0x4 -#define IXGBE_LED_LINK_1G 0x5 -#define IXGBE_LED_ON 0xE -#define IXGBE_LED_OFF 0xF +#define IXGBE_LED_LINK_UP 0x0 +#define IXGBE_LED_LINK_10G 0x1 +#define IXGBE_LED_MAC 0x2 +#define IXGBE_LED_FILTER 0x3 +#define IXGBE_LED_LINK_ACTIVE 0x4 +#define IXGBE_LED_LINK_1G 0x5 +#define IXGBE_LED_ON 0xE +#define IXGBE_LED_OFF 0xF /* AUTOC Bit Masks */ #define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000 -#define IXGBE_AUTOC_KX4_SUPP 0x80000000 -#define IXGBE_AUTOC_KX_SUPP 0x40000000 -#define IXGBE_AUTOC_PAUSE 0x30000000 -#define IXGBE_AUTOC_ASM_PAUSE 0x20000000 -#define IXGBE_AUTOC_SYM_PAUSE 0x10000000 -#define IXGBE_AUTOC_RF 0x08000000 -#define IXGBE_AUTOC_PD_TMR 0x06000000 -#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 -#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 -#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 -#define IXGBE_AUTOC_FECA 0x00040000 -#define IXGBE_AUTOC_FECR 0x00020000 -#define IXGBE_AUTOC_KR_SUPP 0x00010000 -#define IXGBE_AUTOC_AN_RESTART 0x00001000 -#define IXGBE_AUTOC_FLU 0x00000001 -#define IXGBE_AUTOC_LMS_SHIFT 13 -#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) - 
-#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200 -#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 -#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 -#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 -#define IXGBE_AUTOC_10G_XAUI (0u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_10G_KX4 (1u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_10G_CX4 (2u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_BX (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_KX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_SFI (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_KX_BX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) - -#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 -#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 -#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 -#define IXGBE_AUTOC2_10G_KR (0u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_10G_XFI (1u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_10G_SFI (2u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000 -#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 - -#define IXGBE_MACC_FLU 0x00000001 -#define IXGBE_MACC_FSV_10G 0x00030000 -#define IXGBE_MACC_FS 0x00040000 -#define IXGBE_MAC_RX2TX_LPBK 0x00000002 - -/* Veto Bit definition */ -#define IXGBE_MMNGC_MNG_VETO 0x00000001 +#define IXGBE_AUTOC_KX4_SUPP 0x80000000 +#define IXGBE_AUTOC_KX_SUPP 0x40000000 +#define IXGBE_AUTOC_PAUSE 0x30000000 +#define IXGBE_AUTOC_ASM_PAUSE 0x20000000 +#define IXGBE_AUTOC_SYM_PAUSE 0x10000000 +#define IXGBE_AUTOC_RF 0x08000000 +#define IXGBE_AUTOC_PD_TMR 0x06000000 +#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 +#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 +#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 +#define IXGBE_AUTOC_FECA 0x00040000 +#define IXGBE_AUTOC_FECR 0x00020000 +#define IXGBE_AUTOC_KR_SUPP 0x00010000 +#define IXGBE_AUTOC_AN_RESTART 0x00001000 +#define IXGBE_AUTOC_FLU 0x00000001 +#define IXGBE_AUTOC_LMS_SHIFT 13 +#define 
IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200 +#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 +#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 +#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 +#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 +#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000 +#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 + +#define IXGBE_MACC_FLU 0x00000001 +#define 
IXGBE_MACC_FSV_10G 0x00030000 +#define IXGBE_MACC_FS 0x00040000 +#define IXGBE_MAC_RX2TX_LPBK 0x00000002 + +/* Veto Bit definiton */ +#define IXGBE_MMNGC_MNG_VETO 0x00000001 /* LINKS Bit Masks */ -#define IXGBE_LINKS_KX_AN_COMP 0x80000000 -#define IXGBE_LINKS_UP 0x40000000 -#define IXGBE_LINKS_SPEED 0x20000000 -#define IXGBE_LINKS_MODE 0x18000000 -#define IXGBE_LINKS_RX_MODE 0x06000000 -#define IXGBE_LINKS_TX_MODE 0x01800000 -#define IXGBE_LINKS_XGXS_EN 0x00400000 -#define IXGBE_LINKS_SGMII_EN 0x02000000 -#define IXGBE_LINKS_PCS_1G_EN 0x00200000 -#define IXGBE_LINKS_1G_AN_EN 0x00100000 -#define IXGBE_LINKS_KX_AN_IDLE 0x00080000 -#define IXGBE_LINKS_1G_SYNC 0x00040000 -#define IXGBE_LINKS_10G_ALIGN 0x00020000 -#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 -#define IXGBE_LINKS_TL_FAULT 0x00001000 -#define IXGBE_LINKS_SIGNAL 0x00000F00 - -#define IXGBE_LINKS_SPEED_NON_STD 0x08000000 -#define IXGBE_LINKS_SPEED_82599 0x30000000 -#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 -#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 -#define IXGBE_LINKS_SPEED_100_82599 0x10000000 -#define IXGBE_LINKS_SPEED_10_X550EM_A 0 -#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ -#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ - -#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 +#define IXGBE_LINKS_KX_AN_COMP 0x80000000 +#define IXGBE_LINKS_UP 0x40000000 +#define IXGBE_LINKS_SPEED 0x20000000 +#define IXGBE_LINKS_MODE 0x18000000 +#define IXGBE_LINKS_RX_MODE 0x06000000 +#define IXGBE_LINKS_TX_MODE 0x01800000 +#define IXGBE_LINKS_XGXS_EN 0x00400000 +#define IXGBE_LINKS_SGMII_EN 0x02000000 +#define IXGBE_LINKS_PCS_1G_EN 0x00200000 +#define IXGBE_LINKS_1G_AN_EN 0x00100000 +#define IXGBE_LINKS_KX_AN_IDLE 0x00080000 +#define IXGBE_LINKS_1G_SYNC 0x00040000 +#define IXGBE_LINKS_10G_ALIGN 0x00020000 +#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 +#define IXGBE_LINKS_TL_FAULT 0x00001000 +#define IXGBE_LINKS_SIGNAL 0x00000F00 + +#define IXGBE_LINKS_SPEED_NON_STD 0x08000000 +#define IXGBE_LINKS_SPEED_82599 
0x30000000 +#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 +#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 +#define IXGBE_LINKS_SPEED_100_82599 0x10000000 +#define IXGBE_LINKS_SPEED_10_X550EM_A 0x00000000 +#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ +#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ + +#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 /* PCS1GLSTA Bit Masks */ -#define IXGBE_PCS1GLSTA_LINK_OK 1 -#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 -#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000 -#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000 -#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000 -#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 -#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000 +#define IXGBE_PCS1GLSTA_LINK_OK 1 +#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 +#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000 +#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000 +#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000 +#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 +#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000 -#define IXGBE_PCS1GANA_SYM_PAUSE 0x80 -#define IXGBE_PCS1GANA_ASM_PAUSE 0x100 +#define IXGBE_PCS1GANA_SYM_PAUSE 0x80 +#define IXGBE_PCS1GANA_ASM_PAUSE 0x100 /* PCS1GLCTL Bit Masks */ -#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */ -#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1 -#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20 -#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40 -#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 -#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 +#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */ +#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1 +#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20 +#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40 +#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 +#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 /* ANLP1 Bit Masks */ -#define IXGBE_ANLP1_PAUSE 0x0C00 -#define IXGBE_ANLP1_SYM_PAUSE 0x0400 -#define IXGBE_ANLP1_ASM_PAUSE 0x0800 -#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 +#define IXGBE_ANLP1_PAUSE 0x0C00 +#define IXGBE_ANLP1_SYM_PAUSE 
0x0400 +#define IXGBE_ANLP1_ASM_PAUSE 0x0800 +#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 /* SW Semaphore Register bitmasks */ -#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ -#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ -#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ -#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ +#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ /* SW_FW_SYNC/GSSR definitions */ #define IXGBE_GSSR_EEP_SM 0x0001 @@ -1982,40 +2269,43 @@ enum { #define IXGBE_GSSR_NVM_UPDATE_SM 0x0200 #define IXGBE_GSSR_SW_MNG_SM 0x0400 #define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */ -#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */ -#define IXGBE_GSSR_I2C_MASK 0x1800 -#define IXGBE_GSSR_NVM_PHY_MASK 0xF +#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys and both I2Cs */ +#define IXGBE_GSSR_I2C_MASK 0x1800 +#define IXGBE_GSSR_NVM_PHY_MASK 0xF /* FW Status register bitmask */ -#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ +#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ /* EEC Register */ -#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ -#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ -#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ -#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ -#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */ -#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ -#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ -#define IXGBE_EEC_FWE_SHIFT 4 -#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ -#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ -#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ -#define 
IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ -#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ -#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */ -#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ +#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ +#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ +#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ +#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ +#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */ +#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define IXGBE_EEC_FWE_SHIFT 4 +#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ +#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ +#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ +#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ +#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ +#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */ +#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ /* EEPROM Addressing bits based on type (0-small, 1-large) */ -#define IXGBE_EEC_ADDR_SIZE 0x00000400 -#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ -#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. */ +#define IXGBE_EEC_ADDR_SIZE 0x00000400 +#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ +#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. 
*/ -#define IXGBE_EEC_SIZE_SHIFT 11 -#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 -#define IXGBE_EEPROM_OPCODE_BITS 8 +#define IXGBE_EEC_SIZE_SHIFT 11 +#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 +#define IXGBE_EEPROM_OPCODE_BITS 8 + +/* FLA Register */ +#define IXGBE_FLA_LOCKED 0x00000040 /* Part Number String Length */ -#define IXGBE_PBANUM_LENGTH 11 +#define IXGBE_PBANUM_LENGTH 11 /* Checksum and EEPROM pointers */ #define IXGBE_PBANUM_PTR_GUARD 0xFAFA @@ -2046,139 +2336,154 @@ enum { #define IXGBE_FW_PTR 0x0F #define IXGBE_PBANUM0_PTR 0x15 #define IXGBE_PBANUM1_PTR 0x16 +#define IXGBE_ALT_MAC_ADDR_PTR 0x37 #define IXGBE_FREE_SPACE_PTR 0X3E /* External Thermal Sensor Config */ -#define IXGBE_ETS_CFG 0x26 -#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0 -#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6 -#define IXGBE_ETS_TYPE_MASK 0x0038 -#define IXGBE_ETS_TYPE_SHIFT 3 -#define IXGBE_ETS_TYPE_EMC 0x000 -#define IXGBE_ETS_TYPE_EMC_SHIFTED 0x000 -#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007 -#define IXGBE_ETS_DATA_LOC_MASK 0x3C00 -#define IXGBE_ETS_DATA_LOC_SHIFT 10 -#define IXGBE_ETS_DATA_INDEX_MASK 0x0300 -#define IXGBE_ETS_DATA_INDEX_SHIFT 8 -#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF - -#define IXGBE_SAN_MAC_ADDR_PTR 0x28 -#define IXGBE_DEVICE_CAPS 0x2C -#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 -#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 +#define IXGBE_ETS_CFG 0x26 +#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0 +#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6 +#define IXGBE_ETS_TYPE_MASK 0x0038 +#define IXGBE_ETS_TYPE_SHIFT 3 +#define IXGBE_ETS_TYPE_EMC 0x000 +#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007 +#define IXGBE_ETS_DATA_LOC_MASK 0x3C00 +#define IXGBE_ETS_DATA_LOC_SHIFT 10 +#define IXGBE_ETS_DATA_INDEX_MASK 0x0300 +#define IXGBE_ETS_DATA_INDEX_SHIFT 8 +#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF + +#define IXGBE_SAN_MAC_ADDR_PTR 0x28 +#define IXGBE_DEVICE_CAPS 0x2C +#define IXGBE_82599_SERIAL_NUMBER_MAC_ADDR 0x11 +#define IXGBE_X550_SERIAL_NUMBER_MAC_ADDR 0x04 + +#define IXGBE_PCIE_MSIX_82599_CAPS 
0x72 #define IXGBE_MAX_MSIX_VECTORS_82599 0x40 -#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 +#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 #define IXGBE_MAX_MSIX_VECTORS_82598 0x13 /* MSI-X capability fields masks */ -#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF +#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF /* Legacy EEPROM word offsets */ -#define IXGBE_ISCSI_BOOT_CAPS 0x0033 -#define IXGBE_ISCSI_SETUP_PORT_0 0x0030 -#define IXGBE_ISCSI_SETUP_PORT_1 0x0034 +#define IXGBE_ISCSI_BOOT_CAPS 0x0033 +#define IXGBE_ISCSI_SETUP_PORT_0 0x0030 +#define IXGBE_ISCSI_SETUP_PORT_1 0x0034 /* EEPROM Commands - SPI */ -#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ -#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 -#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ -#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ -#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ -#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ +#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ +#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 +#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ +#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ /* EEPROM reset Write Enable latch */ -#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 -#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ -#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ -#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ -#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ -#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ +#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 +#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ +#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* 
EEPROM write Status reg */ +#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ /* EEPROM Read Register */ -#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ -#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ -#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ -#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ -#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ -#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */ - -#define NVM_INIT_CTRL_3 0x38 -#define NVM_INIT_CTRL_3_LPLU 0x8 -#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40 -#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100 - -#define IXGBE_EEPROM_PAGE_SIZE_MAX 128 -#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */ -#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */ - -#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ -#define IXGBE_EEPROM_CCD_BIT 2 /* EEPROM Core Clock Disable bit */ +#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ +#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ +#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ +#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */ +#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */ + +#define NVM_INIT_CTRL_3 0x38 +#define NVM_INIT_CTRL_3_LPLU 0x8 +#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40 +#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100 + +#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 + +#define IXGBE_EEPROM_PAGE_SIZE_MAX 128 +#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */ +#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */ +#define 
IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ +#define IXGBE_EEPROM_CCD_BIT 2 #ifndef IXGBE_EEPROM_GRANT_ATTEMPTS -#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */ #endif #ifndef IXGBE_EERD_EEWR_ATTEMPTS /* Number of 5 microseconds we wait for EERD read and * EERW write to complete */ -#define IXGBE_EERD_EEWR_ATTEMPTS 100000 +#define IXGBE_EERD_EEWR_ATTEMPTS 100000 #endif #ifndef IXGBE_FLUDONE_ATTEMPTS /* # attempts we wait for flush update to complete */ -#define IXGBE_FLUDONE_ATTEMPTS 20000 +#define IXGBE_FLUDONE_ATTEMPTS 20000 #endif -#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ -#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ -#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ -#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ - -#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 -#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 -#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 -#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 -#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR BIT(7) -#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 -#define IXGBE_FW_LESM_STATE_1 0x1 -#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ -#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 -#define IXGBE_FW_PATCH_VERSION_4 0x7 -#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ -#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ -#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ -#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ -#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ -#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */ -#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */ -#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */ -#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. 
SAN MAC 1 offset */ -#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */ -#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */ -#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */ -#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */ - -#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ -#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ -#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ +#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ +#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ +#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ +#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ + +#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR (1 << 7) +#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 +#define IXGBE_FW_LESM_STATE_1 0x1 +#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ +#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define IXGBE_FW_PATCH_VERSION_4 0x7 +#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ +#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ +#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ +#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. 
SAN MAC block */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */ + +/* FW header offset */ +#define IXGBE_X540_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define IXGBE_X540_FW_MODULE_MASK 0x7FFF +/* 4KB multiplier */ +#define IXGBE_X540_FW_MODULE_LENGTH 0x1000 +/* version word 2 (month & day) */ +#define IXGBE_X540_FW_PATCH_VERSION_2 0x5 +/* version word 3 (silicon compatibility & year) */ +#define IXGBE_X540_FW_PATCH_VERSION_3 0x6 +/* version word 4 (major & minor numbers) */ +#define IXGBE_X540_FW_PATCH_VERSION_4 0x7 + +#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ +#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ +#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ /* PCI Bus Info */ -#define IXGBE_PCI_DEVICE_STATUS 0xAA -#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 -#define IXGBE_PCI_LINK_STATUS 0xB2 -#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 -#define IXGBE_PCI_LINK_WIDTH 0x3F0 -#define IXGBE_PCI_LINK_WIDTH_1 0x10 -#define IXGBE_PCI_LINK_WIDTH_2 0x20 -#define IXGBE_PCI_LINK_WIDTH_4 0x40 -#define IXGBE_PCI_LINK_WIDTH_8 0x80 -#define IXGBE_PCI_LINK_SPEED 0xF -#define IXGBE_PCI_LINK_SPEED_2500 0x1 -#define IXGBE_PCI_LINK_SPEED_5000 0x2 -#define IXGBE_PCI_LINK_SPEED_8000 0x3 -#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E -#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 -#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 +#define IXGBE_PCI_DEVICE_STATUS 0xAA +#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define 
IXGBE_PCI_LINK_STATUS 0xB2 +#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 +#define IXGBE_PCI_LINK_WIDTH 0x3F0 +#define IXGBE_PCI_LINK_WIDTH_1 0x10 +#define IXGBE_PCI_LINK_WIDTH_2 0x20 +#define IXGBE_PCI_LINK_WIDTH_4 0x40 +#define IXGBE_PCI_LINK_WIDTH_8 0x80 +#define IXGBE_PCI_LINK_SPEED 0xF +#define IXGBE_PCI_LINK_SPEED_2500 0x1 +#define IXGBE_PCI_LINK_SPEED_5000 0x2 +#define IXGBE_PCI_LINK_SPEED_8000 0x3 +#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E +#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 #define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf #define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0 @@ -2194,57 +2499,73 @@ enum { /* Number of 100 microseconds we wait for PCI Express master disable */ #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 +/* Check whether address is multicast. This is little-endian specific check.*/ +#define IXGBE_IS_MULTICAST(Address) \ + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. */ +#define IXGBE_IS_BROADCAST(Address) \ + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ + (((u8 *)(Address))[1] == ((u8)0xff))) + /* RAH */ -#define IXGBE_RAH_VIND_MASK 0x003C0000 -#define IXGBE_RAH_VIND_SHIFT 18 -#define IXGBE_RAH_AV 0x80000000 -#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF +#define IXGBE_RAH_VIND_MASK 0x003C0000 +#define IXGBE_RAH_VIND_SHIFT 18 +#define IXGBE_RAH_AV 0x80000000 +#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF /* Header split receive */ -#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 -#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E -#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 +#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 +#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 #define IXGBE_RFCTL_RSC_DIS 0x00000020 -#define IXGBE_RFCTL_NFSW_DIS 0x00000040 -#define IXGBE_RFCTL_NFSR_DIS 0x00000080 -#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 -#define IXGBE_RFCTL_NFS_VER_SHIFT 8 -#define IXGBE_RFCTL_NFS_VER_2 0 -#define IXGBE_RFCTL_NFS_VER_3 1 -#define IXGBE_RFCTL_NFS_VER_4 2 
-#define IXGBE_RFCTL_IPV6_DIS 0x00000400 -#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 -#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 -#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 -#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 +#define IXGBE_RFCTL_NFSW_DIS 0x00000040 +#define IXGBE_RFCTL_NFSR_DIS 0x00000080 +#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 +#define IXGBE_RFCTL_NFS_VER_SHIFT 8 +#define IXGBE_RFCTL_NFS_VER_2 0 +#define IXGBE_RFCTL_NFS_VER_3 1 +#define IXGBE_RFCTL_NFS_VER_4 2 +#define IXGBE_RFCTL_IPV6_DIS 0x00000400 +#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 +#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 +#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 /* Transmit Config masks */ -#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ -#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */ -#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. 
wr-bk flushing */ +#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ /* Enable short packet padding to 64 bytes */ -#define IXGBE_TX_PAD_ENABLE 0x00000400 -#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ +#define IXGBE_TX_PAD_ENABLE 0x00000400 +#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ /* This allows for 16K packets + 4k for vlan */ -#define IXGBE_MAX_FRAME_SZ 0x40040000 +#define IXGBE_MAX_FRAME_SZ 0x40040000 -#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ -#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ +#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ +#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ /* Receive Config masks */ -#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ -#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ -#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ -#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. 
write-back flushing */ -#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ -#define IXGBE_RXDCTL_RLPML_EN 0x00008000 -#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ - -#define IXGBE_TSAUXC_EN_CLK 0x00000004 -#define IXGBE_TSAUXC_SYNCLK 0x00000008 -#define IXGBE_TSAUXC_SDP0_INT 0x00000040 +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Desc Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc wr-bk flushing */ +#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* X540 supported only */ +#define IXGBE_RXDCTL_RLPML_EN 0x00008000 +#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ + +#define IXGBE_TSAUXC_EN_CLK 0x00000004 +#define IXGBE_TSAUXC_SYNCLK 0x00000008 +#define IXGBE_TSAUXC_SDP0_INT 0x00000040 +#define IXGBE_TSAUXC_EN_TT0 0x00000001 +#define IXGBE_TSAUXC_EN_TT1 0x00000002 +#define IXGBE_TSAUXC_ST0 0x00000010 #define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000 +#define IXGBE_TSSDP_TS_SDP0_SEL_MASK 0x000000C0 +#define IXGBE_TSSDP_TS_SDP0_CLK0 0x00000080 +#define IXGBE_TSSDP_TS_SDP0_EN 0x00000100 + #define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ #define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */ @@ -2257,8 +2578,15 @@ enum { #define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A #define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */ #define IXGBE_TSYNCRXCTL_TSIP_UT_EN 0x00800000 /* Rx Timestamp in Packet */ +#define IXGBE_TSYNCRXCTL_TSIP_UP_MASK 0xFF000000 /* Rx Timestamp UP Mask */ +#define IXGBE_TSIM_SYS_WRAP 0x00000001 #define IXGBE_TSIM_TXTS 0x00000002 +#define IXGBE_TSIM_TADJ 0x00000080 + +#define IXGBE_TSICR_SYS_WRAP IXGBE_TSIM_SYS_WRAP +#define IXGBE_TSICR_TXTS IXGBE_TSIM_TXTS +#define IXGBE_TSICR_TADJ IXGBE_TSIM_TADJ #define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF #define IXGBE_RXMTRL_V1_SYNC_MSG 0x00 @@ -2267,61 +2595,59 @@ enum { #define 
IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03 #define IXGBE_RXMTRL_V1_MGMT_MSG 0x04 -#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00 -#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000 -#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100 -#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200 -#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300 -#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800 -#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900 -#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00 -#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00 -#define IXGBE_RXMTRL_V2_SIGNALING_MSG 0x0C00 -#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00 - -#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ -#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ -#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ -#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ -#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ -#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ +#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00 +#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000 +#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100 +#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200 +#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300 +#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800 +#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900 +#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00 +#define IXGBE_RXMTRL_V2_SIGNALLING_MSG 0x0C00 +#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00 + +#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ +#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ +#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ +#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ +#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ +#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ /* Receive Priority Flow Control Enable */ -#define IXGBE_FCTRL_RPFCE 0x00004000 -#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow 
Control Ena */ -#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */ -#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ -#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ -#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ -#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Receive FC Mask */ - -#define IXGBE_MFLCN_RPFCE_SHIFT 4 +#define IXGBE_FCTRL_RPFCE 0x00004000 +#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ +#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */ +#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ +#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ +#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ +#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Rx Priority FC bitmap mask */ +#define IXGBE_MFLCN_RPFCE_SHIFT 4 /* Rx Priority FC bitmap shift */ /* Multiple Receive Queue Control */ -#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ -#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ -#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */ -#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */ -#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */ -#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */ -#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */ -#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */ -#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */ -#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */ -#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */ -#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 -#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 -#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ +#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ +#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */ +#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */ +#define 
IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */ +#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */ +#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */ +#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */ +#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */ +#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 /* Enable L3/L4 Tx switch */ +#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 #define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 -#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 -#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 -#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 -#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 -#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 #define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 -#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000 -#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 - -#define IXGBE_FWSM_TS_ENABLED 0x1 +#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000 +#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 /* Queue Drop Enable */ #define IXGBE_QDE_ENABLE 0x00000001 @@ -2329,301 +2655,383 @@ enum { #define IXGBE_QDE_IDX_MASK 0x00007F00 #define IXGBE_QDE_IDX_SHIFT 8 #define IXGBE_QDE_WRITE 0x00010000 - -#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ -#define 
IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ -#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ -#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ - -#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000 -#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 -#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 -#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000 -#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 +#define IXGBE_QDE_READ 0x00020000 + +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ + +#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000 +#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 /* Multiple Transmit Queue Command Register */ -#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ -#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ -#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ -#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ -#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ -#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ -#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA or 4 TQ if VT_ENA */ +#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ +#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ +#define 
IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ +#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ +#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ +#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */ +#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ /* Receive Descriptor bit definitions */ -#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ -#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ -#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ -#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 -#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ -#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ -#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ -#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */ -#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ -#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ -#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ -#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ -#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */ -#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ -#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ -#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ -#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ -#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ -#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ -#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ -#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ -#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ -#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ -#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error 
*/ -#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ -#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ +#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */ +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ +#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */ +#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ +#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ +#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ +#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ #define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */ -#define 
IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */ -#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ -#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ -#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */ -#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */ -#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ -#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ -#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ -#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ -#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ -#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ -#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ -#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ -#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ -#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ -#define IXGBE_RXD_PRI_SHIFT 13 -#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ -#define IXGBE_RXD_CFI_SHIFT 12 - -#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ -#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ -#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ -#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ -#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ -#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ -#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ -#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. 
FCP_RSP */ -#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ -#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE 1588 Time Stamp */ +#define IXGBE_RXDADV_ERR_RXE 0x20000000 /* Any MAC Error */ +#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCEOFe/IPE */ +#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ +#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ +#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */ +#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */ +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 + +#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ +#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ +#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ +#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ +#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ +#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. 
FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ +#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE1588 Time Stamp */ +#define IXGBE_RXDADV_STAT_TSIP 0x00008000 /* Time Stamp in packet buffer */ /* PSRTYPE bit definitions */ -#define IXGBE_PSRTYPE_TCPHDR 0x00000010 -#define IXGBE_PSRTYPE_UDPHDR 0x00000020 -#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 -#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 -#define IXGBE_PSRTYPE_L2HDR 0x00001000 +#define IXGBE_PSRTYPE_TCPHDR 0x00000010 +#define IXGBE_PSRTYPE_UDPHDR 0x00000020 +#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 +#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +#define IXGBE_PSRTYPE_L2HDR 0x00001000 /* SRRCTL bit definitions */ -#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ -#define IXGBE_SRRCTL_RDMTS_SHIFT 22 -#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 -#define IXGBE_SRRCTL_DROP_EN 0x10000000 -#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 -#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* 64byte resolution (>> 6) + * + at bit 8 offset (<< 8) + * = (<< 2) + */ +#define IXGBE_SRRCTL_RDMTS_SHIFT 22 +#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 +#define IXGBE_SRRCTL_DROP_EN 0x10000000 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 -#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 -#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF +#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define 
IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF -#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F -#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 -#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 -#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 -#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 -#define IXGBE_RXDADV_RSCCNT_SHIFT 17 -#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 -#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 -#define IXGBE_RXDADV_SPH 0x8000 +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 /* RSS Hash results */ -#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 -#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 -#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 -#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 -#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 -#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 #define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 -#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 -#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 #define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 /* RSS Packet Types as indicated in the receive descriptor. 
*/ -#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 -#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ -#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ -#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ -#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ -#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ -#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ -#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ -#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ +#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ +#define IXGBE_RXDADV_PKTTYPE_GENEVE 0x00000800 /* GENEVE hdr present */ #define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */ #define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */ -#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ -#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ -#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ -#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ -#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ -#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +#define 
IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ /* Security Processing bit Indication */ -#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000 -#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 -#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 -#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 -#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 +#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 /* Masks to determine if packets should be dropped due to frame errors */ #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ - IXGBE_RXD_ERR_CE | \ - IXGBE_RXD_ERR_LE | \ - IXGBE_RXD_ERR_PE | \ - IXGBE_RXD_ERR_OSE | \ - IXGBE_RXD_ERR_USE) + IXGBE_RXD_ERR_CE | \ + IXGBE_RXD_ERR_LE | \ + IXGBE_RXD_ERR_PE | \ + IXGBE_RXD_ERR_OSE | \ + IXGBE_RXD_ERR_USE) #define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ - IXGBE_RXDADV_ERR_CE | \ - IXGBE_RXDADV_ERR_LE | \ - IXGBE_RXDADV_ERR_PE | \ - IXGBE_RXDADV_ERR_OSE | \ - IXGBE_RXDADV_ERR_USE) + IXGBE_RXDADV_ERR_CE | \ + IXGBE_RXDADV_ERR_LE | \ + IXGBE_RXDADV_ERR_PE | \ + IXGBE_RXDADV_ERR_OSE | \ + IXGBE_RXDADV_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599 IXGBE_RXDADV_ERR_RXE /* Multicast bit mask */ -#define IXGBE_MCSTCTRL_MFE 0x4 +#define IXGBE_MCSTCTRL_MFE 0x4 /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define 
IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 /* Vlan-specific macros */ -#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ -#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ -#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ -#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT +#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ +#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT /* SR-IOV specific macros */ -#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) +#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) #define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4)) -#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600)) -#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) +#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600)) +#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) /* Translated register #defines */ +#define IXGBE_PVFCTRL(P) (0x00300 + (4 * (P))) +#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * (P))) +#define IXGBE_PVFLINKS(P) (0x042A4 + (0 * (P))) +#define IXGBE_PVFRTIMER(P) (0x00048 + (0 * (P))) +#define IXGBE_PVFMAILBOX(P) (0x04C00 + (4 * (P))) +#define IXGBE_PVFRXMEMWRAP(P) (0x03190 + (0 * (P))) +#define IXGBE_PVTEICR(P) (0x00B00 + (4 * (P))) +#define IXGBE_PVTEICS(P) (0x00C00 + (4 * (P))) +#define IXGBE_PVTEIMS(P) (0x00D00 + (4 * (P))) +#define IXGBE_PVTEIMC(P) (0x00E00 + (4 * (P))) +#define IXGBE_PVTEIAC(P) (0x00F00 + (4 * (P))) +#define IXGBE_PVTEIAM(P) (0x04D00 + (4 * (P))) +#define IXGBE_PVTEITR(P) (((P) < 24) ? 
(0x00820 + ((P) * 4)) : \ + (0x012300 + (((P) - 24) * 4))) +#define IXGBE_PVTIVAR(P) (0x12500 + (4 * (P))) +#define IXGBE_PVTIVAR_MISC(P) (0x04E00 + (4 * (P))) +#define IXGBE_PVTRSCINT(P) (0x12000 + (4 * (P))) +#define IXGBE_VFPBACL(P) (0x110C8 + (4 * (P))) +#define IXGBE_PVFRDBAL(P) ((P < 64) ? (0x01000 + (0x40 * (P))) \ + : (0x0D000 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDBAH(P) ((P < 64) ? (0x01004 + (0x40 * (P))) \ + : (0x0D004 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDLEN(P) ((P < 64) ? (0x01008 + (0x40 * (P))) \ + : (0x0D008 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDH(P) ((P < 64) ? (0x01010 + (0x40 * (P))) \ + : (0x0D010 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDT(P) ((P < 64) ? (0x01018 + (0x40 * (P))) \ + : (0x0D018 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRXDCTL(P) ((P < 64) ? (0x01028 + (0x40 * (P))) \ + : (0x0D028 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFSRRCTL(P) ((P < 64) ? (0x01014 + (0x40 * (P))) \ + : (0x0D014 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P))) +#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P))) +#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P))) +#define IXGBE_PVFTDLEN(P) (0x06008 + (0x40 * (P))) #define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) #define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) +#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P))) #define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) #define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) +#define IXGBE_PVFDCA_RXCTRL(P) (((P) < 64) ? 
(0x0100C + (0x40 * (P))) \ + : (0x0D00C + (0x40 * ((P) - 64)))) +#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * (P))) +#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x))) +#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x))) +#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x))) +#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x))) +#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x))) +#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x))) +#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x))) #define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \ (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index))) #define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \ (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index))) -#define IXGBE_PVFTDHN(q_per_pool, vf_number, vf_q_index) \ +#define IXGBE_PVFTDHn(q_per_pool, vf_number, vf_q_index) \ (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index))) -#define IXGBE_PVFTDTN(q_per_pool, vf_number, vf_q_index) \ +#define IXGBE_PVFTDTn(q_per_pool, vf_number, vf_q_index) \ (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index))) +/* Little Endian defines */ +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 + +#endif +#ifndef __be16 +/* Big Endian defines */ +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 + +#endif enum ixgbe_fdir_pballoc_type { IXGBE_FDIR_PBALLOC_NONE = 0, IXGBE_FDIR_PBALLOC_64K = 1, IXGBE_FDIR_PBALLOC_128K = 2, IXGBE_FDIR_PBALLOC_256K = 3, }; -#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16 /* Flow Director register values */ -#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001 -#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002 -#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003 -#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008 -#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010 -#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020 -#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 -#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 -#define 
IXGBE_FDIRCTRL_FLEX_SHIFT 16 +#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001 +#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002 +#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003 +#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008 +#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010 +#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020 +#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 +#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 +#define IXGBE_FDIRCTRL_DROP_Q_MASK 0x00007F00 +#define IXGBE_FDIRCTRL_FLEX_SHIFT 16 #define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000 #define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21 #define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */ #define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */ -#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 -#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 -#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 -#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28 - -#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16 -#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16 -#define IXGBE_FDIRIP6M_DIPM_SHIFT 16 -#define IXGBE_FDIRM_VLANID 0x00000001 -#define IXGBE_FDIRM_VLANP 0x00000002 -#define IXGBE_FDIRM_POOL 0x00000004 -#define IXGBE_FDIRM_L4P 0x00000008 -#define IXGBE_FDIRM_FLEX 0x00000010 -#define IXGBE_FDIRM_DIPv6 0x00000020 - -#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF -#define IXGBE_FDIRFREE_FREE_SHIFT 0 -#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000 -#define IXGBE_FDIRFREE_COLL_SHIFT 16 -#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F -#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0 -#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000 -#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16 -#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF -#define IXGBE_FDIRUSTAT_ADD_SHIFT 0 -#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000 -#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16 -#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF -#define IXGBE_FDIRFSTAT_FADD_SHIFT 0 -#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00 -#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8 -#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16 -#define IXGBE_FDIRVLAN_FLEX_SHIFT 
16 -#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15 -#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16 - -#define IXGBE_FDIRCMD_CMD_MASK 0x00000003 -#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001 -#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002 -#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003 -#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004 -#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008 -#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010 -#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020 -#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040 -#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060 -#define IXGBE_FDIRCMD_IPV6 0x00000080 -#define IXGBE_FDIRCMD_CLEARHT 0x00000100 -#define IXGBE_FDIRCMD_DROP 0x00000200 -#define IXGBE_FDIRCMD_INT 0x00000400 -#define IXGBE_FDIRCMD_LAST 0x00000800 -#define IXGBE_FDIRCMD_COLLISION 0x00001000 -#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 -#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 -#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 -#define IXGBE_FDIRCMD_RX_TUNNEL_FILTER_SHIFT 23 -#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 -#define IXGBE_FDIR_INIT_DONE_POLL 10 -#define IXGBE_FDIRCMD_CMD_POLL 10 +#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 +#define IXGBE_FDIRCTRL_FILTERMODE_MASK 0x00E00000 +#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 +#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 +#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28 + +#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16 +#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16 +#define IXGBE_FDIRIP6M_DIPM_SHIFT 16 +#define IXGBE_FDIRM_VLANID 0x00000001 +#define IXGBE_FDIRM_VLANP 0x00000002 +#define IXGBE_FDIRM_POOL 0x00000004 +#define IXGBE_FDIRM_L4P 0x00000008 +#define IXGBE_FDIRM_FLEX 0x00000010 +#define IXGBE_FDIRM_DIPv6 0x00000020 +#define IXGBE_FDIRM_L3P 0x00000040 + +#define IXGBE_FDIRIP6M_INNER_MAC 0x03F0 /* bit 9:4 */ +#define IXGBE_FDIRIP6M_TUNNEL_TYPE 0x0800 /* bit 11 */ +#define IXGBE_FDIRIP6M_TNI_VNI 0xF000 /* bit 15:12 */ +#define IXGBE_FDIRIP6M_TNI_VNI_24 0x1000 /* bit 12 */ +#define IXGBE_FDIRIP6M_ALWAYS_MASK 0x040F /* bit 10, 3:0 */ + 
+#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF +#define IXGBE_FDIRFREE_FREE_SHIFT 0 +#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000 +#define IXGBE_FDIRFREE_COLL_SHIFT 16 +#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F +#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0 +#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000 +#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16 +#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF +#define IXGBE_FDIRUSTAT_ADD_SHIFT 0 +#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000 +#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16 +#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF +#define IXGBE_FDIRFSTAT_FADD_SHIFT 0 +#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00 +#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8 +#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16 +#define IXGBE_FDIRVLAN_FLEX_SHIFT 16 +#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15 +#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16 + +#define IXGBE_FDIRCMD_CMD_MASK 0x00000003 +#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001 +#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002 +#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003 +#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004 +#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008 +#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010 +#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020 +#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040 +#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060 +#define IXGBE_FDIRCMD_IPV6 0x00000080 +#define IXGBE_FDIRCMD_CLEARHT 0x00000100 +#define IXGBE_FDIRCMD_DROP 0x00000200 +#define IXGBE_FDIRCMD_INT 0x00000400 +#define IXGBE_FDIRCMD_LAST 0x00000800 +#define IXGBE_FDIRCMD_COLLISION 0x00001000 +#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 +#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 +#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 +#define IXGBE_FDIRCMD_TUNNEL_FILTER_SHIFT 23 +#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 +#define IXGBE_FDIR_INIT_DONE_POLL 10 +#define IXGBE_FDIRCMD_CMD_POLL 10 #define IXGBE_FDIRCMD_TUNNEL_FILTER 0x00800000 - -#define IXGBE_FDIR_DROP_QUEUE 127 +#define IXGBE_FDIR_DROP_QUEUE 127 /* Manageablility Host Interface defines */ 
#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ @@ -2632,12 +3040,13 @@ enum ixgbe_fdir_pballoc_type { #define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */ #define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ #define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */ +#define IXGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */ /* CEM Support */ #define FW_CEM_HDR_LEN 0x4 #define FW_CEM_CMD_DRIVER_INFO 0xDD #define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 -#define FW_CEM_CMD_RESERVED 0x0 +#define FW_CEM_CMD_RESERVED 0X0 #define FW_CEM_UNUSED_VER 0x0 #define FW_CEM_MAX_RETRIES 3 #define FW_CEM_RESP_STATUS_SUCCESS 0x1 @@ -2654,7 +3063,7 @@ enum ixgbe_fdir_pballoc_type { #define FW_DISABLE_RXEN_CMD 0xDE #define FW_DISABLE_RXEN_LEN 0x1 #define FW_PHY_MGMT_REQ_CMD 0x20 -#define FW_PHY_TOKEN_REQ_CMD 0x0A +#define FW_PHY_TOKEN_REQ_CMD 0xA #define FW_PHY_TOKEN_REQ_LEN 2 #define FW_PHY_TOKEN_REQ 0 #define FW_PHY_TOKEN_REL 1 @@ -2672,56 +3081,59 @@ enum ixgbe_fdir_pballoc_type { #define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT) #define FW_PHY_ACT_INIT_PHY 1 #define FW_PHY_ACT_SETUP_LINK 2 -#define FW_PHY_ACT_LINK_SPEED_10 BIT(0) -#define FW_PHY_ACT_LINK_SPEED_100 BIT(1) -#define FW_PHY_ACT_LINK_SPEED_1G BIT(2) -#define FW_PHY_ACT_LINK_SPEED_2_5G BIT(3) -#define FW_PHY_ACT_LINK_SPEED_5G BIT(4) -#define FW_PHY_ACT_LINK_SPEED_10G BIT(5) -#define FW_PHY_ACT_LINK_SPEED_20G BIT(6) -#define FW_PHY_ACT_LINK_SPEED_25G BIT(7) -#define FW_PHY_ACT_LINK_SPEED_40G BIT(8) -#define FW_PHY_ACT_LINK_SPEED_50G BIT(9) -#define FW_PHY_ACT_LINK_SPEED_100G BIT(10) +#define FW_PHY_ACT_LINK_SPEED_10 (1u << 0) +#define FW_PHY_ACT_LINK_SPEED_100 (1u << 1) +#define FW_PHY_ACT_LINK_SPEED_1G (1u << 2) +#define FW_PHY_ACT_LINK_SPEED_2_5G (1u << 3) +#define FW_PHY_ACT_LINK_SPEED_5G (1u << 4) +#define FW_PHY_ACT_LINK_SPEED_10G (1u << 5) +#define FW_PHY_ACT_LINK_SPEED_20G (1u << 6) +#define FW_PHY_ACT_LINK_SPEED_25G 
(1u << 7) +#define FW_PHY_ACT_LINK_SPEED_40G (1u << 8) +#define FW_PHY_ACT_LINK_SPEED_50G (1u << 9) +#define FW_PHY_ACT_LINK_SPEED_100G (1u << 10) #define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16 -#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3 << \ - HW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) +#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3u << \ + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) #define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u #define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u #define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u #define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u -#define FW_PHY_ACT_SETUP_LINK_LP BIT(18) -#define FW_PHY_ACT_SETUP_LINK_HP BIT(19) -#define FW_PHY_ACT_SETUP_LINK_EEE BIT(20) -#define FW_PHY_ACT_SETUP_LINK_AN BIT(22) -#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN BIT(0) +#define FW_PHY_ACT_SETUP_LINK_LP (1u << 18) +#define FW_PHY_ACT_SETUP_LINK_HP (1u << 19) +#define FW_PHY_ACT_SETUP_LINK_EEE (1u << 20) +#define FW_PHY_ACT_SETUP_LINK_AN (1u << 22) +#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN (1u << 0) #define FW_PHY_ACT_GET_LINK_INFO 3 -#define FW_PHY_ACT_GET_LINK_INFO_EEE BIT(19) -#define FW_PHY_ACT_GET_LINK_INFO_FC_TX BIT(20) -#define FW_PHY_ACT_GET_LINK_INFO_FC_RX BIT(21) -#define FW_PHY_ACT_GET_LINK_INFO_POWER BIT(22) -#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE BIT(24) -#define FW_PHY_ACT_GET_LINK_INFO_TEMP BIT(25) -#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX BIT(28) -#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX BIT(29) +#define FW_PHY_ACT_GET_LINK_INFO_EEE (1u << 19) +#define FW_PHY_ACT_GET_LINK_INFO_FC_TX (1u << 20) +#define FW_PHY_ACT_GET_LINK_INFO_FC_RX (1u << 21) +#define FW_PHY_ACT_GET_LINK_INFO_POWER (1u << 22) +#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE (1u << 24) +#define FW_PHY_ACT_GET_LINK_INFO_TEMP (1u << 25) +#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX (1u << 28) +#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX (1u << 29) #define FW_PHY_ACT_FORCE_LINK_DOWN 4 -#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF BIT(0) +#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF (1u << 0) #define FW_PHY_ACT_PHY_SW_RESET 5 
#define FW_PHY_ACT_PHY_HW_RESET 6 #define FW_PHY_ACT_GET_PHY_INFO 7 #define FW_PHY_ACT_UD_2 0x1002 -#define FW_PHY_ACT_UD_2_10G_KR_EEE BIT(6) -#define FW_PHY_ACT_UD_2_10G_KX4_EEE BIT(5) -#define FW_PHY_ACT_UD_2_1G_KX_EEE BIT(4) -#define FW_PHY_ACT_UD_2_10G_T_EEE BIT(3) -#define FW_PHY_ACT_UD_2_1G_T_EEE BIT(2) -#define FW_PHY_ACT_UD_2_100M_TX_EEE BIT(1) +#define FW_PHY_ACT_UD_2_10G_KR_EEE (1u << 6) +#define FW_PHY_ACT_UD_2_10G_KX4_EEE (1u << 5) +#define FW_PHY_ACT_UD_2_1G_KX_EEE (1u << 4) +#define FW_PHY_ACT_UD_2_10G_T_EEE (1u << 3) +#define FW_PHY_ACT_UD_2_1G_T_EEE (1u << 2) +#define FW_PHY_ACT_UD_2_100M_TX_EEE (1u << 1) #define FW_PHY_ACT_RETRIES 50 #define FW_PHY_INFO_SPEED_MASK 0xFFFu #define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u #define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu /* Host Interface Command Structures */ + +#pragma pack(push, 1) + struct ixgbe_hic_hdr { u8 cmd; u8 buf_len; @@ -2742,7 +3154,7 @@ struct ixgbe_hic_hdr2_req { struct ixgbe_hic_hdr2_rsp { u8 cmd; u8 buf_lenl; - u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ + u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ u8 checksum; }; @@ -2784,8 +3196,8 @@ struct ixgbe_hic_read_shadow_ram { struct ixgbe_hic_write_shadow_ram { union ixgbe_hic_hdr2 hdr; - __be32 address; - __be16 length; + u32 address; + u16 length; u16 pad2; u16 data; u16 pad3; @@ -2813,7 +3225,7 @@ struct ixgbe_hic_internal_phy_req { u16 rsv1; __be32 write_data; u16 pad; -} __packed; +}; struct ixgbe_hic_internal_phy_resp { struct ixgbe_hic_hdr hdr; @@ -2833,20 +3245,53 @@ struct ixgbe_hic_phy_activity_resp { __be32 data[FW_PHY_ACT_DATA_COUNT]; }; +#pragma pack(pop) + +/* Transmit Descriptor - Legacy */ +struct ixgbe_legacy_tx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; 
/* Descriptor status */ + u8 css; /* Checksum start */ + __le16 vlan; + } fields; + } upper; +}; + /* Transmit Descriptor - Advanced */ union ixgbe_adv_tx_desc { struct { - __le64 buffer_addr; /* Address of descriptor's data buf */ + __le64 buffer_addr; /* Address of descriptor's data buf */ __le32 cmd_type_len; __le32 olinfo_status; } read; struct { - __le64 rsvd; /* Reserved */ + __le64 rsvd; /* Reserved */ __le32 nxtseq_seed; __le32 status; } wb; }; +/* Receive Descriptor - Legacy */ +struct ixgbe_legacy_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 vlan; +}; + /* Receive Descriptor - Advanced */ union ixgbe_adv_rx_desc { struct { @@ -2887,62 +3332,70 @@ struct ixgbe_adv_tx_context_desc { }; /* Adv Transmit Descriptor Config Masks */ -#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ -#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ -#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE 1588 Time Stamp */ -#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ -#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ -#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ -#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ -#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ -#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ -#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ -#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ -#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ -#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ -#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP 
Seg enable */ -#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ -#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ -#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ -#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ -#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ -#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ -#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ - IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ - IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ -#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ -#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ -#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ -#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ -#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ -#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */ -#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ -#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ +#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ +#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ +#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 time stamp */ +#define 
IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ +#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ +#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Adv Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Adv Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext 1=Adv */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ +#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +/* 1st&Last TSO-full iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 +#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_TUCMD_IPV4 
0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */ +#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* req Markers and CRC */ +#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ #define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ #define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ -#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ -#define IXGBE_ADVTXD_FCOEF_SOF (BIT(2) << 10) /* FC SOF index */ -#define IXGBE_ADVTXD_FCOEF_PARINC (BIT(3) << 10) /* Rel_Off in F_CTL */ -#define IXGBE_ADVTXD_FCOEF_ORIE (BIT(4) << 10) /* Orientation: End */ -#define IXGBE_ADVTXD_FCOEF_ORIS (BIT(5) << 10) /* Orientation: Start */ -#define IXGBE_ADVTXD_FCOEF_EOF_N (0u << 10) /* 00: EOFn */ -#define IXGBE_ADVTXD_FCOEF_EOF_T (1u << 10) /* 01: EOFt */ -#define IXGBE_ADVTXD_FCOEF_EOF_NI (2u << 10) /* 10: EOFni */ -#define IXGBE_ADVTXD_FCOEF_EOF_A (3u << 10) /* 11: EOFa */ -#define IXGBE_ADVTXD_FCOEF_EOF_MASK (3u << 10) /* FC EOF index */ -#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ - +#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ +#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ +#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */ +#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */ +#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define 
IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define IXGBE_ADVTXD_OUTER_IPLEN 16 /* Adv ctxt OUTERIPLEN shift */ +#define IXGBE_ADVTXD_TUNNEL_LEN 24 /* Adv ctxt TUNNELLEN shift */ +#define IXGBE_ADVTXD_TUNNEL_TYPE_SHIFT 16 /* Adv Tx Desc Tunnel Type shift */ +#define IXGBE_ADVTXD_OUTERIPCS_SHIFT 17 /* Adv Tx Desc OUTERIPCS Shift */ +#define IXGBE_ADVTXD_TUNNEL_TYPE_NVGRE 1 /* Adv Tx Desc Tunnel Type NVGRE */ +/* Adv Tx Desc OUTERIPCS Shift for X550EM_a */ +#define IXGBE_ADVTXD_OUTERIPCS_SHIFT_X550EM_a 26 /* Autonegotiation advertised speeds */ typedef u32 ixgbe_autoneg_advertised; /* Link speed */ @@ -2954,19 +3407,40 @@ typedef u32 ixgbe_link_speed; #define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 #define IXGBE_LINK_SPEED_5GB_FULL 0x0800 #define IXGBE_LINK_SPEED_10GB_FULL 0x0080 -#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ - IXGBE_LINK_SPEED_10GB_FULL) -#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ - IXGBE_LINK_SPEED_1GB_FULL | \ - IXGBE_LINK_SPEED_10GB_FULL) +#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) +#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ + IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) + +/* Physical layer type */ +typedef u64 ixgbe_physical_layer; +#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001 +#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002 +#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004 +#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020 +#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 
0x00080 +#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100 +#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200 +#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800 +#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000 +#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000 +#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000 +#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000 +#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000 /* Flow Control Data Sheet defined values * Calculation and defines taken from 802.1bb Annex O */ /* BitTimes (BT) conversion */ -#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) -#define IXGBE_B2BT(BT) (BT * 8) +#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) +#define IXGBE_B2BT(BT) (BT * 8) /* Calculate Delay to respond to PFC */ #define IXGBE_PFC_D 672 @@ -2976,8 +3450,8 @@ typedef u32 ixgbe_link_speed; #define IXGBE_CABLE_DO 5000 /* Delay Optical */ /* Calculate Interface Delay X540 */ -#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */ -#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ +#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */ +#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ #define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */ #define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC) @@ -3023,8 +3497,8 @@ typedef u32 ixgbe_link_speed; (2 * IXGBE_LOW_DV_X540(_max_frame_tc)) /* Software ATR hash keys */ -#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 -#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 +#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 /* Software ATR input stream values and masks */ #define IXGBE_ATR_HASH_MASK 0x7fff @@ -3035,14 +3509,22 @@ typedef u32 ixgbe_link_speed; #define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 #define IXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 enum ixgbe_atr_flow_type { - IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, - IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, - IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, - 
IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, - IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, - IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, - IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, - IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, + IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, + IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, + IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, + IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, + IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, + IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, + IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, }; /* Flow Director ATR input struct. */ @@ -3050,28 +3532,34 @@ union ixgbe_atr_input { /* * Byte layout in order, all values with MSB first: * - * vm_pool - 1 byte - * flow_type - 1 byte - * vlan_id - 2 bytes - * src_ip - 16 bytes - * dst_ip - 16 bytes - * src_port - 2 bytes - * dst_port - 2 bytes - * flex_bytes - 2 bytes - * bkt_hash - 2 bytes + * vm_pool - 1 byte + * flow_type - 1 byte + * vlan_id - 2 bytes + * src_ip - 16 bytes + * inner_mac - 6 bytes + * cloud_mode - 2 bytes + * tni_vni - 4 bytes + * dst_ip - 16 bytes + * src_port - 2 bytes + * dst_port - 2 bytes + * flex_bytes - 2 bytes + * bkt_hash - 2 bytes */ struct { - u8 vm_pool; - u8 flow_type; + u8 vm_pool; + u8 flow_type; __be16 vlan_id; __be32 dst_ip[4]; __be32 src_ip[4]; + u8 inner_mac[6]; + __be16 tunnel_type; + __be32 tni_vni; __be16 src_port; __be16 dst_port; __be16 flex_bytes; __be16 bkt_hash; } formatted; - __be32 dword_stream[11]; + __be32 dword_stream[14]; }; /* Flow Director compressed ATR hash input struct */ @@ -3090,10 +3578,11 @@ union ixgbe_atr_hash_dword { __be32 dword; }; -#define IXGBE_MVALS_INIT(m) \ +#define IXGBE_MVALS_INIT(m) \ IXGBE_CAT(EEC, m), \ IXGBE_CAT(FLA, m), \ 
IXGBE_CAT(GRC, m), \ + IXGBE_CAT(SRAMREL, m), \ IXGBE_CAT(FACTPS, m), \ IXGBE_CAT(SWSM, m), \ IXGBE_CAT(SWFW_SYNC, m), \ @@ -3116,10 +3605,21 @@ union ixgbe_atr_hash_dword { IXGBE_CAT(I2CCTL, m) enum ixgbe_mvals { - IXGBE_MVALS_INIT(IDX), + IXGBE_MVALS_INIT(_IDX), IXGBE_MVALS_IDX_LIMIT }; +/* + * Unavailable: The FCoE Boot Option ROM is not present in the flash. + * Disabled: Present; boot order is not set for any targets on the port. + * Enabled: Present; boot order is set for at least one target on the port. + */ +enum ixgbe_fcoe_boot_status { + ixgbe_fcoe_bootstatus_disabled = 0, + ixgbe_fcoe_bootstatus_enabled = 1, + ixgbe_fcoe_bootstatus_unavailable = 0xFFFF +}; + enum ixgbe_eeprom_type { ixgbe_eeprom_uninitialized = 0, ixgbe_eeprom_spi, @@ -3134,7 +3634,7 @@ enum ixgbe_mac_type { ixgbe_mac_X540, ixgbe_mac_X550, ixgbe_mac_X550EM_x, - ixgbe_mac_x550em_a, + ixgbe_mac_X550EM_a, ixgbe_num_macs }; @@ -3164,7 +3664,7 @@ enum ixgbe_phy_type { ixgbe_phy_qsfp_active_unknown, ixgbe_phy_qsfp_intel, ixgbe_phy_qsfp_unknown, - ixgbe_phy_sfp_unsupported, + ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ ixgbe_phy_sgmii, ixgbe_phy_fw, ixgbe_phy_generic @@ -3173,15 +3673,15 @@ enum ixgbe_phy_type { /* * SFP+ module type IDs: * - * ID Module Type + * ID Module Type * ============= - * 0 SFP_DA_CU - * 1 SFP_SR - * 2 SFP_LR - * 3 SFP_DA_CU_CORE0 - 82599-specific - * 4 SFP_DA_CU_CORE1 - 82599-specific - * 5 SFP_SR/LR_CORE0 - 82599-specific - * 6 SFP_SR/LR_CORE1 - 82599-specific + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CU_CORE0 - 82599-specific + * 4 SFP_DA_CU_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific */ enum ixgbe_sfp_type { ixgbe_sfp_type_da_cu = 0, @@ -3234,6 +3734,8 @@ enum ixgbe_smart_speed { /* PCI bus types */ enum ixgbe_bus_type { ixgbe_bus_type_unknown = 0, + ixgbe_bus_type_pci, + ixgbe_bus_type_pcix, ixgbe_bus_type_pci_express, ixgbe_bus_type_internal, ixgbe_bus_type_reserved 
@@ -3241,27 +3743,27 @@ enum ixgbe_bus_type { /* PCI bus speeds */ enum ixgbe_bus_speed { - ixgbe_bus_speed_unknown = 0, - ixgbe_bus_speed_33 = 33, - ixgbe_bus_speed_66 = 66, - ixgbe_bus_speed_100 = 100, - ixgbe_bus_speed_120 = 120, - ixgbe_bus_speed_133 = 133, - ixgbe_bus_speed_2500 = 2500, - ixgbe_bus_speed_5000 = 5000, - ixgbe_bus_speed_8000 = 8000, + ixgbe_bus_speed_unknown = 0, + ixgbe_bus_speed_33 = 33, + ixgbe_bus_speed_66 = 66, + ixgbe_bus_speed_100 = 100, + ixgbe_bus_speed_120 = 120, + ixgbe_bus_speed_133 = 133, + ixgbe_bus_speed_2500 = 2500, + ixgbe_bus_speed_5000 = 5000, + ixgbe_bus_speed_8000 = 8000, ixgbe_bus_speed_reserved }; /* PCI bus widths */ enum ixgbe_bus_width { - ixgbe_bus_width_unknown = 0, - ixgbe_bus_width_pcie_x1 = 1, - ixgbe_bus_width_pcie_x2 = 2, - ixgbe_bus_width_pcie_x4 = 4, - ixgbe_bus_width_pcie_x8 = 8, - ixgbe_bus_width_32 = 32, - ixgbe_bus_width_64 = 64, + ixgbe_bus_width_unknown = 0, + ixgbe_bus_width_pcie_x1 = 1, + ixgbe_bus_width_pcie_x2 = 2, + ixgbe_bus_width_pcie_x4 = 4, + ixgbe_bus_width_pcie_x8 = 8, + ixgbe_bus_width_32 = 32, + ixgbe_bus_width_64 = 64, ixgbe_bus_width_reserved }; @@ -3270,7 +3772,6 @@ struct ixgbe_addr_filter_info { u32 rar_used_count; u32 mta_in_use; u32 overflow_promisc; - bool uc_set_promisc; bool user_set_promisc; }; @@ -3280,15 +3781,15 @@ struct ixgbe_bus_info { enum ixgbe_bus_width width; enum ixgbe_bus_type type; - u8 func; + u16 func; u8 lan_id; - u8 instance_id; + u16 instance_id; }; /* Flow control parameters */ struct ixgbe_fc_info { - u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */ - u32 low_water[MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */ + u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */ + u32 low_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */ u16 pause_time; /* Flow Control Pause timer */ bool send_xon; /* Flow control send XON */ bool strict_ieee; /* Strict IEEE mode */ @@ -3349,8 +3850,6 @@ struct ixgbe_hw_stats { u64 mptc; u64 
bptc; u64 xec; - u64 rqsmr[16]; - u64 tqsmr[8]; u64 qprc[16]; u64 qptc[16]; u64 qbrc[16]; @@ -3364,6 +3863,7 @@ struct ixgbe_hw_stats { u64 fdirmatch; u64 fdirmiss; u64 fccrc; + u64 fclast; u64 fcoerpdc; u64 fcoeprc; u64 fcoeptc; @@ -3371,6 +3871,8 @@ struct ixgbe_hw_stats { u64 fcoedwtc; u64 fcoe_noddp; u64 fcoe_noddp_ext_buff; + u64 ldpcec; + u64 pcrc8ec; u64 b2ospc; u64 b2ogprc; u64 o2bgptc; @@ -3402,19 +3904,22 @@ struct ixgbe_mac_operations { s32 (*start_hw)(struct ixgbe_hw *); s32 (*clear_hw_cntrs)(struct ixgbe_hw *); enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); + u64 (*get_supported_physical_layer)(struct ixgbe_hw *); s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *); s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *); + s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *); s32 (*stop_adapter)(struct ixgbe_hw *); s32 (*get_bus_info)(struct ixgbe_hw *); void (*set_lan_id)(struct ixgbe_hw *); s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); s32 (*setup_sfp)(struct ixgbe_hw *); - s32 (*disable_rx_buff)(struct ixgbe_hw *); - s32 (*enable_rx_buff)(struct ixgbe_hw *); s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); + s32 (*disable_sec_rx_path)(struct ixgbe_hw *); + s32 (*enable_sec_rx_path)(struct ixgbe_hw *); s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); void (*release_swfw_sync)(struct ixgbe_hw *, u32); void (*init_swfw_sync)(struct ixgbe_hw *); @@ -3425,7 +3930,6 @@ struct ixgbe_mac_operations { void (*disable_tx_laser)(struct ixgbe_hw *); void (*enable_tx_laser)(struct ixgbe_hw *); void (*flap_tx_laser)(struct ixgbe_hw *); - void (*stop_link_on_d3)(struct ixgbe_hw *); s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, 
ixgbe_link_speed *, bool *, bool); @@ -3433,8 +3937,8 @@ struct ixgbe_mac_operations { bool *); void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed); - /* Packet Buffer Manipulation */ - void (*set_rxpba)(struct ixgbe_hw *, int, u32, int); + /* Packet Buffer manipulation */ + void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int); /* LED */ s32 (*led_on)(struct ixgbe_hw *, u32); @@ -3445,16 +3949,23 @@ struct ixgbe_mac_operations { /* RAR, Multicast, VLAN */ s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); + s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *); s32 (*clear_rar)(struct ixgbe_hw *, u32); + s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32); s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32); s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); s32 (*init_rx_addrs)(struct ixgbe_hw *); - s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); + s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32, + ixgbe_mc_addr_itr); + s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, + ixgbe_mc_addr_itr, bool clear); s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool); + s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, u32 *, u32, + bool); s32 (*init_uta_tables)(struct ixgbe_hw *); void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); @@ -3469,18 +3980,22 @@ struct ixgbe_mac_operations { const char *); s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map); void (*disable_rx)(struct ixgbe_hw *hw); void (*enable_rx)(struct ixgbe_hw *hw); void (*set_source_address_pruning)(struct ixgbe_hw *, bool, unsigned int); void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int); - - /* DMA Coalescing */ 
- s32 (*dmac_config)(struct ixgbe_hw *hw); s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); s32 (*dmac_config_tcs)(struct ixgbe_hw *hw); + s32 (*dmac_config)(struct ixgbe_hw *hw); + s32 (*setup_eee)(struct ixgbe_hw *hw, bool enable_eee); s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *); s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32); + void (*disable_mdd)(struct ixgbe_hw *hw); + void (*enable_mdd)(struct ixgbe_hw *hw); + void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap); + void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf); }; struct ixgbe_phy_operations { @@ -3502,8 +4017,7 @@ struct ixgbe_phy_operations { s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); - s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); - s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); + void (*i2c_bus_clear)(struct ixgbe_hw *); s32 (*check_overtemp)(struct ixgbe_hw *); s32 (*set_phy_power)(struct ixgbe_hw *, bool on); s32 (*enter_lplu)(struct ixgbe_hw *); @@ -3529,82 +4043,87 @@ struct ixgbe_link_info { }; struct ixgbe_eeprom_info { - struct ixgbe_eeprom_operations ops; - enum ixgbe_eeprom_type type; - u32 semaphore_delay; - u16 word_size; - u16 address_bits; - u16 word_page_size; - u16 ctrl_word_3; + struct ixgbe_eeprom_operations ops; + enum ixgbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; + u16 address_bits; + u16 word_page_size; + u16 ctrl_word_3; }; #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 struct ixgbe_mac_info { - struct ixgbe_mac_operations ops; - enum ixgbe_mac_type type; - u8 addr[ETH_ALEN]; - u8 perm_addr[ETH_ALEN]; - u8 san_addr[ETH_ALEN]; + struct ixgbe_mac_operations ops; + enum ixgbe_mac_type type; + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; /* prefix for World Wide Node Name (WWNN) */ - u16 
wwnn_prefix; + u16 wwnn_prefix; /* prefix for World Wide Port Name (WWPN) */ - u16 wwpn_prefix; - u16 max_msix_vectors; + u16 wwpn_prefix; #define IXGBE_MAX_MTA 128 - u32 mta_shadow[IXGBE_MAX_MTA]; - s32 mc_filter_type; - u32 mcft_size; - u32 vft_size; - u32 num_rar_entries; - u32 rar_highwater; - u32 rx_pb_size; - u32 max_tx_queues; - u32 max_rx_queues; - u32 orig_autoc; - u32 orig_autoc2; - bool orig_link_settings_stored; - bool autotry_restart; - u8 flags; - u8 san_mac_rar_index; + u32 mta_shadow[IXGBE_MAX_MTA]; + s32 mc_filter_type; + u32 mcft_size; + u32 vft_size; + u32 num_rar_entries; + u32 rar_highwater; + u32 rx_pb_size; + u32 max_tx_queues; + u32 max_rx_queues; + u32 orig_autoc; + u8 san_mac_rar_index; + bool get_link_status; + u32 orig_autoc2; + u16 max_msix_vectors; + bool arc_subsystem_valid; + bool orig_link_settings_stored; + bool autotry_restart; + u8 flags; struct ixgbe_thermal_sensor_data thermal_sensor_data; - bool set_lben; - u8 led_link_act; + bool thermal_sensor_enabled; + struct ixgbe_dmac_config dmac_config; + bool set_lben; + u32 max_link_up_time; + u8 led_link_act; }; struct ixgbe_phy_info { - struct ixgbe_phy_operations ops; - struct mdio_if_info mdio; - enum ixgbe_phy_type type; - u32 id; - enum ixgbe_sfp_type sfp_type; - bool sfp_setup_needed; - u32 revision; - enum ixgbe_media_type media_type; - u32 phy_semaphore_mask; - bool reset_disable; - ixgbe_autoneg_advertised autoneg_advertised; - ixgbe_link_speed speeds_supported; - ixgbe_link_speed eee_speeds_supported; - ixgbe_link_speed eee_speeds_advertised; - enum ixgbe_smart_speed smart_speed; - bool smart_speed_active; - bool multispeed_fiber; - bool reset_if_overtemp; - bool qsfp_shared_i2c_bus; - u32 nw_mng_if_sel; + struct ixgbe_phy_operations ops; + enum ixgbe_phy_type type; + u32 addr; + u32 id; + enum ixgbe_sfp_type sfp_type; + bool sfp_setup_needed; + u32 revision; + enum ixgbe_media_type media_type; + u32 phy_semaphore_mask; + bool reset_disable; + ixgbe_autoneg_advertised 
autoneg_advertised; + ixgbe_link_speed speeds_supported; + ixgbe_link_speed eee_speeds_supported; + ixgbe_link_speed eee_speeds_advertised; + enum ixgbe_smart_speed smart_speed; + bool smart_speed_active; + bool multispeed_fiber; + bool reset_if_overtemp; + bool qsfp_shared_i2c_bus; + u32 nw_mng_if_sel; }; #include "ixgbe_mbx.h" struct ixgbe_mbx_operations { - s32 (*init_params)(struct ixgbe_hw *hw); - s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*check_for_msg)(struct ixgbe_hw *, u16); - s32 (*check_for_ack)(struct ixgbe_hw *, u16); - s32 (*check_for_rst)(struct ixgbe_hw *, u16); + void (*init_params)(struct ixgbe_hw *hw); + s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct ixgbe_hw *, u16); + s32 (*check_for_ack)(struct ixgbe_hw *, u16); + s32 (*check_for_rst)(struct ixgbe_hw *, u16); }; struct ixgbe_mbx_stats { @@ -3617,7 +4136,7 @@ struct ixgbe_mbx_stats { }; struct ixgbe_mbx_info { - const struct ixgbe_mbx_operations *ops; + struct ixgbe_mbx_operations ops; struct ixgbe_mbx_stats stats; u32 timeout; u32 usec_delay; @@ -3626,88 +4145,88 @@ struct ixgbe_mbx_info { }; struct ixgbe_hw { - u8 __iomem *hw_addr; - void *back; - struct ixgbe_mac_info mac; - struct ixgbe_addr_filter_info addr_ctrl; - struct ixgbe_fc_info fc; - struct ixgbe_phy_info phy; - struct ixgbe_link_info link; - struct ixgbe_eeprom_info eeprom; - struct ixgbe_bus_info bus; - struct ixgbe_mbx_info mbx; - const u32 *mvals; - u16 device_id; - u16 vendor_id; - u16 subsystem_device_id; - u16 subsystem_vendor_id; - u8 revision_id; - bool adapter_stopped; - bool force_full_reset; - bool allow_unsupported_sfp; - bool wol_enabled; - 
bool need_crosstalk_fix; -}; - -struct ixgbe_info { - enum ixgbe_mac_type mac; - s32 (*get_invariants)(struct ixgbe_hw *); - const struct ixgbe_mac_operations *mac_ops; - const struct ixgbe_eeprom_operations *eeprom_ops; - const struct ixgbe_phy_operations *phy_ops; - const struct ixgbe_mbx_operations *mbx_ops; - const struct ixgbe_link_operations *link_ops; - const u32 *mvals; + u8 IOMEM *hw_addr; + void *back; + struct ixgbe_mac_info mac; + struct ixgbe_addr_filter_info addr_ctrl; + struct ixgbe_fc_info fc; + struct ixgbe_phy_info phy; + struct ixgbe_link_info link; + struct ixgbe_eeprom_info eeprom; + struct ixgbe_bus_info bus; + struct ixgbe_mbx_info mbx; + const u32 *mvals; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; + int api_version; + bool force_full_reset; + bool allow_unsupported_sfp; + bool wol_enabled; + bool need_crosstalk_fix; }; +#define ixgbe_call_func(hw, func, params, error) \ + (func != NULL) ? 
func params : error /* Error Codes */ -#define IXGBE_ERR_EEPROM -1 -#define IXGBE_ERR_EEPROM_CHECKSUM -2 -#define IXGBE_ERR_PHY -3 -#define IXGBE_ERR_CONFIG -4 -#define IXGBE_ERR_PARAM -5 -#define IXGBE_ERR_MAC_TYPE -6 -#define IXGBE_ERR_UNKNOWN_PHY -7 -#define IXGBE_ERR_LINK_SETUP -8 -#define IXGBE_ERR_ADAPTER_STOPPED -9 -#define IXGBE_ERR_INVALID_MAC_ADDR -10 -#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 -#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 -#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 -#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 -#define IXGBE_ERR_RESET_FAILED -15 -#define IXGBE_ERR_SWFW_SYNC -16 -#define IXGBE_ERR_PHY_ADDR_INVALID -17 -#define IXGBE_ERR_I2C -18 -#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 -#define IXGBE_ERR_SFP_NOT_PRESENT -20 -#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 -#define IXGBE_ERR_NO_SAN_ADDR_PTR -22 -#define IXGBE_ERR_FDIR_REINIT_FAILED -23 -#define IXGBE_ERR_EEPROM_VERSION -24 -#define IXGBE_ERR_NO_SPACE -25 -#define IXGBE_ERR_OVERTEMP -26 -#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 -#define IXGBE_ERR_FC_NOT_SUPPORTED -28 -#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 -#define IXGBE_ERR_PBA_SECTION -31 -#define IXGBE_ERR_INVALID_ARGUMENT -32 -#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 +#define IXGBE_SUCCESS 0 +#define IXGBE_ERR_EEPROM -1 +#define IXGBE_ERR_EEPROM_CHECKSUM -2 +#define IXGBE_ERR_PHY -3 +#define IXGBE_ERR_CONFIG -4 +#define IXGBE_ERR_PARAM -5 +#define IXGBE_ERR_MAC_TYPE -6 +#define IXGBE_ERR_UNKNOWN_PHY -7 +#define IXGBE_ERR_LINK_SETUP -8 +#define IXGBE_ERR_ADAPTER_STOPPED -9 +#define IXGBE_ERR_INVALID_MAC_ADDR -10 +#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 +#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 +#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 +#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 +#define IXGBE_ERR_RESET_FAILED -15 +#define IXGBE_ERR_SWFW_SYNC -16 +#define IXGBE_ERR_PHY_ADDR_INVALID -17 +#define IXGBE_ERR_I2C -18 +#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 +#define IXGBE_ERR_SFP_NOT_PRESENT -20 +#define 
IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 +#define IXGBE_ERR_NO_SAN_ADDR_PTR -22 +#define IXGBE_ERR_FDIR_REINIT_FAILED -23 +#define IXGBE_ERR_EEPROM_VERSION -24 +#define IXGBE_ERR_NO_SPACE -25 +#define IXGBE_ERR_OVERTEMP -26 +#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 +#define IXGBE_ERR_FC_NOT_SUPPORTED -28 +#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 +#define IXGBE_ERR_PBA_SECTION -31 +#define IXGBE_ERR_INVALID_ARGUMENT -32 +#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 +#define IXGBE_ERR_OUT_OF_MEM -34 +#define IXGBE_ERR_FEATURE_NOT_SUPPORTED -36 +#define IXGBE_ERR_EEPROM_PROTECTED_REGION -37 #define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38 #define IXGBE_ERR_FW_RESP_INVALID -39 #define IXGBE_ERR_TOKEN_RETRY -40 -#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF + +#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF #define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) -#define IXGBE_FUSES0_300MHZ BIT(5) -#define IXGBE_FUSES0_REV_MASK (3u << 6) +#define IXGBE_FUSES0_300MHZ (1 << 5) +#define IXGBE_FUSES0_REV_MASK (3 << 6) #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) #define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200) #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) #define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) +#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238) #define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248) +#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918) +#define IXGBE_KRM_PCS_KX_AN_LP(P) ((P) ? 0x991C : 0x591C) #define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0) #define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C) #define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) @@ -3719,102 +4238,100 @@ struct ixgbe_info { #define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 
0x9A00 : 0x5A00) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR (1u << 20) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR (0x2 << 20) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN BIT(25) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN BIT(26) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN BIT(27) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN (1u << 25) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN (1u << 26) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN (1u << 27) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10M ~(0x7 << 28) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M BIT(28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M (1u << 28) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G (0x2 << 28) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G (0x3 << 28) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN (0x4 << 28) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G (0x7 << 28) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK (0x7 << 28) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART BIT(31) - -#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9) -#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11) - -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (7u << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2u << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4u << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN BIT(12) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN BIT(13) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ BIT(14) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC BIT(15) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX BIT(16) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18) -#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24) -#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26) -#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE BIT(28) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART 
BIT(31) - -#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE BIT(28) -#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE BIT(29) - -#define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0) -#define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1) - -#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE BIT(10) -#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE BIT(11) -#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12) -#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19) - -#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN BIT(6) -#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN BIT(15) -#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN BIT(16) - -#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL BIT(4) -#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS BIT(2) - -#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (3u << 16) - -#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN BIT(1) -#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN BIT(2) -#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3) -#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31) - -#define IXGBE_KX4_LINK_CNTL_1 0x4C -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX BIT(16) -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 BIT(17) -#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX BIT(24) -#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 BIT(25) -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE BIT(29) -#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP BIT(30) -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART BIT(31) - -#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 -#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 +#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART (1u << 31) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) + +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN (1 << 12) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN (1 << 13) 
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26) +#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE (1 << 28) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) + +#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28) +#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29) +#define IXGBE_KRM_PCS_KX_AN_SYM_PAUSE (1 << 1) +#define IXGBE_KRM_PCS_KX_AN_ASM_PAUSE (1 << 2) +#define IXGBE_KRM_PCS_KX_AN_LP_SYM_PAUSE (1 << 2) +#define IXGBE_KRM_PCS_KX_AN_LP_ASM_PAUSE (1 << 3) +#define IXGBE_KRM_AN_CNTL_4_ECSR_AN37_OVER_73 (1 << 29) +#define IXGBE_KRM_AN_CNTL_8_LINEAR (1 << 0) +#define IXGBE_KRM_AN_CNTL_8_LIMITING (1 << 1) + +#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE (1 << 10) +#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE (1 << 11) + +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D (1 << 12) +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D (1 << 19) + +#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) +#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) +#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) + +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2) + +#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16) + +#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3) +#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31) + +#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 +#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 #define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0 #define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF #define 
IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18 -#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \ +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \ (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT) #define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20 -#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \ +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \ (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT) #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28 #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7 #define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31 -#define IXGBE_SB_IOSF_CTRL_BUSY BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) +#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) #define IXGBE_SB_IOSF_TARGET_KR_PHY 0 -#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1 -#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2 -#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3 #define IXGBE_NW_MNG_IF_SEL 0x00011178 -#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1) -#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M BIT(17) -#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M BIT(18) -#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G BIT(19) -#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G BIT(20) -#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G BIT(21) -#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE BIT(25) -#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) /* X552 only */ -#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 +#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT (1u << 1) +#define IXGBE_NW_MNG_IF_SEL_MDIO_IF_MODE (1u << 2) +#define IXGBE_NW_MNG_IF_SEL_EN_SHARED_MDIO (1u << 13) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M (1u << 17) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M (1u << 18) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G (1u << 19) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G (1u << 20) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G (1u << 21) +#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE (1u << 25) +#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24) /* X552 reg field only */ +#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) + 
+#include "ixgbe_osdep2.h" + #endif /* _IXGBE_TYPE_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.c index 6ea0d6a5fb90..2a62f443c4cd 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.c @@ -1,7 +1,7 @@ /******************************************************************************* - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -12,10 +12,6 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution in the file called "COPYING". 
@@ -26,13 +22,11 @@ *******************************************************************************/ -#include -#include -#include - -#include "ixgbe.h" -#include "ixgbe_phy.h" #include "ixgbe_x540.h" +#include "ixgbe_type.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" #define IXGBE_X540_MAX_TX_QUEUES 128 #define IXGBE_X540_MAX_RX_QUEUES 128 @@ -41,46 +35,154 @@ #define IXGBE_X540_VFT_TBL_SIZE 128 #define IXGBE_X540_RX_PB_SIZE 384 -static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); -static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); -static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); -static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); - -enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) -{ - return ixgbe_media_type_copper; -} +STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); +STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); +STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); -s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) +/** + * ixgbe_init_ops_X540 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for X540. + * Does not touch the hardware. 
+ **/ +s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val; - /* set_phy_power was set by default to NULL */ + DEBUGFUNC("ixgbe_init_ops_X540"); + + ret_val = ixgbe_init_phy_ops_generic(hw); + ret_val = ixgbe_init_ops_generic(hw); + + /* EEPROM */ + eeprom->ops.init_params = ixgbe_init_eeprom_params_X540; + eeprom->ops.read = ixgbe_read_eerd_X540; + eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540; + eeprom->ops.write = ixgbe_write_eewr_X540; + eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540; + eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540; + + /* PHY */ + phy->ops.init = ixgbe_init_phy_ops_generic; + phy->ops.reset = NULL; phy->ops.set_phy_power = ixgbe_set_copper_phy_power; - mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; - mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; - mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; - mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE; - mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; - mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; - mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + /* MAC */ + mac->ops.reset_hw = ixgbe_reset_hw_X540; + mac->ops.get_media_type = ixgbe_get_media_type_X540; + mac->ops.get_supported_physical_layer = + ixgbe_get_supported_physical_layer_X540; + mac->ops.read_analog_reg8 = NULL; + mac->ops.write_analog_reg8 = NULL; + mac->ops.start_hw = ixgbe_start_hw_X540; + mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; + mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; + mac->ops.get_device_caps = ixgbe_get_device_caps_generic; + mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; + mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; + mac->ops.acquire_swfw_sync = 
ixgbe_acquire_swfw_sync_X540; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540; + mac->ops.init_swfw_sync = ixgbe_init_swfw_sync_X540; + mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; + mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; + + /* RAR, Multicast, VLAN */ + mac->ops.set_vmdq = ixgbe_set_vmdq_generic; + mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic; + mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; + mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; + mac->rar_highwater = 1; + mac->ops.set_vfta = ixgbe_set_vfta_generic; + mac->ops.set_vlvf = ixgbe_set_vlvf_generic; + mac->ops.clear_vfta = ixgbe_clear_vfta_generic; + mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; + mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; + + /* Link */ + mac->ops.get_link_capabilities = + ixgbe_get_copper_link_capabilities_generic; + mac->ops.setup_link = ixgbe_setup_mac_link_X540; + mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; + mac->ops.check_link = ixgbe_check_mac_link_generic; + + mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; + mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + /* + * FWSM register + * ARC supported; valid only if manageability features are + * enabled. 
+ */ + mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)) + & IXGBE_FWSM_MODE_MASK); + + hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; + + /* LEDs */ + mac->ops.blink_led_start = ixgbe_blink_led_start_X540; + mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; + + mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; + + return ret_val; +} + +/** + * ixgbe_get_link_capabilities_X540 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. + **/ +s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilitires + * ixgbe_get_media_type_X540 - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return ixgbe_media_type_copper; +} + +/** + * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities * @hw: pointer to hardware structure * @speed: new link speed * @autoneg_wait_to_complete: true when waiting for completion is needed **/ -s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg_wait_to_complete) { - return hw->phy.ops.setup_link_speed(hw, speed, - autoneg_wait_to_complete); + DEBUGFUNC("ixgbe_setup_mac_link_X540"); + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); } /** @@ -88,8 +190,7 @@ s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, 
ixgbe_link_speed speed, * @hw: pointer to hardware structure * * Resets the hardware by resetting the transmit and receive units, masks - * and clears all interrupts, perform a PHY reset, and perform a link (MAC) - * reset. + * and clears all interrupts, and perform a reset. **/ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) { @@ -97,41 +198,43 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) u32 ctrl, i; u32 swfw_mask = hw->phy.phy_semaphore_mask; + DEBUGFUNC("ixgbe_reset_hw_X540"); + /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); - if (status) - return status; + if (status != IXGBE_SUCCESS) + goto reset_hw_out; /* flush pending Tx transactions */ ixgbe_clear_tx_pending(hw); mac_reset_top: status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); - if (status) { - hw_dbg(hw, "semaphore failed with %d", status); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "semaphore failed with %d", status); return IXGBE_ERR_SWFW_SYNC; } - ctrl = IXGBE_CTRL_RST; ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); hw->mac.ops.release_swfw_sync(hw, swfw_mask); - usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { + usec_delay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; - udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { status = IXGBE_ERR_RESET_FAILED; - hw_dbg(hw, "Reset polling failed to complete.\n"); + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Reset polling failed to complete.\n"); } - msleep(100); + msec_delay(100); /* * Double resets are required for recovery from certain error @@ -154,14 +257,14 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address. 
*/ - hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES; + hw->mac.num_rar_entries = 128; hw->mac.ops.init_rx_addrs(hw); /* Store the permanent SAN mac address */ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); /* Add the SAN MAC address to the RAR only if it's a valid address */ - if (is_valid_ether_addr(hw->mac.san_addr)) { + if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; @@ -180,6 +283,7 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, &hw->mac.wwpn_prefix); +reset_hw_out: return status; } @@ -193,13 +297,43 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) **/ s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) { - s32 ret_val; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_hw_X540"); ret_val = ixgbe_start_hw_generic(hw); - if (ret_val) - return ret_val; + if (ret_val != IXGBE_SUCCESS) + goto out; - return ixgbe_start_hw_gen2(hw); + ret_val = ixgbe_start_hw_gen2(hw); + +out: + return ret_val; +} + +/** + * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. 
+ **/ +u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) +{ + u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_supported_physical_layer_X540"); + + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + + return physical_layer; } /** @@ -215,21 +349,23 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) u32 eec; u16 eeprom_size; + DEBUGFUNC("ixgbe_init_eeprom_params_X540"); + if (eeprom->type == ixgbe_eeprom_uninitialized) { eeprom->semaphore_delay = 10; eeprom->type = ixgbe_flash; - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = BIT(eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); - hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", - eeprom->type, eeprom->word_size); + DEBUGOUT2("Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); } - return 0; + return IXGBE_SUCCESS; } /** @@ -240,21 +376,24 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) * * Reads a 16 bit word from the EEPROM using the EERD register. 
**/ -static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) +s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) { - s32 status; - - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; - - status = ixgbe_read_eerd_generic(hw, offset, data); + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_read_eerd_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_read_eerd_generic(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } /** - * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD + * ixgbe_read_eerd_buffer_X540- Read EEPROM word(s) using EERD * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @words: number of words @@ -262,17 +401,21 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) * * Reads a 16 bit word(s) from the EEPROM using the EERD register. 
**/ -static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, - u16 offset, u16 words, u16 *data) +s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) { - s32 status; - - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; - - status = ixgbe_read_eerd_buffer_generic(hw, offset, words, data); + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_read_eerd_buffer_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_read_eerd_buffer_generic(hw, offset, + words, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } @@ -284,16 +427,19 @@ static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, * * Write a 16 bit word to the EEPROM using the EEWR register. **/ -static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) +s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) { - s32 status; - - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; - - status = ixgbe_write_eewr_generic(hw, offset, data); + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_write_eewr_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_write_eewr_generic(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } @@ -306,17 +452,21 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) * * Write a 16 bit word(s) to the EEPROM using the EEWR register. 
**/ -static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, - u16 offset, u16 words, u16 *data) +s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) { - s32 status; - - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; - - status = ixgbe_write_eewr_buffer_generic(hw, offset, words, data); + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_write_eewr_buffer_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_write_eewr_buffer_generic(hw, offset, + words, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } @@ -327,35 +477,37 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, * be used internally by function which utilize ixgbe_acquire_swfw_sync_X540. * * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum **/ -static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) +s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) { - u16 i; - u16 j; + u16 i, j; u16 checksum = 0; u16 length = 0; u16 pointer = 0; u16 word = 0; - u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM; u16 ptr_start = IXGBE_PCIE_ANALOG_PTR; - /* - * Do not use hw->eeprom.ops.read because we do not want to take + /* Do not use hw->eeprom.ops.read because we do not want to take * the synchronization semaphores here. 
Instead use * ixgbe_read_eerd_generic */ - /* Include 0x0-0x3F in the checksum */ - for (i = 0; i < checksum_last_word; i++) { + DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540"); + + /* Include 0x0 up to IXGBE_EEPROM_CHECKSUM; do not include the + * checksum itself + */ + for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { if (ixgbe_read_eerd_generic(hw, i, &word)) { - hw_dbg(hw, "EEPROM read failed\n"); + DEBUGOUT("EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } checksum += word; } - /* - * Include all data from pointers 0x3, 0x6-0xE. This excludes the + /* Include all data from pointers 0x3, 0x6-0xE. This excludes the * FW, PHY module, and PCIe Expansion/Option ROM pointers. */ for (i = ptr_start; i < IXGBE_FW_PTR; i++) { @@ -363,8 +515,8 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) continue; if (ixgbe_read_eerd_generic(hw, i, &pointer)) { - hw_dbg(hw, "EEPROM read failed\n"); - break; + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; } /* Skip pointer section if the pointer is invalid. */ @@ -373,9 +525,8 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) continue; if (ixgbe_read_eerd_generic(hw, pointer, &length)) { - hw_dbg(hw, "EEPROM read failed\n"); + DEBUGOUT("EEPROM read failed\n"); return IXGBE_ERR_EEPROM; - break; } /* Skip pointer section if length is invalid. */ @@ -385,7 +536,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) for (j = pointer + 1; j <= pointer + length; j++) { if (ixgbe_read_eerd_generic(hw, j, &word)) { - hw_dbg(hw, "EEPROM read failed\n"); + DEBUGOUT("EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } checksum += word; @@ -405,20 +556,22 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) * Performs checksum calculation and validates the EEPROM checksum. If the * caller does not need checksum_val, the value can be NULL. 
**/ -static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, - u16 *checksum_val) +s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, + u16 *checksum_val) { s32 status; u16 checksum; u16 read_checksum = 0; + DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540"); + /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); if (status) { - hw_dbg(hw, "EEPROM read failed\n"); + DEBUGOUT("EEPROM read failed\n"); return status; } @@ -443,7 +596,8 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, * calculated checksum */ if (read_checksum != checksum) { - hw_dbg(hw, "Invalid EEPROM checksum"); + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum"); status = IXGBE_ERR_EEPROM_CHECKSUM; } @@ -465,23 +619,25 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, * checksum and updates the EEPROM and instructs the hardware to update * the flash. **/ -static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) +s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) { s32 status; u16 checksum; + DEBUGFUNC("ixgbe_update_eeprom_checksum_X540"); + /* Read the first word from the EEPROM. 
If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); if (status) { - hw_dbg(hw, "EEPROM read failed\n"); + DEBUGOUT("EEPROM read failed\n"); return status; } if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; + return IXGBE_ERR_SWFW_SYNC; status = hw->eeprom.ops.calc_checksum(hw); if (status < 0) @@ -500,97 +656,112 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) out: hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; } /** - * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device - * @hw: pointer to hardware structure + * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure * - * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy - * EEPROM from shadow RAM to the flash device. + * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy + * EEPROM from shadow RAM to the flash device. 
**/ -static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) +s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) { u32 flup; s32 status; + DEBUGFUNC("ixgbe_update_flash_X540"); + status = ixgbe_poll_flash_update_done_X540(hw); if (status == IXGBE_ERR_EEPROM) { - hw_dbg(hw, "Flash update time out\n"); - return status; + DEBUGOUT("Flash update time out\n"); + goto out; } - flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)) | IXGBE_EEC_FLUP; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup); + flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)) | IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup); status = ixgbe_poll_flash_update_done_X540(hw); - if (status == 0) - hw_dbg(hw, "Flash update complete\n"); + if (status == IXGBE_SUCCESS) + DEBUGOUT("Flash update complete\n"); else - hw_dbg(hw, "Flash update time out\n"); + DEBUGOUT("Flash update time out\n"); - if (hw->revision_id == 0) { - flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); + if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) { + flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); if (flup & IXGBE_EEC_SEC1VAL) { flup |= IXGBE_EEC_FLUP; - IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup); } status = ixgbe_poll_flash_update_done_X540(hw); - if (status == 0) - hw_dbg(hw, "Flash update complete\n"); + if (status == IXGBE_SUCCESS) + DEBUGOUT("Flash update complete\n"); else - hw_dbg(hw, "Flash update time out\n"); + DEBUGOUT("Flash update time out\n"); } - +out: return status; } /** - * ixgbe_poll_flash_update_done_X540 - Poll flash update status - * @hw: pointer to hardware structure + * ixgbe_poll_flash_update_done_X540 - Poll flash update status + * @hw: pointer to hardware structure * - * Polls the FLUDONE (bit 26) of the EEC Register to determine when the - * flash update is done. + * Polls the FLUDONE (bit 26) of the EEC Register to determine when the + * flash update is done. 
**/ -static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) +STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) { u32 i; u32 reg; + s32 status = IXGBE_ERR_EEPROM; + + DEBUGFUNC("ixgbe_poll_flash_update_done_X540"); for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { - reg = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); - if (reg & IXGBE_EEC_FLUDONE) - return 0; - udelay(5); + reg = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + if (reg & IXGBE_EEC_FLUDONE) { + status = IXGBE_SUCCESS; + break; + } + msec_delay(5); } - return IXGBE_ERR_EEPROM; + + if (i == IXGBE_FLUDONE_ATTEMPTS) + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Flash update status polling timed out"); + + return status; } /** - * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to acquire + * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire * - * Acquires the SWFW semaphore thought the SW_FW_SYNC register for - * the specified function (CSR, PHY0, PHY1, NVM, Flash) + * Acquires the SWFW semaphore thought the SW_FW_SYNC register for + * the specified function (CSR, PHY0, PHY1, NVM, Flash) **/ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK; - u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK; u32 fwmask = swmask << 5; + u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK; u32 timeout = 200; u32 hwmask = 0; u32 swfw_sync; u32 i; + DEBUGFUNC("ixgbe_acquire_swfw_sync_X540"); + if (swmask & IXGBE_GSSR_EEP_SM) - hwmask = IXGBE_GSSR_FLASH_SM; + hwmask |= IXGBE_GSSR_FLASH_SM; - /* SW only mask does not have FW bit pair */ + /* SW only mask doesn't have FW bit pair */ if (mask & IXGBE_GSSR_SW_MNG_SM) swmask |= IXGBE_GSSR_SW_MNG_SM; @@ -600,29 +771,25 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) /* SW NVM semaphore bit is used for access to all * SW_FW_SYNC bits 
(not just NVM) */ - if (ixgbe_get_swfw_sync_semaphore(hw)) + if (ixgbe_get_swfw_sync_semaphore(hw)) { + DEBUGOUT("Failed to get NVM access and register semaphore, returning IXGBE_ERR_SWFW_SYNC\n"); return IXGBE_ERR_SWFW_SYNC; + } - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); if (!(swfw_sync & (fwmask | swmask | hwmask))) { swfw_sync |= swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), + swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); - usleep_range(5000, 6000); - return 0; + return IXGBE_SUCCESS; } /* Firmware currently using resource (fwmask), hardware * currently using resource (hwmask), or other software * thread currently using resource (swmask) */ ixgbe_release_swfw_sync_semaphore(hw); - usleep_range(5000, 10000); - } - - /* Failed to get SW only semaphore */ - if (swmask == IXGBE_GSSR_SW_MNG_SM) { - hw_dbg(hw, "Failed to get SW only semaphore\n"); - return IXGBE_ERR_SWFW_SYNC; + msec_delay(5); } /* If the resource is not released by the FW/HW the SW can assume that @@ -630,15 +797,17 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) * of the requested resource(s) while ignoring the corresponding FW/HW * bits in the SW_FW_SYNC register. 
*/ - if (ixgbe_get_swfw_sync_semaphore(hw)) + if (ixgbe_get_swfw_sync_semaphore(hw)) { + DEBUGOUT("Failed to get NVM sempahore and register semaphore while forcefully ignoring FW sempahore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n"); return IXGBE_ERR_SWFW_SYNC; - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); + } + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); if (swfw_sync & (fwmask | hwmask)) { swfw_sync |= swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); - usleep_range(5000, 6000); - return 0; + msec_delay(5); + return IXGBE_SUCCESS; } /* If the resource is not released by other SW the SW can assume that * the other SW malfunctions. In that case the SW should clear all SW @@ -647,109 +816,128 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) */ if (swfw_sync & swmask) { u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | - IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM; + IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | + IXGBE_GSSR_SW_MNG_SM; if (swi2c_mask) rmask |= IXGBE_GSSR_I2C_MASK; ixgbe_release_swfw_sync_X540(hw, rmask); ixgbe_release_swfw_sync_semaphore(hw); + DEBUGOUT("Resource not released by other SW, returning IXGBE_ERR_SWFW_SYNC\n"); return IXGBE_ERR_SWFW_SYNC; } ixgbe_release_swfw_sync_semaphore(hw); + DEBUGOUT("Returning error IXGBE_ERR_SWFW_SYNC\n"); return IXGBE_ERR_SWFW_SYNC; } /** - * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to release + * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release * - * Releases the SWFW semaphore through the SW_FW_SYNC register - * for the specified function (CSR, PHY0, PHY1, EVM, Flash) + * Releases the SWFW semaphore through the SW_FW_SYNC register + * 
for the specified function (CSR, PHY0, PHY1, EVM, Flash) **/ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM); u32 swfw_sync; + DEBUGFUNC("ixgbe_release_swfw_sync_X540"); + if (mask & IXGBE_GSSR_I2C_MASK) swmask |= mask & IXGBE_GSSR_I2C_MASK; ixgbe_get_swfw_sync_semaphore(hw); - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); swfw_sync &= ~swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); - usleep_range(5000, 6000); + msec_delay(2); } /** - * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore - * @hw: pointer to hardware structure + * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure * - * Sets the hardware semaphores so SW/FW can gain control of shared resources - */ -static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) + * Sets the hardware semaphores so SW/FW can gain control of shared resources + **/ +STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) { + s32 status = IXGBE_ERR_EEPROM; u32 timeout = 2000; u32 i; u32 swsm; + DEBUGFUNC("ixgbe_get_swfw_sync_semaphore"); + /* Get SMBI software semaphore between device drivers first */ for (i = 0; i < timeout; i++) { - /* If the SMBI bit is 0 when we read it, then the bit will be + /* + * If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); - if (!(swsm & IXGBE_SWSM_SMBI)) + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (!(swsm & IXGBE_SWSM_SMBI)) { + status = IXGBE_SUCCESS; break; - usleep_range(50, 100); - } - - if (i == timeout) { - hw_dbg(hw, - "Software semaphore SMBI between device drivers not granted.\n"); - return IXGBE_ERR_EEPROM; + } + usec_delay(50); } /* Now get the semaphore 
between SW/FW through the REGSMP bit */ - for (i = 0; i < timeout; i++) { - swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); - if (!(swsm & IXGBE_SWFW_REGSMP)) - return 0; + if (status == IXGBE_SUCCESS) { + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); + if (!(swsm & IXGBE_SWFW_REGSMP)) + break; + + usec_delay(50); + } - usleep_range(50, 100); + /* + * Release semaphores and return error if SW NVM semaphore + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "REGSMP Software NVM semaphore not granted.\n"); + ixgbe_release_swfw_sync_semaphore(hw); + status = IXGBE_ERR_EEPROM; + } + } else { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Software semaphore SMBI between device drivers " + "not granted.\n"); } - /* Release semaphores and return error if SW NVM semaphore - * was not granted because we do not have access to the EEPROM - */ - hw_dbg(hw, "REGSMP Software NVM semaphore not granted\n"); - ixgbe_release_swfw_sync_semaphore(hw); - return IXGBE_ERR_EEPROM; + return status; } /** - * ixgbe_release_nvm_semaphore - Release hardware semaphore - * @hw: pointer to hardware structure + * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure * - * This function clears hardware semaphore bits. + * This function clears hardware semaphore bits. 
**/ -static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) +STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) { - u32 swsm; + u32 swsm; + + DEBUGFUNC("ixgbe_release_swfw_sync_semaphore"); /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); swsm &= ~IXGBE_SWFW_REGSMP; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swsm); + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swsm); - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); swsm &= ~IXGBE_SWSM_SMBI; - IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); + IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); IXGBE_WRITE_FLUSH(hw); } @@ -763,6 +951,8 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) **/ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) { + u32 rmask; + /* First try to grab the semaphore but we don't need to bother * looking to see whether we got the lock or not since we do * the same thing regardless of whether we got the lock or not. @@ -771,6 +961,15 @@ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) */ ixgbe_get_swfw_sync_semaphore(hw); ixgbe_release_swfw_sync_semaphore(hw); + + /* Acquire and release all software resources. */ + rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | + IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | + IXGBE_GSSR_SW_MNG_SM; + + rmask |= IXGBE_GSSR_I2C_MASK; + ixgbe_acquire_swfw_sync_X540(hw, rmask); + ixgbe_release_swfw_sync_X540(hw, rmask); } /** @@ -788,15 +987,18 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) ixgbe_link_speed speed; bool link_up; + DEBUGFUNC("ixgbe_blink_led_start_X540"); + if (index > 3) return IXGBE_ERR_PARAM; - /* Link should be up in order for the blink bit in the LED control + /* + * Link should be up in order for the blink bit in the LED control * register to work. Force link and speed in the MAC if link is down. 
* This will be reversed when we stop the blinking. */ hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (!link_up) { + if (link_up == false) { macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); @@ -808,7 +1010,7 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); IXGBE_WRITE_FLUSH(hw); - return 0; + return IXGBE_SUCCESS; } /** @@ -827,6 +1029,8 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) if (index > 3) return IXGBE_ERR_PARAM; + DEBUGFUNC("ixgbe_blink_led_stop_X540"); + /* Restore the LED to its default value. */ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); @@ -840,104 +1044,5 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); IXGBE_WRITE_FLUSH(hw); - return 0; + return IXGBE_SUCCESS; } -static const struct ixgbe_mac_operations mac_ops_X540 = { - .init_hw = &ixgbe_init_hw_generic, - .reset_hw = &ixgbe_reset_hw_X540, - .start_hw = &ixgbe_start_hw_X540, - .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, - .get_media_type = &ixgbe_get_media_type_X540, - .enable_rx_dma = &ixgbe_enable_rx_dma_generic, - .get_mac_addr = &ixgbe_get_mac_addr_generic, - .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, - .get_device_caps = &ixgbe_get_device_caps_generic, - .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, - .stop_adapter = &ixgbe_stop_adapter_generic, - .get_bus_info = &ixgbe_get_bus_info_generic, - .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, - .read_analog_reg8 = NULL, - .write_analog_reg8 = NULL, - .setup_link = &ixgbe_setup_mac_link_X540, - .set_rxpba = &ixgbe_set_rxpba_generic, - .check_link = &ixgbe_check_mac_link_generic, - .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, - .led_on = &ixgbe_led_on_generic, - .led_off = &ixgbe_led_off_generic, - 
.init_led_link_act = ixgbe_init_led_link_act_generic, - .blink_led_start = &ixgbe_blink_led_start_X540, - .blink_led_stop = &ixgbe_blink_led_stop_X540, - .set_rar = &ixgbe_set_rar_generic, - .clear_rar = &ixgbe_clear_rar_generic, - .set_vmdq = &ixgbe_set_vmdq_generic, - .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, - .clear_vmdq = &ixgbe_clear_vmdq_generic, - .init_rx_addrs = &ixgbe_init_rx_addrs_generic, - .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, - .enable_mc = &ixgbe_enable_mc_generic, - .disable_mc = &ixgbe_disable_mc_generic, - .clear_vfta = &ixgbe_clear_vfta_generic, - .set_vfta = &ixgbe_set_vfta_generic, - .fc_enable = &ixgbe_fc_enable_generic, - .setup_fc = ixgbe_setup_fc_generic, - .fc_autoneg = ixgbe_fc_autoneg, - .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, - .init_uta_tables = &ixgbe_init_uta_tables_generic, - .setup_sfp = NULL, - .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, - .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, - .release_swfw_sync = &ixgbe_release_swfw_sync_X540, - .init_swfw_sync = &ixgbe_init_swfw_sync_X540, - .disable_rx_buff = &ixgbe_disable_rx_buff_generic, - .enable_rx_buff = &ixgbe_enable_rx_buff_generic, - .get_thermal_sensor_data = NULL, - .init_thermal_sensor_thresh = NULL, - .prot_autoc_read = &prot_autoc_read_generic, - .prot_autoc_write = &prot_autoc_write_generic, - .enable_rx = &ixgbe_enable_rx_generic, - .disable_rx = &ixgbe_disable_rx_generic, -}; - -static const struct ixgbe_eeprom_operations eeprom_ops_X540 = { - .init_params = &ixgbe_init_eeprom_params_X540, - .read = &ixgbe_read_eerd_X540, - .read_buffer = &ixgbe_read_eerd_buffer_X540, - .write = &ixgbe_write_eewr_X540, - .write_buffer = &ixgbe_write_eewr_buffer_X540, - .calc_checksum = &ixgbe_calc_eeprom_checksum_X540, - .validate_checksum = &ixgbe_validate_eeprom_checksum_X540, - .update_checksum = &ixgbe_update_eeprom_checksum_X540, -}; - -static const struct 
ixgbe_phy_operations phy_ops_X540 = { - .identify = &ixgbe_identify_phy_generic, - .identify_sfp = &ixgbe_identify_sfp_module_generic, - .init = NULL, - .reset = NULL, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, - .setup_link = &ixgbe_setup_phy_link_generic, - .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, - .read_i2c_byte = &ixgbe_read_i2c_byte_generic, - .write_i2c_byte = &ixgbe_write_i2c_byte_generic, - .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, - .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, - .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, - .check_overtemp = &ixgbe_tn_check_overtemp, - .set_phy_power = &ixgbe_set_copper_phy_power, -}; - -static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(X540) -}; - -const struct ixgbe_info ixgbe_X540_info = { - .mac = ixgbe_mac_X540, - .get_invariants = &ixgbe_get_invariants_X540, - .mac_ops = &mac_ops_X540, - .eeprom_ops = &eeprom_ops_X540, - .phy_ops = &phy_ops_X540, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_X540, -}; diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.h index e21cd48491d3..4cace8523980 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.h +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.h @@ -1,40 +1,58 @@ /******************************************************************************* - * - * Intel 10 Gigabit PCI Express Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_X540_H_ +#define _IXGBE_X540_H_ #include "ixgbe_type.h" -s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw); +s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *autoneg); +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw); s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg_wait_to_complete); + bool link_up_wait_to_complete); s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw); s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw); -enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw); -s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); +u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw); + +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); +s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, + u16 *data); +s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, + u16 *data); +s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw); +s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val); +s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw); +s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); + s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw); -s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); + +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); +s32 
ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); +#endif /* _IXGBE_X540_H_ */ + diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.c index 3236248bdb52..2cdaa276f8e3 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.c @@ -1,106 +1,95 @@ /******************************************************************************* - * - * Intel 10 Gigabit PCI Express Linux driver - * Copyright(c) 1999 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS - * e1000-devel Mailing List - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ -#include "ixgbe_x540.h" -#include "ixgbe_type.h" -#include "ixgbe_common.h" -#include "ixgbe_phy.h" - -static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed); -static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *); -static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *); -static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *); -static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *); - -static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_phy_info *phy = &hw->phy; - struct ixgbe_link_info *link = &hw->link; - /* Start with X540 invariants, since so simular */ - ixgbe_get_invariants_X540(hw); - - if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) - phy->ops.set_phy_power = NULL; - - link->addr = IXGBE_CS4227; - - return 0; -} - -static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw) -{ - struct ixgbe_phy_info *phy = &hw->phy; - - /* Start with X540 invariants, since so similar */ - ixgbe_get_invariants_X540(hw); - - phy->ops.set_phy_power = NULL; - - return 0; -} - -static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_phy_info *phy = &hw->phy; + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. - /* Start with X540 invariants, since so simular */ - ixgbe_get_invariants_X540(hw); + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
- if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) - phy->ops.set_phy_power = NULL; + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. - return 0; -} + The full GNU General Public License is included in this distribution in + the file called "COPYING". -static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw) -{ - struct ixgbe_phy_info *phy = &hw->phy; + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - /* Start with X540 invariants, since so similar */ - ixgbe_get_invariants_X540(hw); +*******************************************************************************/ - phy->ops.set_phy_power = NULL; +#include "ixgbe_x550.h" +#include "ixgbe_x540.h" +#include "ixgbe_type.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" - return 0; -} +STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed); +STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); +STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); +STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw); -/** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control +/** + * ixgbe_init_ops_X550 - Inits func ptrs and MAC type * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for X550. + * Does not touch the hardware. 
**/ -static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) +s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw) { - u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val; - if (hw->bus.lan_id) { - esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); - esdp |= IXGBE_ESDP_SDP1_DIR; + DEBUGFUNC("ixgbe_init_ops_X550"); + + ret_val = ixgbe_init_ops_X540(hw); + mac->ops.dmac_config = ixgbe_dmac_config_X550; + mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550; + mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550; + mac->ops.setup_eee = NULL; + mac->ops.set_source_address_pruning = + ixgbe_set_source_address_pruning_X550; + mac->ops.set_ethertype_anti_spoofing = + ixgbe_set_ethertype_anti_spoofing_X550; + + mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; + eeprom->ops.init_params = ixgbe_init_eeprom_params_X550; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550; + eeprom->ops.read = ixgbe_read_ee_hostif_X550; + eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550; + eeprom->ops.write = ixgbe_write_ee_hostif_X550; + eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550; + eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550; + + mac->ops.disable_mdd = ixgbe_disable_mdd_X550; + mac->ops.enable_mdd = ixgbe_enable_mdd_X550; + mac->ops.mdd_event = ixgbe_mdd_event_X550; + mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550; + mac->ops.disable_rx = ixgbe_disable_rx_x550; + /* Manageability interface */ + mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_1G_T: + hw->mac.ops.led_on = NULL; + hw->mac.ops.led_off = NULL; + break; + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: + hw->mac.ops.led_on = ixgbe_led_on_t_X550em; + hw->mac.ops.led_off = ixgbe_led_off_t_X550em; + break; + default: + break; 
} - esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_FLUSH(hw); + return ret_val; } /** @@ -110,8 +99,8 @@ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) * @value: pointer to receive value read * * Returns status code - */ -static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) + **/ +STATIC s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) { return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value); } @@ -123,8 +112,8 @@ static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) * @value: value to write to register * * Returns status code - */ -static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) + **/ +STATIC s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) { return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value); } @@ -136,14 +125,15 @@ static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) * @value: pointer to receive read value * * Returns status code - */ -static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) + **/ +STATIC s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) { s32 status; - status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value); - if (status) - hw_err(hw, "port expander access failed with %d\n", status); + status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value); + if (status != IXGBE_SUCCESS) + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "port expander access failed with %d\n", status); return status; } @@ -154,15 +144,15 @@ static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) * @value: value to write * * Returns status code - */ -static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) + **/ +STATIC s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) { s32 status; - status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, - value); - if (status) - hw_err(hw, "port expander access failed 
with %d\n", status); + status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value); + if (status != IXGBE_SUCCESS) + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "port expander access failed with %d\n", status); return status; } @@ -172,8 +162,8 @@ static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) * * This function assumes that the caller has acquired the proper semaphore. * Returns error code - */ -static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) + **/ +STATIC s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) { s32 status; u32 retry; @@ -182,106 +172,117 @@ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) /* Trigger hard reset. */ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); - if (status) + if (status != IXGBE_SUCCESS) return status; reg |= IXGBE_PE_BIT1; status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); - if (status) + if (status != IXGBE_SUCCESS) return status; status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, ®); - if (status) + if (status != IXGBE_SUCCESS) return status; reg &= ~IXGBE_PE_BIT1; status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg); - if (status) + if (status != IXGBE_SUCCESS) return status; status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); - if (status) + if (status != IXGBE_SUCCESS) return status; reg &= ~IXGBE_PE_BIT1; status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); - if (status) + if (status != IXGBE_SUCCESS) return status; - usleep_range(IXGBE_CS4227_RESET_HOLD, IXGBE_CS4227_RESET_HOLD + 100); + usec_delay(IXGBE_CS4227_RESET_HOLD); status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); - if (status) + if (status != IXGBE_SUCCESS) return status; reg |= IXGBE_PE_BIT1; status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); - if (status) + if (status != IXGBE_SUCCESS) return status; /* Wait for the reset to complete. 
*/ - msleep(IXGBE_CS4227_RESET_DELAY); + msec_delay(IXGBE_CS4227_RESET_DELAY); for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS, &value); - if (!status && value == IXGBE_CS4227_EEPROM_LOAD_OK) + if (status == IXGBE_SUCCESS && + value == IXGBE_CS4227_EEPROM_LOAD_OK) break; - msleep(IXGBE_CS4227_CHECK_DELAY); + msec_delay(IXGBE_CS4227_CHECK_DELAY); } if (retry == IXGBE_CS4227_RETRIES) { - hw_err(hw, "CS4227 reset did not complete\n"); + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "CS4227 reset did not complete."); return IXGBE_ERR_PHY; } status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value); - if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) { - hw_err(hw, "CS4227 EEPROM did not load successfully\n"); + if (status != IXGBE_SUCCESS || + !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) { + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "CS4227 EEPROM did not load successfully."); return IXGBE_ERR_PHY; } - return 0; + return IXGBE_SUCCESS; } /** * ixgbe_check_cs4227 - Check CS4227 and reset as needed * @hw: pointer to hardware structure - */ -static void ixgbe_check_cs4227(struct ixgbe_hw *hw) + **/ +STATIC void ixgbe_check_cs4227(struct ixgbe_hw *hw) { + s32 status = IXGBE_SUCCESS; u32 swfw_mask = hw->phy.phy_semaphore_mask; - s32 status; - u16 value; + u16 value = 0; u8 retry; for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); - if (status) { - hw_err(hw, "semaphore failed with %d\n", status); - msleep(IXGBE_CS4227_CHECK_DELAY); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "semaphore failed with %d", status); + msec_delay(IXGBE_CS4227_CHECK_DELAY); continue; } /* Get status of reset flow. 
*/ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); - if (!status && value == IXGBE_CS4227_RESET_COMPLETE) + + if (status == IXGBE_SUCCESS && + value == IXGBE_CS4227_RESET_COMPLETE) goto out; - if (status || value != IXGBE_CS4227_RESET_PENDING) + if (status != IXGBE_SUCCESS || + value != IXGBE_CS4227_RESET_PENDING) break; /* Reset is pending. Wait and check again. */ hw->mac.ops.release_swfw_sync(hw, swfw_mask); - msleep(IXGBE_CS4227_CHECK_DELAY); + msec_delay(IXGBE_CS4227_CHECK_DELAY); } + /* If still pending, assume other instance failed. */ if (retry == IXGBE_CS4227_RETRIES) { status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); - if (status) { - hw_err(hw, "semaphore failed with %d\n", status); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "semaphore failed with %d", status); return; } } /* Reset the CS4227. */ status = ixgbe_reset_cs4227(hw); - if (status) { - hw_err(hw, "CS4227 reset failed: %d", status); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "CS4227 reset failed: %d", status); goto out; } @@ -291,44 +292,156 @@ static void ixgbe_check_cs4227(struct ixgbe_hw *hw) ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, IXGBE_CS4227_RESET_PENDING); hw->mac.ops.release_swfw_sync(hw, swfw_mask); - usleep_range(10000, 12000); + msec_delay(10); status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); - if (status) { - hw_err(hw, "semaphore failed with %d", status); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "semaphore failed with %d", status); return; } /* Record completion for next time. 
*/ status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, - IXGBE_CS4227_RESET_COMPLETE); + IXGBE_CS4227_RESET_COMPLETE); out: hw->mac.ops.release_swfw_sync(hw, swfw_mask); - msleep(hw->eeprom.semaphore_delay); + msec_delay(hw->eeprom.semaphore_delay); } -/** ixgbe_identify_phy_x550em - Get PHY type based on device id - * @hw: pointer to hardware structure +/** + * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control + * @hw: pointer to hardware structure + **/ +STATIC void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) +{ + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + + if (hw->bus.lan_id) { + esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); + esdp |= IXGBE_ESDP_SDP1_DIR; + } + esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @dev_type: always unused + * @phy_data: Pointer to read data from PHY register + */ +STATIC s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr, + u32 dev_type, u16 *phy_data) +{ + u32 i, data, command; + UNREFERENCED_1PARAMETER(dev_type); + + /* Setup and write the read command */ + command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC | + IXGBE_MSCA_MDI_COMMAND; + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the access completed. 
+ * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if (!(command & IXGBE_MSCA_MDI_COMMAND)) + break; + } + + if (command & IXGBE_MSCA_MDI_COMMAND) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "PHY read command did not complete.\n"); + return IXGBE_ERR_PHY; + } + + /* Read operation is complete. Get the data from MSRWD */ + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data >>= IXGBE_MSRWD_READ_DATA_SHIFT; + *phy_data = (u16)data; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @dev_type: always unused + * @phy_data: Data to write to the PHY register + */ +STATIC s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr, + u32 dev_type, u16 phy_data) +{ + u32 i, command; + UNREFERENCED_1PARAMETER(dev_type); + + /* Put the data in the MDI single read and write data register*/ + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); + + /* Setup and write the write command */ + command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | + IXGBE_MSCA_MDI_COMMAND; + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the access completed. 
+ * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if (!(command & IXGBE_MSCA_MDI_COMMAND)) + break; + } + + if (command & IXGBE_MSCA_MDI_COMMAND) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "PHY write cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_identify_phy_x550em - Get PHY type based on device id + * @hw: pointer to hardware structure * - * Returns error code + * Returns error code */ -static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) +STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) { + hw->mac.ops.set_lan_id(hw); + + ixgbe_read_mng_if_sel_x550em(hw); + switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_SFP: - if (hw->bus.lan_id) - hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; - else - hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; return ixgbe_identify_module_generic(hw); case IXGBE_DEV_ID_X550EM_X_SFP: /* set up for CS4227 usage */ - hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ixgbe_setup_mux_ctl(hw); ixgbe_check_cs4227(hw); /* Fallthrough */ + case IXGBE_DEV_ID_X550EM_A_SFP_N: return ixgbe_identify_module_generic(hw); + break; case IXGBE_DEV_ID_X550EM_X_KX4: hw->phy.type = ixgbe_phy_x550em_kx4; break; @@ -341,15 +454,12 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) hw->phy.type = ixgbe_phy_x550em_kr; break; case IXGBE_DEV_ID_X550EM_A_10G_T: - if (hw->bus.lan_id) - hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; - else - hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; - /* Fallthrough */ case IXGBE_DEV_ID_X550EM_X_10G_T: return ixgbe_identify_phy_generic(hw); case IXGBE_DEV_ID_X550EM_X_1G_T: hw->phy.type = ixgbe_phy_ext_1g_t; + hw->phy.ops.read_reg = NULL; + hw->phy.ops.write_reg = NULL; break; case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: @@ -364,81 +474,7 @@ static s32 
ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) default: break; } - return 0; -} - -static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data) -{ - return IXGBE_NOT_IMPLEMENTED; -} - -static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data) -{ - return IXGBE_NOT_IMPLEMENTED; -} - -/** - * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to read from - * @reg: I2C device register to read from - * @val: pointer to location to receive read value - * - * Returns an error code on error. - **/ -static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, - u16 reg, u16 *val) -{ - return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); -} - -/** - * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to read from - * @reg: I2C device register to read from - * @val: pointer to location to receive read value - * - * Returns an error code on error. - **/ -static s32 -ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, - u16 reg, u16 *val) -{ - return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); -} - -/** - * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to write to - * @reg: I2C device register to write to - * @val: value to write - * - * Returns an error code on error. 
- **/ -static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, - u8 addr, u16 reg, u16 val) -{ - return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); -} - -/** - * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to write to - * @reg: I2C device register to write to - * @val: value to write - * - * Returns an error code on error. - **/ -static s32 -ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, - u8 addr, u16 reg, u16 val) -{ - return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); + return IXGBE_SUCCESS; } /** @@ -456,7 +492,7 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, } hic; u16 retries = FW_PHY_ACT_RETRIES; s32 rc; - u32 i; + u16 i; do { memset(&hic, 0, sizeof(hic)); @@ -464,22 +500,23 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN; hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; hic.cmd.port_number = hw->bus.lan_id; - hic.cmd.activity_id = cpu_to_le16(activity); - for (i = 0; i < ARRAY_SIZE(hic.cmd.data); ++i) - hic.cmd.data[i] = cpu_to_be32((*data)[i]); + hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity); + for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) + hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]); - rc = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), + rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd, + sizeof(hic.cmd), IXGBE_HI_COMMAND_TIMEOUT, true); - if (rc) + if (rc != IXGBE_SUCCESS) return rc; if (hic.rsp.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) { for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) - (*data)[i] = be32_to_cpu(hic.rsp.data[i]); - return 0; + (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]); + return IXGBE_SUCCESS; } - usleep_range(20, 30); + usec_delay(20); --retries; } while (retries > 0); @@ -512,36 +549,28 @@ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) s32 rc; u16 i; - if 
(hw->phy.id) - return 0; - rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info); if (rc) return rc; hw->phy.speeds_supported = 0; phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK; - for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { + for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) { if (phy_speeds & ixgbe_fw_map[i].fw_speed) hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed; } + if (!hw->phy.autoneg_advertised) + hw->phy.autoneg_advertised = hw->phy.speeds_supported; hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK; phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK; hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK; hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; - if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) + if (hw->phy.id == IXGBE_PHY_REVISION_MASK) return IXGBE_ERR_PHY_ADDR_INVALID; - - hw->phy.autoneg_advertised = hw->phy.speeds_supported; - hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; - return 0; + return IXGBE_SUCCESS; } -static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data); /** * ixgbe_identify_phy_fw - Get PHY type based on firmware command * @hw: pointer to hardware structure @@ -550,26 +579,14 @@ static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, */ static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) { - s32 rc; - u16 value=0; - if (hw->bus.lan_id) hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; else hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; -#if 0 /* Try also to get PHY ID through MDIO by using C22 in read_reg op. - * By hilbert - */ - rc = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, &value); - hw_err(hw, "####rc:%x, PHY ID-1:%x\n", rc, value); -#endif - hw->phy.type = ixgbe_phy_fw; -#if 0 /* We still need read/write ops later, don't NULL it. 
By hilbert */ - hw->phy.ops.read_reg = NULL; - hw->phy.ops.write_reg = NULL; -#endif + /*hw->phy.ops.read_reg = NULL; + hw->phy.ops.write_reg = NULL;*/ return ixgbe_get_phy_id_fw(hw); } @@ -579,7 +596,7 @@ static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) * * Returns error code */ -static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) +s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) { u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; @@ -587,21 +604,193 @@ static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup); } -/** - * ixgbe_setup_fw_link - Setup firmware-controlled PHYs - * @hw: pointer to hardware structure - */ -static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) +STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) { - u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; - s32 rc; - u16 i; - + UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data); + return IXGBE_NOT_IMPLEMENTED; +} + +STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data); + return IXGBE_NOT_IMPLEMENTED; +} + +/** + * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. 
+ **/ +STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); +} + +/** + * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + **/ +STATIC s32 +ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); +} + +/** + * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. + **/ +STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); +} + +/** + * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. + **/ +STATIC s32 +ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); +} + +/** +* ixgbe_init_ops_X550EM - Inits func ptrs and MAC type +* @hw: pointer to hardware structure +* +* Initialize the function pointers and for MAC type X550EM. +* Does not touch the hardware. 
+**/ +s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_X550EM"); + + /* Similar to X550 so start there. */ + ret_val = ixgbe_init_ops_X550(hw); + + /* Since this function eventually calls + * ixgbe_init_ops_540 by design, we are setting + * the pointers to NULL explicitly here to overwrite + * the values being set in the x540 function. + */ + /* Thermal sensor not supported in x550EM */ + mac->ops.get_thermal_sensor_data = NULL; + mac->ops.init_thermal_sensor_thresh = NULL; + mac->thermal_sensor_enabled = false; + + /* FCOE not supported in x550EM */ + mac->ops.get_san_mac_addr = NULL; + mac->ops.set_san_mac_addr = NULL; + mac->ops.get_wwn_prefix = NULL; + mac->ops.get_fcoe_boot_status = NULL; + + /* IPsec not supported in x550EM */ + mac->ops.disable_sec_rx_path = NULL; + mac->ops.enable_sec_rx_path = NULL; + + /* AUTOC register is not present in x550EM. 
*/ + mac->ops.prot_autoc_read = NULL; + mac->ops.prot_autoc_write = NULL; + + /* X550EM bus type is internal*/ + hw->bus.type = ixgbe_bus_type_internal; + mac->ops.get_bus_info = ixgbe_get_bus_info_X550em; + + + mac->ops.get_media_type = ixgbe_get_media_type_X550em; + mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em; + mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em; + mac->ops.reset_hw = ixgbe_reset_hw_X550em; + mac->ops.get_supported_physical_layer = + ixgbe_get_supported_physical_layer_X550em; + + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) + mac->ops.setup_fc = ixgbe_setup_fc_generic; + else + mac->ops.setup_fc = ixgbe_setup_fc_X550em; + + /* PHY */ + phy->ops.init = ixgbe_init_phy_ops_X550em; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + mac->ops.setup_fc = NULL; + phy->ops.identify = ixgbe_identify_phy_fw; + phy->ops.set_phy_power = NULL; + phy->ops.get_firmware_version = NULL; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + mac->ops.setup_fc = NULL; + phy->ops.identify = ixgbe_identify_phy_x550em; + phy->ops.set_phy_power = NULL; + break; + default: + phy->ops.identify = ixgbe_identify_phy_x550em; + } + + if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) + phy->ops.set_phy_power = NULL; + + /* EEPROM */ + eeprom->ops.init_params = ixgbe_init_eeprom_params_X540; + eeprom->ops.read = ixgbe_read_ee_hostif_X550; + eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550; + eeprom->ops.write = ixgbe_write_ee_hostif_X550; + eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550; + eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550; + + return ret_val; +} + +/** + * ixgbe_setup_fw_link - Setup firmware-controlled PHYs + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) 
+{ + u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; + s32 rc; + u16 i; + if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) return 0; if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { - hw_err(hw, "rx_pause not valid in strict IEEE mode\n"); + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); return IXGBE_ERR_INVALID_LINK_SETTINGS; } @@ -622,7 +811,7 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) break; } - for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { + for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) { if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed) setup[0] |= ixgbe_fw_map[i].fw_speed; } @@ -636,14 +825,14 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) return rc; if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN) return IXGBE_ERR_OVERTEMP; - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs + * ixgbe_fc_autoneg_fw _ Set up flow control for FW-controlled PHYs * @hw: pointer to hardware structure * - * Called at init time to set up flow control. + * Called at init time to set up flow control. */ static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) { @@ -653,180 +842,544 @@ static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) return ixgbe_setup_fw_link(hw); } -/** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params - * @hw: pointer to hardware structure +/** + * ixgbe_setup_eee_fw - Enable/disable EEE support + * @hw: pointer to the HW structure + * @enable_eee: boolean flag to enable EEE * - * Initializes the EEPROM parameters ixgbe_eeprom_info within the - * ixgbe_hw struct in order to set up EEPROM access. - **/ -static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) + * Enable/disable EEE based on enable_eee flag. + * This function controls EEE for firmware-based PHY implementations. 
+ */ +static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee) { - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - u32 eec; - u16 eeprom_size; + if (!!hw->phy.eee_speeds_advertised == enable_eee) + return IXGBE_SUCCESS; + if (enable_eee) + hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; + else + hw->phy.eee_speeds_advertised = 0; + return hw->phy.ops.setup_link(hw); +} - if (eeprom->type == ixgbe_eeprom_uninitialized) { - eeprom->semaphore_delay = 10; - eeprom->type = ixgbe_flash; +/** +* ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type +* @hw: pointer to hardware structure +* +* Initialize the function pointers and for MAC type X550EM_a. +* Does not touch the hardware. +**/ +s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + s32 ret_val; - eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); - eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> - IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = BIT(eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); + DEBUGFUNC("ixgbe_init_ops_X550EM_a"); + + /* Start with generic X550EM init */ + ret_val = ixgbe_init_ops_X550EM(hw); + + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || + hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) { + mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550; + mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550; + } else { + mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a; + mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a; + } + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a; + + switch (mac->ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + mac->ops.setup_fc = NULL; + mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; + break; + case ixgbe_media_type_backplane: + mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; + mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; + break; + default: + break; + } - hw_dbg(hw, "Eeprom 
params: type = %d, size = %d\n", - eeprom->type, eeprom->word_size); + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; + mac->ops.setup_fc = ixgbe_fc_autoneg_fw; + mac->ops.setup_eee = ixgbe_setup_eee_fw; + hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; + break; + default: + break; } - return 0; + return ret_val; } /** - * ixgbe_iosf_wait - Wait for IOSF command completion - * @hw: pointer to hardware structure - * @ctrl: pointer to location to receive final IOSF control value - * - * Return: failing status on timeout - * - * Note: ctrl can be NULL if the IOSF control register value is not needed - */ -static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) +* ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type +* @hw: pointer to hardware structure +* +* Initialize the function pointers and for MAC type X550EM_x. +* Does not touch the hardware. +**/ +s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw) { - u32 i, command; + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_link_info *link = &hw->link; + s32 ret_val; - /* Check every 10 usec to see if the address cycle completed. - * The SB IOSF BUSY bit will clear when the operation is - * complete. 
- */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); - if (!(command & IXGBE_SB_IOSF_CTRL_BUSY)) - break; - udelay(10); - } - if (ctrl) - *ctrl = command; - if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { - hw_dbg(hw, "IOSF wait timed out\n"); - return IXGBE_ERR_PHY; + DEBUGFUNC("ixgbe_init_ops_X550EM_x"); + + /* Start with generic X550EM init */ + ret_val = ixgbe_init_ops_X550EM(hw); + + mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550; + mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550; + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em; + link->ops.read_link = ixgbe_read_i2c_combined_generic; + link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked; + link->ops.write_link = ixgbe_write_i2c_combined_generic; + link->ops.write_link_unlocked = + ixgbe_write_i2c_combined_generic_unlocked; + link->addr = IXGBE_CS4227; + + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) { + mac->ops.setup_fc = NULL; + mac->ops.setup_eee = NULL; + mac->ops.init_led_link_act = NULL; } - return 0; + return ret_val; } -/** ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the - * IOSF device +/** + * ixgbe_dmac_config_X550 * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 3 bit device type - * @phy_data: Pointer to read data from the register + * + * Configure DMA coalescing. If enabling dmac, dmac is activated. + * When disabling dmac, dmac enable dmac bit is cleared. 
**/ -static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u32 *data) +s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw) { - u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; - u32 command, error; - s32 ret; + u32 reg, high_pri_tc; - ret = hw->mac.ops.acquire_swfw_sync(hw, gssr); - if (ret) - return ret; + DEBUGFUNC("ixgbe_dmac_config_X550"); - ret = ixgbe_iosf_wait(hw, NULL); - if (ret) + /* Disable DMA coalescing before configuring */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + reg &= ~IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + + /* Disable DMA Coalescing if the watchdog timer is 0 */ + if (!hw->mac.dmac_config.watchdog_timer) goto out; - command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | - (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + ixgbe_dmac_config_tcs_X550(hw); - /* Write IOSF control register */ - IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + /* Configure DMA Coalescing Control Register */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); - ret = ixgbe_iosf_wait(hw, &command); + /* Set the watchdog timer in units of 40.96 usec */ + reg &= ~IXGBE_DMACR_DMACWT_MASK; + reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096; - if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { - error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> - IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; - hw_dbg(hw, "Failed to read, error %x\n", error); - return IXGBE_ERR_PHY; + reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK; + /* If fcoe is enabled, set high priority traffic class */ + if (hw->mac.dmac_config.fcoe_en) { + high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc; + reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) & + IXGBE_DMACR_HIGH_PRI_TC_MASK); } + reg |= IXGBE_DMACR_EN_MNG_IND; - if (!ret) - *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); + /* Enable DMA coalescing after configuration */ + reg |= IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); out: - 
hw->mac.ops.release_swfw_sync(hw, gssr); - return ret; + return IXGBE_SUCCESS; } /** - * ixgbe_get_phy_token - Get the token for shared PHY access - * @hw: Pointer to hardware structure - */ -static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) + * ixgbe_dmac_config_tcs_X550 + * @hw: pointer to hardware structure + * + * Configure DMA coalescing threshold per TC. The dmac enable bit must + * be cleared before configuring. + **/ +s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw) { - struct ixgbe_hic_phy_token_req token_cmd; - s32 status; + u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb; - token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; - token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; - token_cmd.hdr.cmd_or_resp.cmd_resv = 0; - token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; - token_cmd.port_number = hw->bus.lan_id; - token_cmd.command_type = FW_PHY_TOKEN_REQ; - token_cmd.pad = 0; - status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd), - IXGBE_HI_COMMAND_TIMEOUT, - true); - if (status) - return status; - if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) - return 0; - if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) - return IXGBE_ERR_FW_RESP_INVALID; + DEBUGFUNC("ixgbe_dmac_config_tcs_X550"); - return IXGBE_ERR_TOKEN_RETRY; + /* Configure DMA coalescing enabled */ + switch (hw->mac.dmac_config.link_speed) { + case IXGBE_LINK_SPEED_10_FULL: + case IXGBE_LINK_SPEED_100_FULL: + pb_headroom = IXGBE_DMACRXT_100M; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + pb_headroom = IXGBE_DMACRXT_1G; + break; + default: + pb_headroom = IXGBE_DMACRXT_10G; + break; + } + + maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >> + IXGBE_MHADD_MFS_SHIFT) / 1024); + + /* Set the per Rx packet buffer receive threshold */ + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) { + reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc)); + reg &= ~IXGBE_DMCTH_DMACRXT_MASK; + + if (tc < hw->mac.dmac_config.num_tcs) { + /* Get Rx PB size */ + rx_pb_size = 
IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc)); + rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >> + IXGBE_RXPBSIZE_SHIFT; + + /* Calculate receive buffer threshold in kilobytes */ + if (rx_pb_size > pb_headroom) + rx_pb_size = rx_pb_size - pb_headroom; + else + rx_pb_size = 0; + + /* Minimum of MFS shall be set for DMCTH */ + reg |= (rx_pb_size > maxframe_size_kb) ? + rx_pb_size : maxframe_size_kb; + } + IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg); + } + return IXGBE_SUCCESS; } /** - * ixgbe_put_phy_token - Put the token for shared PHY access - * @hw: Pointer to hardware structure - */ -static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) + * ixgbe_dmac_update_tcs_X550 + * @hw: pointer to hardware structure + * + * Disables dmac, updates per TC settings, and then enables dmac. + **/ +s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw) { - struct ixgbe_hic_phy_token_req token_cmd; - s32 status; + u32 reg; - token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; - token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; - token_cmd.hdr.cmd_or_resp.cmd_resv = 0; - token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; - token_cmd.port_number = hw->bus.lan_id; - token_cmd.command_type = FW_PHY_TOKEN_REL; - token_cmd.pad = 0; - status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd), - IXGBE_HI_COMMAND_TIMEOUT, - true); - if (status) - return status; - if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) - return 0; + DEBUGFUNC("ixgbe_dmac_update_tcs_X550"); + + /* Disable DMA coalescing before configuring */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + reg &= ~IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + + ixgbe_dmac_config_tcs_X550(hw); + + /* Enable DMA coalescing after configuration */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + reg |= IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM 
parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + DEBUGFUNC("ixgbe_init_eeprom_params_X550"); + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ixgbe_flash; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + + DEBUGOUT2("Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_source_address_pruning_X550 - Enable/Disbale source address pruning + * @hw: pointer to hardware structure + * @enable: enable or disable source address pruning + * @pool: Rx pool to set source address pruning for + **/ +void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable, + unsigned int pool) +{ + u64 pfflp; + + /* max rx pool is 63 */ + if (pool > 63) + return; + + pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL); + pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32; + + if (enable) + pfflp |= (1ULL << pool); + else + pfflp &= ~(1ULL << pool); + + IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp); + IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32)); +} + +/** + * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for Ethertype anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing + * + **/ +void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, + bool enable, int vf) +{ + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT; + u32 pfvfspoof; + + DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550"); + + pfvfspoof = 
IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); + if (enable) + pfvfspoof |= (1 << vf_target_shift); + else + pfvfspoof &= ~(1 << vf_target_shift); + + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); +} + +/** + * ixgbe_iosf_wait - Wait for IOSF command completion + * @hw: pointer to hardware structure + * @ctrl: pointer to location to receive final IOSF control value + * + * Returns failing status on timeout + * + * Note: ctrl can be NULL if the IOSF control register value is not needed + **/ +STATIC s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) +{ + u32 i, command = 0; + + /* Check every 10 usec to see if the address cycle completed. + * The SB IOSF BUSY bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); + if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) + break; + usec_delay(10); + } + if (ctrl) + *ctrl = command; + if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n"); + return IXGBE_ERR_PHY; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register + * of the IOSF device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Data to write to the register + **/ +s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data) +{ + u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; + u32 command, error; + s32 ret; + + ret = ixgbe_acquire_swfw_semaphore(hw, gssr); + if (ret != IXGBE_SUCCESS) + return ret; + + ret = ixgbe_iosf_wait(hw, NULL); + if (ret != IXGBE_SUCCESS) + goto out; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + /* Write IOSF data 
register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data); + + ret = ixgbe_iosf_wait(hw, &command); + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Failed to write, error %x\n", error); + ret = IXGBE_ERR_PHY; + } + +out: + ixgbe_release_swfw_semaphore(hw, gssr); + return ret; +} + +/** + * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Pointer to read data from the register + **/ +s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data) +{ + u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; + u32 command, error; + s32 ret; + + ret = ixgbe_acquire_swfw_semaphore(hw, gssr); + if (ret != IXGBE_SUCCESS) + return ret; + + ret = ixgbe_iosf_wait(hw, NULL); + if (ret != IXGBE_SUCCESS) + goto out; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + ret = ixgbe_iosf_wait(hw, &command); + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Failed to read, error %x\n", error); + ret = IXGBE_ERR_PHY; + } + + if (ret == IXGBE_SUCCESS) + *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); + +out: + ixgbe_release_swfw_semaphore(hw, gssr); + return ret; +} + +/** + * ixgbe_get_phy_token - Get the token for shared phy access + * @hw: Pointer to hardware structure + */ + +s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) +{ + struct ixgbe_hic_phy_token_req token_cmd; + s32 status; + + token_cmd.hdr.cmd = 
FW_PHY_TOKEN_REQ_CMD; + token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; + token_cmd.hdr.cmd_or_resp.cmd_resv = 0; + token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + token_cmd.port_number = hw->bus.lan_id; + token_cmd.command_type = FW_PHY_TOKEN_REQ; + token_cmd.pad = 0; + status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd, + sizeof(token_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (status) { + DEBUGOUT1("Issuing host interface command failed with Status = %d\n", + status); + return status; + } + if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) + return IXGBE_SUCCESS; + if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) { + DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n", + token_cmd.hdr.cmd_or_resp.ret_status); + return IXGBE_ERR_FW_RESP_INVALID; + } + + DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n"); + return IXGBE_ERR_TOKEN_RETRY; +} + +/** + * ixgbe_put_phy_token - Put the token for shared phy access + * @hw: Pointer to hardware structure + */ + +s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) +{ + struct ixgbe_hic_phy_token_req token_cmd; + s32 status; + + token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; + token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; + token_cmd.hdr.cmd_or_resp.cmd_resv = 0; + token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + token_cmd.port_number = hw->bus.lan_id; + token_cmd.command_type = FW_PHY_TOKEN_REL; + token_cmd.pad = 0; + status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd, + sizeof(token_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (status) + return status; + if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) + return IXGBE_SUCCESS; + + DEBUGOUT("Put PHY Token host interface command failed"); return IXGBE_ERR_FW_RESP_INVALID; } /** - * ixgbe_write_iosf_sb_reg_x550a - Write to IOSF PHY register + * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register + * of the IOSF device * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY 
register to write * @device_type: 3 bit device type * @data: Data to write to the register **/ -static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - __always_unused u32 device_type, - u32 data) +s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data) { struct ixgbe_hic_internal_phy_req write_cmd; + s32 status; + UNREFERENCED_1PARAMETER(device_type); memset(&write_cmd, 0, sizeof(write_cmd)); write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; @@ -834,29 +1387,32 @@ static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; write_cmd.port_number = hw->bus.lan_id; write_cmd.command_type = FW_INT_PHY_REQ_WRITE; - write_cmd.address = cpu_to_be16(reg_addr); - write_cmd.write_data = cpu_to_be32(data); + write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr); + write_cmd.write_data = IXGBE_CPU_TO_BE32(data); + + status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd, + sizeof(write_cmd), + IXGBE_HI_COMMAND_TIMEOUT, false); - return ixgbe_host_interface_command(hw, &write_cmd, sizeof(write_cmd), - IXGBE_HI_COMMAND_TIMEOUT, false); + return status; } /** - * ixgbe_read_iosf_sb_reg_x550a - Read from IOSF PHY register + * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: 3 bit device type * @data: Pointer to read data from the register **/ -static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - __always_unused u32 device_type, - u32 *data) +s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data) { union { struct ixgbe_hic_internal_phy_req cmd; struct ixgbe_hic_internal_phy_resp rsp; } hic; s32 status; + UNREFERENCED_1PARAMETER(device_type); memset(&hic, 0, sizeof(hic)); hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; @@ -864,2170 +1420,2408 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct 
ixgbe_hw *hw, u32 reg_addr, hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; hic.cmd.port_number = hw->bus.lan_id; hic.cmd.command_type = FW_INT_PHY_REQ_READ; - hic.cmd.address = cpu_to_be16(reg_addr); + hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr); - status = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), + status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd, + sizeof(hic.cmd), IXGBE_HI_COMMAND_TIMEOUT, true); /* Extract the register value from the response. */ - *data = be32_to_cpu(hic.rsp.read_data); + *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data); return status; } -/** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif +/** + * ixgbe_disable_mdd_X550 * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @words: number of words - * @data: word(s) read from the EEPROM * - * Reads a 16 bit word(s) from the EEPROM using the hostif. + * Disable malicious driver detection **/ -static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, - u16 offset, u16 words, u16 *data) +void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw) { - const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; - struct ixgbe_hic_read_shadow_ram buffer; - u32 current_word = 0; - u16 words_to_read; - s32 status; - u32 i; + u32 reg; - /* Take semaphore for the entire operation. 
*/ - status = hw->mac.ops.acquire_swfw_sync(hw, mask); - if (status) { - hw_dbg(hw, "EEPROM read buffer - semaphore failed\n"); - return status; - } + DEBUGFUNC("ixgbe_disable_mdd_X550"); - while (words) { - if (words > FW_MAX_READ_BUFFER_SIZE / 2) - words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; - else - words_to_read = words; + /* Disable MDD for TX DMA and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); - buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; - buffer.hdr.req.buf_lenh = 0; - buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + /* Disable MDD for RX and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); +} - /* convert offset from words to bytes */ - buffer.address = cpu_to_be32((offset + current_word) * 2); - buffer.length = cpu_to_be16(words_to_read * 2); - buffer.pad2 = 0; - buffer.pad3 = 0; +/** + * ixgbe_enable_mdd_X550 + * @hw: pointer to hardware structure + * + * Enable malicious driver detection + **/ +void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw) +{ + u32 reg; - status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT); - if (status) { - hw_dbg(hw, "Host interface command failed\n"); - goto out; - } + DEBUGFUNC("ixgbe_enable_mdd_X550"); - for (i = 0; i < words_to_read; i++) { - u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) + - 2 * i; - u32 value = IXGBE_READ_REG(hw, reg); - - data[current_word] = (u16)(value & 0xffff); - current_word++; - i++; - if (i < words_to_read) { - value >>= 16; - data[current_word] = (u16)(value & 0xffff); - current_word++; - } - } - words -= words_to_read; - } + /* Enable MDD for TX DMA and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN); + 
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); -out: - hw->mac.ops.release_swfw_sync(hw, mask); - return status; + /* Enable MDD for RX and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); } -/** ixgbe_checksum_ptr_x550 - Checksum one pointer region +/** + * ixgbe_restore_mdd_vf_X550 * @hw: pointer to hardware structure - * @ptr: pointer offset in eeprom - * @size: size of section pointed by ptr, if 0 first word will be used as size - * @csum: address of checksum to update + * @vf: vf index * - * Returns error status for any failure + * Restore VF that was disabled during malicious driver detection event **/ -static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, - u16 size, u16 *csum, u16 *buffer, - u32 buffer_size) +void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf) { - u16 buf[256]; - s32 status; - u16 length, bufsz, i, start; - u16 *local_buffer; - - bufsz = sizeof(buf) / sizeof(buf[0]); - - /* Read a chunk at the pointer location */ - if (!buffer) { - status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); - if (status) { - hw_dbg(hw, "Failed to read EEPROM image\n"); - return status; - } - local_buffer = buf; - } else { - if (buffer_size < ptr) - return IXGBE_ERR_PARAM; - local_buffer = &buffer[ptr]; - } - - if (size) { - start = 0; - length = size; - } else { - start = 1; - length = local_buffer[0]; - - /* Skip pointer section if length is invalid. 
*/ - if (length == 0xFFFF || length == 0 || - (ptr + length) >= hw->eeprom.word_size) - return 0; - } + u32 idx, reg, num_qs, start_q, bitmask; - if (buffer && ((u32)start + (u32)length > buffer_size)) - return IXGBE_ERR_PARAM; - - for (i = start; length; i++, length--) { - if (i == bufsz && !buffer) { - ptr += bufsz; - i = 0; - if (length < bufsz) - bufsz = length; + DEBUGFUNC("ixgbe_restore_mdd_vf_X550"); - /* Read a chunk at the pointer location */ - status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, - bufsz, buf); - if (status) { - hw_dbg(hw, "Failed to read EEPROM image\n"); - return status; - } - } - *csum += local_buffer[i]; + /* Map VF to queues */ + reg = IXGBE_READ_REG(hw, IXGBE_MRQC); + switch (reg & IXGBE_MRQC_MRQE_MASK) { + case IXGBE_MRQC_VMDQRT8TCEN: + num_qs = 8; /* 16 VFs / pools */ + bitmask = 0x000000FF; + break; + case IXGBE_MRQC_VMDQRSS32EN: + case IXGBE_MRQC_VMDQRT4TCEN: + num_qs = 4; /* 32 VFs / pools */ + bitmask = 0x0000000F; + break; + default: /* 64 VFs / pools */ + num_qs = 2; + bitmask = 0x00000003; + break; } - return 0; + start_q = vf * num_qs; + + /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */ + idx = start_q / 32; + reg = 0; + reg |= (bitmask << (start_q % 32)); + IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg); + IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg); } -/** ixgbe_calc_checksum_X550 - Calculates and returns the checksum +/** + * ixgbe_mdd_event_X550 * @hw: pointer to hardware structure - * @buffer: pointer to buffer containing calculated checksum - * @buffer_size: size of buffer + * @vf_bitmap: vf bitmap of malicious vfs * - * Returns a negative error code on error, or the 16-bit checksum + * Handle malicious driver detection event. 
**/ -static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, - u32 buffer_size) +void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap) { - u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; - u16 *local_buffer; - s32 status; - u16 checksum = 0; - u16 pointer, i, size; + u32 wqbr; + u32 i, j, reg, q, shift, vf, idx; - hw->eeprom.ops.init_params(hw); + DEBUGFUNC("ixgbe_mdd_event_X550"); - if (!buffer) { - /* Read pointer area */ - status = ixgbe_read_ee_hostif_buffer_X550(hw, 0, - IXGBE_EEPROM_LAST_WORD + 1, - eeprom_ptrs); - if (status) { - hw_dbg(hw, "Failed to read EEPROM image\n"); - return status; - } - local_buffer = eeprom_ptrs; - } else { - if (buffer_size < IXGBE_EEPROM_LAST_WORD) - return IXGBE_ERR_PARAM; - local_buffer = buffer; + /* figure out pool size for mapping to vf's */ + reg = IXGBE_READ_REG(hw, IXGBE_MRQC); + switch (reg & IXGBE_MRQC_MRQE_MASK) { + case IXGBE_MRQC_VMDQRT8TCEN: + shift = 3; /* 16 VFs / pools */ + break; + case IXGBE_MRQC_VMDQRSS32EN: + case IXGBE_MRQC_VMDQRT4TCEN: + shift = 2; /* 32 VFs / pools */ + break; + default: + shift = 1; /* 64 VFs / pools */ + break; } - /* For X550 hardware include 0x0-0x41 in the checksum, skip the - * checksum word itself - */ - for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++) - if (i != IXGBE_EEPROM_CHECKSUM) - checksum += local_buffer[i]; + /* Read WQBR_TX and WQBR_RX and check for malicious queues */ + for (i = 0; i < 4; i++) { + wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i)); + wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i)); - /* Include all data from pointers 0x3, 0x6-0xE. This excludes the - * FW, PHY module, and PCIe Expansion/Option ROM pointers. - */ - for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) { - if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + if (!wqbr) continue; - pointer = local_buffer[i]; + /* Get malicious queue */ + for (j = 0; j < 32 && wqbr; j++) { - /* Skip pointer section if the pointer is invalid. 
*/ - if (pointer == 0xFFFF || pointer == 0 || - pointer >= hw->eeprom.word_size) - continue; + if (!(wqbr & (1 << j))) + continue; - switch (i) { - case IXGBE_PCIE_GENERAL_PTR: - size = IXGBE_IXGBE_PCIE_GENERAL_SIZE; - break; - case IXGBE_PCIE_CONFIG0_PTR: - case IXGBE_PCIE_CONFIG1_PTR: - size = IXGBE_PCIE_CONFIG_SIZE; - break; - default: - size = 0; - break; - } + /* Get queue from bitmask */ + q = j + (i * 32); - status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum, - buffer, buffer_size); - if (status) - return status; + /* Map queue to vf */ + vf = (q >> shift); + + /* Set vf bit in vf_bitmap */ + idx = vf / 32; + vf_bitmap[idx] |= (1 << (vf % 32)); + wqbr &= ~(1 << j); + } } +} - checksum = (u16)IXGBE_EEPROM_SUM - checksum; +/** + * ixgbe_get_media_type_X550em - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + */ +enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; - return (s32)checksum; + DEBUGFUNC("ixgbe_get_media_type_X550em"); + + /* Detect if there is a copper PHY attached. 
*/ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_KX4: + case IXGBE_DEV_ID_X550EM_X_XFI: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_X550EM_X_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP_N: + case IXGBE_DEV_ID_X550EM_A_QSFP: + case IXGBE_DEV_ID_X550EM_A_QSFP_N: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: + media_type = ixgbe_media_type_copper; + break; + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: + media_type = ixgbe_media_type_backplane; + hw->phy.type = ixgbe_phy_sgmii; + break; + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } + return media_type; } -/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum +/** + * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported * @hw: pointer to hardware structure - * - * Returns a negative error code on error, or the 16-bit checksum - **/ -static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) + * @linear: true if SFP module is linear + */ +STATIC s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) { - return ixgbe_calc_checksum_X550(hw, NULL, 0); + DEBUGFUNC("ixgbe_supported_sfp_modules_X550em"); + + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_not_present: + return IXGBE_ERR_SFP_NOT_PRESENT; + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + *linear = true; + break; + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + case ixgbe_sfp_type_da_act_lmt_core0: + case ixgbe_sfp_type_da_act_lmt_core1: + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + case 
ixgbe_sfp_type_1g_lx_core0: + case ixgbe_sfp_type_1g_lx_core1: + *linear = false; + break; + case ixgbe_sfp_type_unknown: + case ixgbe_sfp_type_1g_cu_core0: + case ixgbe_sfp_type_1g_cu_core1: + default: + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + return IXGBE_SUCCESS; } -/** ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command +/** + * ixgbe_identify_sfp_module_X550em - Identifies SFP modules * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM * - * Reads a 16 bit word from the EEPROM using the hostif. + * Searches for and identifies the SFP module and assigns appropriate PHY type. **/ -static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) +s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw) { - const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; - struct ixgbe_hic_read_shadow_ram buffer; s32 status; + bool linear; - buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; - buffer.hdr.req.buf_lenh = 0; - buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + DEBUGFUNC("ixgbe_identify_sfp_module_X550em"); - /* convert offset from words to bytes */ - buffer.address = cpu_to_be32(offset * 2); - /* one word */ - buffer.length = cpu_to_be16(sizeof(u16)); + status = ixgbe_identify_module_generic(hw); - status = hw->mac.ops.acquire_swfw_sync(hw, mask); - if (status) + if (status != IXGBE_SUCCESS) return status; - status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT); - if (!status) { - *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, - FW_NVM_DATA_OFFSET); - } + /* Check if SFP module is supported */ + status = ixgbe_supported_sfp_modules_X550em(hw, &linear); - hw->mac.ops.release_swfw_sync(hw, mask); return status; } -/** ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum +/** + * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops * @hw: pointer 
to hardware structure - * @checksum_val: calculated checksum - * - * Performs checksum calculation and validates the EEPROM checksum. If the - * caller does not need checksum_val, the value can be NULL. - **/ -static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, - u16 *checksum_val) + */ +s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) { s32 status; - u16 checksum; - u16 read_checksum = 0; - - /* Read the first word from the EEPROM. If this times out or fails, do - * not continue or we could be in for a very long wait while every - * EEPROM read fails - */ - status = hw->eeprom.ops.read(hw, 0, &checksum); - if (status) { - hw_dbg(hw, "EEPROM read failed\n"); - return status; - } + bool linear; - status = hw->eeprom.ops.calc_checksum(hw); - if (status < 0) - return status; + DEBUGFUNC("ixgbe_setup_sfp_modules_X550em"); - checksum = (u16)(status & 0xffff); + /* Check if SFP module is supported */ + status = ixgbe_supported_sfp_modules_X550em(hw, &linear); - status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, - &read_checksum); - if (status) + if (status != IXGBE_SUCCESS) return status; - /* Verify read checksum from EEPROM is the same as - * calculated checksum - */ - if (read_checksum != checksum) { - status = IXGBE_ERR_EEPROM_CHECKSUM; - hw_dbg(hw, "Invalid EEPROM checksum"); - } - - /* If the user cares, return the calculated checksum */ - if (checksum_val) - *checksum_val = checksum; + ixgbe_init_mac_link_ops_X550em(hw); + hw->phy.ops.reset = NULL; - return status; + return IXGBE_SUCCESS; } -/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @data: word write to the EEPROM - * - * Write a 16 bit word to the EEPROM using the hostif. 
- **/ -static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, - u16 data) +/** +* ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the +* internal PHY +* @hw: pointer to hardware structure +**/ +STATIC s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) { s32 status; - struct ixgbe_hic_write_shadow_ram buffer; + u32 link_ctrl; - buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; - buffer.hdr.req.buf_lenh = 0; - buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + /* Restart auto-negotiation. */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl); - /* one word */ - buffer.length = cpu_to_be16(sizeof(u16)); - buffer.data = data; - buffer.address = cpu_to_be32(offset * 2); + if (status) { + DEBUGOUT("Auto-negotiation did not complete\n"); + return status; + } - status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT, false); - return status; -} + link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl); -/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @data: word write to the EEPROM - * - * Write a 16 bit word to the EEPROM using the hostif. 
- **/ -static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data) -{ - s32 status = 0; + if (hw->mac.type == ixgbe_mac_X550EM_a) { + u32 flx_mask_st20; - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { - status = ixgbe_write_ee_hostif_data_X550(hw, offset, data); - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - } else { - hw_dbg(hw, "write ee hostif failed to get semaphore"); - status = IXGBE_ERR_SWFW_SYNC; + /* Indicate to FW that AN restart has been asserted */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20); + + if (status) { + DEBUGOUT("Auto-negotiation did not complete\n"); + return status; + } + + flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20); } return status; } -/** ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device - * @hw: pointer to hardware structure - * - * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. 
- **/ -static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) +/** + * ixgbe_setup_sgmii - Set up link for sgmii + * @hw: pointer to hardware structure + */ +STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait) { - s32 status = 0; - union ixgbe_hic_hdr2 buffer; + struct ixgbe_mac_info *mac = &hw->mac; + u32 lval, sval, flx_val; + s32 rc; - buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; - buffer.req.buf_lenh = 0; - buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; - buffer.req.checksum = FW_DEFAULT_CHECKSUM; + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); + if (rc) + return rc; - status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT, false); - return status; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); + if (rc) + return rc; + + sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; + sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, sval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); + if (rc) + return rc; + + flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; + flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + flx_val |= 
IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); + if (rc) + return rc; + + rc = ixgbe_restart_an_internal_phy_x550em(hw); + if (rc) + return rc; + + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); } /** - * ixgbe_get_bus_info_X550em - Set PCI bus info + * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs * @hw: pointer to hardware structure - * - * Sets bus link width and speed to unknown because X550em is - * not a PCI device. - **/ -static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) + */ +STATIC s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait) { - hw->bus.type = ixgbe_bus_type_internal; - hw->bus.width = ixgbe_bus_width_unknown; - hw->bus.speed = ixgbe_bus_speed_unknown; + struct ixgbe_mac_info *mac = &hw->mac; + u32 lval, sval, flx_val; + s32 rc; - hw->mac.ops.set_lan_id(hw); + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); + if (rc) + return rc; + + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); + if (rc) + return rc; + + sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; + sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, sval); + if (rc) + return rc; + + rc = mac->ops.write_iosf_sb_reg(hw, + 
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); + if (rc) + return rc; - return 0; + flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; + flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); + if (rc) + return rc; + + rc = ixgbe_restart_an_internal_phy_x550em(hw); + + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); } -/** ixgbe_disable_rx_x550 - Disable RX unit - * - * Enables the Rx DMA unit for x550 - **/ -static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) +/** + * ixgbe_init_mac_link_ops_X550em - init mac link function pointers + * @hw: pointer to hardware structure + */ +void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) { - u32 rxctrl, pfdtxgswc; - s32 status; - struct ixgbe_hic_disable_rxen fw_cmd; - - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - if (rxctrl & IXGBE_RXCTRL_RXEN) { - pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); - if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { - pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); - hw->mac.set_lben = true; - } else { - hw->mac.set_lben = false; - } + struct ixgbe_mac_info *mac = &hw->mac; - fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; - fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; - fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; - fw_cmd.port_number = hw->bus.lan_id; + DEBUGFUNC("ixgbe_init_mac_link_ops_X550em"); - status = ixgbe_host_interface_command(hw, &fw_cmd, - sizeof(struct ixgbe_hic_disable_rxen), - IXGBE_HI_COMMAND_TIMEOUT, true); + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + /* 
CS4227 does not support autoneg, so disable the laser control + * functions for SFP+ fiber + */ + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; + mac->ops.flap_tx_laser = NULL; + mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; + mac->ops.set_rate_select_speed = + ixgbe_set_soft_rate_select_speed; - /* If we fail - disable RX using register write */ - if (status) { - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - if (rxctrl & IXGBE_RXCTRL_RXEN) { - rxctrl &= ~IXGBE_RXCTRL_RXEN; - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) || + (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP)) + mac->ops.setup_mac_link = + ixgbe_setup_mac_link_sfp_x550a; + else + mac->ops.setup_mac_link = + ixgbe_setup_mac_link_sfp_x550em; + break; + case ixgbe_media_type_copper: + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) + break; + if (hw->mac.type == ixgbe_mac_X550EM_a) { + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { + mac->ops.setup_link = ixgbe_setup_sgmii_fw; + mac->ops.check_link = + ixgbe_check_mac_link_generic; + } else { + mac->ops.setup_link = + ixgbe_setup_mac_link_t_X550em; } + } else { + mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + mac->ops.check_link = ixgbe_check_link_t_X550em; } + break; + case ixgbe_media_type_backplane: + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || + hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) + mac->ops.setup_link = ixgbe_setup_sgmii; + break; + default: + break; } } -/** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash +/** + * ixgbe_get_link_capabilities_x550em - Determines link capabilities * @hw: pointer to hardware structure - * - * After writing EEPROM to shadow RAM using EEWR register, software calculates - * checksum and updates the EEPROM and instructs the hardware to update - * the flash. 
- **/ -static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + */ +s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) { - s32 status; - u16 checksum = 0; + DEBUGFUNC("ixgbe_get_link_capabilities_X550em"); - /* Read the first word from the EEPROM. If this times out or fails, do - * not continue or we could be in for a very long wait while every - * EEPROM read fails - */ - status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum); - if (status) { - hw_dbg(hw, "EEPROM read failed\n"); - return status; + if (hw->phy.type == ixgbe_phy_fw) { + *autoneg = true; + *speed = hw->phy.speeds_supported; + return 0; } - status = ixgbe_calc_eeprom_checksum_X550(hw); - if (status < 0) - return status; + /* SFP */ + if (hw->phy.media_type == ixgbe_media_type_fiber) { - checksum = (u16)(status & 0xffff); + /* CS4227 SFP must not enable auto-negotiation */ + *autoneg = false; - status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, - checksum); - if (status) - return status; + /* Check if 1G SFP module. 
*/ + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 + || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + return IXGBE_SUCCESS; + } - status = ixgbe_update_flash_X550(hw); + /* Link capabilities are based on SFP */ + if (hw->phy.multispeed_fiber) + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + } else { + switch (hw->phy.type) { + case ixgbe_phy_ext_1g_t: + case ixgbe_phy_sgmii: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case ixgbe_phy_x550em_kr: + if (hw->mac.type == ixgbe_mac_X550EM_a) { + /* check different backplane modes */ + if (hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + break; + } else if (hw->device_id == + IXGBE_DEV_ID_X550EM_A_KR_L) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + } + } + /* fall through */ + default: + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + break; + } + *autoneg = true; + } - return status; + return IXGBE_SUCCESS; } -/** ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @words: number of words - * @data: word(s) write to the EEPROM +/** + * ixgbe_get_lasi_ext_t_x550em - Determime external Base T PHY interrupt cause + * @hw: pointer to hardware structure + * @lsc: pointer to boolean flag which indicates whether external Base T + * PHY interrupt is lsc * + * Determime if external Base T PHY interrupt cause is high temperature + * failure alarm or link status change. * - * Write a 16 bit word(s) to the EEPROM using the hostif. 
- **/ -static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, - u16 offset, u16 words, - u16 *data) + * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature + * failure alarm, else return PHY access status. + */ +STATIC s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) { - s32 status = 0; - u32 i = 0; + u32 status; + u16 reg; - /* Take semaphore for the entire operation. */ - status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - if (status) { - hw_dbg(hw, "EEPROM write buffer - semaphore failed\n"); + *lsc = false; + + /* Vendor alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS || + !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN)) return status; - } - for (i = 0; i < words; i++) { - status = ixgbe_write_ee_hostif_data_X550(hw, offset + i, - data[i]); - if (status) { - hw_dbg(hw, "Eeprom buffered write failed\n"); - break; + /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS || + !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | + IXGBE_MDIO_GLOBAL_ALARM_1_INT))) + return status; + + /* Global alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS) + return status; + + /* If high temperature failure, then return over temp error and exit */ + if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) { + /* power down the PHY in case the PHY FW didn't already */ + ixgbe_set_copper_phy_power(hw, false); + return IXGBE_ERR_OVERTEMP; + } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { + /* device fault alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != 
IXGBE_SUCCESS) + return status; + + /* if device fault was due to high temp alarm handle and exit */ + if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) { + /* power down the PHY in case the PHY FW didn't */ + ixgbe_set_copper_phy_power(hw, false); + return IXGBE_ERR_OVERTEMP; } } - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + /* Vendor alarm 2 triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); - return status; + if (status != IXGBE_SUCCESS || + !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT)) + return status; + + /* link connect/disconnect event occurred */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); + + if (status != IXGBE_SUCCESS) + return status; + + /* Indicate LSC */ + if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC) + *lsc = true; + + return IXGBE_SUCCESS; } -/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the - * IOSF device +/** + * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts + * @hw: pointer to hardware structure * - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 3 bit device type - * @data: Data to write to the register - **/ -static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u32 data) + * Enable link status change and temperature failure alarm for the external + * Base T PHY + * + * Returns PHY access status + */ +STATIC s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) { - u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; - u32 command, error; - s32 ret; + u32 status; + u16 reg; + bool lsc; - ret = hw->mac.ops.acquire_swfw_sync(hw, gssr); - if (ret) - return ret; + /* Clear interrupt flags */ + status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); - ret = ixgbe_iosf_wait(hw, NULL); - if (ret) - goto out; + /* Enable link status change alarm */ - command = ((reg_addr << 
IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | - (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + /* Enable the LASI interrupts on X552 devices to receive notifications + * of the link configurations of the external PHY and correspondingly + * support the configuration of the internal iXFI link, since iXFI does + * not support auto-negotiation. This is not required for X553 devices + * having KR support, which performs auto-negotiations and which is used + * as the internal link to the external PHY. Hence adding a check here + * to avoid enabling LASI interrupts for X553 devices. + */ + if (hw->mac.type != ixgbe_mac_X550EM_a) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); - /* Write IOSF control register */ - IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + if (status != IXGBE_SUCCESS) + return status; - /* Write IOSF data register */ - IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data); + reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; - ret = ixgbe_iosf_wait(hw, &command); + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg); - if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { - error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> - IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; - hw_dbg(hw, "Failed to write, error %x\n", error); - return IXGBE_ERR_PHY; + if (status != IXGBE_SUCCESS) + return status; } -out: - hw->mac.ops.release_swfw_sync(hw, gssr); - return ret; + /* Enable high temperature failure and global fault alarms */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS) + return status; + + reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN | + IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN); + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + + if (status != IXGBE_SUCCESS) + return status; + 
+ /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS) + return status; + + reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | + IXGBE_MDIO_GLOBAL_ALARM_1_INT); + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + + if (status != IXGBE_SUCCESS) + return status; + + /* Enable chip-wide vendor alarm */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS) + return status; + + reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN; + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + + return status; } /** - * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration + * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed. * @hw: pointer to hardware structure + * @speed: link speed * - * iXfI configuration needed for ixgbe_mac_X550EM_x devices. + * Configures the integrated KR PHY. **/ -static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) +STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed) { s32 status; u32 reg_val; - /* Disable training protocol FSM. 
*/ - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); if (status) return status; - reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL; - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - if (status) - return status; + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | + IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); - /* Disable Flex from training TXFFE. */ - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status) - return status; + /* Advertise 10G support. */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR; - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - if (status) - return status; + /* Advertise 1G support. 
*/ + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status) - return status; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - if (status) - return status; + if (hw->mac.type == ixgbe_mac_X550EM_a) { + /* Set lane mode to KR auto negotiation */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - /* Enable override for coefficients. */ - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status) - return status; + if (status) + return status; - reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN; - reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN; - reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN; - reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN; - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - return status; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; + reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; + reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + } + + return ixgbe_restart_an_internal_phy_x550em(hw); } /** - * ixgbe_restart_an_internal_phy_x550em 
- restart autonegotiation for the - * internal PHY - * @hw: pointer to hardware structure - **/ -static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) + * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs + * @hw: pointer to hardware structure + */ +static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) { - s32 status; - u32 link_ctrl; + u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; + s32 rc; - /* Restart auto-negotiation. */ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl); + if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) + return IXGBE_SUCCESS; - if (status) { - hw_dbg(hw, "Auto-negotiation did not complete\n"); - return status; - } + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store); + if (rc) + return rc; + memset(store, 0, sizeof(store)); - link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; - status = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl); + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store); + if (rc) + return rc; - if (hw->mac.type == ixgbe_mac_x550em_a) { - u32 flx_mask_st20; + return ixgbe_setup_fw_link(hw); +} - /* Indicate to FW that AN restart has been asserted */ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20); +/** + * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp + * @hw: pointer to hardware structure + */ +static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) +{ + u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; + s32 rc; - if (status) { - hw_dbg(hw, "Auto-negotiation did not complete\n"); - return status; - } + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store); + if (rc) + return rc; - flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART; - status = hw->mac.ops.write_iosf_sb_reg(hw, - 
IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20); + if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) { + ixgbe_shutdown_fw_phy(hw); + return IXGBE_ERR_OVERTEMP; } - - return status; + return IXGBE_SUCCESS; } -/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. +/** + * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register * @hw: pointer to hardware structure - * @speed: the link speed to force * - * Configures the integrated KR PHY to use iXFI mode. Used to connect an - * internal and external PHY at a specific speed, without autonegotiation. + * Read NW_MNG_IF_SEL register and save field values, and check for valid field + * values. **/ -static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) +STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) { - struct ixgbe_mac_info *mac = &hw->mac; - s32 status; - u32 reg_val; + /* Save NW management interface connected on board. This is used + * to determine internal PHY mode. + */ + hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); - /* iXFI is only supported with X552 */ - if (mac->type != ixgbe_mac_X550EM_x) - return IXGBE_ERR_LINK_SETUP; + /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set + * PHY address. This register field was has only been used for X552. + */ + if (hw->mac.type == ixgbe_mac_X550EM_a && + hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) { + hw->phy.addr = (hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; + } - /* Disable AN and force speed to 10G Serial. */ - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status) - return status; + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { + hw->phy.addr = (hw->bus.lan_id == 0) ? 
(1) : (0); + } - reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + return IXGBE_SUCCESS; +} - /* Select forced link speed for internal PHY. */ - switch (*speed) { - case IXGBE_LINK_SPEED_10GB_FULL: - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; +/** + * ixgbe_init_phy_ops_X550em - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + */ +s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_phy_ops_X550em"); + + hw->mac.ops.set_lan_id(hw); + ixgbe_read_mng_if_sel_x550em(hw); + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { + phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + ixgbe_setup_mux_ctl(hw); + phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22; + phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22; + hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a; + hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a; + phy->ops.check_overtemp = ixgbe_check_overtemp_fw; + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; + + break; + case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_SFP: + hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a; + hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a; + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; + break; + case IXGBE_DEV_ID_X550EM_X_SFP: + /* set up for CS4227 usage */ + hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + break; + 
default: + break; + } + + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED || + ret_val == IXGBE_ERR_PHY_ADDR_INVALID) + return ret_val; + + /* Setup function pointers based on detected hardware */ + ixgbe_init_mac_link_ops_X550em(hw); + if (phy->sfp_type != ixgbe_sfp_type_unknown) + phy->ops.reset = NULL; + + /* Set functions pointers based on phy type */ + switch (hw->phy.type) { + case ixgbe_phy_x550em_kx4: + phy->ops.setup_link = NULL; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_kr: + phy->ops.setup_link = ixgbe_setup_kr_x550em; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_ext_1g_t: + /* link is managed by FW */ + phy->ops.setup_link = NULL; + phy->ops.reset = NULL; + break; + case ixgbe_phy_x550em_xfi: + /* link is managed by HW */ + phy->ops.setup_link = NULL; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_ext_t: + /* If internal link mode is XFI, then setup iXFI internal link, + * else setup KR now. + */ + phy->ops.setup_internal_link = + ixgbe_setup_internal_phy_t_x550em; + + /* setup SW LPLU only for first revision of X550EM_x */ + if ((hw->mac.type == ixgbe_mac_X550EM_x) && + !(IXGBE_FUSES0_REV_MASK & + IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) + phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em; + + phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; + phy->ops.reset = ixgbe_reset_phy_t_X550em; break; - case IXGBE_LINK_SPEED_1GB_FULL: - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + case ixgbe_phy_sgmii: + phy->ops.setup_link = NULL; + break; + case ixgbe_phy_fw: + phy->ops.setup_link = ixgbe_setup_fw_link; + phy->ops.reset = ixgbe_reset_phy_fw; break; default: - /* Other link speeds are not supported by internal KR PHY. 
*/ - return IXGBE_ERR_LINK_SETUP; - } - - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - if (status) - return status; - - /* Additional configuration needed for x550em_x */ - if (hw->mac.type == ixgbe_mac_X550EM_x) { - status = ixgbe_setup_ixfi_x550em_x(hw); - if (status) - return status; + break; } - - /* Toggle port SW reset by AN reset. */ - status = ixgbe_restart_an_internal_phy_x550em(hw); - - return status; + return ret_val; } /** - * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported + * ixgbe_set_mdio_speed - Set MDIO clock speed * @hw: pointer to hardware structure - * @linear: true if SFP module is linear */ -static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) +STATIC void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) { - switch (hw->phy.sfp_type) { - case ixgbe_sfp_type_not_present: - return IXGBE_ERR_SFP_NOT_PRESENT; - case ixgbe_sfp_type_da_cu_core0: - case ixgbe_sfp_type_da_cu_core1: - *linear = true; + u32 hlreg0; + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: + case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_QSFP: + /* Config MDIO clock speed before the first MDIO PHY access */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + hlreg0 &= ~IXGBE_HLREG0_MDCSPD; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); break; - case ixgbe_sfp_type_srlr_core0: - case ixgbe_sfp_type_srlr_core1: - case ixgbe_sfp_type_da_act_lmt_core0: - case ixgbe_sfp_type_da_act_lmt_core1: - case ixgbe_sfp_type_1g_sx_core0: - case ixgbe_sfp_type_1g_sx_core1: - case ixgbe_sfp_type_1g_lx_core0: - case ixgbe_sfp_type_1g_lx_core1: - *linear = false; + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + /* Select fast MDIO clock speed for these devices */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + hlreg0 
|= IXGBE_HLREG0_MDCSPD; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); break; - case ixgbe_sfp_type_unknown: - case ixgbe_sfp_type_1g_cu_core0: - case ixgbe_sfp_type_1g_cu_core1: default: - return IXGBE_ERR_SFP_NOT_SUPPORTED; + break; } - - return 0; } /** - * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP. + * ixgbe_reset_hw_X550em - Perform hardware reset * @hw: pointer to hardware structure * - * Configures the extern PHY and the integrated KR PHY for SFP support. + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. */ -static s32 -ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - __always_unused bool autoneg_wait_to_complete) +s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) { + ixgbe_link_speed link_speed; s32 status; - u16 reg_slice, reg_val; - bool setup_linear = false; + u32 ctrl = 0; + u32 i; + bool link_up = false; + u32 swfw_mask = hw->phy.phy_semaphore_mask; - /* Check if SFP module is supported and linear */ - status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + DEBUGFUNC("ixgbe_reset_hw_X550em"); - /* If no SFP module present, then return success. Return success since - * there is no reason to configure CS4227 and SFP not present error is - * not accepted in the setup MAC link flow. 
- */ - if (status == IXGBE_ERR_SFP_NOT_PRESENT) - return 0; + /* Call adapter stop to disable Tx/Rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status != IXGBE_SUCCESS) { + DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status); + return status; + } + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + + ixgbe_set_mdio_speed(hw); + + /* PHY ops must be identified and initialized prior to reset */ + status = hw->phy.ops.init(hw); if (status) + DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n", + status); + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED || + status == IXGBE_ERR_PHY_ADDR_INVALID) { + DEBUGOUT("Returning from reset HW due to PHY init failure\n"); return status; + } - /* Configure internal PHY for KR/KX. */ - ixgbe_setup_kr_speed_x550em(hw, speed); + /* start the external PHY */ + if (hw->phy.type == ixgbe_phy_x550em_ext_t) { + status = ixgbe_init_ext_t_x550em(hw); + if (status) { + DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n", + status); + return status; + } + } - /* Configure CS4227 LINE side to proper mode. */ - reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); - if (setup_linear) - reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; - else - reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + /* Setup SFP module if there is one present. */ + if (hw->phy.sfp_setup_needed) { + status = hw->mac.ops.setup_sfp(hw); + hw->phy.sfp_setup_needed = false; + } - status = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, - reg_val); + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + return status; - return status; -} + /* Reset PHY */ + if (!hw->phy.reset_disable && hw->phy.ops.reset) { + if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP) + return IXGBE_ERR_OVERTEMP; + } -/** - * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode - * @hw: pointer to hardware structure - * @speed: the link speed to force - * - * Configures the integrated PHY for native SFI mode. 
Used to connect the - * internal PHY directly to an SFP cage, without autonegotiation. - **/ -static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) -{ - struct ixgbe_mac_info *mac = &hw->mac; - s32 status; - u32 reg_val; +mac_reset_top: + /* Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. + */ + ctrl = IXGBE_CTRL_LNK_RST; + if (!hw->force_full_reset) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up) + ctrl = IXGBE_CTRL_RST; + } - /* Disable all AN and force speed to 10G Serial. */ - status = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status) - return status; + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "semaphore failed with %d", status); + return IXGBE_ERR_SWFW_SYNC; + } + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; + /* Poll for reset bit to self-clear meaning reset is complete */ + for (i = 0; i < 10; i++) { + usec_delay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } - /* Select forced link speed for internal PHY. */ - switch (*speed) { - case IXGBE_LINK_SPEED_10GB_FULL: - reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G; - break; - case IXGBE_LINK_SPEED_1GB_FULL: - reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; - break; - default: - /* Other link speeds are not supported by internal PHY. 
*/ - return IXGBE_ERR_LINK_SETUP; + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + DEBUGOUT("Reset polling failed to complete.\n"); } - status = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + msec_delay(50); - /* Toggle port SW reset by AN reset. */ - status = ixgbe_restart_an_internal_phy_x550em(hw); + /* Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to + * allow time for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + ixgbe_set_mdio_speed(hw); + + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) + ixgbe_setup_mux_ctl(hw); + + if (status != IXGBE_SUCCESS) + DEBUGOUT1("Reset HW failed, STATUS = %d\n", status); return status; } /** - * ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP + * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. * @hw: pointer to hardware structure - * - * Configure the the integrated PHY for native SFP support. 
*/ -static s32 -ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed, - __always_unused bool autoneg_wait_to_complete) +s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) { - bool setup_linear = false; - u32 reg_phy_int; - s32 ret_val; + u32 status; + u16 reg; - /* Check if SFP module is supported and linear */ - ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + ®); - /* If no SFP module present, then return success. Return success since - * SFP not present error is not excepted in the setup MAC link flow. + if (status != IXGBE_SUCCESS) + return status; + + /* If PHY FW reset completed bit is set then this is the first + * SW instance after a power on so the PHY FW must be un-stalled. */ - if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) - return 0; + if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); - if (ret_val) - return ret_val; + if (status != IXGBE_SUCCESS) + return status; - /* Configure internal PHY for native SFI based on module type */ - ret_val = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_phy_int); - if (ret_val) - return ret_val; + reg &= ~IXGBE_MDIO_POWER_UP_STALL; - reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA; - if (!setup_linear) - reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR; + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); - ret_val = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int); - if (ret_val) - return ret_val; + if (status != IXGBE_SUCCESS) + return status; + } - /* Setup SFI internal link. 
*/ - return ixgbe_setup_sfi_x550a(hw, &speed); + return status; } /** - * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP - * @hw: pointer to hardware structure + * ixgbe_setup_kr_x550em - Configure the KR PHY. + * @hw: pointer to hardware structure + **/ +s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) +{ + /* leave link alone for 2.5G */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) + return IXGBE_SUCCESS; + + if (ixgbe_check_reset_blocked(hw)) + return 0; + + return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); +} + +/** + * ixgbe_setup_mac_link_sfp_x550em - Setup internal/external the PHY for SFP + * @hw: pointer to hardware structure * - * Configure the the integrated PHY for SFP support. - */ -static s32 -ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, - __always_unused bool autoneg_wait_to_complete) + * Configure the external PHY and the integrated KR PHY for SFP support. + **/ +s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { - u32 reg_slice, slice_offset; - bool setup_linear = false; - u16 reg_phy_ext; s32 ret_val; + u16 reg_slice, reg_val; + bool setup_linear = false; + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); /* Check if SFP module is supported and linear */ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); /* If no SFP module present, then return success. Return success since - * SFP not present error is not excepted in the setup MAC link flow. + * there is no reason to configure CS4227 and SFP not present error is + * not excepted in the setup MAC link flow. */ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) - return 0; + return IXGBE_SUCCESS; - if (ret_val) + if (ret_val != IXGBE_SUCCESS) return ret_val; /* Configure internal PHY for KR/KX. 
*/ ixgbe_setup_kr_speed_x550em(hw, speed); - if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE) - return IXGBE_ERR_PHY_ADDR_INVALID; - - /* Get external PHY SKU id */ - ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU, - IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); - if (ret_val) - return ret_val; - - /* When configuring quad port CS4223, the MAC instance is part - * of the slice offset. - */ - if (reg_phy_ext == IXGBE_CS4223_SKU_ID) - slice_offset = (hw->bus.lan_id + - (hw->bus.instance_id << 1)) << 12; - else - slice_offset = hw->bus.lan_id << 12; - - /* Configure CS4227/CS4223 LINE side to proper mode. */ - reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; - - ret_val = hw->phy.ops.read_reg(hw, reg_slice, - IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); - if (ret_val) - return ret_val; - - reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) | - (IXGBE_CS4227_EDC_MODE_SR << 1)); - + /* Configure CS4227 LINE side to proper mode. */ + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + + (hw->bus.lan_id << 12); if (setup_linear) - reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; + reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; else - reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; - - ret_val = hw->phy.ops.write_reg(hw, reg_slice, - IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext); - if (ret_val) - return ret_val; - - /* Flush previous write with a read */ - return hw->phy.ops.read_reg(hw, reg_slice, - IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); + reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, + reg_val); + return ret_val; } /** - * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true when waiting for completion is needed - * - * Setup internal/external PHY link speed based on link speed, then set - * external PHY auto advertised link speed. 
+ * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode + * @hw: pointer to hardware structure + * @speed: the link speed to force * - * Returns error status for any failure + * Configures the integrated PHY for native SFI mode. Used to connect the + * internal PHY directly to an SFP cage, without autonegotiation. **/ -static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait) +STATIC s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) { + struct ixgbe_mac_info *mac = &hw->mac; s32 status; - ixgbe_link_speed force_speed; - - /* Setup internal/external PHY link speed to iXFI (10G), unless - * only 1G is auto advertised then setup KX link. - */ - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - force_speed = IXGBE_LINK_SPEED_10GB_FULL; - else - force_speed = IXGBE_LINK_SPEED_1GB_FULL; - - /* If X552 and internal link mode is XFI, then setup XFI internal link. - */ - if (hw->mac.type == ixgbe_mac_X550EM_x && - !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { - status = ixgbe_setup_ixfi_x550em(hw, &force_speed); - - if (status) - return status; - } - - return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); -} - -/** ixgbe_check_link_t_X550em - Determine link and speed status - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @link_up: true when link is up - * @link_up_wait_to_complete: bool used to wait for link up or not - * - * Check that both the MAC and X557 external PHY have link. 
- **/ -static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up, - bool link_up_wait_to_complete) -{ - u32 status; - u16 i, autoneg_status; - - if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) - return IXGBE_ERR_CONFIG; - - status = ixgbe_check_mac_link_generic(hw, speed, link_up, - link_up_wait_to_complete); + u32 reg_val; - /* If check link fails or MAC link is not up, then return */ - if (status || !(*link_up)) + /* Disable all AN and force speed to 10G Serial. */ + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) return status; - /* MAC link is up, so check external PHY link. - * Link status is latching low, and can only be used to detect link - * drop, and not the current status of the link without performing - * back-to-back reads. - */ - for (i = 0; i < 2; i++) { - status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, - &autoneg_status); + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; - if (status) - return status; + /* Select forced link speed for internal PHY. */ + switch (*speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; + break; + default: + /* Other link speeds are not supported by internal PHY. 
*/ + return IXGBE_ERR_LINK_SETUP; } - /* If external PHY link is not up, then indicate link not up */ - if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) - *link_up = false; - - return 0; -} - -/** - * ixgbe_setup_sgmii - Set up link for sgmii - * @hw: pointer to hardware structure - */ -static s32 -ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, - __always_unused bool autoneg_wait_to_complete) -{ - struct ixgbe_mac_info *mac = &hw->mac; - u32 lval, sval, flx_val; - s32 rc; - - rc = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); - if (rc) - return rc; - - lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; - lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; - lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; - lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; - rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, lval); - if (rc) - return rc; - - rc = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); - if (rc) - return rc; - - sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; - sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; - rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, sval); - if (rc) - return rc; - - rc = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); - if (rc) - return rc; - - rc = mac->ops.read_iosf_sb_reg(hw, + status = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); - if (rc) - return rc; - - flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; - flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; - flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; - flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; - 
flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); - if (rc) - return rc; + /* Toggle port SW reset by AN reset. */ + status = ixgbe_restart_an_internal_phy_x550em(hw); - rc = ixgbe_restart_an_internal_phy_x550em(hw); - return rc; + return status; } /** - * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs - * @hw: pointer to hardware structure - */ -static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg_wait) + * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP + * @hw: pointer to hardware structure + * + * Configure the the integrated PHY for SFP support. + **/ +s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { - struct ixgbe_mac_info *mac = &hw->mac; - u32 lval, sval, flx_val; - s32 rc; + s32 ret_val; + u16 reg_phy_ext; + bool setup_linear = false; + u32 reg_slice, reg_phy_int, slice_offset; - rc = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); - if (rc) - return rc; + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); - lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; - lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; - lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; - lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; - rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, lval); - if (rc) - return rc; + /* Check if SFP module is supported and linear */ + ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); - rc = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); - if (rc) - return rc; + /* If no SFP module present, then 
return success. Return success since + * SFP not present error is not excepted in the setup MAC link flow. + */ + if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) + return IXGBE_SUCCESS; - sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; - sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; - rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, sval); - if (rc) - return rc; + if (ret_val != IXGBE_SUCCESS) + return ret_val; - rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, lval); - if (rc) - return rc; + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) { + /* Configure internal PHY for native SFI based on module type */ + ret_val = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_phy_int); - rc = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); - if (rc) - return rc; + if (ret_val != IXGBE_SUCCESS) + return ret_val; - flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; - flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; - flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; - flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; - flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA; + if (!setup_linear) + reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR; - rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); - if (rc) - return rc; + ret_val = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int); - ixgbe_restart_an_internal_phy_x550em(hw); + if (ret_val != IXGBE_SUCCESS) + return ret_val; - return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); -} + /* Setup SFI internal link. 
*/ + ret_val = ixgbe_setup_sfi_x550a(hw, &speed); + } else { + /* Configure internal PHY for KR/KX. */ + ixgbe_setup_kr_speed_x550em(hw, speed); -/** - * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 - * @hw: pointer to hardware structure - * - * Enable flow control according to IEEE clause 37. - */ -static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; - u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; - ixgbe_link_speed speed; - bool link_up; + if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) { + /* Find Address */ + DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n"); + return IXGBE_ERR_PHY_ADDR_INVALID; + } - /* AN should have completed when the cable was plugged in. - * Look for reasons to bail out. Bail out if: - * - FC autoneg is disabled, or if - * - link is not up. - */ - if (hw->fc.disable_fc_autoneg) - goto out; + /* Get external PHY SKU id */ + ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU, + IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + /* When configuring quad port CS4223, the MAC instance is part + * of the slice offset. + */ + if (reg_phy_ext == IXGBE_CS4223_SKU_ID) + slice_offset = (hw->bus.lan_id + + (hw->bus.instance_id << 1)) << 12; + else + slice_offset = hw->bus.lan_id << 12; - hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (!link_up) - goto out; + /* Configure CS4227/CS4223 LINE side to proper mode. 
*/ + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; - /* Check if auto-negotiation has completed */ - status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); - if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { - status = IXGBE_ERR_FC_NOT_NEGOTIATED; - goto out; - } + ret_val = hw->phy.ops.read_reg(hw, reg_slice, + IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); - /* Negotiate the flow control */ - status = ixgbe_negotiate_fc(hw, info[0], info[0], - FW_PHY_ACT_GET_LINK_INFO_FC_RX, - FW_PHY_ACT_GET_LINK_INFO_FC_TX, - FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, - FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); + if (ret_val != IXGBE_SUCCESS) + return ret_val; -out: - if (!status) { - hw->fc.fc_was_autonegged = true; - } else { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; + reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) | + (IXGBE_CS4227_EDC_MODE_SR << 1)); + + if (setup_linear) + reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; + else + reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + ret_val = hw->phy.ops.write_reg(hw, reg_slice, + IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext); + + /* Flush previous write with a read */ + ret_val = hw->phy.ops.read_reg(hw, reg_slice, + IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); } + return ret_val; } -/** ixgbe_init_mac_link_ops_X550em_a - Init mac link function pointers +/** + * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration * @hw: pointer to hardware structure + * + * iXfI configuration needed for ixgbe_mac_X550EM_x devices. 
**/ -static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw) +STATIC s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; + s32 status; + u32 reg_val; - switch (mac->ops.get_media_type(hw)) { - case ixgbe_media_type_fiber: - mac->ops.setup_fc = NULL; - mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; - break; - case ixgbe_media_type_copper: - if (hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T && - hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T_L) { - mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; - break; - } - mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; - mac->ops.setup_fc = ixgbe_fc_autoneg_fw; - mac->ops.setup_link = ixgbe_setup_sgmii_fw; - mac->ops.check_link = ixgbe_check_mac_link_generic; - break; - case ixgbe_media_type_backplane: - mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; - mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; - break; - default: - break; - } + /* Disable training protocol FSM. */ + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL; + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Disable Flex from training TXFFE. 
*/ + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Enable override for coefficients. */ + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN; + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + return status; } -/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers +/** + * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. * @hw: pointer to hardware structure + * @speed: the link speed to force + * + * Configures the integrated KR PHY to use iXFI mode. Used to connect an + * internal and external PHY at a specific speed, without autonegotiation. 
**/ -static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) +STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) { struct ixgbe_mac_info *mac = &hw->mac; + s32 status; + u32 reg_val; - mac->ops.setup_fc = ixgbe_setup_fc_x550em; + /* iXFI is only supported with X552 */ + if (mac->type != ixgbe_mac_X550EM_x) + return IXGBE_ERR_LINK_SETUP; - switch (mac->ops.get_media_type(hw)) { - case ixgbe_media_type_fiber: - /* CS4227 does not support autoneg, so disable the laser control - * functions for SFP+ fiber - */ - mac->ops.disable_tx_laser = NULL; - mac->ops.enable_tx_laser = NULL; - mac->ops.flap_tx_laser = NULL; - mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_A_SFP_N: - mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n; - break; - case IXGBE_DEV_ID_X550EM_A_SFP: - mac->ops.setup_mac_link = - ixgbe_setup_mac_link_sfp_x550a; - break; - default: - mac->ops.setup_mac_link = - ixgbe_setup_mac_link_sfp_x550em; - break; - } - mac->ops.set_rate_select_speed = - ixgbe_set_soft_rate_select_speed; - break; - case ixgbe_media_type_copper: - if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) - break; - mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; - mac->ops.setup_fc = ixgbe_setup_fc_generic; - mac->ops.check_link = ixgbe_check_link_t_X550em; + /* Disable AN and force speed to 10G Serial. */ + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + + /* Select forced link speed for internal PHY. 
*/ + switch (*speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; break; - case ixgbe_media_type_backplane: - if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || - hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) - mac->ops.setup_link = ixgbe_setup_sgmii; + case IXGBE_LINK_SPEED_1GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; break; default: - break; + /* Other link speeds are not supported by internal KR PHY. */ + return IXGBE_ERR_LINK_SETUP; + } + + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Additional configuration needed for x550em_x */ + if (hw->mac.type == ixgbe_mac_X550EM_x) { + status = ixgbe_setup_ixfi_x550em_x(hw); + if (status != IXGBE_SUCCESS) + return status; } - /* Additional modification for X550em_a devices */ - if (hw->mac.type == ixgbe_mac_x550em_a) - ixgbe_init_mac_link_ops_X550em_a(hw); + /* Toggle port SW reset by AN reset. */ + status = ixgbe_restart_an_internal_phy_x550em(hw); + + return status; } -/** ixgbe_setup_sfp_modules_X550em - Setup SFP module - * @hw: pointer to hardware structure +/** + * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status + * @hw: address of hardware structure + * @link_up: address of boolean to indicate link status + * + * Returns error code if unable to get link status. 
*/ -static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) +STATIC s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) { - s32 status; - bool linear; + u32 ret; + u16 autoneg_status; - /* Check if SFP module is supported */ - status = ixgbe_supported_sfp_modules_X550em(hw, &linear); - if (status) - return status; + *link_up = false; - ixgbe_init_mac_link_ops_X550em(hw); - hw->phy.ops.reset = NULL; + /* read this twice back to back to indicate current status */ + ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (ret != IXGBE_SUCCESS) + return ret; + + ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (ret != IXGBE_SUCCESS) + return ret; + + *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS); - return 0; + return IXGBE_SUCCESS; } -/** ixgbe_get_link_capabilities_x550em - Determines link capabilities - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: true when autoneg or autotry is enabled - **/ -static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) +/** + * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link + * @hw: point to hardware structure + * + * Configures the link between the integrated KR PHY and the external X557 PHY + * The driver will call this function when it gets a link status change + * interrupt from the X557 PHY. This function configures the link speed + * between the PHYs to match the link speed of the BASE-T link. + * + * A return of a non-zero value indicates an error, and the base driver should + * not report link up. 
+ */ +s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) { - if (hw->phy.type == ixgbe_phy_fw) { - *autoneg = true; - *speed = hw->phy.speeds_supported; - return 0; - } + ixgbe_link_speed force_speed; + bool link_up; + u32 status; + u16 speed; - /* SFP */ - if (hw->phy.media_type == ixgbe_media_type_fiber) { - /* CS4227 SFP must not enable auto-negotiation */ - *autoneg = false; + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; - if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { - *speed = IXGBE_LINK_SPEED_1GB_FULL; - return 0; - } + if (hw->mac.type == ixgbe_mac_X550EM_x && + !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + /* If link is down, there is no setup necessary so return */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return status; - /* Link capabilities are based on SFP */ - if (hw->phy.multispeed_fiber) - *speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - else - *speed = IXGBE_LINK_SPEED_10GB_FULL; - } else { - switch (hw->phy.type) { - case ixgbe_phy_x550em_kx4: - *speed = IXGBE_LINK_SPEED_1GB_FULL | - IXGBE_LINK_SPEED_2_5GB_FULL | - IXGBE_LINK_SPEED_10GB_FULL; - break; - case ixgbe_phy_x550em_xfi: - *speed = IXGBE_LINK_SPEED_1GB_FULL | - IXGBE_LINK_SPEED_10GB_FULL; - break; - case ixgbe_phy_ext_1g_t: - case ixgbe_phy_sgmii: - *speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - case ixgbe_phy_x550em_kr: - if (hw->mac.type == ixgbe_mac_x550em_a) { - /* check different backplane modes */ - if (hw->phy.nw_mng_if_sel & - IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { - *speed = IXGBE_LINK_SPEED_2_5GB_FULL; - break; - } else if (hw->device_id == - IXGBE_DEV_ID_X550EM_A_KR_L) { - *speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - } - } - /* fall through */ - default: - *speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; + if (!link_up) + return IXGBE_SUCCESS; + + status = 
hw->phy.ops.read_reg(hw, + IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &speed); + if (status != IXGBE_SUCCESS) + return status; + + /* If link is still down - no setup is required so return */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return status; + if (!link_up) + return IXGBE_SUCCESS; + + /* clear everything but the speed and duplex bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; + + switch (speed) { + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL: + force_speed = IXGBE_LINK_SPEED_10GB_FULL; break; + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL: + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + default: + /* Internal PHY does not support anything else */ + return IXGBE_ERR_INVALID_LINK_SETTINGS; } - *autoneg = true; + + return ixgbe_setup_ixfi_x550em(hw, &force_speed); + } else { + speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + return ixgbe_setup_kr_speed_x550em(hw, speed); } - return 0; } /** - * ixgbe_get_lasi_ext_t_x550em - Determime external Base T PHY interrupt cause - * @hw: pointer to hardware structure - * @lsc: pointer to boolean flag which indicates whether external Base T - * PHY interrupt is lsc - * - * Determime if external Base T PHY interrupt cause is high temperature - * failure alarm or link status change. + * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback. + * @hw: pointer to hardware structure * - * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature - * failure alarm, else return PHY access status. + * Configures the integrated KR PHY to use internal loopback mode. 
**/ -static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) +s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw) { - u32 status; - u16 reg; - - *lsc = false; - - /* Vendor alarm triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, - MDIO_MMD_VEND1, - ®); + s32 status; + u32 reg_val; - if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN)) + /* Disable AN and force speed to 10G Serial. */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) return status; - - /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG, - MDIO_MMD_VEND1, - ®); - - if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | - IXGBE_MDIO_GLOBAL_ALARM_1_INT))) + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) return status; - /* Global alarm triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, - MDIO_MMD_VEND1, - ®); + /* Set near-end loopback clocks. */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B; + reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; - if (status) + /* Set loopback enable. 
*/ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) return status; - /* If high temperature failure, then return over temp error and exit */ - if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) { - /* power down the PHY in case the PHY FW didn't already */ - ixgbe_set_copper_phy_power(hw, false); - return IXGBE_ERR_OVERTEMP; - } - if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { - /* device fault alarm triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, - MDIO_MMD_VEND1, - ®); - if (status) - return status; + /* Training bypass. */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - /* if device fault was due to high temp alarm handle and exit */ - if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) { - /* power down the PHY in case the PHY FW didn't */ - ixgbe_set_copper_phy_power(hw, false); - return IXGBE_ERR_OVERTEMP; - } - } + return status; +} - /* Vendor alarm 2 triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, - MDIO_MMD_AN, ®); +/** + * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command + * assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. 
+ **/ +s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; + struct ixgbe_hic_read_shadow_ram buffer; + s32 status; - if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT)) - return status; + DEBUGFUNC("ixgbe_read_ee_hostif_X550"); + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; - /* link connect/disconnect event occurred */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2, - MDIO_MMD_AN, ®); + /* convert offset from words to bytes */ + buffer.address = IXGBE_CPU_TO_BE32(offset * 2); + /* one word */ + buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16)); + status = hw->mac.ops.acquire_swfw_sync(hw, mask); if (status) return status; - /* Indicate LSC */ - if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC) - *lsc = true; + status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT); + if (!status) { + *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, + FW_NVM_DATA_OFFSET); + } - return 0; + hw->mac.ops.release_swfw_sync(hw, mask); + return status; } /** - * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts - * @hw: pointer to hardware structure - * - * Enable link status change and temperature failure alarm for the external - * Base T PHY + * ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM * - * Returns PHY access status + * Reads a 16 bit word(s) from the EEPROM using the hostif. 
**/ -static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) +s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) { - u32 status; - u16 reg; - bool lsc; + const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; + struct ixgbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; + s32 status; + u32 i; - /* Clear interrupt flags */ - status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); + DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550"); - /* Enable link status change alarm */ + /* Take semaphore for the entire operation. */ + status = hw->mac.ops.acquire_swfw_sync(hw, mask); + if (status) { + DEBUGOUT("EEPROM read buffer - semaphore failed\n"); + return status; + } - /* Enable the LASI interrupts on X552 devices to receive notifications - * of the link configurations of the external PHY and correspondingly - * support the configuration of the internal iXFI link, since iXFI does - * not support auto-negotiation. This is not required for X553 devices - * having KR support, which performs auto-negotiations and which is used - * as the internal link to the external PHY. Hence adding a check here - * to avoid enabling LASI interrupts for X553 devices. 
- */ - if (hw->mac.type != ixgbe_mac_x550em_a) { - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, - MDIO_MMD_AN, ®); - if (status) - return status; + while (words) { + if (words > FW_MAX_READ_BUFFER_SIZE / 2) + words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; + else + words_to_read = words; - reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; - status = hw->phy.ops.write_reg(hw, - IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, - MDIO_MMD_AN, reg); - if (status) - return status; - } + /* convert offset from words to bytes */ + buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2); + buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2); - /* Enable high temperature failure and global fault alarms */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, - MDIO_MMD_VEND1, - ®); - if (status) - return status; + status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT); - reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN | - IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN); + if (status) { + DEBUGOUT("Host interface command failed\n"); + goto out; + } - status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, - MDIO_MMD_VEND1, - reg); - if (status) - return status; + for (i = 0; i < words_to_read; i++) { + u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) + + 2 * i; + u32 value = IXGBE_READ_REG(hw, reg); - /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, - MDIO_MMD_VEND1, - ®); - if (status) - return status; + data[current_word] = (u16)(value & 0xffff); + current_word++; + i++; + if (i < words_to_read) { + value >>= 16; + data[current_word] = (u16)(value & 0xffff); + current_word++; + } + } + words -= words_to_read; + } - reg |= 
(IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | - IXGBE_MDIO_GLOBAL_ALARM_1_INT); +out: + hw->mac.ops.release_swfw_sync(hw, mask); + return status; +} - status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, - MDIO_MMD_VEND1, - reg); - if (status) - return status; +/** + * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status; + struct ixgbe_hic_write_shadow_ram buffer; - /* Enable chip-wide vendor alarm */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, - MDIO_MMD_VEND1, - ®); - if (status) - return status; + DEBUGFUNC("ixgbe_write_ee_hostif_data_X550"); - reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN; + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; - status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, - MDIO_MMD_VEND1, - reg); + /* one word */ + buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16)); + buffer.data = data; + buffer.address = IXGBE_CPU_TO_BE32(offset * 2); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); return status; } /** - * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt - * @hw: pointer to hardware structure - * - * Handle external Base T PHY interrupt. 
If high temperature - * failure alarm then return error, else if link status change - * then setup internal/external PHY link + * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM * - * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature - * failure alarm, else return PHY access status. + * Write a 16 bit word to the EEPROM using the hostif. **/ -static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) +s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, + u16 data) { - struct ixgbe_phy_info *phy = &hw->phy; - bool lsc; - u32 status; + s32 status = IXGBE_SUCCESS; - status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); - if (status) - return status; + DEBUGFUNC("ixgbe_write_ee_hostif_X550"); - if (lsc && phy->ops.setup_internal_link) - return phy->ops.setup_internal_link(hw); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + DEBUGOUT("write ee hostif failed to get semaphore"); + status = IXGBE_ERR_SWFW_SYNC; + } - return 0; + return status; } /** - * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed. - * @hw: pointer to hardware structure - * @speed: link speed + * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM * - * Configures the integrated KR PHY. + * Write a 16 bit word(s) to the EEPROM using the hostif. 
**/ -static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, - ixgbe_link_speed speed) +s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) { - s32 status; - u32 reg_val; - - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status) - return status; - - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | - IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); - - /* Advertise 10G support. */ - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR; - - /* Advertise 1G support. */ - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; - - status = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + s32 status = IXGBE_SUCCESS; + u32 i = 0; - if (hw->mac.type == ixgbe_mac_x550em_a) { - /* Set lane mode to KR auto negotiation */ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550"); - if (status) - return status; + /* Take semaphore for the entire operation. 
*/ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + if (status != IXGBE_SUCCESS) { + DEBUGOUT("EEPROM write buffer - semaphore failed\n"); + goto out; + } - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; - reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; - reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + for (i = 0; i < words; i++) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset + i, + data[i]); - status = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Eeprom buffered write failed\n"); + break; + } } - return ixgbe_restart_an_internal_phy_x550em(hw); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); +out: + + return status; } /** - * ixgbe_setup_kr_x550em - Configure the KR PHY + * ixgbe_checksum_ptr_x550 - Checksum one pointer region * @hw: pointer to hardware structure - **/ -static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) + * @ptr: pointer offset in eeprom + * @size: size of section pointed by ptr, if 0 first word will be used as size + * @csum: address of checksum to update + * + * Returns error status for any failure + */ +STATIC s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, + u16 size, u16 *csum, u16 *buffer, + u32 buffer_size) { - /* leave link alone for 2.5G */ - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) - return 0; - - if (ixgbe_check_reset_blocked(hw)) - return 0; + u16 buf[256]; + s32 status; + u16 length, bufsz, i, start; + u16 *local_buffer; - return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); -} + bufsz = sizeof(buf) / sizeof(buf[0]); -/** ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status - * @hw: address of hardware structure - * @link_up: address of boolean to indicate link status - * - * Returns error code if unable to get link 
status. - **/ -static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) -{ - u32 ret; - u16 autoneg_status; + /* Read a chunk at the pointer location */ + if (!buffer) { + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + local_buffer = buf; + } else { + if (buffer_size < ptr) + return IXGBE_ERR_PARAM; + local_buffer = &buffer[ptr]; + } - *link_up = false; + if (size) { + start = 0; + length = size; + } else { + start = 1; + length = local_buffer[0]; - /* read this twice back to back to indicate current status */ - ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, - &autoneg_status); - if (ret) - return ret; + /* Skip pointer section if length is invalid. */ + if (length == 0xFFFF || length == 0 || + (ptr + length) >= hw->eeprom.word_size) + return IXGBE_SUCCESS; + } - ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, - &autoneg_status); - if (ret) - return ret; + if (buffer && ((u32)start + (u32)length > buffer_size)) + return IXGBE_ERR_PARAM; - *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS); + for (i = start; length; i++, length--) { + if (i == bufsz && !buffer) { + ptr += bufsz; + i = 0; + if (length < bufsz) + bufsz = length; - return 0; + /* Read a chunk at the pointer location */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, + bufsz, buf); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + } + *csum += local_buffer[i]; + } + return IXGBE_SUCCESS; } -/** ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link - * @hw: point to hardware structure - * - * Configures the link between the integrated KR PHY and the external X557 PHY - * The driver will call this function when it gets a link status change - * interrupt from the X557 PHY. This function configures the link speed - * between the PHYs to match the link speed of the BASE-T link. 
+/** + * ixgbe_calc_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * @buffer: pointer to buffer containing calculated checksum + * @buffer_size: size of buffer * - * A return of a non-zero value indicates an error, and the base driver should - * not report link up. + * Returns a negative error code on error, or the 16-bit checksum **/ -static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) +s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size) { - ixgbe_link_speed force_speed; - bool link_up; - u32 status; - u16 speed; + u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; + u16 *local_buffer; + s32 status; + u16 checksum = 0; + u16 pointer, i, size; - if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) - return IXGBE_ERR_CONFIG; + DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550"); - if (!(hw->mac.type == ixgbe_mac_X550EM_x && - !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) { - speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - return ixgbe_setup_kr_speed_x550em(hw, speed); - } + hw->eeprom.ops.init_params(hw); - /* If link is not up, then there is no setup necessary so return */ - status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); - if (status) - return status; + if (!buffer) { + /* Read pointer area */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, 0, + IXGBE_EEPROM_LAST_WORD + 1, + eeprom_ptrs); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + local_buffer = eeprom_ptrs; + } else { + if (buffer_size < IXGBE_EEPROM_LAST_WORD) + return IXGBE_ERR_PARAM; + local_buffer = buffer; + } - if (!link_up) - return 0; + /* + * For X550 hardware include 0x0-0x41 in the checksum, skip the + * checksum word itself + */ + for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++) + if (i != IXGBE_EEPROM_CHECKSUM) + checksum += local_buffer[i]; - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, - MDIO_MMD_AN, - &speed); - 
if (status) - return status; + /* + * Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers. + */ + for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; - /* If link is not still up, then no setup is necessary so return */ - status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); - if (status) - return status; + pointer = local_buffer[i]; - if (!link_up) - return 0; + /* Skip pointer section if the pointer is invalid. */ + if (pointer == 0xFFFF || pointer == 0 || + pointer >= hw->eeprom.word_size) + continue; - /* clear everything but the speed and duplex bits */ - speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; + switch (i) { + case IXGBE_PCIE_GENERAL_PTR: + size = IXGBE_IXGBE_PCIE_GENERAL_SIZE; + break; + case IXGBE_PCIE_CONFIG0_PTR: + case IXGBE_PCIE_CONFIG1_PTR: + size = IXGBE_PCIE_CONFIG_SIZE; + break; + default: + size = 0; + break; + } - switch (speed) { - case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL: - force_speed = IXGBE_LINK_SPEED_10GB_FULL; - break; - case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL: - force_speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - default: - /* Internal PHY does not support anything else */ - return IXGBE_ERR_INVALID_LINK_SETTINGS; + status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum, + buffer, buffer_size); + if (status) + return status; } - return ixgbe_setup_ixfi_x550em(hw, &force_speed); + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return (s32)checksum; +} + +/** + * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + return ixgbe_calc_checksum_X550(hw, NULL, 0); } -/** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI +/** + * 
ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. **/ -static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) +s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val) { s32 status; + u16 checksum; + u16 read_checksum = 0; + + DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550"); + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; - status = ixgbe_reset_phy_generic(hw); + checksum = (u16)(status & 0xffff); + status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + &read_checksum); if (status) return status; - /* Configure Link Status Alarm and Temperature Threshold interrupts */ - return ixgbe_enable_lasi_ext_t_x550em(hw); -} - -/** - * ixgbe_led_on_t_x550em - Turns on the software controllable LEDs. - * @hw: pointer to hardware structure - * @led_idx: led number to turn on - **/ -static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx) -{ - u16 phy_data; - - if (led_idx >= IXGBE_X557_MAX_LED_INDEX) - return IXGBE_ERR_PARAM; + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = IXGBE_ERR_EEPROM_CHECKSUM; + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum"); + } - /* To turn on the LED, set mode to ON. 
*/ - hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, - MDIO_MMD_VEND1, &phy_data); - phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK; - hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, - MDIO_MMD_VEND1, phy_data); + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; - return 0; + return status; } /** - * ixgbe_led_off_t_x550em - Turns off the software controllable LEDs. - * @hw: pointer to hardware structure - * @led_idx: led number to turn off + * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. **/ -static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) +s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) { - u16 phy_data; - - if (led_idx >= IXGBE_X557_MAX_LED_INDEX) - return IXGBE_ERR_PARAM; - - /* To turn on the LED, set mode to ON. */ - hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, - MDIO_MMD_VEND1, &phy_data); - phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK; - hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, - MDIO_MMD_VEND1, phy_data); + s32 status; + u16 checksum = 0; - return 0; -} + DEBUGFUNC("ixgbe_update_eeprom_checksum_X550"); -/** - * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware - * @hw: pointer to the HW structure - * @maj: driver version major number - * @min: driver version minor number - * @build: driver version build number - * @sub: driver version sub build number - * @len: length of driver_ver string - * @driver_ver: driver string - * - * Sends driver version number to firmware through the manageability - * block. 
On success return 0 - * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring - * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. - **/ -static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 sub, u16 len, - const char *driver_ver) -{ - struct ixgbe_hic_drv_info2 fw_cmd; - s32 ret_val; - int i; + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } - if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string))) - return IXGBE_ERR_INVALID_ARGUMENT; + status = ixgbe_calc_eeprom_checksum_X550(hw); + if (status < 0) + return status; - fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; - fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; - fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; - fw_cmd.port_num = (u8)hw->bus.func; - fw_cmd.ver_maj = maj; - fw_cmd.ver_min = min; - fw_cmd.ver_build = build; - fw_cmd.ver_sub = sub; - fw_cmd.hdr.checksum = 0; - memcpy(fw_cmd.driver_string, driver_ver, len); - fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, - (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + checksum = (u16)(status & 0xffff); - for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { - ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, - sizeof(fw_cmd), - IXGBE_HI_COMMAND_TIMEOUT, - true); - if (ret_val) - continue; + status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + checksum); + if (status) + return status; - if (fw_cmd.hdr.cmd_or_resp.ret_status != - FW_CEM_RESP_STATUS_SUCCESS) - return IXGBE_ERR_HOST_INTERFACE_COMMAND; - return 0; - } + status = ixgbe_update_flash_X550(hw); - return ret_val; + return status; } -/** ixgbe_get_lcd_x550em - Determine lowest common denominator +/** + * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to 
Flash device * @hw: pointer to hardware structure - * @lcd_speed: pointer to lowest common link speed * - * Determine lowest common link speed with link partner. + * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. **/ -static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, - ixgbe_link_speed *lcd_speed) +s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) { - u16 an_lp_status; - s32 status; - u16 word = hw->eeprom.ctrl_word_3; - - *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; + s32 status = IXGBE_SUCCESS; + union ixgbe_hic_hdr2 buffer; - status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, - MDIO_MMD_AN, - &an_lp_status); - if (status) - return status; + DEBUGFUNC("ixgbe_update_flash_X550"); - /* If link partner advertised 1G, return 1G */ - if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) { - *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL; - return status; - } + buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; + buffer.req.buf_lenh = 0; + buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; + buffer.req.checksum = FW_DEFAULT_CHECKSUM; - /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */ - if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) || - (word & NVM_INIT_CTRL_3_D10GMP_PORT0)) - return status; + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); - /* Link partner not capable of lower speeds, return 10G */ - *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL; return status; } /** - * ixgbe_setup_fc_x550em - Set up flow control - * @hw: pointer to hardware structure - */ -static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) + * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. 
+ **/ +u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw) { - bool pause, asm_dir; - u32 reg_val; - s32 rc = 0; + u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u16 ext_ability = 0; - /* Validate the requested mode */ - if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { - hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; - } + DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em"); - /* 10gig parts do not have a word in the EEPROM to determine the - * default flow control setting, so we explicitly set it to full. - */ - if (hw->fc.requested_mode == ixgbe_fc_default) - hw->fc.requested_mode = ixgbe_fc_full; + hw->phy.ops.identify(hw); - /* Determine PAUSE and ASM_DIR bits. */ - switch (hw->fc.requested_mode) { - case ixgbe_fc_none: - pause = false; - asm_dir = false; + switch (hw->phy.type) { + case ixgbe_phy_x550em_kr: + if (hw->mac.type == ixgbe_mac_X550EM_a) { + if (hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { + physical_layer = + IXGBE_PHYSICAL_LAYER_2500BASE_KX; + break; + } else if (hw->device_id == + IXGBE_DEV_ID_X550EM_A_KR_L) { + physical_layer = + IXGBE_PHYSICAL_LAYER_1000BASE_KX; + break; + } + } + /* fall through */ + case ixgbe_phy_x550em_xfi: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR | + IXGBE_PHYSICAL_LAYER_1000BASE_KX; break; - case ixgbe_fc_tx_pause: - pause = false; - asm_dir = true; + case ixgbe_phy_x550em_kx4: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | + IXGBE_PHYSICAL_LAYER_1000BASE_KX; break; - case ixgbe_fc_rx_pause: - /* Rx Flow control is enabled and Tx Flow control is - * disabled by software override. Since there really - * isn't a way to advertise that we are capable of RX - * Pause ONLY, we will advertise that we support both - * symmetric and asymmetric Rx PAUSE, as such we fall - * through to the fc_full statement. Later, we will - * disable the adapter's ability to send PAUSE frames. 
- */ - /* Fallthrough */ - case ixgbe_fc_full: - pause = true; - asm_dir = true; + case ixgbe_phy_x550em_ext_t: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; break; - default: - hw_err(hw, "Flow control param set incorrectly\n"); - return IXGBE_ERR_CONFIG; - } - - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_A_KR: - case IXGBE_DEV_ID_X550EM_A_KR_L: - rc = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, - ®_val); - if (rc) - return rc; - - reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | - IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); - if (pause) - reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; - if (asm_dir) - reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; - rc = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, - reg_val); - - /* This device does not fully support AN. 
*/ - hw->fc.disable_fc_autoneg = true; + case ixgbe_phy_fw: + if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL) + physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T; break; - case IXGBE_DEV_ID_X550EM_X_XFI: - hw->fc.disable_fc_autoneg = true; + case ixgbe_phy_sgmii: + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; + break; + case ixgbe_phy_ext_1g_t: + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; break; default: break; } - return rc; + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) + physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw); + + return physical_layer; } /** - * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37 - * @hw: pointer to hardware structure + * ixgbe_get_bus_info_x550em - Set PCI bus info + * @hw: pointer to hardware structure + * + * Sets bus link width and speed to unknown because X550em is + * not a PCI device. **/ -static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) +s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) { - u32 link_s1, lp_an_page_low, an_cntl_1; - s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; - ixgbe_link_speed speed; - bool link_up; - /* AN should have completed when the cable was plugged in. - * Look for reasons to bail out. Bail out if: - * - FC autoneg is disabled, or if - * - link is not up. 
- */ - if (hw->fc.disable_fc_autoneg) { - hw_err(hw, "Flow control autoneg is disabled"); - goto out; - } + DEBUGFUNC("ixgbe_get_bus_info_x550em"); - hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (!link_up) { - hw_err(hw, "The link is down"); - goto out; - } + hw->bus.width = ixgbe_bus_width_unknown; + hw->bus.speed = ixgbe_bus_speed_unknown; - /* Check at auto-negotiation has completed */ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_S1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1); + hw->mac.ops.set_lan_id(hw); - if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) { - hw_dbg(hw, "Auto-Negotiation did not complete\n"); - status = IXGBE_ERR_FC_NOT_NEGOTIATED; - goto out; - } + return IXGBE_SUCCESS; +} - /* Read the 10g AN autoc and LP ability registers and resolve - * local flow control settings accordingly - */ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1); +/** + * ixgbe_disable_rx_x550 - Disable RX unit + * + * Enables the Rx DMA unit for x550 + **/ +void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) +{ + u32 rxctrl, pfdtxgswc; + s32 status; + struct ixgbe_hic_disable_rxen fw_cmd; - if (status) { - hw_dbg(hw, "Auto-Negotiation did not complete\n"); - goto out; - } + DEBUGFUNC("ixgbe_enable_rx_dma_x550"); - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low); + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } - if (status) { - hw_dbg(hw, "Auto-Negotiation did not complete\n"); - goto out; - } + fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; + fw_cmd.hdr.buf_len = 
FW_DISABLE_RXEN_LEN; + fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + fw_cmd.port_number = (u8)hw->bus.lan_id; - status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low, - IXGBE_KRM_AN_CNTL_1_SYM_PAUSE, - IXGBE_KRM_AN_CNTL_1_ASM_PAUSE, - IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE, - IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE); + status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(struct ixgbe_hic_disable_rxen), + IXGBE_HI_COMMAND_TIMEOUT, true); -out: - if (!status) { - hw->fc.fc_was_autonegged = true; - } else { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; + /* If we fail - disable RX using register write */ + if (status) { + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } + } } } /** - * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings - * @hw: pointer to hardware structure - **/ -static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw) -{ - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; -} - -/** ixgbe_enter_lplu_x550em - Transition to low power states + * ixgbe_enter_lplu_x550em - Transition to low power states * @hw: pointer to hardware structure * - * Configures Low Power Link Up on transition to low power states - * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting - * the X557 PHY immediately prior to entering LPLU. + * Configures Low Power Link Up on transition to low power states + * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the + * X557 PHY immediately prior to entering LPLU. **/ -static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) +s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) { u16 an_10g_cntl_reg, autoneg_reg, speed; s32 status; @@ -3035,45 +3829,52 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) u32 save_autoneg; bool link_up; + /* SW LPLU not required on later HW revisions. 
*/ + if ((hw->mac.type == ixgbe_mac_X550EM_x) && + (IXGBE_FUSES0_REV_MASK & + IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) + return IXGBE_SUCCESS; + /* If blocked by MNG FW, then don't restart AN */ if (ixgbe_check_reset_blocked(hw)) - return 0; + return IXGBE_SUCCESS; status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); - if (status) + if (status != IXGBE_SUCCESS) return status; - status = hw->eeprom.ops.read(hw, NVM_INIT_CTRL_3, - &hw->eeprom.ctrl_word_3); - if (status) + status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3); + + if (status != IXGBE_SUCCESS) return status; - /* If link is down, LPLU disabled in NVM, WoL disabled, or - * manageability disabled, then force link down by entering - * low power mode. + /* If link is down, LPLU disabled in NVM, WoL disabled, or manageability + * disabled, then force link down by entering low power mode. */ if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) || !(hw->wol_enabled || ixgbe_mng_present(hw))) - return ixgbe_set_copper_phy_power(hw, false); + return ixgbe_set_copper_phy_power(hw, FALSE); /* Determine LCD */ status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed); - if (status) + + if (status != IXGBE_SUCCESS) return status; /* If no valid LCD link speed, then force link down and exit. 
*/ if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN) - return ixgbe_set_copper_phy_power(hw, false); + return ixgbe_set_copper_phy_power(hw, FALSE); status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, - MDIO_MMD_AN, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &speed); - if (status) + + if (status != IXGBE_SUCCESS) return status; /* If no link now, speed is invalid so take link down */ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); - if (status) + if (status != IXGBE_SUCCESS) return ixgbe_set_copper_phy_power(hw, false); /* clear everything but the speed bits */ @@ -3088,22 +3889,25 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) /* Clear AN completed indication */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, - MDIO_MMD_AN, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); - if (status) + + if (status != IXGBE_SUCCESS) return status; - status = hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, - MDIO_MMD_AN, - &an_10g_cntl_reg); - if (status) + status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &an_10g_cntl_reg); + + if (status != IXGBE_SUCCESS) return status; status = hw->phy.ops.read_reg(hw, - IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, - MDIO_MMD_AN, - &autoneg_reg); - if (status) + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + if (status != IXGBE_SUCCESS) return status; save_autoneg = hw->phy.autoneg_advertised; @@ -3118,449 +3922,275 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) } /** - * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs - * @hw: pointer to hardware structure - */ -static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) -{ - u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; - s32 rc; - - if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) - return 0; - - rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store); - if (rc) - return rc; - memset(store, 0, sizeof(store)); - - rc = 
ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store); - if (rc) - return rc; - - return ixgbe_setup_fw_link(hw); -} - -/** - * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp - * @hw: pointer to hardware structure - */ -static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) -{ - u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; - s32 rc; - - rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store); - if (rc) - return rc; - - if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) { - ixgbe_shutdown_fw_phy(hw); - return IXGBE_ERR_OVERTEMP; - } - return 0; -} - -/** - * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register - * @hw: pointer to hardware structure - * - * Read NW_MNG_IF_SEL register and save field values. - */ -static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) -{ - /* Save NW management interface connected on board. This is used - * to determine internal PHY mode. - */ - hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); - - /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set - * PHY address. This register field was has only been used for X552. - */ - if (hw->mac.type == ixgbe_mac_x550em_a && - hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) { - hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel & - IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> - IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; -#if 1 /* Since by Intel FW(LEK8),LAN controller 1 default set port 0 use phy address 0 - * and port 1 use phy address 1, we swap it for Porsche2 platform. - * By hilbert. - */ - if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { - /*hw_err(hw, "####swap phy address used for different lan id in LAN conroller-1\n");*/ - hw->phy.mdio.prtad = (hw->bus.lan_id == 0) ? 
(1) : (0); - /*hw_err(hw, "####lan id: %d, phy address:%d\n", - hw->bus.lan_id, - hw->phy.mdio.prtad);*/ - } -#endif - } -} - -/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init - * @hw: pointer to hardware structure - * - * Initialize any function pointers that were not able to be - * set during init_shared_code because the PHY/SFP type was - * not known. Perform the SFP init if necessary. - **/ -static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) -{ - struct ixgbe_phy_info *phy = &hw->phy; - s32 ret_val; - - hw->mac.ops.set_lan_id(hw); - - ixgbe_read_mng_if_sel_x550em(hw); - - if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { - phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; - ixgbe_setup_mux_ctl(hw); - } - - /* Identify the PHY or SFP module */ - ret_val = phy->ops.identify(hw); - - /* Setup function pointers based on detected hardware */ - ixgbe_init_mac_link_ops_X550em(hw); - if (phy->sfp_type != ixgbe_sfp_type_unknown) - phy->ops.reset = NULL; - - /* Set functions pointers based on phy type */ - switch (hw->phy.type) { - case ixgbe_phy_x550em_kx4: - phy->ops.setup_link = NULL; - phy->ops.read_reg = ixgbe_read_phy_reg_x550em; - phy->ops.write_reg = ixgbe_write_phy_reg_x550em; - break; - case ixgbe_phy_x550em_kr: - phy->ops.setup_link = ixgbe_setup_kr_x550em; - phy->ops.read_reg = ixgbe_read_phy_reg_x550em; - phy->ops.write_reg = ixgbe_write_phy_reg_x550em; - break; - case ixgbe_phy_x550em_xfi: - /* link is managed by HW */ - phy->ops.setup_link = NULL; - phy->ops.read_reg = ixgbe_read_phy_reg_x550em; - phy->ops.write_reg = ixgbe_write_phy_reg_x550em; - break; - case ixgbe_phy_x550em_ext_t: - /* Save NW management interface connected on board. This is used - * to determine internal PHY mode - */ - phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); - - /* If internal link mode is XFI, then setup iXFI internal link, - * else setup KR now. 
- */ - phy->ops.setup_internal_link = - ixgbe_setup_internal_phy_t_x550em; - - /* setup SW LPLU only for first revision */ - if (hw->mac.type == ixgbe_mac_X550EM_x && - !(IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)) & - IXGBE_FUSES0_REV_MASK)) - phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em; - - phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; - phy->ops.reset = ixgbe_reset_phy_t_X550em; - break; - case ixgbe_phy_sgmii: - phy->ops.setup_link = NULL; - break; - case ixgbe_phy_fw: - phy->ops.setup_link = ixgbe_setup_fw_link; - phy->ops.reset = ixgbe_reset_phy_fw; - break; - case ixgbe_phy_ext_1g_t: - phy->ops.setup_link = NULL; - phy->ops.read_reg = NULL; - phy->ops.write_reg = NULL; - phy->ops.reset = NULL; - break; - default: - break; - } - - return ret_val; -} - -/** ixgbe_get_media_type_X550em - Get media type + * ixgbe_get_lcd_x550em - Determine lowest common denominator * @hw: pointer to hardware structure + * @lcd_speed: pointer to lowest common link speed * - * Returns the media type (fiber, copper, backplane) - * - */ -static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) -{ - enum ixgbe_media_type media_type; - - /* Detect if there is a copper PHY attached. 
*/ - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_A_SGMII: - case IXGBE_DEV_ID_X550EM_A_SGMII_L: - hw->phy.type = ixgbe_phy_sgmii; - /* Fallthrough */ - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_X_KX4: - case IXGBE_DEV_ID_X550EM_X_XFI: - case IXGBE_DEV_ID_X550EM_A_KR: - case IXGBE_DEV_ID_X550EM_A_KR_L: - media_type = ixgbe_media_type_backplane; - break; - case IXGBE_DEV_ID_X550EM_X_SFP: - case IXGBE_DEV_ID_X550EM_A_SFP: - case IXGBE_DEV_ID_X550EM_A_SFP_N: - media_type = ixgbe_media_type_fiber; - break; - case IXGBE_DEV_ID_X550EM_X_1G_T: - case IXGBE_DEV_ID_X550EM_X_10G_T: - case IXGBE_DEV_ID_X550EM_A_10G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T_L: - media_type = ixgbe_media_type_copper; - break; - default: - media_type = ixgbe_media_type_unknown; - break; - } - return media_type; -} - -/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. - ** @hw: pointer to hardware structure + * Determine lowest common link speed with link partner. **/ -static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) +s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed) { + u16 an_lp_status; s32 status; - u16 reg; + u16 word = hw->eeprom.ctrl_word_3; - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_TX_VENDOR_ALARMS_3, - MDIO_MMD_PMAPMD, - ®); - if (status) - return status; + *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; - /* If PHY FW reset completed bit is set then this is the first - * SW instance after a power on so the PHY FW must be un-stalled. 
- */ - if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_GLOBAL_RES_PR_10, - MDIO_MMD_VEND1, - ®); - if (status) - return status; + status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &an_lp_status); - reg &= ~IXGBE_MDIO_POWER_UP_STALL; + if (status != IXGBE_SUCCESS) + return status; - status = hw->phy.ops.write_reg(hw, - IXGBE_MDIO_GLOBAL_RES_PR_10, - MDIO_MMD_VEND1, - reg); - if (status) - return status; + /* If link partner advertised 1G, return 1G */ + if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) { + *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL; + return status; } + /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */ + if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) || + (word & NVM_INIT_CTRL_3_D10GMP_PORT0)) + return status; + + /* Link partner not capable of lower speeds, return 10G */ + *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL; return status; } /** - * ixgbe_set_mdio_speed - Set MDIO clock speed - * @hw: pointer to hardware structure - */ -static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) + * ixgbe_setup_fc_X550em - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw) { - u32 hlreg0; + s32 ret_val = IXGBE_SUCCESS; + u32 pause, asm_dir, reg_val; + + DEBUGFUNC("ixgbe_setup_fc_X550em"); + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* Determine PAUSE and ASM_DIR bits. 
*/ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + pause = 0; + asm_dir = 0; + break; + case ixgbe_fc_tx_pause: + pause = 0; + asm_dir = 1; + break; + case ixgbe_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case ixgbe_fc_full: + pause = 1; + asm_dir = 1; + break; + default: + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + } switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_X_10G_T: - case IXGBE_DEV_ID_X550EM_A_SGMII: - case IXGBE_DEV_ID_X550EM_A_SGMII_L: - case IXGBE_DEV_ID_X550EM_A_10G_T: - case IXGBE_DEV_ID_X550EM_A_SFP: - /* Config MDIO clock speed before the first MDIO PHY access */ - hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - hlreg0 &= ~IXGBE_HLREG0_MDCSPD; - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + ret_val = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (ret_val != IXGBE_SUCCESS) + goto out; + reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); + if (pause) + reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; + if (asm_dir) + reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + ret_val = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + /* This device does not fully support AN. 
*/ + hw->fc.disable_fc_autoneg = true; break; - case IXGBE_DEV_ID_X550EM_A_1G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T_L: - /* Select fast MDIO clock speed for these devices */ - hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - hlreg0 |= IXGBE_HLREG0_MDCSPD; - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + case IXGBE_DEV_ID_X550EM_X_XFI: + hw->fc.disable_fc_autoneg = true; break; default: break; } + +out: + return ret_val; } -/** ixgbe_reset_hw_X550em - Perform hardware reset - ** @hw: pointer to hardware structure - ** - ** Resets the hardware by resetting the transmit and receive units, masks - ** and clears all interrupts, perform a PHY reset, and perform a link (MAC) - ** reset. +/** + * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. **/ -static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) +void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) { - ixgbe_link_speed link_speed; - s32 status; - u32 ctrl = 0; - u32 i; - bool link_up = false; - u32 swfw_mask = hw->phy.phy_semaphore_mask; - - /* Call adapter stop to disable Tx/Rx and clear interrupts */ - status = hw->mac.ops.stop_adapter(hw); - if (status) - return status; - - /* flush pending Tx transactions */ - ixgbe_clear_tx_pending(hw); - - /* PHY ops must be identified and initialized prior to reset */ - - /* Identify PHY and related function pointers */ - status = hw->phy.ops.init(hw); + u32 link_s1, lp_an_page_low, an_cntl_1; + s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; + ixgbe_link_speed speed; + bool link_up; - /* start the external PHY */ - if (hw->phy.type == ixgbe_phy_x550em_ext_t) { - status = ixgbe_init_ext_t_x550em(hw); - if (status) - return status; + /* AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. 
+ */ + if (hw->fc.disable_fc_autoneg) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "Flow control autoneg is disabled"); + goto out; } - /* Setup SFP module if there is one present. */ - if (hw->phy.sfp_setup_needed) { - status = hw->mac.ops.setup_sfp(hw); - hw->phy.sfp_setup_needed = false; + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) { + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); + goto out; } - if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) - return status; + /* Check at auto-negotiation has completed */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_S1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1); - /* Reset PHY */ - if (!hw->phy.reset_disable && hw->phy.ops.reset) - hw->phy.ops.reset(hw); + if (status != IXGBE_SUCCESS || + (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + status = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } -mac_reset_top: - /* Issue global reset to the MAC. Needs to be SW reset if link is up. - * If link reset is used when link is up, it might reset the PHY when - * mng is using it. If link is down or the flag to force full link - * reset is set, then perform link reset. 
+ /* Read the 10g AN autoc and LP ability registers and resolve + * local flow control settings accordingly */ - ctrl = IXGBE_CTRL_LNK_RST; - - if (!hw->force_full_reset) { - hw->mac.ops.check_link(hw, &link_speed, &link_up, false); - if (link_up) - ctrl = IXGBE_CTRL_RST; - } + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1); - status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); - if (status) { - hw_dbg(hw, "semaphore failed with %d", status); - return IXGBE_ERR_SWFW_SYNC; + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + goto out; } - ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); - IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); - IXGBE_WRITE_FLUSH(hw); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - usleep_range(1000, 1200); - - /* Poll for reset bit to self-clear meaning reset is complete */ - for (i = 0; i < 10; i++) { - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - if (!(ctrl & IXGBE_CTRL_RST_MASK)) - break; - udelay(1); - } + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low); - if (ctrl & IXGBE_CTRL_RST_MASK) { - status = IXGBE_ERR_RESET_FAILED; - hw_dbg(hw, "Reset polling failed to complete.\n"); + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + goto out; } - msleep(50); + status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low, + IXGBE_KRM_AN_CNTL_1_SYM_PAUSE, + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE, + IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE, + IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE); - /* Double resets are required for recovery from certain error - * clear the multicast table. Also reset num_rar_entries to 128, - * since we modify this value when programming the SAN MAC address. 
- */ - if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { - hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; - goto mac_reset_top; +out: + if (status == IXGBE_SUCCESS) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; } - - /* Store the permanent mac address */ - hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); - - /* Store MAC address from RAR0, clear receive address registers, and - * clear the multicast table. Also reset num_rar_entries to 128, - * since we modify this value when programming the SAN MAC address. - */ - hw->mac.num_rar_entries = 128; - hw->mac.ops.init_rx_addrs(hw); - - ixgbe_set_mdio_speed(hw); - - if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) - ixgbe_setup_mux_ctl(hw); - - return status; } -/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype - * anti-spoofing - * @hw: pointer to hardware structure - * @enable: enable or disable switch for Ethertype anti-spoofing - * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing +/** + * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings + * @hw: pointer to hardware structure + * **/ -static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, - bool enable, int vf) +void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw) { - int vf_target_reg = vf >> 3; - int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT; - u32 pfvfspoof; - - pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); - if (enable) - pfvfspoof |= BIT(vf_target_shift); - else - pfvfspoof &= ~BIT(vf_target_shift); - - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; } -/** ixgbe_set_source_address_pruning_X550 - Enable/Disbale src address pruning +/** + * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 * @hw: pointer to hardware structure - * @enable: enable 
or disable source address pruning - * @pool: Rx pool to set source address pruning for + * + * Enable flow control according to IEEE clause 37. **/ -static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, - bool enable, - unsigned int pool) +void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) { - u64 pfflp; + s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; + u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; + ixgbe_link_speed speed; + bool link_up; - /* max rx pool is 63 */ - if (pool > 63) - return; + /* AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. + */ + if (hw->fc.disable_fc_autoneg) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "Flow control autoneg is disabled"); + goto out; + } - pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL); - pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32; + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) { + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); + goto out; + } - if (enable) - pfflp |= (1ULL << pool); - else - pfflp &= ~(1ULL << pool); + /* Check if auto-negotiation has completed */ + status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); + if (status != IXGBE_SUCCESS || + !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + status = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } - IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp); - IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32)); + /* Negotiate the flow control */ + status = ixgbe_negotiate_fc(hw, info[0], info[0], + FW_PHY_ACT_GET_LINK_INFO_FC_RX, + FW_PHY_ACT_GET_LINK_INFO_FC_TX, + FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, + FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); + +out: + if (status == IXGBE_SUCCESS) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } } /** @@ -3569,14 +4199,17 @@ 
static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, * * Called at init time to set up flow control. **/ -static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) +s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) { - s32 status = 0; + s32 status = IXGBE_SUCCESS; u32 an_cntl = 0; + DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a"); + /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { - hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); return IXGBE_ERR_INVALID_LINK_SETTINGS; } @@ -3591,8 +4224,8 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl); - if (status) { - hw_dbg(hw, "Auto-Negotiation did not complete\n"); + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Auto-Negotiation did not complete\n"); return status; } @@ -3633,7 +4266,8 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; break; default: - hw_err(hw, "Flow control param set incorrectly\n"); + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); return IXGBE_ERR_CONFIG; } @@ -3652,7 +4286,7 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * @state: set mux if 1, clear if 0 */ -static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state) +STATIC void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state) { u32 esdp; @@ -3668,16 +4302,18 @@ static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state) } /** - * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to acquire + * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire * 
- * Acquires the SWFW semaphore and sets the I2C MUX - */ -static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) + * Acquires the SWFW semaphore and sets the I2C MUX + **/ +s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) { s32 status; + DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em"); + status = ixgbe_acquire_swfw_sync_X540(hw, mask); if (status) return status; @@ -3685,18 +4321,20 @@ static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) if (mask & IXGBE_GSSR_I2C_MASK) ixgbe_set_mux(hw, 1); - return 0; + return IXGBE_SUCCESS; } /** - * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to release + * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release * - * Releases the SWFW semaphore and sets the I2C MUX - */ -static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) + * Releases the SWFW semaphore and sets the I2C MUX + **/ +void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) { + DEBUGFUNC("ixgbe_release_swfw_sync_X550em"); + if (mask & IXGBE_GSSR_I2C_MASK) ixgbe_set_mux(hw, 0); @@ -3704,51 +4342,68 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) } /** - * ixgbe_acquire_swfw_sync_x550em_a - Acquire SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to acquire + * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire * - * Acquires the SWFW semaphore and get the shared PHY token as needed + * Acquires the SWFW semaphore and get the shared phy token as needed */ -static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) +STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) { u32 hmask = mask & 
~IXGBE_GSSR_TOKEN_SM; int retries = FW_PHY_TOKEN_RETRIES; - s32 status; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a"); while (--retries) { - status = 0; + status = IXGBE_SUCCESS; if (hmask) status = ixgbe_acquire_swfw_sync_X540(hw, hmask); - if (status) + if (status) { + DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n", + status); return status; + } if (!(mask & IXGBE_GSSR_TOKEN_SM)) - return 0; + return IXGBE_SUCCESS; status = ixgbe_get_phy_token(hw); - if (!status) - return 0; + if (status == IXGBE_ERR_TOKEN_RETRY) + DEBUGOUT1("Could not acquire PHY token, Status = %d\n", + status); + + if (status == IXGBE_SUCCESS) + return IXGBE_SUCCESS; + if (hmask) ixgbe_release_swfw_sync_X540(hw, hmask); - if (status != IXGBE_ERR_TOKEN_RETRY) + + if (status != IXGBE_ERR_TOKEN_RETRY) { + DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n", + status); return status; - msleep(FW_PHY_TOKEN_DELAY); + } } + DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n", + hw->phy.id); return status; } /** - * ixgbe_release_swfw_sync_x550em_a - Release SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to release + * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release * - * Release the SWFW semaphore and puts the shared PHY token as needed + * Releases the SWFW semaphore and puts the shared phy token as needed */ -static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) +STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) { u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; + DEBUGFUNC("ixgbe_release_swfw_sync_X550a"); + if (mask & IXGBE_GSSR_TOKEN_SM) ixgbe_put_phy_token(hw); @@ -3757,20 +4412,22 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) } /** - * ixgbe_read_phy_reg_x550a - Reads specified PHY register - * @hw: 
pointer to hardware structure - * @reg_addr: 32 bit address of PHY register to read - * @phy_data: Pointer to read data from PHY register + * ixgbe_read_phy_reg_x550a - Reads specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register * - * Reads a value from a specified PHY register using the SWFW lock and PHY - * Token. The PHY Token is needed since the MDIO is shared between to MAC - * instances. - */ -static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data) + * Reads a value from a specified PHY register using the SWFW lock and PHY + * Token. The PHY Token is needed since the MDIO is shared between to MAC + * instances. + **/ +s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) { - u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; s32 status; + u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; + + DEBUGFUNC("ixgbe_read_phy_reg_x550a"); if (hw->mac.ops.acquire_swfw_sync(hw, mask)) return IXGBE_ERR_SWFW_SYNC; @@ -3783,341 +4440,272 @@ static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, } /** - * ixgbe_write_phy_reg_x550a - Writes specified PHY register + * ixgbe_write_phy_reg_x550a - Writes specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + * + * Writes a value to specified PHY register using the SWFW lock and PHY Token. + * The PHY Token is needed since the MDIO is shared between to MAC instances. 
+ **/ +s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + s32 status; + u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; + + DEBUGFUNC("ixgbe_write_phy_reg_x550a"); + + if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) { + status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, + phy_data); + hw->mac.ops.release_swfw_sync(hw, mask); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 5 bit device type - * @phy_data: Data to write to the PHY register * - * Writes a value to specified PHY register using the SWFW lock and PHY Token. - * The PHY Token is needed since the MDIO is shared between to MAC instances. + * Handle external Base T PHY interrupt. If high temperature + * failure alarm then return error, else if link status change + * then setup internal/external PHY link + * + * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature + * failure alarm, else return PHY access status. */ -static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data) +s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) +{ + bool lsc; + u32 status; + + status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); + + if (status != IXGBE_SUCCESS) + return status; + + if (lsc) + return ixgbe_setup_internal_phy(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Setup internal/external PHY link speed based on link speed, then set + * external PHY auto advertised link speed. 
+ * + * Returns error status for any failure + **/ +s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { - u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; s32 status; + ixgbe_link_speed force_speed; - if (hw->mac.ops.acquire_swfw_sync(hw, mask)) - return IXGBE_ERR_SWFW_SYNC; + DEBUGFUNC("ixgbe_setup_mac_link_t_X550em"); -#if 0 /* To use C22 MDI access function created by our own. - * By hilbert - */ - status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data); -#else - status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, phy_data); -#endif - hw->mac.ops.release_swfw_sync(hw, mask); + /* Setup internal/external PHY link speed to iXFI (10G), unless + * only 1G is auto advertised then setup KX link. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + else + force_speed = IXGBE_LINK_SPEED_1GB_FULL; - return status; + /* If X552 and internal link mode is XFI, then setup XFI internal link. 
+ */ + if (hw->mac.type == ixgbe_mac_X550EM_x && + !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + status = ixgbe_setup_ixfi_x550em(hw, &force_speed); + + if (status != IXGBE_SUCCESS) + return status; + } + + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); } -#define X550_COMMON_MAC \ - .init_hw = &ixgbe_init_hw_generic, \ - .start_hw = &ixgbe_start_hw_X540, \ - .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, \ - .enable_rx_dma = &ixgbe_enable_rx_dma_generic, \ - .get_mac_addr = &ixgbe_get_mac_addr_generic, \ - .get_device_caps = &ixgbe_get_device_caps_generic, \ - .stop_adapter = &ixgbe_stop_adapter_generic, \ - .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, \ - .read_analog_reg8 = NULL, \ - .write_analog_reg8 = NULL, \ - .set_rxpba = &ixgbe_set_rxpba_generic, \ - .check_link = &ixgbe_check_mac_link_generic, \ - .blink_led_start = &ixgbe_blink_led_start_X540, \ - .blink_led_stop = &ixgbe_blink_led_stop_X540, \ - .set_rar = &ixgbe_set_rar_generic, \ - .clear_rar = &ixgbe_clear_rar_generic, \ - .set_vmdq = &ixgbe_set_vmdq_generic, \ - .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, \ - .clear_vmdq = &ixgbe_clear_vmdq_generic, \ - .init_rx_addrs = &ixgbe_init_rx_addrs_generic, \ - .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, \ - .enable_mc = &ixgbe_enable_mc_generic, \ - .disable_mc = &ixgbe_disable_mc_generic, \ - .clear_vfta = &ixgbe_clear_vfta_generic, \ - .set_vfta = &ixgbe_set_vfta_generic, \ - .fc_enable = &ixgbe_fc_enable_generic, \ - .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_x550, \ - .init_uta_tables = &ixgbe_init_uta_tables_generic, \ - .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ - .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ - .set_source_address_pruning = \ - &ixgbe_set_source_address_pruning_X550, \ - .set_ethertype_anti_spoofing = \ - &ixgbe_set_ethertype_anti_spoofing_X550, \ - .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \ - .enable_rx_buff = 
&ixgbe_enable_rx_buff_generic, \ - .get_thermal_sensor_data = NULL, \ - .init_thermal_sensor_thresh = NULL, \ - .enable_rx = &ixgbe_enable_rx_generic, \ - .disable_rx = &ixgbe_disable_rx_x550, \ - -static const struct ixgbe_mac_operations mac_ops_X550 = { - X550_COMMON_MAC - .led_on = ixgbe_led_on_generic, - .led_off = ixgbe_led_off_generic, - .init_led_link_act = ixgbe_init_led_link_act_generic, - .reset_hw = &ixgbe_reset_hw_X540, - .get_media_type = &ixgbe_get_media_type_X540, - .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, - .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, - .setup_link = &ixgbe_setup_mac_link_X540, - .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, - .get_bus_info = &ixgbe_get_bus_info_generic, - .setup_sfp = NULL, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, - .release_swfw_sync = &ixgbe_release_swfw_sync_X540, - .init_swfw_sync = &ixgbe_init_swfw_sync_X540, - .prot_autoc_read = prot_autoc_read_generic, - .prot_autoc_write = prot_autoc_write_generic, - .setup_fc = ixgbe_setup_fc_generic, - .fc_autoneg = ixgbe_fc_autoneg, -}; +/** + * ixgbe_check_link_t_X550em - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Check that both the MAC and X557 external PHY have link. 
+ **/ +s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u32 status; + u16 i, autoneg_status = 0; -static const struct ixgbe_mac_operations mac_ops_X550EM_x = { - X550_COMMON_MAC - .led_on = ixgbe_led_on_t_x550em, - .led_off = ixgbe_led_off_t_x550em, - .init_led_link_act = ixgbe_init_led_link_act_generic, - .reset_hw = &ixgbe_reset_hw_X550em, - .get_media_type = &ixgbe_get_media_type_X550em, - .get_san_mac_addr = NULL, - .get_wwn_prefix = NULL, - .setup_link = &ixgbe_setup_mac_link_X540, - .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, - .get_bus_info = &ixgbe_get_bus_info_X550em, - .setup_sfp = ixgbe_setup_sfp_modules_X550em, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, - .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, - .init_swfw_sync = &ixgbe_init_swfw_sync_X540, - .setup_fc = NULL, /* defined later */ - .fc_autoneg = ixgbe_fc_autoneg, - .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550, - .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, -}; + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; -static const struct ixgbe_mac_operations mac_ops_X550EM_x_fw = { - X550_COMMON_MAC - .led_on = NULL, - .led_off = NULL, - .init_led_link_act = NULL, - .reset_hw = &ixgbe_reset_hw_X550em, - .get_media_type = &ixgbe_get_media_type_X550em, - .get_san_mac_addr = NULL, - .get_wwn_prefix = NULL, - .setup_link = &ixgbe_setup_mac_link_X540, - .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, - .get_bus_info = &ixgbe_get_bus_info_X550em, - .setup_sfp = ixgbe_setup_sfp_modules_X550em, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, - .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, - .init_swfw_sync = &ixgbe_init_swfw_sync_X540, - .setup_fc = NULL, - .fc_autoneg = ixgbe_fc_autoneg, - .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550, - .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, -}; + status = 
ixgbe_check_mac_link_generic(hw, speed, link_up, + link_up_wait_to_complete); -static struct ixgbe_mac_operations mac_ops_x550em_a = { - X550_COMMON_MAC - .led_on = ixgbe_led_on_t_x550em, - .led_off = ixgbe_led_off_t_x550em, - .init_led_link_act = ixgbe_init_led_link_act_generic, - .reset_hw = ixgbe_reset_hw_X550em, - .get_media_type = ixgbe_get_media_type_X550em, - .get_san_mac_addr = NULL, - .get_wwn_prefix = NULL, - .setup_link = &ixgbe_setup_mac_link_X540, - .get_link_capabilities = ixgbe_get_link_capabilities_X550em, - .get_bus_info = ixgbe_get_bus_info_X550em, - .setup_sfp = ixgbe_setup_sfp_modules_X550em, - .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, - .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, - .setup_fc = ixgbe_setup_fc_x550em, - .fc_autoneg = ixgbe_fc_autoneg, - .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, - .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, -}; + /* If check link fails or MAC link is not up, then return */ + if (status != IXGBE_SUCCESS || !(*link_up)) + return status; -static struct ixgbe_mac_operations mac_ops_x550em_a_fw = { - X550_COMMON_MAC - .led_on = ixgbe_led_on_generic, - .led_off = ixgbe_led_off_generic, - .init_led_link_act = ixgbe_init_led_link_act_generic, - .reset_hw = ixgbe_reset_hw_X550em, - .get_media_type = ixgbe_get_media_type_X550em, - .get_san_mac_addr = NULL, - .get_wwn_prefix = NULL, - .setup_link = NULL, /* defined later */ - .get_link_capabilities = ixgbe_get_link_capabilities_X550em, - .get_bus_info = ixgbe_get_bus_info_X550em, - .setup_sfp = ixgbe_setup_sfp_modules_X550em, - .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, - .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, - .setup_fc = ixgbe_setup_fc_x550em, - .fc_autoneg = ixgbe_fc_autoneg, - .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, - .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, -}; + /* MAC link is up, so check external PHY link. + * X557 PHY. 
Link status is latching low, and can only be used to detect + * link drop, and not the current status of the link without performing + * back-to-back reads. + */ + for (i = 0; i < 2; i++) { + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); -#define X550_COMMON_EEP \ - .read = &ixgbe_read_ee_hostif_X550, \ - .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \ - .write = &ixgbe_write_ee_hostif_X550, \ - .write_buffer = &ixgbe_write_ee_hostif_buffer_X550, \ - .validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \ - .update_checksum = &ixgbe_update_eeprom_checksum_X550, \ - .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \ - -static const struct ixgbe_eeprom_operations eeprom_ops_X550 = { - X550_COMMON_EEP - .init_params = &ixgbe_init_eeprom_params_X550, -}; + if (status != IXGBE_SUCCESS) + return status; + } -static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { - X550_COMMON_EEP - .init_params = &ixgbe_init_eeprom_params_X540, -}; + /* If external PHY link is not up, then indicate link not up */ + if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) + *link_up = false; -#define X550_COMMON_PHY \ - .identify_sfp = &ixgbe_identify_module_generic, \ - .reset = NULL, \ - .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, \ - .read_i2c_byte = &ixgbe_read_i2c_byte_generic, \ - .write_i2c_byte = &ixgbe_write_i2c_byte_generic, \ - .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \ - .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ - .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ - .setup_link = &ixgbe_setup_phy_link_generic, \ - .set_phy_power = NULL, - -static const struct ixgbe_phy_operations phy_ops_X550 = { - X550_COMMON_PHY - .check_overtemp = &ixgbe_tn_check_overtemp, - .init = NULL, - .identify = &ixgbe_identify_phy_generic, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, -}; + return IXGBE_SUCCESS; +} -static const 
struct ixgbe_phy_operations phy_ops_X550EM_x = { - X550_COMMON_PHY - .check_overtemp = &ixgbe_tn_check_overtemp, - .init = &ixgbe_init_phy_ops_X550em, - .identify = &ixgbe_identify_phy_x550em, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, -}; +/** + * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) +{ + s32 status; -static const struct ixgbe_phy_operations phy_ops_x550em_x_fw = { - X550_COMMON_PHY - .check_overtemp = NULL, - .init = ixgbe_init_phy_ops_X550em, - .identify = ixgbe_identify_phy_x550em, - .read_reg = NULL, - .write_reg = NULL, - .read_reg_mdi = NULL, - .write_reg_mdi = NULL, -}; + status = ixgbe_reset_phy_generic(hw); -static const struct ixgbe_phy_operations phy_ops_x550em_a = { - X550_COMMON_PHY - .check_overtemp = &ixgbe_tn_check_overtemp, - .init = &ixgbe_init_phy_ops_X550em, - .identify = &ixgbe_identify_phy_x550em, - .read_reg = &ixgbe_read_phy_reg_x550a, - .write_reg = &ixgbe_write_phy_reg_x550a, - .read_reg_mdi = &ixgbe_read_phy_reg_mdi, - .write_reg_mdi = &ixgbe_write_phy_reg_mdi, -}; + if (status != IXGBE_SUCCESS) + return status; -static const struct ixgbe_phy_operations phy_ops_x550em_a_fw = { - X550_COMMON_PHY - .check_overtemp = ixgbe_check_overtemp_fw, - .init = ixgbe_init_phy_ops_X550em, - .identify = ixgbe_identify_phy_fw, -#if 0 /* Declare C22 MDI directly access functions. 
By hilbert */ - .read_reg = NULL, - .write_reg = NULL, - .read_reg_mdi = NULL, - .write_reg_mdi = NULL, -#else - .read_reg = &ixgbe_read_phy_reg_x550a, - .write_reg = &ixgbe_write_phy_reg_x550a, - .read_reg_mdi = &ixgbe_read_phy_reg_mdio, - .write_reg_mdi = &ixgbe_write_phy_reg_mdio, -#endif -}; + /* Configure Link Status Alarm and Temperature Threshold interrupts */ + return ixgbe_enable_lasi_ext_t_x550em(hw); +} -static const struct ixgbe_link_operations link_ops_x550em_x = { - .read_link = &ixgbe_read_i2c_combined_generic, - .read_link_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, - .write_link = &ixgbe_write_i2c_combined_generic, - .write_link_unlocked = &ixgbe_write_i2c_combined_generic_unlocked, -}; +/** + * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @led_idx: led number to turn on + **/ +s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx) +{ + u16 phy_data; -static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(X550) -}; + DEBUGFUNC("ixgbe_led_on_t_X550em"); -static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(X550EM_x) -}; + if (led_idx >= IXGBE_X557_MAX_LED_INDEX) + return IXGBE_ERR_PARAM; -static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(X550EM_a) -}; + /* To turn on the LED, set mode to ON. 
*/ + ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); + phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK; + ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); -const struct ixgbe_info ixgbe_X550_info = { - .mac = ixgbe_mac_X550, - .get_invariants = &ixgbe_get_invariants_X540, - .mac_ops = &mac_ops_X550, - .eeprom_ops = &eeprom_ops_X550, - .phy_ops = &phy_ops_X550, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_X550, -}; + /* Some designs have the LEDs wired to the MAC */ + return ixgbe_led_on_generic(hw, led_idx); +} -const struct ixgbe_info ixgbe_X550EM_x_info = { - .mac = ixgbe_mac_X550EM_x, - .get_invariants = &ixgbe_get_invariants_X550_x, - .mac_ops = &mac_ops_X550EM_x, - .eeprom_ops = &eeprom_ops_X550EM_x, - .phy_ops = &phy_ops_X550EM_x, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_X550EM_x, - .link_ops = &link_ops_x550em_x, -}; +/** + * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs. 
+ * @hw: pointer to hardware structure + * @led_idx: led number to turn off + **/ +s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx) +{ + u16 phy_data; -const struct ixgbe_info ixgbe_x550em_x_fw_info = { - .mac = ixgbe_mac_X550EM_x, - .get_invariants = ixgbe_get_invariants_X550_x_fw, - .mac_ops = &mac_ops_X550EM_x_fw, - .eeprom_ops = &eeprom_ops_X550EM_x, - .phy_ops = &phy_ops_x550em_x_fw, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_X550EM_x, -}; + DEBUGFUNC("ixgbe_led_off_t_X550em"); -const struct ixgbe_info ixgbe_x550em_a_info = { - .mac = ixgbe_mac_x550em_a, - .get_invariants = &ixgbe_get_invariants_X550_a, - .mac_ops = &mac_ops_x550em_a, - .eeprom_ops = &eeprom_ops_X550EM_x, - .phy_ops = &phy_ops_x550em_a, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_x550em_a, -}; + if (led_idx >= IXGBE_X557_MAX_LED_INDEX) + return IXGBE_ERR_PARAM; -const struct ixgbe_info ixgbe_x550em_a_fw_info = { - .mac = ixgbe_mac_x550em_a, - .get_invariants = ixgbe_get_invariants_X550_a_fw, - .mac_ops = &mac_ops_x550em_a_fw, - .eeprom_ops = &eeprom_ops_X550EM_x, - .phy_ops = &phy_ops_x550em_a_fw, - .mbx_ops = &mbx_ops_generic, - .mvals = ixgbe_mvals_x550em_a, -}; + /* To turn on the LED, set mode to ON. 
*/ + ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); + phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK; + ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); + + /* Some designs have the LEDs wired to the MAC */ + return ixgbe_led_off_generic(hw, led_idx); +} + +/** + * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * @len: length of driver_ver string + * @driver_ver: driver string + * + * Sends driver version number to firmware through the manageability + * block. On success return IXGBE_SUCCESS + * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ +s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub, u16 len, const char *driver_ver) +{ + struct ixgbe_hic_drv_info2 fw_cmd; + s32 ret_val = IXGBE_SUCCESS; + int i; + + DEBUGFUNC("ixgbe_set_fw_drv_ver_x550"); + + if ((len == 0) || (driver_ver == NULL) || + (len > sizeof(fw_cmd.driver_string))) + return IXGBE_ERR_INVALID_ARGUMENT; + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + memcpy(fw_cmd.driver_string, driver_ver, len); + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); 
+ if (ret_val != IXGBE_SUCCESS) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + ret_val = IXGBE_SUCCESS; + else + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return ret_val; +} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.h new file mode 100644 index 000000000000..ff2c4ea43eaa --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.h @@ -0,0 +1,115 @@ +/******************************************************************************* + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_X550_H_ +#define _IXGBE_X550_H_ + +#include "ixgbe_type.h" + +s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw); +s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw); +s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw); + +s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw); +s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw); +s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw); +s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw); +s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size); +s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val); +s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw); +s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, + u16 data); +s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, +u16 *data); +s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 data); +void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable, + unsigned int pool); +void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, + bool enable, int vf); +s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data); +s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data); +s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 ver, u16 len, const char *str); +s32 ixgbe_get_phy_token(struct ixgbe_hw *); +s32 ixgbe_put_phy_token(struct ixgbe_hw *); +s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data); +s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 
reg_addr, + u32 device_type, u32 *data); +void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw); +void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw); +void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap); +void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf); +enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw); +s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *autoneg); +void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw); +s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw); +s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw); +u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw); +void ixgbe_disable_rx_x550(struct ixgbe_hw *hw); +s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed); +s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask); +s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +s32 ixgbe_setup_fc_fiber_x550em_a(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_sgmii_x550em_a(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw); +void 
ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw); +s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); +s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw); +s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw); +s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx); +s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx); +#endif /* _IXGBE_X550_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat.c new file mode 100644 index 000000000000..79593f83d9b3 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat.c @@ -0,0 +1,2375 @@ +/******************************************************************************* + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "kcompat.h" + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) || defined __VMKLNX__ +/* From lib/vsprintf.c */ +#include + +static int skip_atoi(const char **s) +{ + int i=0; + + while (isdigit(**s)) + i = i*10 + *((*s)++) - '0'; + return i; +} + +#define _kc_ZEROPAD 1 /* pad with zero */ +#define _kc_SIGN 2 /* unsigned/signed long */ +#define _kc_PLUS 4 /* show plus */ +#define _kc_SPACE 8 /* space if plus */ +#define _kc_LEFT 16 /* left justified */ +#define _kc_SPECIAL 32 /* 0x */ +#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ + +static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type) +{ + char c,sign,tmp[66]; + const char *digits; + const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; + const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + int i; + + digits = (type & _kc_LARGE) ? large_digits : small_digits; + if (type & _kc_LEFT) + type &= ~_kc_ZEROPAD; + if (base < 2 || base > 36) + return 0; + c = (type & _kc_ZEROPAD) ? 
'0' : ' '; + sign = 0; + if (type & _kc_SIGN) { + if (num < 0) { + sign = '-'; + num = -num; + size--; + } else if (type & _kc_PLUS) { + sign = '+'; + size--; + } else if (type & _kc_SPACE) { + sign = ' '; + size--; + } + } + if (type & _kc_SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++]='0'; + else while (num != 0) + tmp[i++] = digits[do_div(num,base)]; + if (i > precision) + precision = i; + size -= precision; + if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { + while(size-->0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + } + if (sign) { + if (buf <= end) + *buf = sign; + ++buf; + } + if (type & _kc_SPECIAL) { + if (base==8) { + if (buf <= end) + *buf = '0'; + ++buf; + } else if (base==16) { + if (buf <= end) + *buf = '0'; + ++buf; + if (buf <= end) + *buf = digits[33]; + ++buf; + } + } + if (!(type & _kc_LEFT)) { + while (size-- > 0) { + if (buf <= end) + *buf = c; + ++buf; + } + } + while (i < precision--) { + if (buf <= end) + *buf = '0'; + ++buf; + } + while (i-- > 0) { + if (buf <= end) + *buf = tmp[i]; + ++buf; + } + while (size-- > 0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + return buf; +} + +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int len; + unsigned long long num; + int i, base; + char *str, *end, c; + const char *s; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + number of chars for from string */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. 
*/ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + + str = buf; + end = buf + size - 1; + + if (end < buf - 1) { + end = ((void *) -1); + size = end - buf + 1; + } + + for (; *fmt ; ++fmt) { + if (*fmt != '%') { + if (str <= end) + *str = *fmt; + ++str; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': flags |= _kc_LEFT; goto repeat; + case '+': flags |= _kc_PLUS; goto repeat; + case ' ': flags |= _kc_SPACE; goto repeat; + case '#': flags |= _kc_SPECIAL; goto repeat; + case '0': flags |= _kc_ZEROPAD; goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= _kc_LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & _kc_LEFT)) { + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + } + c = (unsigned char) va_arg(args, int); + if (str <= end) + *str = c; + ++str; + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 's': + s = va_arg(args, char *); + if (!s) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & _kc_LEFT)) { + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + } + for (i = 0; i < len; ++i) { + if (str <= end) + *str = *s; + ++str; ++s; + } + while (len < field_width--) { + if (str <= end) + 
*str = ' '; + ++str; + } + continue; + + case 'p': + if ('M' == *(fmt+1)) { + str = get_mac(str, end, va_arg(args, unsigned char *)); + fmt++; + } else { + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= _kc_ZEROPAD; + } + str = number(str, end, + (unsigned long) va_arg(args, void *), + 16, field_width, precision, flags); + } + continue; + + case 'n': + /* FIXME: + * What does C99 say about the overflow case here? */ + if (qualifier == 'l') { + long * ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z') { + size_t * ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int * ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + if (str <= end) + *str = '%'; + ++str; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= _kc_LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 'i': + flags |= _kc_SIGN; + case 'u': + break; + + default: + if (str <= end) + *str = '%'; + ++str; + if (*fmt) { + if (str <= end) + *str = *fmt; + ++str; + } else { + --fmt; + } + continue; + } + if (qualifier == 'L') + num = va_arg(args, long long); + else if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & _kc_SIGN) + num = (signed long) num; + } else if (qualifier == 'Z') { + num = va_arg(args, size_t); + } else if (qualifier == 'h') { + num = (unsigned short) va_arg(args, int); + if (flags & _kc_SIGN) + num = (signed short) num; + } else { + num = va_arg(args, unsigned int); + if (flags & _kc_SIGN) + num = (signed int) num; + } + str = number(str, end, num, base, + field_width, precision, flags); + } + if (str <= end) + *str = '\0'; + else if (size > 0) + /* don't write out a null byte if the buf size is zero */ + *end = '\0'; + /* the trailing null byte doesn't count towards the total + * ++str; + */ + return str-buf; +} + +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...) 
+{ + va_list args; + int i; + + va_start(args, fmt); + i = _kc_vsnprintf(buf,size,fmt,args); + va_end(args); + return i; +} +#endif /* < 2.4.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#if defined(CONFIG_HIGHMEM) + +#ifndef PCI_DRAM_OFFSET +#define PCI_DRAM_OFFSET 0 +#endif + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + + PCI_DRAM_OFFSET); +} + +#else /* CONFIG_HIGHMEM */ + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return pci_map_single(dev, (void *)page_address(page) + offset, size, + direction); +} + +#endif /* CONFIG_HIGHMEM */ + +void +_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, + int direction) +{ + return pci_unmap_single(dev, dma_addr, size, direction); +} + +#endif /* 2.4.13 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) + +/**************************************/ +/* PCI DRIVER API */ + +int +_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) +{ + if (!pci_dma_supported(dev, mask)) + return -EIO; + dev->dma_mask = mask; + return 0; +} + +int +_kc_pci_request_regions(struct pci_dev *dev, char *res_name) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) { + if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { + if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); 
+ return -EBUSY; + } + } + } + return 0; +} + +void +_kc_pci_release_regions(struct pci_dev *dev) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) + release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + + else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) + release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + } +} + +/**************************************/ +/* NETWORK DRIVER API */ + +struct net_device * +_kc_alloc_etherdev(int sizeof_priv) +{ + struct net_device *dev; + int alloc_size; + + alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; + dev = kzalloc(alloc_size, GFP_KERNEL); + if (!dev) + return NULL; + + if (sizeof_priv) + dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); + dev->name[0] = '\0'; + ether_setup(dev); + + return dev; +} + +int +_kc_is_valid_ether_addr(u8 *addr) +{ + const char zaddr[6] = { 0, }; + + return !(addr[0] & 1) && memcmp(addr, zaddr, 6); +} + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +int +_kc_pci_set_power_state(struct pci_dev *dev, int state) +{ + return 0; +} + +int +_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) +{ + return 0; +} + +#endif /* 2.4.6 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, + int off, int size) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + frag->page = page; + frag->page_offset = off; + frag->size = size; + skb_shinfo(skb)->nr_frags = i + 1; +} + +/* + * Original Copyright: + * find_next_bit.c: fallback find next bit implementation + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + ffs(tmp); +} + +size_t _kc_strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? 
size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + +#ifndef do_div +#if BITS_PER_LONG == 32 +uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) +{ + uint64_t rem = *n; + uint64_t b = base; + uint64_t res, d = 1; + uint32_t high = rem >> 32; + + /* Reduce the thing a bit first */ + res = 0; + if (high >= base) { + high /= base; + res = (uint64_t) high << 32; + rem -= (uint64_t) (high*base) << 32; + } + + while ((int64_t)b > 0 && b < rem) { + b = b+b; + d = d+d; + } + + do { + if (rem >= b) { + rem -= b; + res += d; + } + b >>= 1; + d >>= 1; + } while (d); + + *n = res; + return rem; +} +#endif /* BITS_PER_LONG == 32 */ +#endif /* do_div */ +#endif /* 2.6.0 => 2.4.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + return (i >= size) ? 
(size - 1) : i; +} +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +char *_kc_kstrdup(const char *s, unsigned int gfp) +{ + size_t len; + char *buf; + + if (!s) + return NULL; + + len = strlen(s) + 1; + buf = kmalloc(len, gfp); + if (buf) + memcpy(buf, s, len); + return buf; +} +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +void *_kc_kzalloc(size_t size, int flags) +{ + void *ret = kmalloc(size, flags); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif /* <= 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +int _kc_skb_pad(struct sk_buff *skb, int pad) +{ + int ntail; + + /* If the skbuff is non linear tailroom is always zero.. 
*/ + if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) { + memset(skb->data+skb->len, 0, pad); + return 0; + } + + ntail = skb->data_len + pad - (skb->end - skb->tail); + if (likely(skb_cloned(skb) || ntail > 0)) { + if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) + goto free_skb; + } + +#ifdef MAX_SKB_FRAGS + if (skb_is_nonlinear(skb) && + !__pskb_pull_tail(skb, skb->data_len)) + goto free_skb; + +#endif + memset(skb->data + skb->len, 0, pad); + return 0; + +free_skb: + kfree_skb(skb); + return -ENOMEM; +} + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +int _kc_pci_save_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset, pcie_link_status; + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) + /* no ->dev for 2.4 kernels */ + WARN_ON(pdev->dev.driver_data == NULL); +#endif + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset) { + if (!pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + } + pci_config_space_ich8lan(); +#ifdef HAVE_PCI_ERS + if (adapter->config_space == NULL) +#else + WARN_ON(adapter->config_space != NULL); +#endif + adapter->config_space = kmalloc(size, GFP_KERNEL); + if (!adapter->config_space) { + printk(KERN_ERR "Out of memory in pci_save_state\n"); + return -ENOMEM; + } + for (i = 0; i < (size / 4); i++) + pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); + return 0; +} + +void _kc_pci_restore_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset; + u16 pcie_link_status; + + if (adapter->config_space != NULL) { + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset && + !pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = 
PCIE_CONFIG_SPACE_LEN; + + pci_config_space_ich8lan(); + for (i = 0; i < (size / 4); i++) + pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); +#ifndef HAVE_PCI_ERS + kfree(adapter->config_space); + adapter->config_space = NULL; +#endif + } +} +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +void _kc_free_netdev(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + + kfree(adapter->config_space); +#ifdef CONFIG_SYSFS + if (netdev->reg_state == NETREG_UNINITIALIZED) { + kfree((char *)netdev - netdev->padded); + } else { + BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); + netdev->reg_state = NETREG_RELEASED; + class_device_put(&netdev->class_dev); + } +#else + kfree((char *)netdev - netdev->padded); +#endif +} +#endif + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) +{ + void *p; + + p = kzalloc(len, gfp); + if (p) + memcpy(p, src, len); + return p; +} +#endif /* <= 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) +{ + return ((struct adapter_struct *)netdev_priv(netdev))->pdev; +} +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +/* hexdump code taken from lib/hexdump.c */ +static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, unsigned char *linebuf, + size_t linebuflen, bool ascii) +{ + const u8 *ptr = buf; + u8 ch; + int j, lx = 0; + int ascii_column; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + if (!len) + goto nil; + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + switch (groupsize) { + case 8: { + const u64 *ptr8 = buf; + int ngroups = len / groupsize; + + for 
(j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%16.16llx", j ? " " : "", + (unsigned long long)*(ptr8 + j)); + ascii_column = 17 * ngroups + 2; + break; + } + + case 4: { + const u32 *ptr4 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%8.8x", j ? " " : "", *(ptr4 + j)); + ascii_column = 9 * ngroups + 2; + break; + } + + case 2: { + const u16 *ptr2 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%4.4x", j ? " " : "", *(ptr2 + j)); + ascii_column = 5 * ngroups + 2; + break; + } + + default: + for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { + ch = ptr[j]; + linebuf[lx++] = hex_asc(ch >> 4); + linebuf[lx++] = hex_asc(ch & 0x0f); + linebuf[lx++] = ' '; + } + if (j) + lx--; + + ascii_column = 3 * rowsize + 2; + break; + } + if (!ascii) + goto nil; + + while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) + linebuf[lx++] = ' '; + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) + linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? 
ptr[j] + : '.'; +nil: + linebuf[lx++] = '\0'; +} + +void _kc_print_hex_dump(const char *level, + const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ + const u8 *ptr = buf; + int i, linelen, remaining = len; + unsigned char linebuf[200]; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + linebuf, sizeof(linebuf), ascii); + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + printk("%s%s%*p: %s\n", level, prefix_str, + (int)(2 * sizeof(void *)), ptr + i, linebuf); + break; + case DUMP_PREFIX_OFFSET: + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); + break; + default: + printk("%s%s%s\n", level, prefix_str, linebuf); + break; + } + } +} + +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) +int ixgbe_dcb_netlink_register(void) +{ + return 0; +} + +int ixgbe_dcb_netlink_unregister(void) +{ + return 0; +} + +int ixgbe_copy_dcb_cfg(struct ixgbe_adapter __always_unused *adapter, int __always_unused tc_max) +{ + return 0; +} +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifdef NAPI +struct net_device *napi_to_poll_dev(const struct napi_struct *napi) +{ + struct adapter_q_vector *q_vector = container_of(napi, + struct adapter_q_vector, + napi); + return &q_vector->poll_dev; +} + +int __kc_adapter_clean(struct net_device *netdev, int *budget) +{ + int work_done; + int work_to_do = min(*budget, netdev->quota); + /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ + struct napi_struct *napi = netdev->priv; + work_done = napi->poll(napi, work_to_do); + *budget -= work_done; + netdev->quota -= 
work_done; + return (work_done >= work_to_do) ? 1 : 0; +} +#endif /* NAPI */ +#endif /* <= 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) +{ + struct pci_dev *parent = pdev->bus->self; + u16 link_state; + int pos; + + if (!parent) + return; + + pos = pci_find_capability(parent, PCI_CAP_ID_EXP); + if (pos) { + pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); + link_state &= ~state; + pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); + } +} +#endif /* < 2.6.26 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_stop_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +} +void _kc_netif_tx_wake_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_wake_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_wake_subqueue(netdev, i); +} +void _kc_netif_tx_start_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_start_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_start_subqueue(netdev, i); +} +#endif /* HAVE_TX_MQ */ + +void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) 
+{ + va_list args; + + printk(KERN_WARNING "------------[ cut here ]------------\n"); + printk(KERN_WARNING "WARNING: at %s:%d \n", file, line); + va_start(args, fmt); + vprintk(fmt, args); + va_end(args); + + dump_stack(); +} +#endif /* __VMKLNX__ */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) + +int +_kc_pci_prepare_to_sleep(struct pci_dev *dev) +{ + pci_power_t target_state; + int error; + + target_state = pci_choose_state(dev, PMSG_SUSPEND); + + pci_enable_wake(dev, target_state, true); + + error = pci_set_power_state(dev, target_state); + + if (error) + pci_enable_wake(dev, target_state, false); + + return error; +} + +int +_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ + int err; + + err = pci_enable_wake(dev, PCI_D3cold, enable); + if (err) + goto out; + + err = pci_enable_wake(dev, PCI_D3hot, enable); + +out: + return err; +} +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +static void __kc_pci_set_master(struct pci_dev *pdev, bool enable) +{ + u16 old_cmd, cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); + if (enable) + cmd = old_cmd | PCI_COMMAND_MASTER; + else + cmd = old_cmd & ~PCI_COMMAND_MASTER; + if (cmd != old_cmd) { + dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n", + enable ? 
"enabling" : "disabling"); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) ) + pdev->is_busmaster = enable; +#endif +} + +void _kc_pci_clear_master(struct pci_dev *dev) +{ + __kc_pci_set_master(dev, false); +} +#endif /* < 2.6.29 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev) +{ + int num_vf = 0; +#ifdef CONFIG_PCI_IOV + struct pci_dev *vfdev; + + /* loop through all ethernet devices starting at PF dev */ + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); + while (vfdev) { + if (vfdev->is_virtfn && vfdev->physfn == dev) + num_vf++; + + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); + } + +#endif + return num_vf; +} +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 2.6.34 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +#ifdef HAVE_TX_MQ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) +{ + unsigned int real_num = dev->real_num_tx_queues; + struct Qdisc *qdisc; + int i; + + if (txq < 1 || txq > dev->num_tx_queues) + return -EINVAL; + + else if (txq > real_num) + dev->real_num_tx_queues = txq; + else if (txq < real_num) { + dev->real_num_tx_queues = txq; + for (i = txq; i < dev->num_tx_queues; i++) { + qdisc = netdev_get_tx_queue(dev, i)->qdisc; + if (qdisc) { + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); + } + } + } + + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#endif /* HAVE_TX_MQ */ + +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count) +{ + loff_t pos = *ppos; + size_t res; + + if (pos < 0) + return -EINVAL; + if (pos >= 
available || !count) + return 0; + if (count > available - pos) + count = available - pos; + res = copy_from_user(to + pos, from, count); + if (res == count) + return -EFAULT; + count -= res; + *ppos = pos + count; + return count; +} + +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +static const u32 _kc_flags_dup_features = + (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); + +u32 _kc_ethtool_op_get_flags(struct net_device *dev) +{ + return dev->features & _kc_flags_dup_features; +} + +int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) +{ + if (data & ~supported) + return -EINVAL; + + dev->features = ((dev->features & ~_kc_flags_dup_features) | + (data & _kc_flags_dup_features)); + return 0; +} +#endif /* < 2.6.36 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +#ifdef HAVE_NETDEV_SELECT_QUEUE +#include +#include + +u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb, + u16 num_tx_queues) +{ + u32 hash; + u16 qoffset = 0; + u16 qcount = num_tx_queues; + + if (skb_rx_queue_recorded(skb)) { + hash = skb_get_rx_queue(skb); + while (unlikely(hash >= num_tx_queues)) + hash -= num_tx_queues; + return hash; + } + + if (netdev_get_num_tc(dev)) { + struct adapter_struct *kc_adapter = netdev_priv(dev); + + if (skb->priority == TC_PRIO_CONTROL) { + qoffset = kc_adapter->dcb_tc - 1; + } else { + qoffset = skb->vlan_tci; + qoffset &= IXGBE_TX_FLAGS_VLAN_PRIO_MASK; + qoffset >>= 13; + } + + qcount = kc_adapter->ring_feature[RING_F_RSS].indices; + qoffset *= qcount; + } + + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else +#ifdef NETIF_F_RXHASH + hash = (__force u16) skb->protocol ^ skb->rxhash; +#else + hash = skb->protocol; +#endif + + 
hash = jhash_1word(hash, _kc_hashrnd); + + return (u16) (((u64) hash * qcount) >> 32) + qoffset; +} +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + +u8 _kc_netdev_get_num_tc(struct net_device *dev) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + if (kc_adapter->flags & IXGBE_FLAG_DCB_ENABLED) + return kc_adapter->dcb_tc; + else + return 0; +} + +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + if (num_tc > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return -EINVAL; + + kc_adapter->dcb_tc = num_tc; + + return 0; +} + +u8 _kc_netdev_get_prio_tc_map(struct net_device __maybe_unused *dev, u8 __maybe_unused up) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + return ixgbe_dcb_get_tc_from_up(&kc_adapter->dcb_cfg, 0, up); +} + +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ +#endif /* < 2.6.39 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size, unsigned int truesize) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; +} + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +int _kc_simple_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + + return 0; +} +#endif /* SLE_VERSION < 11,3,0 */ + +#endif /* < 3.4.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +static inline int __kc_pcie_cap_version(struct pci_dev *dev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return 0; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); + return reg16 & PCI_EXP_FLAGS_VERS; +} + +static inline bool 
__kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev) +{ + return true; +} + +static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_ENDPOINT || + type == PCI_EXP_TYPE_LEG_END; +} + +static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + int pos; + u16 pcie_flags_reg; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return false; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + pcie_flags_reg & PCI_EXP_FLAGS_SLOT); +} + +static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC; +} + +static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) +{ + if (!pci_is_pcie(dev)) + return false; + + switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return __kc_pcie_cap_has_devctl(dev); + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return __kc_pcie_cap_has_lnkctl(dev); + case PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return __kc_pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return __kc_pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return __kc_pcie_cap_version(dev) > 1; + default: + return false; + } +} + +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec r3.0, sec 7.8). 
They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) + */ +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). + */ + if (ret) + *val = 0; + return ret; + } + + /* + * For Functions that do not implement the Slot Capabilities, + * Slot Status, and Slot Control registers, these spaces must + * be hardwired to 0b, with the exception of the Presence Detect + * State bit in the Slot Status register of Downstream Ports, + * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) + */ + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + if (!__kc_pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); +} + +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) +{ + int ret; + u16 val; + + ret = __kc_pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = __kc_pcie_capability_write_word(dev, pos, val); + } + + return ret; +} + +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear) +{ + return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); +} +#endif /* < 3.7.0 */ + +/****************************************************************************** + * ripped from linux/net/ipv6/exthdrs_core.c, GPL2, no direct copyright, + * inferred copyright from kernel + */ +#if ( 
LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) +int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, + int target, unsigned short *fragoff, int *flags) +{ + unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); + u8 nexthdr = ipv6_hdr(skb)->nexthdr; + unsigned int len; + bool found; + +#define __KC_IP6_FH_F_FRAG BIT(0) +#define __KC_IP6_FH_F_AUTH BIT(1) +#define __KC_IP6_FH_F_SKIP_RH BIT(2) + + if (fragoff) + *fragoff = 0; + + if (*offset) { + struct ipv6hdr _ip6, *ip6; + + ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); + if (!ip6 || (ip6->version != 6)) { + printk(KERN_ERR "IPv6 header not found\n"); + return -EBADMSG; + } + start = *offset + sizeof(struct ipv6hdr); + nexthdr = ip6->nexthdr; + } + len = skb->len - start; + + do { + struct ipv6_opt_hdr _hdr, *hp; + unsigned int hdrlen; + found = (nexthdr == target); + + if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { + if (target < 0 || found) + break; + return -ENOENT; + } + + hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); + if (!hp) + return -EBADMSG; + + if (nexthdr == NEXTHDR_ROUTING) { + struct ipv6_rt_hdr _rh, *rh; + + rh = skb_header_pointer(skb, start, sizeof(_rh), + &_rh); + if (!rh) + return -EBADMSG; + + if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) && + rh->segments_left == 0) + found = false; + } + + if (nexthdr == NEXTHDR_FRAGMENT) { + unsigned short _frag_off; + __be16 *fp; + + if (flags) /* Indicate that this is a fragment */ + *flags |= __KC_IP6_FH_F_FRAG; + fp = skb_header_pointer(skb, + start+offsetof(struct frag_hdr, + frag_off), + sizeof(_frag_off), + &_frag_off); + if (!fp) + return -EBADMSG; + + _frag_off = ntohs(*fp) & ~0x7; + if (_frag_off) { + if (target < 0 && + ((!ipv6_ext_hdr(hp->nexthdr)) || + hp->nexthdr == NEXTHDR_NONE)) { + if (fragoff) + *fragoff = _frag_off; + return hp->nexthdr; + } + return -ENOENT; + } + hdrlen = 8; + } else if (nexthdr == NEXTHDR_AUTH) { + if (flags && (*flags & __KC_IP6_FH_F_AUTH) && 
(target < 0)) + break; + hdrlen = (hp->hdrlen + 2) << 2; + } else + hdrlen = ipv6_optlen(hp); + + if (!found) { + nexthdr = hp->nexthdr; + len -= hdrlen; + start += hdrlen; + } + } while (!found); + + *offset = start; + return nexthdr; +} +#endif /* < 3.8.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) +#ifdef CONFIG_XPS +#if NR_CPUS < 64 +#define _KC_MAX_XPS_CPUS NR_CPUS +#else +#define _KC_MAX_XPS_CPUS 64 +#endif + +/* + * netdev_queue sysfs structures and functions. + */ +struct _kc_netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, char *buf); + ssize_t (*store)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, const char *buf, size_t len); +}; + +#define to_kc_netdev_queue_attr(_attr) container_of(_attr, \ + struct _kc_netdev_queue_attribute, attr) + +int __kc_netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, + u16 index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, index); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) + /* Redhat requires some odd extended netdev structures */ + struct netdev_tx_queue_extended *txq_ext = + netdev_extended(dev)->_tx_ext + index; + struct kobj_type *ktype = txq_ext->kobj.ktype; +#else + struct kobj_type *ktype = txq->kobj.ktype; +#endif + struct _kc_netdev_queue_attribute *xps_attr; + struct attribute *attr = NULL; + int i, len, err; +#define _KC_XPS_BUFLEN (DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9) + char buf[_KC_XPS_BUFLEN]; + + if (!ktype) + return -ENOMEM; + + /* attempt to locate the XPS attribute in the Tx queue */ + for (i = 0; (attr = ktype->default_attrs[i]); i++) { + if (!strcmp("xps_cpus", attr->name)) + break; + } + + /* if we did not find it return an error */ + if (!attr) + return -EINVAL; + + /* copy the mask into a string */ + len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN, + 
cpumask_bits(mask), _KC_MAX_XPS_CPUS); + if (!len) + return -ENOMEM; + + xps_attr = to_kc_netdev_queue_attr(attr); + + /* Store the XPS value using the SYSFS store call */ + err = xps_attr->store(txq, xps_attr, buf, len); + + /* we only had an error on err < 0 */ + return (err < 0) ? err : 0; +} +#endif /* CONFIG_XPS */ +#ifdef HAVE_NETDEV_SELECT_QUEUE +static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb) +{ +#ifdef CONFIG_XPS + struct xps_dev_maps *dev_maps; + struct xps_map *map; + int queue_index = -1; + + rcu_read_lock(); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) + /* Redhat requires some odd extended netdev structures */ + dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps); +#else + dev_maps = rcu_dereference(dev->xps_maps); +#endif + if (dev_maps) { + map = rcu_dereference( + dev_maps->cpu_map[raw_smp_processor_id()]); + if (map) { + if (map->len == 1) + queue_index = map->queues[0]; + else { + u32 hash; + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else + hash = (__force u16) skb->protocol ^ + skb->rxhash; + hash = jhash_1word(hash, _kc_hashrnd); + queue_index = map->queues[ + ((u64)hash * map->len) >> 32]; + } + if (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index = -1; + } + } + rcu_read_unlock(); + + return queue_index; +#else + struct adapter_struct *kc_adapter = netdev_priv(dev); + int queue_index = -1; + + if (kc_adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + queue_index = skb_rx_queue_recorded(skb) ? 
+ skb_get_rx_queue(skb) : + smp_processor_id(); + while (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index -= dev->real_num_tx_queues; + return queue_index; + } + + return -1; +#endif +} + +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + int queue_index = sk_tx_queue_get(sk); + int new_index; + + if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) { +#ifdef CONFIG_XPS + if (!skb->ooo_okay) +#endif + return queue_index; + } + + new_index = kc_get_xps_queue(dev, skb); + if (new_index < 0) + new_index = skb_tx_hash(dev, skb); + + if (queue_index != new_index && sk) { + struct dst_entry *dst = + rcu_dereference(sk->sk_dst_cache); + + if (dst && skb_dst(skb) == dst) + sk_tx_queue_set(sk, new_index); + + } + + return new_index; +} + +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + u16 flags) +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. 
+ */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_FDB_DEL_NLATTR +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr) +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr) +#endif +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (!(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + + return err; +} + +#endif /* HAVE_FDB_OPS */ +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev) +{ + unsigned int vfs_assigned = 0; +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED + int pos; + struct pci_dev *vfdev; + unsigned short dev_id; + + /* only search if we are a PF */ + if (!dev->is_physfn) + return 0; + + /* find SR-IOV capability */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + + /* + * determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one + */ + pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to 
see if we own any that are assigned */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* + * It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set + */ + if (vfdev->is_virtfn && (vfdev->physfn == dev) && + (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } + +#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ + return vfs_assigned; +} + +#endif /* CONFIG_PCI_IOV */ +#endif /* 3.10.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) +const unsigned char pcie_link_speed[] = { + PCI_SPEED_UNKNOWN, /* 0 */ + PCIE_SPEED_2_5GT, /* 1 */ + PCIE_SPEED_5_0GT, /* 2 */ + PCIE_SPEED_8_0GT, /* 3 */ + PCI_SPEED_UNKNOWN, /* 4 */ + PCI_SPEED_UNKNOWN, /* 5 */ + PCI_SPEED_UNKNOWN, /* 6 */ + PCI_SPEED_UNKNOWN, /* 7 */ + PCI_SPEED_UNKNOWN, /* 8 */ + PCI_SPEED_UNKNOWN, /* 9 */ + PCI_SPEED_UNKNOWN, /* A */ + PCI_SPEED_UNKNOWN, /* B */ + PCI_SPEED_UNKNOWN, /* C */ + PCI_SPEED_UNKNOWN, /* D */ + PCI_SPEED_UNKNOWN, /* E */ + PCI_SPEED_UNKNOWN /* F */ +}; + +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + int ret; + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + while (dev) { + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + + ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + if (ret) + return ret; + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + if (next_speed < *speed) + *speed = next_speed; + + if (next_width < *width) + *width = next_width; + + dev = dev->bus->self; + } + + return 0; +} + +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) +int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ 
+ int err = dma_set_mask(dev, mask); + + if (!err) + /* coherent mask for the same size will always succeed if + * dma_set_mask does. However we store the error anyways, due + * to some kernels which use gcc's warn_unused_result on their + * definition of dma_set_coherent_mask. + */ + err = dma_set_coherent_mask(dev, mask); + return err; +} + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + /* Set of random keys generated using kernel random number generator */ + static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, + 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, + 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, + 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, + 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, + 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, + 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; + + BUG_ON(len > NETDEV_RSS_KEY_LEN); + memcpy(buffer, seed, len); +} +#endif /* 3.13.0 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(dev, entries, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif /* 3.14.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)) +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) +{ + size_t size; + char *buf; + + if (!s) + return NULL; + + size = strlen(s) + 1; + buf = devm_kzalloc(dev, size, gfp); + if (buf) + memcpy(buf, s, size); + return buf; +} +#endif /* 3.15.0 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct 
net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + int err; + + /* first go through and flush out any stale entries */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (!ha->synced || ha->refcount != 1) +#else + if (!ha->sync_cnt || ha->refcount != 1) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } + + /* go through and sync new entries to the list */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (ha->synced) +#else + if (ha->sync_cnt) +#endif + continue; + + err = sync(dev, ha->addr); + if (err) + return err; +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + ha->synced = true; +#else + ha->sync_cnt++; +#endif + ha->refcount++; + } + + return 0; +} + +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (!ha->synced) +#else + if (!ha->sync_cnt) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + ha->synced = false; +#else + ha->sync_cnt--; +#endif + if (--ha->refcount) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } +} + +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da, **next = list; + int err; + + /* first go through and flush out any stale entries */ + while ((da = 
*next) != NULL) { + if (da->da_synced && da->da_users == 1) { + if (!unsync || !unsync(dev, da->da_addr)) { + *next = da->next; + kfree(da); + (*count)--; + continue; + } + } + next = &da->next; + } + + /* go through and sync new entries to the list */ + for (da = *list; da != NULL; da = da->next) { + if (da->da_synced) + continue; + + err = sync(dev, da->da_addr); + if (err) + return err; + + da->da_synced++; + da->da_users++; + } + + return 0; +} + +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da; + + while ((da = *list) != NULL) { + if (da->da_synced) { + if (!unsync || !unsync(dev, da->da_addr)) { + da->da_synced--; + if (--da->da_users == 0) { + *list = da->next; + kfree(da); + (*count)--; + continue; + } + } + } + list = &da->next; + } +} +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + unsigned int gfp) +{ + void *p; + + p = devm_kzalloc(dev, len, gfp); + if (p) + memcpy(p, src, len); + + return p; +} +#endif /* 3.16.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) ) +#endif /* 3.17.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) +#ifndef NO_PTP_SUPPORT +static void __kc_sock_efree(struct sk_buff *skb) +{ + sock_put(skb->sk); +} + +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct sk_buff *clone; + + if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) + return NULL; + + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) { + sock_put(sk); + return NULL; + } + + clone->sk = sk; + clone->destructor = __kc_sock_efree; + + return clone; +} + +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + 
struct skb_shared_hwtstamps *hwtstamps) +{ + struct sock_exterr_skb *serr; + struct sock *sk = skb->sk; + int err; + + sock_hold(sk); + + *skb_hwtstamps(skb) = *hwtstamps; + + serr = SKB_EXT_ERR(skb); + memset(serr, 0, sizeof(*serr)); + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; + + err = sock_queue_err_skb(sk, skb); + if (err) + kfree_skb(skb); + + sock_put(sk); +} +#endif + +/* include headers needed for get_headlen function */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#include +#endif +#ifdef HAVE_SCTP +#include +#endif + +unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 proto; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + proto = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + +again: + switch (proto) { + /* handle any vlan tag if present */ + case __constant_htons(ETH_P_8021AD): + case __constant_htons(ETH_P_8021Q): + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + proto = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + goto again; + /* handle L3 protocols */ + case __constant_htons(ETH_P_IP): + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) + nexthdr = hdr.ipv4->protocol; 
+ + hdr.network += hlen; + break; +#ifdef NETIF_F_TSO6 + case __constant_htons(ETH_P_IPV6): + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hdr.network += sizeof(struct ipv6hdr); + break; +#endif /* NETIF_F_TSO6 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) + case __constant_htons(ETH_P_FCOE): + hdr.network += FCOE_HEADER_LEN; + break; +#endif + default: + return hdr.network - data; + } + + /* finally sort out L4 */ + switch (nexthdr) { + case IPPROTO_TCP: + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hdr.network += max_t(u8, sizeof(struct tcphdr), + (hdr.network[12] & 0xF0) >> 2); + + break; + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + hdr.network += sizeof(struct udphdr); + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + hdr.network += sizeof(struct sctphdr); + break; +#endif + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. 
+ */ + return min_t(unsigned int, hdr.network - data, max_len); +} + +#endif /* < 3.18.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +#ifdef HAVE_NET_GET_RANDOM_ONCE +static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + BUG_ON(len > sizeof(__kc_netdev_rss_key)); + net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); + memcpy(buffer, __kc_netdev_rss_key, len); +} +#endif +#endif + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) ) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#ifdef CONFIG_SPARC +#include +#include +#endif +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused) +{ +#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)) && defined(CONFIG_OF) && \ + !defined(HAVE_STRUCT_DEVICE_OF_NODE) || !defined(CONFIG_OF)) && \ + !defined(CONFIG_SPARC)) + return -ENODEV; +#else + const unsigned char *addr; + struct device_node *dp; + + if (dev_is_pci(dev)) + dp = pci_device_to_OF_node(to_pci_dev(dev)); + else +#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF) + dp = dev->of_node; +#else + dp = NULL; +#endif + + addr = NULL; + if (dp) + addr = of_get_mac_address(dp); +#ifdef CONFIG_SPARC + /* Kernel hasn't implemented arch_get_platform_mac_address, but we + * should handle the SPARC case here since it was supported + * originally. This is replaced by arch_get_platform_mac_address() + * upstream. 
+ */ + if (!addr) + addr = idprom->id_ethaddr; +#endif + if (!addr) + return -ENODEV; + + ether_addr_copy(mac_addr, addr); + return 0; +#endif +} +#endif /* !(RHEL_RELEASE >= 7.3) */ +#endif diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat.h new file mode 100644 index 000000000000..b936fcb9d10b --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat.h @@ -0,0 +1,5610 @@ +/******************************************************************************* + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _KCOMPAT_H_ +#define _KCOMPAT_H_ + +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef NSEC_PER_MSEC +#define NSEC_PER_MSEC 1000000L +#endif +#include +/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ +#ifndef UTS_RELEASE +/* utsrelease.h changed locations in 2.6.33 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#include +#else +#include +#endif +#endif + +/* NAPI enable/disable flags here */ +#define NAPI + +#define adapter_struct ixgbe_adapter +#define adapter_q_vector ixgbe_q_vector + +/* and finally set defines so that the code sees the changes */ +#ifdef NAPI +#else +#endif /* NAPI */ + +/* Dynamic LTR and deeper C-State support disable/enable */ + +/* packet split disable/enable */ +#ifdef DISABLE_PACKET_SPLIT +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT +#define CONFIG_IXGBE_DISABLE_PACKET_SPLIT +#endif +#endif /* DISABLE_PACKET_SPLIT */ + +/* MSI compatibility code for all kernels and drivers */ +#ifdef DISABLE_PCI_MSI +#undef CONFIG_PCI_MSI +#endif +#ifndef CONFIG_PCI_MSI +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +struct msix_entry { + u16 vector; /* kernel uses to write allocated vector */ + u16 entry; /* driver uses to specify entry, OS writes */ +}; +#endif +#undef pci_enable_msi +#define pci_enable_msi(a) -ENOTSUPP +#undef pci_disable_msi +#define pci_disable_msi(a) do {} while (0) +#undef pci_enable_msix +#define pci_enable_msix(a, b, c) -ENOTSUPP +#undef pci_disable_msix +#define pci_disable_msix(a) do {} while (0) +#define msi_remove_pci_irq_vectors(a) do {} while 
(0) +#endif /* CONFIG_PCI_MSI */ +#ifdef DISABLE_PM +#undef CONFIG_PM +#endif + +#ifdef DISABLE_NET_POLL_CONTROLLER +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef PMSG_SUSPEND +#define PMSG_SUSPEND 3 +#endif + +/* generic boolean compatibility */ +#undef TRUE +#undef FALSE +#define TRUE true +#define FALSE false +#ifdef GCC_VERSION +#if ( GCC_VERSION < 3000 ) +#define _Bool char +#endif +#else +#define _Bool char +#endif + +#undef __always_unused +#define __always_unused __attribute__((__unused__)) + +#undef __maybe_unused +#define __maybe_unused __attribute__((__unused__)) + +/* kernels less than 2.4.14 don't have this */ +#ifndef ETH_P_8021Q +#define ETH_P_8021Q 0x8100 +#endif + +#ifndef module_param +#define module_param(v,t,p) MODULE_PARM(v, "i"); +#endif + +#ifndef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +#ifndef uninitialized_var +#define uninitialized_var(x) x = x +#endif + +#ifndef PCIE_LINK_STATE_L0S +#define PCIE_LINK_STATE_L0S 1 +#endif +#ifndef PCIE_LINK_STATE_L1 +#define PCIE_LINK_STATE_L1 2 +#endif + +#ifndef mmiowb +#ifdef CONFIG_IA64 +#define mmiowb() asm volatile ("mf.a" ::: "memory") +#else +#define mmiowb() +#endif +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#define free_netdev(x) kfree(x) +#endif + +#ifdef HAVE_POLL_CONTROLLER +#define CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef SKB_DATAREF_SHIFT +/* if we do not have the infrastructure to detect if skb_header is cloned + just return false in all cases */ +#define skb_header_cloned(x) 0 +#endif + +#ifndef NETIF_F_GSO +#define gso_size tso_size +#define gso_segs tso_segs +#endif + +#ifndef NETIF_F_GRO +#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ + vlan_hwaccel_receive_skb(_skb, _vlgrp, 
_vlan) +#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef NETIF_F_LRO +#define NETIF_F_LRO (1 << 15) +#endif + +#ifndef NETIF_F_NTUPLE +#define NETIF_F_NTUPLE (1 << 27) +#endif + +#ifndef NETIF_F_ALL_FCOE +#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ + NETIF_F_FSO) +#endif + +#ifndef IPPROTO_SCTP +#define IPPROTO_SCTP 132 +#endif + +#ifndef IPPROTO_UDPLITE +#define IPPROTO_UDPLITE 136 +#endif + +#ifndef CHECKSUM_PARTIAL +#define CHECKSUM_PARTIAL CHECKSUM_HW +#define CHECKSUM_COMPLETE CHECKSUM_HW +#endif + +#ifndef __read_mostly +#define __read_mostly +#endif + +#ifndef MII_RESV1 +#define MII_RESV1 0x17 /* Reserved... */ +#endif + +#ifndef unlikely +#define unlikely(_x) _x +#define likely(_x) _x +#endif + +#ifndef WARN_ON +#define WARN_ON(x) +#endif + +#ifndef PCI_DEVICE +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID +#endif + +#ifndef node_online +#define node_online(node) ((node) == 0) +#endif + +#ifndef num_online_cpus +#define num_online_cpus() smp_num_cpus +#endif + +#ifndef cpu_online +#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) +#endif + +#ifndef _LINUX_RANDOM_H +#include +#endif + +#ifndef DECLARE_BITMAP +#ifndef BITS_TO_LONGS +#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) +#endif +#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] +#endif + +#ifndef VLAN_HLEN +#define VLAN_HLEN 4 +#endif + +#ifndef VLAN_ETH_HLEN +#define VLAN_ETH_HLEN 18 +#endif + +#ifndef VLAN_ETH_FRAME_LEN +#define VLAN_ETH_FRAME_LEN 1518 +#endif + +#ifndef DCA_GET_TAG_TWO_ARGS +#define dca3_get_tag(a,b) dca_get_tag(b) +#endif + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#if defined(__i386__) || defined(__x86_64__) +#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#endif +#endif + +/* taken from 2.6.24 definition in linux/kernel.h */ 
+#ifndef IS_ALIGNED +#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) +#endif + +#ifdef IS_ENABLED +#undef IS_ENABLED +#undef __ARG_PLACEHOLDER_1 +#undef config_enabled +#undef _config_enabled +#undef __config_enabled +#undef ___config_enabled +#endif + +#define __ARG_PLACEHOLDER_1 0, +#define config_enabled(cfg) _config_enabled(cfg) +#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) +#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) +#define ___config_enabled(__ignored, val, ...) val + +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) + +#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) +struct _kc_vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_ethhdr _kc_vlan_ethhdr +struct _kc_vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_hdr _kc_vlan_hdr +#define vlan_tx_tag_present(_skb) 0 +#define vlan_tx_tag_get(_skb) 0 +#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ + +#ifndef VLAN_PRIO_SHIFT +#define VLAN_PRIO_SHIFT 13 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_2_5GB +#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_5_0GB +#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_8_0GB +#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X1 +#define PCI_EXP_LNKSTA_NLW_X1 0x0010 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X2 +#define PCI_EXP_LNKSTA_NLW_X2 0x0020 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X4 +#define PCI_EXP_LNKSTA_NLW_X4 0x0040 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X8 +#define PCI_EXP_LNKSTA_NLW_X8 0x0080 +#endif + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + +#ifndef IP_OFFSET +#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ +#endif 
+ +/*****************************************************************************/ +/* Installations with ethtool version without eeprom, adapter id, or statistics + * support */ + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x1d +#undef ethtool_drvinfo +#define ethtool_drvinfo k_ethtool_drvinfo +struct k_ethtool_drvinfo { + u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char reserved1[32]; + char reserved2[16]; + u32 n_stats; + u32 testinfo_len; + u32 eedump_len; + u32 regdump_len; +}; + +struct ethtool_stats { + u32 cmd; + u32 n_stats; + u64 data[0]; +}; +#endif /* ETHTOOL_GSTATS */ + +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x1c +#endif /* ETHTOOL_PHYS_ID */ + +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x1b +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; +struct ethtool_gstrings { + u32 cmd; /* ETHTOOL_GSTRINGS */ + u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#endif /* ETHTOOL_GSTRINGS */ + +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = (1 << 0), + ETH_TEST_FL_FAILED = (1 << 1), +}; +struct ethtool_test { + u32 cmd; + u32 flags; + u32 reserved; + u32 len; + u64 data[0]; +}; +#endif /* ETHTOOL_TEST */ + +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + u32 cmd; + u32 magic; + u32 offset; + u32 len; + u8 data[0]; +}; + +struct ethtool_value { + u32 cmd; + u32 data; +}; +#endif /* ETHTOOL_GEEPROM */ + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* ETHTOOL_GLINK */ + +#ifndef ETHTOOL_GWOL +#define ETHTOOL_GWOL 0x5 +#define ETHTOOL_SWOL 0x6 +#define SOPASS_MAX 6 +struct ethtool_wolinfo { + u32 cmd; + u32 supported; + u32 wolopts; + u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; +#endif /* ETHTOOL_GWOL */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif /* ETHTOOL_GREGS */ + +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. 
*/ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +/* for configuring coalescing parameters of chip */ +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. + */ + u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. 
+ */ + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + + /* How many usecs to delay in-memory statistics + * block updates. Some drivers do not have an in-memory + * statistic block, and in such cases this value is ignored. + * This value must not be zero. + */ + u32 stats_block_coalesce_usecs; + + /* Adaptive RX/TX coalescing is an algorithm implemented by + * some drivers to improve latency under low packet rates and + * improve throughput under high packet rates. Some drivers + * only implement one of RX or TX adaptive coalescing. Anything + * not implemented by the driver causes these values to be + * silently ignored. + */ + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + + /* When the packet rate (measured in packets per second) + * is below pkt_rate_low, the {rx,tx}_*_low parameters are + * used. + */ + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + u32 rate_sample_interval; +}; +#endif /* ETHTOOL_GCOALESCE */ + +#ifndef ETHTOOL_SCOALESCE +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. 
*/ +#endif +#ifndef ETHTOOL_GRINGPARAM +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +/* for configuring RX/TX ring parameters */ +#define ethtool_ringparam _kc_ethtool_ringparam +struct _kc_ethtool_ringparam { + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + u32 rx_max_pending; + u32 rx_mini_max_pending; + u32 rx_jumbo_max_pending; + u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + u32 rx_pending; + u32 rx_mini_pending; + u32 rx_jumbo_pending; + u32 tx_pending; +}; +#endif /* ETHTOOL_GRINGPARAM */ + +#ifndef ETHTOOL_SRINGPARAM +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ +#endif +#ifndef ETHTOOL_GPAUSEPARAM +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +/* for configuring link flow control parameters */ +#define ethtool_pauseparam _kc_ethtool_pauseparam +struct _kc_ethtool_pauseparam { + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autoneg' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; +#endif /* ETHTOOL_GPAUSEPARAM */ + +#ifndef ETHTOOL_SPAUSEPARAM +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. 
*/ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ +#endif +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#endif +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#endif +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#endif +#ifndef ETHTOOL_GTSO +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STSO +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#endif + +#ifndef ETHTOOL_BUSINFO_LEN +#define ETHTOOL_BUSINFO_LEN 32 +#endif + +#ifndef SPEED_2500 +#define SPEED_2500 2500 +#endif +#ifndef SPEED_5000 +#define SPEED_5000 5000 +#endif +#ifndef SPEED_25000 +#define SPEED_25000 25000 +#endif +#ifndef SPEED_50000 +#define SPEED_50000 50000 +#endif +#ifndef SPEED_100000 +#define SPEED_100000 100000 +#endif + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif +#ifndef AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#ifndef AX_RELEASE_CODE +#define AX_RELEASE_CODE 0 +#endif + +#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0)) +#define RHEL_RELEASE_CODE 
RHEL_RELEASE_VERSION(5,0) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) +#endif + +#ifndef RHEL_RELEASE_CODE +/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ +#define RHEL_RELEASE_CODE 0 +#endif + +/* RHEL 7 didn't backport the parameter change in + * create_singlethread_workqueue. + * If/when RH corrects this we will want to tighten up the version check. + */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) +#undef create_singlethread_workqueue +#define create_singlethread_workqueue(name) \ + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) +#endif + +/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find + * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new + * enough versions of Ubuntu. Otherwise you can simply see it in the output of + * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in + * the linux-source package, but in the linux-headers package. It begins to + * appear in later releases of 14.04 and 14.10. + * + * Ex: + * + * $uname -r + * 3.13.0-45-generic + * ABI is 45 + * + * + * $uname -r + * 3.16.0-23-generic + * ABI is 23 + */ +#ifndef UTS_UBUNTU_RELEASE_ABI +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#else +/* Ubuntu does not provide actual release version macro, so we use the kernel + * version plus the ABI to generate a unique version code specific to Ubuntu. + * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to + * ignore differences in sublevel which are not important since we have the + * ABI value. Otherwise, it becomes impossible to correlate ABI to version for + * ordering checks. 
+ */ +#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \ + UTS_UBUNTU_RELEASE_ABI) + +#if UTS_UBUNTU_RELEASE_ABI > 255 +#error UTS_UBUNTU_RELEASE_ABI is too large... +#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */ + +#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) ) +/* Our version code scheme does not make sense for non 3.x or newer kernels, + * and we have no support in kcompat for this scenario. Thus, treat this as a + * non-Ubuntu kernel. Possibly might be better to error here. + */ +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#endif + +#endif + +/* Note that the 3rd digit is always zero, and will be ignored. This is + * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux + * version codes are 3 digit, this 3rd digit is superseded by the ABI value. + */ +#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d)) + +/* SuSE version macros are the same as Linux kernel version macro */ +#ifndef SLE_VERSION +#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c) +#endif +#define SLE_LOCALVERSION(a,b,c) KERNEL_VERSION(a,b,c) +#ifdef CONFIG_SUSE_KERNEL +#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) +/* SLES11 GA is 2.6.27 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,0,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) +/* SLES11 SP1 is 2.6.32 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,1,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) ) +/* SLES11 SP2 GA is 3.0.13-0.27 */ +#define SLE_VERSION_CODE SLE_VERSION(11,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76))) +/* SLES11 SP3 GA is 3.0.76-0.11 */ +#define SLE_VERSION_CODE SLE_VERSION(11,3,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101)) + #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0)) + /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */ + #define SLE_VERSION_CODE SLE_VERSION(11,2,0) + #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0)) + /* most SLES11sp3 update kernels */ + #define 
SLE_VERSION_CODE SLE_VERSION(11,3,0) + #else + /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */ + #define SLE_VERSION_CODE SLE_VERSION(11,4,0) + #endif +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28)) +/* SLES12 GA is 3.12.28-4 + * kernel updates 3.12.xx-<33 through 52>[.yy] */ +#define SLE_VERSION_CODE SLE_VERSION(12,0,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49)) +/* SLES12 SP1 GA is 3.12.49-11 + * updates 3.12.xx-60.yy where xx={51..} */ +#define SLE_VERSION_CODE SLE_VERSION(12,1,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,4,21)) +/* SLES12 SP2 GA is 4.4.21-69 */ +#define SLE_VERSION_CODE SLE_VERSION(12,2,0) +/* SLES12 SP3 Beta3 is 4.4.68-2 */ +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,68)) +#define SLE_VERSION_CODE SLE_VERSION(12,3,0) +/* new SLES kernels must be added here with >= based on kernel + * the idea is to order from newest to oldest and just catch all + * of them using the >= + */ +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ +#endif /* CONFIG_SUSE_KERNEL */ +#ifndef SLE_VERSION_CODE +#define SLE_VERSION_CODE 0 +#endif /* SLE_VERSION_CODE */ +#ifndef SLE_LOCALVERSION_CODE +#define SLE_LOCALVERSION_CODE 0 +#endif /* SLE_LOCALVERSION_CODE */ + +#ifdef __KLOCWORK__ +/* The following are not compiled into the binary driver; they are here + * only to tune Klocwork scans to workaround false-positive issues. 
+ */ +#ifdef ARRAY_SIZE +#undef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#define memcpy(dest, src, len) memcpy_s(dest, len, src, len) + +static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old & ~mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr) + +static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old | mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr) + +#ifdef CONFIG_DYNAMIC_DEBUG +#undef dev_dbg +#define dev_dbg(dev, format, arg...) 
dev_printk(KERN_DEBUG, dev, format, ##arg) +#endif /* CONFIG_DYNAMIC_DEBUG */ + +#endif /* __KLOCWORK__ */ + +/*****************************************************************************/ +/* 2.4.3 => 2.4.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) + +/**************************************/ +/* PCI DRIVER API */ + +#ifndef pci_set_dma_mask +#define pci_set_dma_mask _kc_pci_set_dma_mask +extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask); +#endif + +#ifndef pci_request_regions +#define pci_request_regions _kc_pci_request_regions +extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name); +#endif + +#ifndef pci_release_regions +#define pci_release_regions _kc_pci_release_regions +extern void _kc_pci_release_regions(struct pci_dev *pdev); +#endif + +/**************************************/ +/* NETWORK DRIVER API */ + +#ifndef alloc_etherdev +#define alloc_etherdev _kc_alloc_etherdev +extern struct net_device * _kc_alloc_etherdev(int sizeof_priv); +#endif + +#ifndef is_valid_ether_addr +#define is_valid_ether_addr _kc_is_valid_ether_addr +extern int _kc_is_valid_ether_addr(u8 *addr); +#endif + +/**************************************/ +/* MISCELLANEOUS */ + +#ifndef INIT_TQUEUE +#define INIT_TQUEUE(_tq, _routine, _data) \ + do { \ + INIT_LIST_HEAD(&(_tq)->list); \ + (_tq)->sync = 0; \ + (_tq)->routine = _routine; \ + (_tq)->data = _data; \ + } while (0) +#endif + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) ) +/* Generic MII registers. 
*/ +#define MII_BMCR 0x00 /* Basic mode control register */ +#define MII_BMSR 0x01 /* Basic mode status register */ +#define MII_PHYSID1 0x02 /* PHYS ID 1 */ +#define MII_PHYSID2 0x03 /* PHYS ID 2 */ +#define MII_ADVERTISE 0x04 /* Advertisement control reg */ +#define MII_LPA 0x05 /* Link partner ability reg */ +#define MII_EXPANSION 0x06 /* Expansion register */ +/* Basic mode control register. */ +#define BMCR_FULLDPLX 0x0100 /* Full duplex */ +#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ +/* Basic mode status register. */ +#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ +#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ +#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ +#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ +#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ +#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */ +/* Advertisement control register. */ +#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ +#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ +#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ +#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ +#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ +#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \ + ADVERTISE_100HALF | ADVERTISE_100FULL) +/* Expansion register for auto-negotiation. 
*/ +#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ +#endif + +/*****************************************************************************/ +/* 2.4.6 => 2.4.3 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +#ifndef pci_set_power_state +#define pci_set_power_state _kc_pci_set_power_state +extern int _kc_pci_set_power_state(struct pci_dev *dev, int state); +#endif + +#ifndef pci_enable_wake +#define pci_enable_wake _kc_pci_enable_wake +extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); +#endif + +#ifndef pci_disable_device +#define pci_disable_device _kc_pci_disable_device +extern void _kc_pci_disable_device(struct pci_dev *pdev); +#endif + +/* PCI PM entry point syntax changed, so don't support suspend/resume */ +#undef CONFIG_PM + +#endif /* 2.4.6 => 2.4.3 */ + +#ifndef HAVE_PCI_SET_MWI +#define pci_set_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \ + PCI_COMMAND_INVALIDATE); +#define pci_clear_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \ + ~PCI_COMMAND_INVALIDATE); +#endif + +/*****************************************************************************/ +/* 2.4.10 => 2.4.9 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) ) + +/**************************************/ +/* MODULE API */ + +#ifndef MODULE_LICENSE + #define MODULE_LICENSE(X) +#endif + +/**************************************/ +/* OTHER */ + +#undef min +#define min(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x < _y ? _x : _y; }) + +#undef max +#define max(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x > _y ? _x : _y; }) + +#define min_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x < _y ? _x : _y; }) + +#define max_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x > _y ? 
_x : _y; }) + +#ifndef list_for_each_safe +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) +#endif + +#ifndef ____cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_aligned_in_smp ____cacheline_aligned +#else +#define ____cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) +extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); +#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args) +extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) +#else /* 2.4.8 => 2.4.9 */ +extern int snprintf(char * buf, size_t size, const char *fmt, ...); +extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#endif +#endif /* 2.4.10 -> 2.4.6 */ + + +/*****************************************************************************/ +/* 2.4.12 => 2.4.10 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) ) +#ifndef HAVE_NETIF_MSG +#define HAVE_NETIF_MSG 1 +enum { + NETIF_MSG_DRV = 0x0001, + NETIF_MSG_PROBE = 0x0002, + NETIF_MSG_LINK = 0x0004, + NETIF_MSG_TIMER = 0x0008, + NETIF_MSG_IFDOWN = 0x0010, + NETIF_MSG_IFUP = 0x0020, + NETIF_MSG_RX_ERR = 0x0040, + NETIF_MSG_TX_ERR = 0x0080, + NETIF_MSG_TX_QUEUED = 0x0100, + NETIF_MSG_INTR = 0x0200, + NETIF_MSG_TX_DONE = 0x0400, + NETIF_MSG_RX_STATUS = 0x0800, + NETIF_MSG_PKTDATA = 0x1000, + NETIF_MSG_HW = 0x2000, + NETIF_MSG_WOL = 0x4000, +}; + +#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) +#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) +#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) +#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) +#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) +#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) +#define 
netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) +#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) +#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) +#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) +#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) +#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) +#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) +#endif /* !HAVE_NETIF_MSG */ +#endif /* 2.4.12 => 2.4.10 */ + +/*****************************************************************************/ +/* 2.4.13 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#ifndef virt_to_page + #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) +#endif + +#ifndef pci_map_page +#define pci_map_page _kc_pci_map_page +extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); +#endif + +#ifndef pci_unmap_page +#define pci_unmap_page _kc_pci_unmap_page +extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); +#endif + +/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ + +#undef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0xffffffff +#undef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffff + +/**************************************/ +/* OTHER */ + +#ifndef cpu_relax +#define cpu_relax() rep_nop() +#endif + +struct vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + unsigned short h_vlan_proto; + unsigned short h_vlan_TCI; + unsigned short h_vlan_encapsulated_proto; +}; +#endif /* 2.4.13 => 2.4.12 */ + +/*****************************************************************************/ +/* 2.4.17 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) ) + +#ifndef __devexit_p + #define __devexit_p(x) &(x) +#endif + +#endif 
/* 2.4.17 => 2.4.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) ) +#define NETIF_MSG_HW 0x2000 +#define NETIF_MSG_WOL 0x4000 + +#ifndef netif_msg_hw +#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) +#endif +#ifndef netif_msg_wol +#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) +#endif +#endif /* 2.4.18 */ + +/*****************************************************************************/ + +/*****************************************************************************/ +/* 2.4.20 => 2.4.19 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) + +/* we won't support NAPI on less than 2.4.20 */ +#ifdef NAPI +#undef NAPI +#endif + +#endif /* 2.4.20 => 2.4.19 */ + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#define pci_name(x) ((x)->slot_name) + +#ifndef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full (1 << 12) +#endif +#ifndef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full (1 << 12) +#endif +#endif + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#endif + +/*****************************************************************************/ +/*****************************************************************************/ +/* 2.4.23 => 2.4.22 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) +/*****************************************************************************/ +#ifdef NAPI +#ifndef netif_poll_disable +#define netif_poll_disable(x) _kc_netif_poll_disable(x) +static inline void _kc_netif_poll_disable(struct net_device *netdev) +{ + while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { + /* No hurry */ + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1); + } +} +#endif 
+#ifndef netif_poll_enable +#define netif_poll_enable(x) _kc_netif_poll_enable(x) +static inline void _kc_netif_poll_enable(struct net_device *netdev) +{ + clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); +} +#endif +#endif /* NAPI */ +#ifndef netif_tx_disable +#define netif_tx_disable(x) _kc_netif_tx_disable(x) +static inline void _kc_netif_tx_disable(struct net_device *dev) +{ + spin_lock_bh(&dev->xmit_lock); + netif_stop_queue(dev); + spin_unlock_bh(&dev->xmit_lock); +} +#endif +#else /* 2.4.23 => 2.4.22 */ +#define HAVE_SCTP +#endif /* 2.4.23 => 2.4.22 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ) +#define ETHTOOL_OPS_COMPAT +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) +#define __user +#endif /* < 2.4.27 */ + +/*****************************************************************************/ +/* 2.5.71 => 2.4.x */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) +#define sk_protocol protocol +#define pci_get_device pci_find_device +#endif /* 2.5.70 => 2.4.x */ + +/*****************************************************************************/ +/* < 2.4.27 or 2.6.0 <= 2.6.5 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ) + +#ifndef netif_msg_init +#define netif_msg_init _kc_netif_msg_init +static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits) +{ + /* use default */ + if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) + return default_msg_enable_bits; + if (debug_value == 0) /* no output */ + return 0; + /* set low N bits */ + return (1 << debug_value) -1; +} +#endif + +#endif /* < 
2.4.27 or 2.6.0 <= 2.6.5 */ +/*****************************************************************************/ +#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \ + (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \ + ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ))) +#define netdev_priv(x) x->priv +#endif + +/*****************************************************************************/ +/* <= 2.5.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) ) +#include +#undef pci_register_driver +#define pci_register_driver pci_module_init + +/* + * Most of the dma compat code is copied/modifed from the 2.4.37 + * /include/linux/libata-compat.h header file + */ +/* These definitions mirror those in pci.h, so they can be used + * interchangeably with their PCI_ counterparts */ +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +struct device { + struct pci_dev pdev; +}; + +static inline struct pci_dev *to_pci_dev (struct device *dev) +{ + return (struct pci_dev *) dev; +} +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return (struct device *) pdev; +} +#define pdev_printk(lvl, pdev, fmt, args...) \ + printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) +#define dev_err(dev, fmt, args...) \ + pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) +#define dev_info(dev, fmt, args...) \ + pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) +#define dev_warn(dev, fmt, args...) \ + pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) +#define dev_notice(dev, fmt, args...) \ + pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args) +#define dev_dbg(dev, fmt, args...) \ + pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args) + +/* NOTE: dangerous! 
we ignore the 'gfp' argument */ +#define dma_alloc_coherent(dev,sz,dma,gfp) \ + pci_alloc_consistent(to_pci_dev(dev),(sz),(dma)) +#define dma_free_coherent(dev,sz,addr,dma_addr) \ + pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr)) + +#define dma_map_page(dev,a,b,c,d) \ + pci_map_page(to_pci_dev(dev),(a),(b),(c),(d)) +#define dma_unmap_page(dev,a,b,c) \ + pci_unmap_page(to_pci_dev(dev),(a),(b),(c)) + +#define dma_map_single(dev,a,b,c) \ + pci_map_single(to_pci_dev(dev),(a),(b),(c)) +#define dma_unmap_single(dev,a,b,c) \ + pci_unmap_single(to_pci_dev(dev),(a),(b),(c)) + +#define dma_map_sg(dev, sg, nents, dir) \ + pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir) +#define dma_unmap_sg(dev, sg, nents, dir) \ + pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir) + +#define dma_sync_single(dev,a,b,c) \ + pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c)) + +/* for range just sync everything, that's all the pci API can do */ +#define dma_sync_single_range(dev,addr,off,sz,dir) \ + pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir)) + +#define dma_set_mask(dev,mask) \ + pci_set_dma_mask(to_pci_dev(dev),(mask)) + +/* hlist_* code - double linked lists */ +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +static inline void __hlist_del(struct hlist_node *n) +{ + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; + *pprev = next; + if (next) + next->pprev = pprev; +} + +static inline void hlist_del(struct hlist_node *n) +{ + __hlist_del(n); + n->next = NULL; + n->pprev = NULL; +} + +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + n->next = first; + if (first) + first->pprev = &n->next; + h->first = n; + n->pprev = &h->first; +} + +static inline int hlist_empty(const struct hlist_head *h) +{ + return !h->first; +} +#define HLIST_HEAD_INIT { .first = NULL } +#define HLIST_HEAD(name) struct 
hlist_head name = { .first = NULL } +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) +static inline void INIT_HLIST_NODE(struct hlist_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} + +#ifndef might_sleep +#define might_sleep() +#endif +#else +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} +#endif /* <= 2.5.0 */ + +/*****************************************************************************/ +/* 2.5.28 => 2.4.23 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + +#include +#define work_struct tq_struct +#undef INIT_WORK +#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a) +#undef container_of +#define container_of list_entry +#define schedule_work schedule_task +#define flush_scheduled_work flush_scheduled_tasks +#define cancel_work_sync(x) flush_scheduled_work() + +#endif /* 2.5.28 => 2.4.17 */ + +/*****************************************************************************/ +/* 2.6.0 => 2.5.28 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#ifndef read_barrier_depends +#define read_barrier_depends() rmb() +#endif + +#ifndef rcu_head +struct __kc_callback_head { + struct __kc_callback_head *next; + void (*func)(struct callback_head *head); +}; +#define rcu_head __kc_callback_head +#endif + +#undef get_cpu +#define get_cpu() smp_processor_id() +#undef put_cpu +#define put_cpu() do { } while(0) +#define MODULE_INFO(version, _version) +#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT +#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT +#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef CONFIG_IGC_DISABLE_PACKET_SPLIT +#define CONFIG_IGC_DISABLE_PACKET_SPLIT 1 +#endif + +#define dma_set_coherent_mask(dev,mask) 1 + +#undef dev_put +#define dev_put(dev) __dev_put(dev) + +#ifndef skb_fill_page_desc +#define skb_fill_page_desc _kc_skb_fill_page_desc +extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); 
+#endif + +#undef ALIGN +#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) + +#ifndef page_count +#define page_count(p) atomic_read(&(p)->count) +#endif + +#ifdef MAX_NUMNODES +#undef MAX_NUMNODES +#endif +#define MAX_NUMNODES 1 + +/* find_first_bit and find_next bit are not defined for most + * 2.4 kernels (except for the redhat 2.4.21 kernels + */ +#include +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#undef find_next_bit +#define find_next_bit _kc_find_next_bit +extern unsigned long _kc_find_next_bit(const unsigned long *addr, + unsigned long size, + unsigned long offset); +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (strchr(dev->name, '%')) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#ifndef strlcpy +#define strlcpy _kc_strlcpy +extern size_t _kc_strlcpy(char *dest, const char *src, size_t size); +#endif /* strlcpy */ + +#ifndef do_div +#if BITS_PER_LONG == 64 +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) +#elif BITS_PER_LONG == 32 +extern uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + if (likely(((n) >> 32) == 0)) { \ + __rem = (uint32_t)(n) % __base; \ + (n) = (uint32_t)(n) / __base; \ + } else \ + __rem = _kc__div64_32(&(n), __base); \ + __rem; \ + }) +#else /* BITS_PER_LONG == ?? 
*/ +# error do_div() does not yet support the C64 +#endif /* BITS_PER_LONG */ +#endif /* do_div */ + +#ifndef NSEC_PER_SEC +#define NSEC_PER_SEC 1000000000L +#endif + +#undef HAVE_I2C_SUPPORT +#else /* 2.6.0 */ + +#endif /* 2.6.0 => 2.5.28 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) +#define dma_pool pci_pool +#define dma_pool_destroy pci_pool_destroy +#define dma_pool_alloc pci_pool_alloc +#define dma_pool_free pci_pool_free + +#define dma_pool_create(name,dev,size,align,allocation) \ + pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) +#endif /* < 2.6.3 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +#define MODULE_VERSION(_version) MODULE_INFO(version, _version) +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +/* 2.6.5 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) +#define dma_sync_single_for_cpu dma_sync_single +#define dma_sync_single_for_device dma_sync_single +#define dma_sync_single_range_for_cpu dma_sync_single_range +#define dma_sync_single_range_for_device dma_sync_single_range +#ifndef pci_dma_mapping_error +#define pci_dma_mapping_error _kc_pci_dma_mapping_error +static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) +{ + return dma_addr == 0; +} +#endif +#endif /* 2.6.5 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); +#define scnprintf(buf, size, fmt, args...) 
_kc_scnprintf(buf, size, fmt, ##args) +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) +/* taken from 2.6 include/linux/bitmap.h */ +#undef bitmap_zero +#define bitmap_zero _kc_bitmap_zero +static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) +{ + if (nbits <= BITS_PER_LONG) + *dst = 0UL; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} +#define page_to_nid(x) 0 + +#endif /* < 2.6.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) ) +#undef if_mii +#define if_mii _kc_if_mii +static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) +{ + return (struct mii_ioctl_data *) &rq->ifr_ifru; +} + +#ifndef __force +#define __force +#endif +#endif /* < 2.6.7 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +#ifndef PCI_EXP_DEVCTL +#define PCI_EXP_DEVCTL 8 +#endif +#ifndef PCI_EXP_DEVCTL_CERE +#define PCI_EXP_DEVCTL_CERE 0x0001 +#endif +#define PCI_EXP_FLAGS 2 /* Capabilities register */ +#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ +#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ +#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ +#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ +#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ +#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ +#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ +#define PCI_EXP_DEVCAP 4 /* Device capabilities */ +#define PCI_EXP_DEVSTA 10 /* Device Status */ +#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \ + schedule_timeout((x * HZ)/1000 + 2); \ + } while (0) + +#endif /* < 2.6.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(2,6,9)) +#include +#define __iomem + +#ifndef kcalloc +#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) +extern void *_kc_kzalloc(size_t size, int flags); +#endif +#define MSEC_PER_SEC 1000L +static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) +{ +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); +#else + return (j * MSEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return m * (HZ / MSEC_PER_SEC); +#else + return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; +#endif +} + +#define msleep_interruptible _kc_msleep_interruptible +static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) +{ + unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; + + while (timeout && !signal_pending(current)) { + __set_current_state(TASK_INTERRUPTIBLE); + timeout = schedule_timeout(timeout); + } + return _kc_jiffies_to_msecs(timeout); +} + +/* Basic mode control register. */ +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ + +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 +#endif +#ifndef __be16 +#define __be16 u16 +#endif +#ifndef __be32 +#define __be32 u32 +#endif +#ifndef __be64 +#define __be64 u64 +#endif + +static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) +{ + return (struct vlan_ethhdr *)skb->mac.raw; +} + +/* Wake-On-Lan options. 
*/ +#define WAKE_PHY (1 << 0) +#define WAKE_UCAST (1 << 1) +#define WAKE_MCAST (1 << 2) +#define WAKE_BCAST (1 << 3) +#define WAKE_ARP (1 << 4) +#define WAKE_MAGIC (1 << 5) +#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ + +#define skb_header_pointer _kc_skb_header_pointer +static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, + int offset, int len, void *buffer) +{ + int hlen = skb_headlen(skb); + + if (hlen - offset >= len) + return skb->data + offset; + +#ifdef MAX_SKB_FRAGS + if (skb_copy_bits(skb, offset, buffer, len) < 0) + return NULL; + + return buffer; +#else + return NULL; +#endif + +#ifndef NETDEV_TX_OK +#define NETDEV_TX_OK 0 +#endif +#ifndef NETDEV_TX_BUSY +#define NETDEV_TX_BUSY 1 +#endif +#ifndef NETDEV_TX_LOCKED +#define NETDEV_TX_LOCKED -1 +#endif +} + +#ifndef __bitwise +#define __bitwise +#endif +#endif /* < 2.6.9 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +#ifdef module_param_array_named +#undef module_param_array_named +#define module_param_array_named(name, array, type, nump, perm) \ + static struct kparam_array __param_arr_##name \ + = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ + sizeof(array[0]), array }; \ + module_param_call(name, param_array_set, param_array_get, \ + &__param_arr_##name, perm) +#endif /* module_param_array_named */ +/* + * num_online is broken for all < 2.6.10 kernels. This is needed to support + * Node module parameter of ixgbe. 
+ */ +#undef num_online_nodes +#define num_online_nodes(n) 1 +extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); +#undef node_online_map +#define node_online_map _kcompat_node_online_map +#define pci_get_class pci_find_class +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) +#define PCI_D0 0 +#define PCI_D1 1 +#define PCI_D2 2 +#define PCI_D3hot 3 +#define PCI_D3cold 4 +typedef int pci_power_t; +#define pci_choose_state(pdev,state) state +#define PMSG_SUSPEND 3 +#define PCI_EXP_LNKCTL 16 + +#undef NETIF_F_LLTX + +#ifndef ARCH_HAS_PREFETCH +#define prefetch(X) +#endif + +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif + +#define KC_USEC_PER_SEC 1000000L +#define usecs_to_jiffies _kc_usecs_to_jiffies +static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) +{ +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (KC_USEC_PER_SEC / HZ) * j; +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); +#else + return (j * KC_USEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return m * (HZ / KC_USEC_PER_SEC); +#else + return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; +#endif +} + +#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ +#define PCI_EXP_LNKSTA 18 /* Link Status */ +#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ +#define PCI_EXP_SLTCTL 24 /* Slot Control */ +#define PCI_EXP_SLTSTA 26 /* Slot Status */ +#define PCI_EXP_RTCTL 28 /* Root Control */ +#define PCI_EXP_RTCAP 30 /* Root Capabilities */ +#define PCI_EXP_RTSTA 32 /* Root Status */ 
+#endif /* < 2.6.11 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) +#include +#define USE_REBOOT_NOTIFIER + +/* Generic MII registers. */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +/* Advertisement control register. */ +#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ +#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ +/* Link partner ability register. */ +#define LPA_PAUSE_CAP 0x0400 /* Can pause */ +#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ +/* 1000BASE-T Control register */ +#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +/* 1000BASE-T Status register */ +#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ +#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ + +#ifndef is_zero_ether_addr +#define is_zero_ether_addr _kc_is_zero_ether_addr +static inline int _kc_is_zero_ether_addr(const u8 *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} +#endif /* is_zero_ether_addr */ +#ifndef is_multicast_ether_addr +#define is_multicast_ether_addr _kc_is_multicast_ether_addr +static inline int _kc_is_multicast_ether_addr(const u8 *addr) +{ + return addr[0] & 0x01; +} +#endif /* is_multicast_ether_addr */ +#endif /* < 2.6.12 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +#ifndef kstrdup +#define kstrdup _kc_kstrdup +extern char *_kc_kstrdup(const char *s, unsigned int gfp); +#endif +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +#define pm_message_t u32 +#ifndef kzalloc +#define kzalloc _kc_kzalloc +extern void 
*_kc_kzalloc(size_t size, int flags); +#endif + +/* Generic MII registers. */ +#define MII_ESTATUS 0x0f /* Extended Status */ +/* Basic mode status register. */ +#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ +/* Extended status register. */ +#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ +#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ + +#define SUPPORTED_Pause (1 << 13) +#define SUPPORTED_Asym_Pause (1 << 14) +#define ADVERTISED_Pause (1 << 13) +#define ADVERTISED_Asym_Pause (1 << 14) + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) +#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) +#define gfp_t unsigned +#else +typedef unsigned gfp_t; +#endif +#endif /* !RHEL4.3->RHEL5.0 */ + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) +#ifdef CONFIG_X86_64 +#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ + dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) +#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ + dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) +#endif +#endif +#endif /* < 2.6.14 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) +#ifndef kfree_rcu +/* this is placed here due to a lack of rcu_barrier in previous kernels */ +#define kfree_rcu(_ptr, _offset) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef vmalloc_node +#define vmalloc_node(a,b) vmalloc(a) +#endif /* vmalloc_node*/ + +#define setup_timer(_timer, _function, _data) \ +do { \ + (_timer)->function = _function; \ + (_timer)->data = _data; \ + init_timer(_timer); \ +} while (0) +#ifndef device_can_wakeup +#define device_can_wakeup(dev) (1) +#endif +#ifndef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) do{}while(0) +#endif +#ifndef device_init_wakeup +#define device_init_wakeup(dev,val) do {} 
while (0) +#endif +static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) +{ + const u16 *a = (const u16 *) addr1; + const u16 *b = (const u16 *) addr2; + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; +} +#undef compare_ether_addr +#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) +#endif /* < 2.6.15 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) +#undef DEFINE_MUTEX +#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) +#define mutex_lock(x) down_interruptible(x) +#define mutex_unlock(x) up(x) + +#ifndef ____cacheline_internodealigned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp +#else +#define ____cacheline_internodealigned_in_smp +#endif /* CONFIG_SMP */ +#endif /* ____cacheline_internodealigned_in_smp */ +#undef HAVE_PCI_ERS +#else /* 2.6.16 and above */ +#undef HAVE_PCI_ERS +#define HAVE_PCI_ERS +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) +#ifdef device_can_wakeup +#undef device_can_wakeup +#endif /* device_can_wakeup */ +#define device_can_wakeup(dev) 1 +#endif /* SLE_VERSION(10,4,0) */ +#endif /* < 2.6.16 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) +#ifndef dev_notice +#define dev_notice(dev, fmt, args...) 
\ + dev_printk(KERN_NOTICE, dev, fmt, ## args) +#endif + +#ifndef first_online_node +#define first_online_node 0 +#endif +#ifndef NET_SKB_PAD +#define NET_SKB_PAD 16 +#endif +#endif /* < 2.6.17 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) + +#ifndef IRQ_HANDLED +#define irqreturn_t void +#define IRQ_HANDLED +#define IRQ_NONE +#endif + +#ifndef IRQF_PROBE_SHARED +#ifdef SA_PROBEIRQ +#define IRQF_PROBE_SHARED SA_PROBEIRQ +#else +#define IRQF_PROBE_SHARED 0 +#endif +#endif + +#ifndef IRQF_SHARED +#define IRQF_SHARED SA_SHIRQ +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef FIELD_SIZEOF +#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) +#endif + +#ifndef skb_is_gso +#ifdef NETIF_F_TSO +#define skb_is_gso _kc_skb_is_gso +static inline int _kc_skb_is_gso(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} +#else +#define skb_is_gso(a) 0 +#endif +#endif + +#ifndef resource_size_t +#define resource_size_t unsigned long +#endif + +#ifdef skb_pad +#undef skb_pad +#endif +#define skb_pad(x,y) _kc_skb_pad(x, y) +int _kc_skb_pad(struct sk_buff *skb, int pad); +#ifdef skb_padto +#undef skb_padto +#endif +#define skb_padto(x,y) _kc_skb_padto(x, y) +static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if(likely(size >= len)) + return 0; + return _kc_skb_pad(skb, len - size); +} + +#ifndef DECLARE_PCI_UNMAP_ADDR +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + u32 LEN_NAME +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) +#endif /* DECLARE_PCI_UNMAP_ADDR */ +#endif /* < 
2.6.18 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0x00, + PCIE_LNK_X1 = 0x01, + PCIE_LNK_X2 = 0x02, + PCIE_LNK_X4 = 0x04, + PCIE_LNK_X8 = 0x08, + PCIE_LNK_X12 = 0x0C, + PCIE_LNK_X16 = 0x10, + PCIE_LNK_X32 = 0x20, + PCIE_LNK_WIDTH_UNKNOWN = 0xFF, +}; + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0))) +#define i_private u.generic_ip +#endif /* >= RHEL 5.0 */ + +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#endif +#ifndef __ALIGN_MASK +#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#endif +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) +#if (!((RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))))) +typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); +#endif +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#undef CONFIG_INET_LRO +#undef CONFIG_INET_LRO_MODULE +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#endif +typedef irqreturn_t (*new_handler_t)(int, void*); +static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#else /* 2.4.x */ +typedef void (*irq_handler_t)(int, void*, struct pt_regs *); +typedef void (*new_handler_t)(int, void*); +static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#endif /* >= 2.5.x */ +{ + irq_handler_t new_handler = (irq_handler_t) handler; + return request_irq(irq, new_handler, flags, devname, dev_id); +} + +#undef request_irq +#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) + +#define irq_handler_t new_handler_t +/* 
pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +#define PCIE_CONFIG_SPACE_LEN 256 +#define PCI_CONFIG_SPACE_LEN 64 +#define PCIE_LINK_STATUS 0x12 +#define pci_config_space_ich8lan() do {} while(0) +#undef pci_save_state +extern int _kc_pci_save_state(struct pci_dev *); +#define pci_save_state(pdev) _kc_pci_save_state(pdev) +#undef pci_restore_state +extern void _kc_pci_restore_state(struct pci_dev *); +#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +#undef free_netdev +extern void _kc_free_netdev(struct net_device *); +#define free_netdev(netdev) _kc_free_netdev(netdev) +#endif +static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev) +{ + return 0; +} +#define pci_disable_pcie_error_reporting(dev) do {} while (0) +#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) + +extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); +#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) +#ifndef bool +#define bool _Bool +#define true 1 +#define false 0 +#endif +#else /* 2.6.19 */ +#include +#include + +#define NEW_SKB_CSUM_HELP +#endif /* < 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) ) +#undef INIT_WORK +#define INIT_WORK(_work, _func) \ +do { \ + INIT_LIST_HEAD(&(_work)->entry); \ + (_work)->pending = 0; \ + (_work)->func = (void (*)(void *))_func; \ + (_work)->data = _work; \ + init_timer(&(_work)->timer); \ +} while (0) +#endif + +#ifndef PCI_VDEVICE +#define PCI_VDEVICE(ven, dev) \ + PCI_VENDOR_ID_##ven, (dev), \ + PCI_ANY_ID, PCI_ANY_ID, 0, 0 +#endif + +#ifndef PCI_VENDOR_ID_INTEL +#define PCI_VENDOR_ID_INTEL 0x8086 +#endif + +#ifndef round_jiffies +#define round_jiffies(x) x 
+#endif + +#define csum_offset csum + +#define HAVE_EARLY_VMALLOC_NODE +#define dev_to_node(dev) -1 +#undef set_dev_node +/* remove compiler warning with b=b, for unused variable */ +#define set_dev_node(a, b) do { (b) = (b); } while(0) + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +typedef __u16 __bitwise __sum16; +typedef __u32 __bitwise __wsum; +#endif + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +static inline __wsum csum_unfold(__sum16 n) +{ + return (__force __wsum)n; +} +#endif + +#else /* < 2.6.20 */ +#define HAVE_DEVICE_NUMA_NODE +#endif /* < 2.6.20 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define to_net_dev(class) container_of(class, struct net_device, class_dev) +#define NETDEV_CLASS_DEV +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) +#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) +#define vlan_group_set_device(vg, id, dev) \ + do { \ + if (vg) vg->vlan_devices[id] = dev; \ + } while (0) +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ +#define pci_channel_offline(pdev) (pdev->error_state && \ + pdev->error_state != pci_channel_io_normal) +#define pci_request_selected_regions(pdev, bars, name) \ + pci_request_regions(pdev, name) +#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); + +#ifndef __aligned +#define __aligned(x) __attribute__((aligned(x))) +#endif + +extern struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); 
+#define netdev_to_dev(netdev) \ + pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) +#define devm_kzalloc(dev, size, flags) kzalloc(size, flags) +#define devm_kfree(dev, p) kfree(p) +#else /* 2.6.21 */ +static inline struct device *netdev_to_dev(struct net_device *netdev) +{ + return &netdev->dev; +} + +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define tcp_hdr(skb) (skb->h.th) +#define tcp_hdrlen(skb) (skb->h.th->doff << 2) +#define skb_transport_offset(skb) (skb->h.raw - skb->data) +#define skb_transport_header(skb) (skb->h.raw) +#define ipv6_hdr(skb) (skb->nh.ipv6h) +#define ip_hdr(skb) (skb->nh.iph) +#define skb_network_offset(skb) (skb->nh.raw - skb->data) +#define skb_network_header(skb) (skb->nh.raw) +#define skb_tail_pointer(skb) skb->tail +#define skb_reset_tail_pointer(skb) \ + do { \ + skb->tail = skb->data; \ + } while (0) +#define skb_set_tail_pointer(skb, offset) \ + do { \ + skb->tail = skb->data + offset; \ + } while (0) +#define skb_copy_to_linear_data(skb, from, len) \ + memcpy(skb->data, from, len) +#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ + memcpy(skb->data + offset, from, len) +#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) +#define pci_register_driver pci_module_init +#define skb_mac_header(skb) skb->mac.raw + +#ifdef NETIF_F_MULTI_QUEUE +#ifndef alloc_etherdev_mq +#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) +#endif +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef ETH_FCS_LEN +#define ETH_FCS_LEN 4 +#endif +#define cancel_work_sync(x) flush_scheduled_work() +#ifndef udp_hdr +#define udp_hdr _udp_hdr +static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) +{ + return (struct udphdr *)skb_transport_header(skb); +} +#endif + +#ifdef cpu_to_be16 +#undef cpu_to_be16 +#endif +#define cpu_to_be16(x) __constant_htons(x) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > 
RHEL_RELEASE_VERSION(5,1))) +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET +}; +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ +#ifndef hex_asc +#define hex_asc(x) "0123456789abcdef"[x] +#endif +#include +extern void _kc_print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ + _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) +#ifndef ADVERTISED_2500baseX_Full +#define ADVERTISED_2500baseX_Full (1 << 15) +#endif +#ifndef SUPPORTED_2500baseX_Full +#define SUPPORTED_2500baseX_Full (1 << 15) +#endif + +#ifndef ETH_P_PAUSE +#define ETH_P_PAUSE 0x8808 +#endif + +static inline int compound_order(struct page *page) +{ + return 0; +} + +#ifndef SKB_WITH_OVERHEAD +#define SKB_WITH_OVERHEAD(X) \ + ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#endif +#else /* 2.6.22 */ +#define ETH_TYPE_TRANS_SETS_DEV +#define HAVE_NETDEV_STATS_IN_NETDEV +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) +#undef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do { } while (0) +#endif /* > 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) +#define netif_subqueue_stopped(_a, _b) 0 +#ifndef PTR_ALIGN +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#endif + +#ifndef CONFIG_PM_SLEEP +#define CONFIG_PM_SLEEP CONFIG_PM +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) ) +#define HAVE_ETHTOOL_GET_PERM_ADDR +#endif /* 2.6.14 through 2.6.22 */ + +static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom) +{ + int delta = 0; + + if (headroom > (skb->data - skb->head)) + delta = headroom - (skb->data - skb->head); + + if (delta || skb_header_cloned(skb)) + return 
pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, + GFP_ATOMIC); + return 0; +} +#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h)) +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifndef ETH_FLAG_LRO +#define ETH_FLAG_LRO NETIF_F_LRO +#endif + +#ifndef ACCESS_ONCE +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) +#endif + +/* if GRO is supported then the napi struct must already exist */ +#ifndef NETIF_F_GRO +/* NAPI API changes in 2.6.24 break everything */ +struct napi_struct { + /* used to look up the real NAPI polling routine */ + int (*poll)(struct napi_struct *, int); + struct net_device *dev; + int weight; +}; +#endif + +#ifdef NAPI +extern int __kc_adapter_clean(struct net_device *, int *); +/* The following definitions are multi-queue aware, and thus we have a driver + * define list which determines which drivers support multiple queues, and + * thus need these stronger defines. If a driver does not support multi-queue + * functionality, you don't need to add it to this list. 
+ */ +extern struct net_device *napi_to_poll_dev(const struct napi_struct *napi); + +static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + struct net_device *poll_dev = napi_to_poll_dev(napi); + poll_dev->poll = __kc_adapter_clean; + poll_dev->priv = napi; + poll_dev->weight = weight; + set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); + set_bit(__LINK_STATE_START, &poll_dev->state); + dev_hold(poll_dev); + napi->poll = poll; + napi->weight = weight; + napi->dev = dev; +} +#define netif_napi_add __kc_mq_netif_napi_add + +static inline void __kc_mq_netif_napi_del(struct napi_struct *napi) +{ + struct net_device *poll_dev = napi_to_poll_dev(napi); + WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); + dev_put(poll_dev); + memset(poll_dev, 0, sizeof(struct net_device)); +} + +#define netif_napi_del __kc_mq_netif_napi_del + +static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi) +{ + return netif_running(napi->dev) && + netif_rx_schedule_prep(napi_to_poll_dev(napi)); +} +#define napi_schedule_prep __kc_mq_napi_schedule_prep + +static inline void __kc_mq_napi_schedule(struct napi_struct *napi) +{ + if (napi_schedule_prep(napi)) + __netif_rx_schedule(napi_to_poll_dev(napi)); +} +#define napi_schedule __kc_mq_napi_schedule + +#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi)) +#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi)) +#ifdef CONFIG_SMP +static inline void napi_synchronize(const struct napi_struct *n) +{ + struct net_device *dev = napi_to_poll_dev(n); + + while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { + /* No hurry. 
*/ + msleep(1); + } +} +#else +#define napi_synchronize(n) barrier() +#endif /* CONFIG_SMP */ +#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi)) +static inline void _kc_napi_complete(struct napi_struct *napi) +{ +#ifdef NETIF_F_GRO + napi_gro_flush(napi); +#endif + netif_rx_complete(napi_to_poll_dev(napi)); +} +#define napi_complete _kc_napi_complete +#else /* NAPI */ + +/* The following definitions are only used if we don't support NAPI at all. */ + +static inline __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + dev->poll = poll; + dev->weight = weight; + napi->poll = poll; + napi->weight = weight; + napi->dev = dev; +} +#define netif_napi_del(_a) do {} while (0) +#endif /* NAPI */ + +#undef dev_get_by_name +#define dev_get_by_name(_a, _b) dev_get_by_name(_b) +#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b) +#ifndef DMA_BIT_MASK +#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1)) +#endif + +#ifdef NETIF_F_TSO6 +#define skb_is_gso_v6 _kc_skb_is_gso_v6 +static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; +} +#endif /* NETIF_F_TSO6 */ + +#ifndef KERN_CONT +#define KERN_CONT "" +#endif +#ifndef pr_err +#define pr_err(fmt, arg...) \ + printk(KERN_ERR fmt, ##arg) +#endif + +#ifndef rounddown_pow_of_two +#define rounddown_pow_of_two(n) \ + __builtin_constant_p(n) ? ( \ + (n == 1) ? 
0 : \ + (1UL << ilog2(n))) : \ + (1UL << (fls_long(n) - 1)) +#endif + +#ifndef BIT +#define BIT(nr) (1UL << (nr)) +#endif + +#else /* < 2.6.24 */ +#define HAVE_ETHTOOL_GET_SSET_COUNT +#define HAVE_NETDEV_NAPI_LIST +#endif /* < 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#define INCLUDE_PM_QOS_PARAMS_H +#include +#else /* >= 3.2.0 */ +#include +#endif /* else >= 3.2.0 */ +#endif /* > 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ) +#define PM_QOS_CPU_DMA_LATENCY 1 + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) ) +#include +#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY +#define pm_qos_add_requirement(pm_qos_class, name, value) \ + set_acceptable_latency(name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) \ + remove_acceptable_latency(name) +#define pm_qos_update_requirement(pm_qos_class, name, value) \ + modify_acceptable_latency(name, value) +#else +#define PM_QOS_DEFAULT_VALUE -1 +#define pm_qos_add_requirement(pm_qos_class, name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) +#define pm_qos_update_requirement(pm_qos_class, name, value) { \ + if (value != PM_QOS_DEFAULT_VALUE) { \ + printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \ + pci_name(adapter->pdev)); \ + } \ +} + +#endif /* > 2.6.18 */ + +#define pci_enable_device_mem(pdev) pci_enable_device(pdev) + +#ifndef DEFINE_PCI_DEVICE_TABLE +#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] +#endif /* DEFINE_PCI_DEVICE_TABLE */ + +#ifndef strict_strtol +#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r) +static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res) +{ + /* adapted from strict_strtoul() in 2.6.25 */ + char *tail; + long val; + size_t len; + + *res = 0; 
+ len = strlen(buf); + if (!len) + return -EINVAL; + val = simple_strtol(buf, &tail, base); + if (tail == buf) + return -EINVAL; + if ((*tail == '\0') || + ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) { + *res = val; + return 0; + } + + return -EINVAL; +} +#endif + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) +#ifndef IXGBE_PROCFS +#define IXGBE_PROCFS +#endif /* IXGBE_PROCFS */ +#endif /* >= 2.6.0 */ + +#else /* < 2.6.25 */ + +#ifndef IXGBE_SYSFS +#define IXGBE_SYSFS +#endif /* IXGBE_SYSFS */ +#if IS_ENABLED(CONFIG_HWMON) +#ifndef IXGBE_HWMON +#define IXGBE_HWMON +#endif /* IXGBE_HWMON */ +#endif /* CONFIG_HWMON */ + +#endif /* < 2.6.25 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +#ifndef clamp_t +#define clamp_t(type, val, min, max) ({ \ + type __val = (val); \ + type __min = (min); \ + type __max = (max); \ + __val = __val < __min ? __min : __val; \ + __val > __max ? __max : __val; }) +#endif /* clamp_t */ +#undef kzalloc_node +#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) + +extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state); +#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) +#else /* < 2.6.26 */ +#define NETDEV_CAN_SET_GSO_MAX_SIZE +#include +#define HAVE_NETDEV_VLAN_FEATURES +#ifndef PCI_EXP_LNKCAP_ASPMS +#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ +#endif /* PCI_EXP_LNKCAP_ASPMS */ +#endif /* < 2.6.26 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)speed; + /* ep->speed_hi = (__u16)(speed >> 16); */ +} +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set + +static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + /* no speed_hi before 2.6.27, and probably no 
need for it yet */ + return (__u32)ep->speed; +} +#define ethtool_cmd_speed _kc_ethtool_cmd_speed + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) +#define ANCIENT_PM 1 +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \ + defined(CONFIG_PM_SLEEP)) +#define NEWER_PM 1 +#endif +#if defined(ANCIENT_PM) || defined(NEWER_PM) +#undef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) \ + do { \ + u16 pmc = 0; \ + int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ + if (pm) { \ + pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ + &pmc); \ + } \ + (dev)->power.can_wakeup = !!(pmc >> 11); \ + (dev)->power.should_wakeup = (val && (pmc >> 11)); \ + } while (0) +#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ +#endif /* 2.6.15 through 2.6.27 */ +#ifndef netif_napi_del +#define netif_napi_del(_a) do {} while (0) +#ifdef NAPI +#ifdef CONFIG_NETPOLL +#undef netif_napi_del +#define netif_napi_del(_a) list_del(&(_a)->dev_list); +#endif +#endif +#endif /* netif_napi_del */ +#ifdef dma_mapping_error +#undef dma_mapping_error +#endif +#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE +#define HAVE_TX_MQ +#endif + +#ifndef DMA_ATTR_WEAK_ORDERING +#define DMA_ATTR_WEAK_ORDERING 0 +#endif + +#ifdef HAVE_TX_MQ +extern void _kc_netif_tx_stop_all_queues(struct net_device *); +extern void _kc_netif_tx_wake_all_queues(struct net_device *); +extern void _kc_netif_tx_start_all_queues(struct net_device *); +#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) +#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) +#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) +#undef netif_stop_subqueue +#define netif_stop_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + 
netif_stop_subqueue((_ndev), (_qi)); \ + else \ + netif_stop_queue((_ndev)); \ + } while (0) +#undef netif_start_subqueue +#define netif_start_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_start_subqueue((_ndev), (_qi)); \ + else \ + netif_start_queue((_ndev)); \ + } while (0) +#else /* HAVE_TX_MQ */ +#define netif_tx_stop_all_queues(a) netif_stop_queue(a) +#define netif_tx_wake_all_queues(a) netif_wake_queue(a) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) ) +#define netif_tx_start_all_queues(a) netif_start_queue(a) +#else +#define netif_tx_start_all_queues(a) do {} while (0) +#endif +#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) +#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) +#endif /* HAVE_TX_MQ */ +#ifndef NETIF_F_MULTI_QUEUE +#define NETIF_F_MULTI_QUEUE 0 +#define netif_is_multiqueue(a) 0 +#define netif_wake_subqueue(a, b) +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef __WARN_printf +extern void __kc_warn_slowpath(const char *file, const int line, + const char *fmt, ...) __attribute__((format(printf, 3, 4))); +#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) +#endif /* __WARN_printf */ + +#ifndef WARN +#define WARN(condition, format...) 
({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif /* WARN */ +#undef HAVE_IXGBE_DEBUG_FS +#undef HAVE_IGB_DEBUG_FS +#else /* < 2.6.27 */ +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)(speed & 0xFFFF); + ep->speed_hi = (__u16)(speed >> 16); +} +#define HAVE_TX_MQ +#define HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_DEBUG_FS +#define HAVE_IXGBE_DEBUG_FS +#define HAVE_IGB_DEBUG_FS +#endif /* CONFIG_DEBUG_FS */ +#endif /* < 2.6.27 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) +#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ + pci_resource_len(pdev, bar)) +#define pci_wake_from_d3 _kc_pci_wake_from_d3 +#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep +extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); +extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev); +#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) +#ifndef __skb_queue_head_init +static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} +#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) +#endif + +#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ + +#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ +#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ + +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +#ifndef swap +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) +#endif +#define 
pci_request_selected_regions_exclusive(pdev, bars, name) \ + pci_request_selected_regions(pdev, bars, name) +#ifndef CONFIG_NR_CPUS +#define CONFIG_NR_CPUS 1 +#endif /* CONFIG_NR_CPUS */ +#ifndef pcie_aspm_enabled +#define pcie_aspm_enabled() (1) +#endif /* pcie_aspm_enabled */ + +#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ + +#ifndef PCI_EXP_LNKSTA_CLS +#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ +#endif +#ifndef PCI_EXP_LNKSTA_NLW +#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ +#endif + +#ifndef pci_clear_master +extern void _kc_pci_clear_master(struct pci_dev *dev); +#define pci_clear_master(dev) _kc_pci_clear_master(dev) +#endif + +#ifndef PCI_EXP_LNKCTL_ASPMC +#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ +#endif +#else /* < 2.6.29 */ +#ifndef HAVE_NET_DEVICE_OPS +#define HAVE_NET_DEVICE_OPS +#endif +#ifdef CONFIG_DCB +#define HAVE_PFC_MODE_ENABLE +#endif /* CONFIG_DCB */ +#endif /* < 2.6.29 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) +#define NO_PTP_SUPPORT +#define skb_rx_queue_recorded(a) false +#define skb_get_rx_queue(a) 0 +#define skb_record_rx_queue(a, b) do {} while (0) +#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#ifndef CONFIG_PCI_IOV +#undef pci_enable_sriov +#define pci_enable_sriov(a, b) -ENOTSUPP +#undef pci_disable_sriov +#define pci_disable_sriov(a) do {} while (0) +#endif /* CONFIG_PCI_IOV */ +#ifndef pr_cont +#define pr_cont(fmt, ...) 
\ + printk(KERN_CONT fmt, ##__VA_ARGS__) +#endif /* pr_cont */ +static inline void _kc_synchronize_irq(unsigned int a) +{ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + synchronize_irq(); +#else /* < 2.5.28 */ + synchronize_irq(a); +#endif /* < 2.5.28 */ +} +#undef synchronize_irq +#define synchronize_irq(a) _kc_synchronize_irq(a) + +#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ + +#ifdef nr_cpus_node +#undef nr_cpus_node +#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) +#endif + +#else /* < 2.6.30 */ +#define HAVE_ASPM_QUIRKS +#endif /* < 2.6.30 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) +#define ETH_P_1588 0x88F7 +#define ETH_P_FIP 0x8914 +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc_count) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(uclist, dev) \ + for (uclist = dev->uc_list; uclist; uclist = uclist->next) +#endif +#ifndef PORT_OTHER +#define PORT_OTHER 0xff +#endif +#ifndef MDIO_PHY_ID_PRTAD +#define MDIO_PHY_ID_PRTAD 0x03e0 +#endif +#ifndef MDIO_PHY_ID_DEVAD +#define MDIO_PHY_ID_DEVAD 0x001f +#endif +#ifndef skb_dst +#define skb_dst(s) ((s)->dst) +#endif + +#ifndef SUPPORTED_1000baseKX_Full +#define SUPPORTED_1000baseKX_Full (1 << 17) +#endif +#ifndef SUPPORTED_10000baseKX4_Full +#define SUPPORTED_10000baseKX4_Full (1 << 18) +#endif +#ifndef SUPPORTED_10000baseKR_Full +#define SUPPORTED_10000baseKR_Full (1 << 19) +#endif + +#ifndef ADVERTISED_1000baseKX_Full +#define ADVERTISED_1000baseKX_Full (1 << 17) +#endif +#ifndef ADVERTISED_10000baseKX4_Full +#define ADVERTISED_10000baseKX4_Full (1 << 18) +#endif +#ifndef ADVERTISED_10000baseKR_Full +#define ADVERTISED_10000baseKR_Full (1 << 19) +#endif + +static inline unsigned long dev_trans_start(struct net_device *dev) +{ + return dev->trans_start; +} +#else /* < 2.6.31 */ +#ifndef HAVE_NETDEV_STORAGE_ADDRESS +#define HAVE_NETDEV_STORAGE_ADDRESS 
+#endif +#ifndef HAVE_NETDEV_HW_ADDR +#define HAVE_NETDEV_HW_ADDR +#endif +#ifndef HAVE_TRANS_START_IN_QUEUE +#define HAVE_TRANS_START_IN_QUEUE +#endif +#ifndef HAVE_INCLUDE_LINUX_MDIO_H +#define HAVE_INCLUDE_LINUX_MDIO_H +#endif +#include +#endif /* < 2.6.31 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) ) +#undef netdev_tx_t +#define netdev_tx_t int +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef NETIF_F_FCOE_MTU +#define NETIF_F_FCOE_MTU (1 << 26) +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline int _kc_pm_runtime_get_sync() +{ + return 1; +} +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() +#else /* 2.6.0 => 2.6.32 */ +static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev) +{ + return 1; +} +#ifndef pm_runtime_get_sync +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) +#endif +#endif /* 2.6.0 => 2.6.32 */ +#ifndef pm_runtime_put +#define pm_runtime_put(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_sync +#define pm_runtime_put_sync(dev) do {} while (0) +#endif +#ifndef pm_runtime_resume +#define pm_runtime_resume(dev) do {} while (0) +#endif +#ifndef pm_schedule_suspend +#define pm_schedule_suspend(dev, t) do {} while (0) +#endif +#ifndef pm_runtime_set_suspended +#define pm_runtime_set_suspended(dev) do {} while (0) +#endif +#ifndef pm_runtime_disable +#define pm_runtime_disable(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_noidle +#define pm_runtime_put_noidle(dev) do {} while (0) +#endif +#ifndef pm_runtime_set_active +#define pm_runtime_set_active(dev) do {} while (0) +#endif +#ifndef pm_runtime_enable +#define pm_runtime_enable(dev) do {} while (0) +#endif +#ifndef pm_runtime_get_noresume +#define pm_runtime_get_noresume(dev) do {} while (0) +#endif +#else /* < 2.6.32 */ +#if (RHEL_RELEASE_CODE && \ + 
(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_EXTENDED +#endif /* RHEL >= 6.2 && RHEL < 7.0 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_OPS_EXT +#define HAVE_NDO_SET_FEATURES +#endif /* RHEL >= 6.6 && RHEL < 7.0 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +#define HAVE_NETDEV_OPS_FCOE_ENABLE +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_OPS_GETAPP +#define HAVE_DCBNL_OPS_GETAPP +#endif +#endif /* CONFIG_DCB */ +#include +/* IOV bad DMA target work arounds require at least this kernel rev support */ +#define HAVE_PCIE_TYPE +#endif /* < 2.6.32 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#ifndef pci_pcie_cap +#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) +#endif +#ifndef IPV4_FLOW +#define IPV4_FLOW 0x10 +#endif /* IPV4_FLOW */ +#ifndef IPV6_FLOW +#define IPV6_FLOW 0x11 +#endif /* IPV6_FLOW */ +/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ +#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) ) +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* RHEL6 or SLES11 SP1 */ +#ifndef __percpu +#define __percpu +#endif /* __percpu */ + +#ifndef PORT_DA +#define PORT_DA PORT_OTHER +#endif /* PORT_DA */ +#ifndef PORT_NONE +#define PORT_NONE PORT_OTHER +#endif + +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) 
+#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) +#undef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))))) +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)))) +#define sk_tx_queue_get(_sk) (-1) +#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0) +#endif /* !(RHEL >= 6.2) */ + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_ETHTOOL_SET_PHYS_ID +#define HAVE_ETHTOOL_GET_TS_INFO +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5)) +#define HAVE_ETHTOOL_GSRSSH +#define HAVE_RHEL6_SRIOV_CONFIGURE +#define HAVE_RXFH_NONCONST +#endif /* RHEL > 6.5 */ +#endif /* RHEL >= 6.4 && RHEL < 7.0 */ + +#else /* < 2.6.33 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ 
+#endif /* < 2.6.33 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +extern int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + +#ifndef dev_is_pci +#define dev_is_pci(d) ((d)->bus == &pci_bus_type) +#endif + +#ifndef ETH_FLAG_NTUPLE +#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE +#endif + +#ifndef netdev_mc_count +#define netdev_mc_count(dev) ((dev)->mc_count) +#endif +#ifndef netdev_mc_empty +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#endif +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif +#ifndef netdev_uc_empty +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif +#ifndef dma_set_coherent_mask +#define dma_set_coherent_mask(dev,mask) \ + pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) +#endif +#ifndef pci_dev_run_wake +#define pci_dev_run_wake(pdev) (0) +#endif + +/* netdev logging taken from include/linux/netdevice.h */ +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#undef netdev_printk +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#define netdev_printk(level, netdev, format, args...) 
\ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + printk(level "%s: " format, pci_name(pdev), ##args); \ +} while(0) +#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + struct device *dev = pci_dev_to_dev(pdev); \ + dev_printk(level, dev, "%s: " format, \ + netdev_name(netdev), ##args); \ +} while(0) +#else /* 2.6.21 => 2.6.34 */ +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args) +#endif /* <2.6.0 <2.6.21 <2.6.34 */ +#undef netdev_emerg +#define netdev_emerg(dev, format, args...) \ + netdev_printk(KERN_EMERG, dev, format, ##args) +#undef netdev_alert +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#undef netdev_crit +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#undef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#undef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#undef netdev_notice +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#undef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#undef netdev_dbg +#if defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args); \ +} while (0) +#else /* DEBUG */ +#define netdev_dbg(__dev, format, args...) 
\ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ +}) +#endif /* DEBUG */ + +#undef netif_printk +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#undef netif_emerg +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#undef netif_alert +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#undef netif_crit +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#undef netif_err +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#undef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#undef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#undef netif_info +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) +#undef netif_dbg +#define netif_dbg(priv, type, dev, fmt, args...) 
\ + netif_level(dbg, priv, type, dev, fmt, ##args) + +#ifdef SET_SYSTEM_SLEEP_PM_OPS +#define HAVE_SYSTEM_SLEEP_PM_OPS +#endif + +#ifndef for_each_set_bit +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit */ + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN +#define dma_unmap_addr pci_unmap_addr +#define dma_unmap_addr_set pci_unmap_addr_set +#define dma_unmap_len pci_unmap_len +#define dma_unmap_len_set pci_unmap_len_set +#endif /* DEFINE_DMA_UNMAP_ADDR */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3)) +#ifdef IGB_HWMON +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define sysfs_attr_init(attr) \ + do { \ + static struct lock_class_key __key; \ + (attr)->key = &__key; \ + } while (0) +#else +#define sysfs_attr_init(attr) do {} while (0) +#endif /* CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* IGB_HWMON */ +#endif /* RHEL_RELEASE_CODE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline bool _kc_pm_runtime_suspended() +{ + return false; +} +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() +#else /* 2.6.0 => 2.6.34 */ +static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev) +{ + return false; +} +#ifndef pm_runtime_suspended +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) +#endif +#endif /* 2.6.0 => 2.6.34 */ + +#ifndef pci_bus_speed +/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ +enum _kc_pci_bus_speed { + _KC_PCIE_SPEED_2_5GT = 0x14, + _KC_PCIE_SPEED_5_0GT = 0x15, + _KC_PCIE_SPEED_8_0GT = 0x16, + _KC_PCI_SPEED_UNKNOWN = 0xff, +}; +#define pci_bus_speed _kc_pci_bus_speed +#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT +#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT +#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT +#define PCI_SPEED_UNKNOWN 
_KC_PCI_SPEED_UNKNOWN +#endif /* pci_bus_speed */ + +#else /* < 2.6.34 */ +#define HAVE_SYSTEM_SLEEP_PM_OPS +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif + +#endif /* < 2.6.34 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count); +#define simple_write_to_buffer _kc_simple_write_to_buffer + +#ifndef PCI_EXP_LNKSTA_NLW_SHIFT +#define PCI_EXP_LNKSTA_NLW_SHIFT 4 +#endif + +#ifndef numa_node_id +#define numa_node_id() 0 +#endif +#ifndef numa_mem_id +#define numa_mem_id numa_node_id +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) +#ifdef HAVE_TX_MQ +#include +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); +#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + dev->egress_subqueue_count = txq; + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ +#else /* HAVE_TX_MQ */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused txq) +{ + return 0; +} +#endif /* HAVE_TX_MQ */ +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#ifndef ETH_FLAG_RXHASH +#define ETH_FLAG_RXHASH (1<<28) +#endif /* ETH_FLAG_RXHASH */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) +#define HAVE_IRQ_AFFINITY_HINT +#endif +struct device_node; +#else /* < 2.6.35 */ +#define HAVE_STRUCT_DEVICE_OF_NODE +#define HAVE_PM_QOS_REQUEST_LIST +#define HAVE_IRQ_AFFINITY_HINT +#include +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( 
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); +#define ethtool_op_set_flags _kc_ethtool_op_set_flags +extern u32 _kc_ethtool_op_get_flags(struct net_device *); +#define ethtool_op_get_flags _kc_ethtool_op_get_flags + +enum { + WQ_UNBOUND = 0, + WQ_RESCUER = 0, +}; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#ifdef NET_IP_ALIGN +#undef NET_IP_ALIGN +#endif +#define NET_IP_ALIGN 0 +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + +#ifdef NET_SKB_PAD +#undef NET_SKB_PAD +#endif + +#if (L1_CACHE_BYTES > 32) +#define NET_SKB_PAD L1_CACHE_BYTES +#else +#define NET_SKB_PAD 32 +#endif + +static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) +{ + struct sk_buff *skb; + + skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); + if (skb) { +#if (NET_IP_ALIGN + NET_SKB_PAD) + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); +#endif + skb->dev = dev; + } + return skb; +} + +#ifdef netdev_alloc_skb_ip_align +#undef netdev_alloc_skb_ip_align +#endif +#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) + +#undef netif_level +#define netif_level(level, priv, type, dev, fmt, args...) 
\ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))) +#undef usleep_range +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) +#endif + +#define u64_stats_update_begin(a) do { } while(0) +#define u64_stats_update_end(a) do { } while(0) +#define u64_stats_fetch_begin(a) do { } while(0) +#define u64_stats_fetch_retry_bh(a,b) (0) +#define u64_stats_fetch_begin_bh(a) (0) + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) +#define HAVE_8021P_SUPPORT +#endif + +/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0))) +static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) +{ + return; +} +#endif + +#else /* < 2.6.36 */ + +#define msleep(x) do { if (x > 20) \ + msleep(x); \ + else \ + usleep_range(1000 * x, 2000 * x); \ + } while (0) + +#define HAVE_PM_QOS_REQUEST_ACTIVE +#define HAVE_8021P_SUPPORT +#define HAVE_NDO_GET_STATS64 +#endif /* < 2.6.36 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) +#define HAVE_NON_CONST_PCI_DRIVER_NAME +#ifndef netif_set_real_num_tx_queues +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + netif_set_real_num_tx_queues(dev, txq); + return 0; +} +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif +#ifndef netif_set_real_num_rx_queues +static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused rxq) +{ + return 0; +} +#define netif_set_real_num_rx_queues(dev, rxq) \ + __kc_netif_set_real_num_rx_queues((dev), (rxq)) +#endif +#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) +#endif 
+#ifndef VLAN_N_VID +#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN +#endif /* VLAN_N_VID */ +#ifndef ETH_FLAG_TXVLAN +#define ETH_FLAG_TXVLAN (1 << 7) +#endif /* ETH_FLAG_TXVLAN */ +#ifndef ETH_FLAG_RXVLAN +#define ETH_FLAG_RXVLAN (1 << 8) +#endif /* ETH_FLAG_RXVLAN */ + +#define WQ_MEM_RECLAIM WQ_RESCUER + +static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) +{ + WARN_ON(skb->ip_summed != CHECKSUM_NONE); +} +#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) + +static inline void *_kc_vzalloc_node(unsigned long size, int node) +{ + void *addr = vmalloc_node(size, node); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) + +static inline void *_kc_vzalloc(unsigned long size) +{ + void *addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc(_size) _kc_vzalloc(_size) + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \ + (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0))) +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb) || + skb->protocol != cpu_to_be16(ETH_P_8021Q)) + return skb->protocol; + + if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) + return 0; + + return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto; +} +#endif /* !RHEL5.7+ || RHEL6.0 */ + +#ifdef HAVE_HW_TIME_STAMP +#define SKBTX_HW_TSTAMP (1 << 0) +#define SKBTX_IN_PROGRESS (1 << 2) +#define SKB_SHARED_TX_IS_UNION +#endif + +#ifndef device_wakeup_enable +#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) ) +#ifndef HAVE_VLAN_RX_REGISTER +#define HAVE_VLAN_RX_REGISTER +#endif +#endif /* > 2.4.18 */ +#endif /* < 2.6.37 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) 
+#define skb_checksum_start_offset(skb) skb_transport_offset(skb) +#else /* 2.6.22 -> 2.6.37 */ +static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} +#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) +#endif /* 2.6.22 -> 2.6.37 */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif +#ifndef DCB_CAP_DCBX_HOST +#define DCB_CAP_DCBX_HOST 0x01 +#endif +#ifndef DCB_CAP_DCBX_LLD_MANAGED +#define DCB_CAP_DCBX_LLD_MANAGED 0x02 +#endif +#ifndef DCB_CAP_DCBX_VER_CEE +#define DCB_CAP_DCBX_VER_CEE 0x04 +#endif +#ifndef DCB_CAP_DCBX_VER_IEEE +#define DCB_CAP_DCBX_VER_IEEE 0x08 +#endif +#ifndef DCB_CAP_DCBX_STATIC +#define DCB_CAP_DCBX_STATIC 0x10 +#endif +#endif /* CONFIG_DCB */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) +#define CONFIG_XPS +#endif /* RHEL_RELEASE_VERSION(6,2) */ +#endif /* < 2.6.38 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#ifndef TC_BITMASK +#define TC_BITMASK 15 +#endif +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM (1 << 29) +#endif +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) +#endif +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef FCOE_MTU +#define FCOE_MTU 2158 +#endif +#endif +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +#define kstrtoul(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtouint(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtou32(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#endif /* !(RHEL_RELEASE_CODE >= 
RHEL_RELEASE_VERSION(6,4)) */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +extern u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); +#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q)) +extern u8 _kc_netdev_get_num_tc(struct net_device *dev); +#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev) +extern int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc); +#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc)) +#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0) +#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0) +extern u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up); +#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up) +#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0) +#else /* RHEL6.1 or greater */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif /* HAVE_MQPRIO */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#ifndef IEEE_8021QAZ_TSA_STRICT +#define IEEE_8021QAZ_TSA_STRICT 0 +#endif +#ifndef IEEE_8021QAZ_TSA_ETS +#define IEEE_8021QAZ_TSA_ETS 2 +#endif +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#endif /* CONFIG_DCB */ +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ + +#ifndef udp_csum +#define udp_csum __kc_udp_csum +static inline __wsum __kc_udp_csum(struct sk_buff *skb) +{ + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), skb->csum); + + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { + csum = csum_add(csum, skb->csum); + } + return csum; +} +#endif /* udp_csum */ +#else /* < 2.6.39 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC 
+#define HAVE_SETUP_TC +#endif +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#endif +#endif /* CONFIG_DCB */ +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY +#endif /* < 2.6.39 */ + +/*****************************************************************************/ +/* use < 2.6.40 because of a Fedora 15 kernel update where they + * updated the kernel version to 2.6.40.x and they back-ported 3.0 features + * like set_phys_id for ethtool. + */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) +#ifdef ETHTOOL_GRXRINGS +#ifndef FLOW_EXT +#define FLOW_EXT 0x80000000 +union _kc_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[60]; +}; +struct _kc_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +struct _kc_ethtool_rx_flow_spec { + __u32 flow_type; + union _kc_ethtool_flow_union h_u; + struct _kc_ethtool_flow_ext h_ext; + union _kc_ethtool_flow_union m_u; + struct _kc_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec +#endif /* FLOW_EXT */ +#endif + +#define pci_disable_link_state_locked pci_disable_link_state + +#ifndef PCI_LTR_VALUE_MASK +#define PCI_LTR_VALUE_MASK 0x000003ff +#endif +#ifndef PCI_LTR_SCALE_MASK +#define PCI_LTR_SCALE_MASK 0x00001c00 +#endif +#ifndef PCI_LTR_SCALE_SHIFT +#define PCI_LTR_SCALE_SHIFT 10 +#endif + +#else /* < 2.6.40 */ +#define HAVE_ETHTOOL_SET_PHYS_ID +#endif /* < 2.6.40 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) +#define USE_LEGACY_PM_SUPPORT +#ifndef kfree_rcu +#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef kstrtol_from_user +#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) +static inline int _kc_kstrtol_from_user(const char __user *s, 
size_t count, + unsigned int base, long *res) +{ + /* sign, base 2 representation, newline, terminator */ + char buf[1 + sizeof(long) * 8 + 1 + 1]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return strict_strtol(buf, base, res); +} +#endif + +/* 20000base_blah_full Supported and Advertised Registers */ +#define SUPPORTED_20000baseMLD2_Full (1 << 21) +#define SUPPORTED_20000baseKR2_Full (1 << 22) +#define ADVERTISED_20000baseMLD2_Full (1 << 21) +#define ADVERTISED_20000baseKR2_Full (1 << 22) +#endif /* < 3.0.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#ifndef __netdev_alloc_skb_ip_align +#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l) +#endif /* __netdev_alloc_skb_ip_align */ +#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) +#define dcb_ieee_delapp(dev, app) 0 +#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) + +/* 1000BASE-T Control register */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 + +/* kernels less than 3.0.0 don't have this */ +#ifndef ETH_P_8021AD +#define ETH_P_8021AD 0x88A8 +#endif + +/* Stub definition for !CONFIG_OF is introduced later */ +#ifdef CONFIG_OF +static inline struct device_node * +pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev) +{ +#ifdef HAVE_STRUCT_DEVICE_OF_NODE + return pdev ? 
pdev->dev.of_node : NULL; +#else + return NULL; +#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */ +} +#endif /* CONFIG_OF */ +#else /* < 3.1.0 */ +#ifndef HAVE_DCBNL_IEEE_DELAPP +#define HAVE_DCBNL_IEEE_DELAPP +#endif +#endif /* < 3.1.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) +static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif +#ifdef ETHTOOL_GRXRINGS +#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS +#endif /* ETHTOOL_GRXRINGS */ + +#ifndef skb_frag_size +#define skb_frag_size(frag) _kc_skb_frag_size(frag) +static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} +#endif /* skb_frag_size */ + +#ifndef skb_frag_size_sub +#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) +static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} +#endif /* skb_frag_size_sub */ + +#ifndef skb_frag_page +#define skb_frag_page(frag) _kc_skb_frag_page(frag) +static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} +#endif /* skb_frag_page */ + +#ifndef skb_frag_address +#define skb_frag_address(frag) _kc_skb_frag_address(frag) +static inline void *_kc_skb_frag_address(const skb_frag_t *frag) +{ + return page_address(skb_frag_page(frag)) + frag->page_offset; +} +#endif /* skb_frag_address */ + +#ifndef skb_frag_dma_map +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) +#include +#endif +#define skb_frag_dma_map(dev,frag,offset,size,dir) \ + _kc_skb_frag_dma_map(dev,frag,offset,size,dir) +static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, + const 
skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} +#endif /* skb_frag_dma_map */ + +#ifndef __skb_frag_unref +#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) +static inline void __kc_skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} +#endif /* __skb_frag_unref */ + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#endif +#endif +#else /* < 3.2.0 */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#ifndef HAVE_SKB_L4_RXHASH +#define HAVE_SKB_L4_RXHASH +#endif +#define HAVE_IOMMU_PRESENT +#define HAVE_PM_QOS_REQUEST_LIST_NEW +#endif /* < 3.2.0 */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2)) +#undef ixgbe_get_netdev_tc_txq +#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) +#endif +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) +/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than + * alloc_workqueue() to avoid compiler warning from -Wvarargs + */ +static inline struct workqueue_struct * __attribute__ ((format(printf, 3, 4))) +_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, + const char *fmt, ...) 
+{ + struct workqueue_struct *wq; + va_list args, temp; + unsigned int len; + char *p; + + va_start(args, fmt); + va_copy(temp, args); + len = vsnprintf(NULL, 0, fmt, temp); + va_end(temp); + + p = kmalloc(len + 1, GFP_KERNEL); + if (!p) { + va_end(args); + return NULL; + } + + vsnprintf(p, len + 1, fmt, args); + va_end(args); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) + wq = create_workqueue(p); +#else + wq = alloc_workqueue(p, flags, max_active); +#endif + kfree(p); + + return wq; +} +#ifdef alloc_workqueue +#undef alloc_workqueue +#endif +#define alloc_workqueue(fmt, flags, max_active, args...) \ + _kc_alloc_workqueue(flags, max_active, fmt, ##args) + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) +typedef u32 netdev_features_t; +#endif +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) +#define netdev_completed_queue(_n, _p, _b) do {} while (0) +#define netdev_tx_sent_queue(_q, _b) do {} while (0) +#define netdev_sent_queue(_n, _b) do {} while (0) +#define netdev_tx_reset_queue(_q) do {} while (0) +#define netdev_reset_queue(_n) do {} while (0) +#endif +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#endif /* SLE_VERSION(11,3,0) */ +#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, + u8 *nexthdrp, + __be16 __always_unused *frag_offp) +{ + return ipv6_skip_exthdr(skb, start, nexthdrp); +} +#undef ipv6_skip_exthdr +#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) +#endif /* !SLES11sp4 or greater */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + 
return index % n_rx_rings; +} +#endif + +#else /* ! < 3.3.0 */ +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif +#endif /* < 3.3.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif /* NETIF_F_RXFCS */ +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif /* NETIF_F_RXALL */ + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define NUMTCS_RETURNS_U8 + +int _kc_simple_open(struct inode *inode, struct file *file); +#define simple_open _kc_simple_open +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, + int, int, unsigned int); +#endif +#ifdef NET_ADDR_RANDOM +#define eth_hw_addr_random(N) do { \ + eth_random_addr(N->dev_addr); \ + N->addr_assign_type |= NET_ADDR_RANDOM; \ + } while (0) +#else /* NET_ADDR_RANDOM */ +#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) +#endif /* NET_ADDR_RANDOM */ + +#ifndef for_each_set_bit_from +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit_from */ + +#else /* < 3.4.0 */ +#include +#endif /* >= 3.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \ + ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) +#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#define HAVE_PTP_1588_CLOCK +#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ +#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ + 
+/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) + +#ifndef BITS_PER_LONG_LONG +#define BITS_PER_LONG_LONG 64 +#endif + +#ifndef ether_addr_equal +static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} +#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2)) +#endif + +/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */ +#ifdef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif +#else +#include +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO +#endif /* < 3.5.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) ) +#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ + +#ifndef MDIO_EEE_100TX +#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ +#endif +#ifndef MDIO_EEE_1000T +#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ +#endif +#ifndef MDIO_EEE_10GT +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#endif +#ifndef MDIO_EEE_1000KX +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#endif +#ifndef MDIO_EEE_10GKX4 +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#endif +#ifndef MDIO_EEE_10GKR +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ +#endif + +#ifndef __GFP_MEMALLOC +#define __GFP_MEMALLOC 0 +#endif + +#ifndef eth_broadcast_addr +#define eth_broadcast_addr _kc_eth_broadcast_addr +static inline void _kc_eth_broadcast_addr(u8 *addr) +{ + memset(addr, 0xff, ETH_ALEN); +} +#endif + +#ifndef eth_random_addr +#define eth_random_addr _kc_eth_random_addr +static inline void _kc_eth_random_addr(u8 *addr) +{ + get_random_bytes(addr, ETH_ALEN); + addr[0] &= 0xfe; /* clear multicast */ + addr[0] |= 0x02; 
/* set local assignment */ +} +#endif /* eth_random_addr */ + +#ifndef DMA_ATTR_SKIP_CPU_SYNC +#define DMA_ATTR_SKIP_CPU_SYNC 0 +#endif +#else /* < 3.6.0 */ +#define HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* < 3.6.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +#ifndef ADVERTISED_40000baseKR4_Full +/* these defines were all added in one commit, so should be safe + * to trigger activiation on one define + */ +#define SUPPORTED_40000baseKR4_Full (1 << 23) +#define SUPPORTED_40000baseCR4_Full (1 << 24) +#define SUPPORTED_40000baseSR4_Full (1 << 25) +#define SUPPORTED_40000baseLR4_Full (1 << 26) +#define ADVERTISED_40000baseKR4_Full (1 << 23) +#define ADVERTISED_40000baseCR4_Full (1 << 24) +#define ADVERTISED_40000baseSR4_Full (1 << 25) +#define ADVERTISED_40000baseLR4_Full (1 << 26) +#endif + +#ifndef mmd_eee_cap_to_ethtool_sup_t +/** + * mmd_eee_cap_to_ethtool_sup_t + * @eee_cap: value of the MMD EEE Capability register + * + * A small helper function that translates MMD EEE Capability (3.20) bits + * to ethtool supported settings. 
+ */ +static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap) +{ + u32 supported = 0; + + if (eee_cap & MDIO_EEE_100TX) + supported |= SUPPORTED_100baseT_Full; + if (eee_cap & MDIO_EEE_1000T) + supported |= SUPPORTED_1000baseT_Full; + if (eee_cap & MDIO_EEE_10GT) + supported |= SUPPORTED_10000baseT_Full; + if (eee_cap & MDIO_EEE_1000KX) + supported |= SUPPORTED_1000baseKX_Full; + if (eee_cap & MDIO_EEE_10GKX4) + supported |= SUPPORTED_10000baseKX4_Full; + if (eee_cap & MDIO_EEE_10GKR) + supported |= SUPPORTED_10000baseKR_Full; + + return supported; +} +#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \ + __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap) +#endif /* mmd_eee_cap_to_ethtool_sup_t */ + +#ifndef mmd_eee_adv_to_ethtool_adv_t +/** + * mmd_eee_adv_to_ethtool_adv_t + * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers + * + * A small helper function that translates the MMD EEE Advertisement (7.60) + * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement + * settings. 
+ */ +static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) +{ + u32 adv = 0; + + if (eee_adv & MDIO_EEE_100TX) + adv |= ADVERTISED_100baseT_Full; + if (eee_adv & MDIO_EEE_1000T) + adv |= ADVERTISED_1000baseT_Full; + if (eee_adv & MDIO_EEE_10GT) + adv |= ADVERTISED_10000baseT_Full; + if (eee_adv & MDIO_EEE_1000KX) + adv |= ADVERTISED_1000baseKX_Full; + if (eee_adv & MDIO_EEE_10GKX4) + adv |= ADVERTISED_10000baseKX4_Full; + if (eee_adv & MDIO_EEE_10GKR) + adv |= ADVERTISED_10000baseKR_Full; + + return adv; +} + +#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ + __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) +#endif /* mmd_eee_adv_to_ethtool_adv_t */ + +#ifndef ethtool_adv_to_mmd_eee_adv_t +/** + * ethtool_adv_to_mmd_eee_adv_t + * @adv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement settings + * to EEE advertisements for the MMD EEE Advertisement (7.60) and + * MMD EEE Link Partner Ability (7.61) registers. + */ +static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) +{ + u16 reg = 0; + + if (adv & ADVERTISED_100baseT_Full) + reg |= MDIO_EEE_100TX; + if (adv & ADVERTISED_1000baseT_Full) + reg |= MDIO_EEE_1000T; + if (adv & ADVERTISED_10000baseT_Full) + reg |= MDIO_EEE_10GT; + if (adv & ADVERTISED_1000baseKX_Full) + reg |= MDIO_EEE_1000KX; + if (adv & ADVERTISED_10000baseKX4_Full) + reg |= MDIO_EEE_10GKX4; + if (adv & ADVERTISED_10000baseKR_Full) + reg |= MDIO_EEE_10GKR; + + return reg; +} +#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv) +#endif /* ethtool_adv_to_mmd_eee_adv_t */ + +#ifndef pci_pcie_type +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +static inline u8 pci_pcie_type(struct pci_dev *pdev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + BUG_ON(!pos); + pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); + return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; +} +#else /* < 2.6.24 */ +#define pci_pcie_type(x) (x)->pcie_type 
+#endif /* < 2.6.24 */ +#endif /* pci_pcie_type */ + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \ + ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) +#define ptp_clock_register(caps, args...) ptp_clock_register(caps) +#endif + +#ifndef pcie_capability_read_word +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); +#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v) +#endif /* pcie_capability_read_word */ + +#ifndef pcie_capability_write_word +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); +#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v) +#endif /* pcie_capability_write_word */ + +#ifndef pcie_capability_clear_and_set_word +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); +#define pcie_capability_clear_and_set_word(d,p,c,s) \ + __kc_pcie_capability_clear_and_set_word(d,p,c,s) +#endif /* pcie_capability_clear_and_set_word */ + +#ifndef pcie_capability_clear_word +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear); +#define pcie_capability_clear_word(d, p, c) \ + __kc_pcie_capability_clear_word(d, p, c) +#endif /* pcie_capability_clear_word */ + +#ifndef PCI_EXP_LNKSTA2 +#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define USE_CONST_DEV_UC_CHAR +#endif + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8)) +#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) +#endif /* !RHEL6.8+ */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) +#include +#else + +#define DEFINE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] = \ + { [0 ... 
((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DECLARE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] + +#define HASH_SIZE(name) (ARRAY_SIZE(name)) +#define HASH_BITS(name) ilog2(HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define hash_min(val, bits) \ + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) + +#define hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +static inline bool hash_hashed(struct hlist_node *node) +{ + return !hlist_unhashed(node); +} + +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + if (!hlist_empty(&ht[i])) + return false; + + return true; +} + +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) + +static inline void hash_del(struct hlist_node *node) +{ + hlist_del_init(node); +} +#endif /* RHEL >= 6.6 */ + +#else /* >= 3.7.0 */ +#include +#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS +#define USE_CONST_DEV_UC_CHAR +#endif /* >= 3.7.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) +#ifndef pci_sriov_set_totalvfs +static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs) +{ + return 0; +} +#define pci_sriov_set_totalvfs(a, b) 
__kc_pci_sriov_set_totalvfs((a), (b)) +#endif +#endif /* !(RHEL_RELEASE_CODE >= 6.5 && SLE_VERSION_CODE >= 11.4) */ +#ifndef PCI_EXP_LNKCTL_ASPM_L0S +#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */ +#endif +#ifndef PCI_EXP_LNKCTL_ASPM_L1 +#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */ +#endif +#define HAVE_CONFIG_HOTPLUG +/* Reserved Ethernet Addresses per IEEE 802.1Q */ +static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { + 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; + +#ifndef is_link_local_ether_addr +static inline bool __kc_is_link_local_ether_addr(const u8 *addr) +{ + __be16 *a = (__be16 *)addr; + static const __be16 *b = (const __be16 *)eth_reserved_addr_base; + static const __be16 m = cpu_to_be16(0xfff0); + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; +} +#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr) +#endif /* is_link_local_ether_addr */ +int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, + int target, unsigned short *fragoff, int *flags); +#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e)) + +#ifndef FLOW_MAC_EXT +#define FLOW_MAC_EXT 0x40000000 +#endif /* FLOW_MAC_EXT */ + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +#define HAVE_SRIOV_CONFIGURE +#endif + +#else /* >= 3.8.0 */ +#ifndef __devinit +#define __devinit +#endif + +#ifndef __devinitdata +#define __devinitdata +#endif + +#ifndef __devinitconst +#define __devinitconst +#endif + +#ifndef __devexit +#define __devexit +#endif + +#ifndef __devexit_p +#define __devexit_p +#endif + +#ifndef HAVE_ENCAP_CSUM_OFFLOAD +#define HAVE_ENCAP_CSUM_OFFLOAD +#endif + +#ifndef HAVE_GRE_ENCAP_OFFLOAD +#define HAVE_GRE_ENCAP_OFFLOAD +#endif + +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +#define HAVE_BRIDGE_ATTRIBS +#ifndef BRIDGE_MODE_VEB +#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ +#endif /* BRIDGE_MODE_VEB */ +#ifndef BRIDGE_MODE_VEPA 
+#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#endif /* BRIDGE_MODE_VEPA */ +#endif /* >= 3.8.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) + +#undef BUILD_BUG_ON +#ifdef __CHECKER__ +#define BUILD_BUG_ON(condition) (0) +#else /* __CHECKER__ */ +#ifndef __compiletime_warning +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_warning(message) __attribute__((warning(message))) +#else /* __GNUC__ */ +#define __compiletime_warning(message) +#endif /* __GNUC__ */ +#endif /* __compiletime_warning */ +#ifndef __compiletime_error +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_error(message) __attribute__((error(message))) +#define __compiletime_error_fallback(condition) do { } while (0) +#else /* __GNUC__ */ +#define __compiletime_error(message) +#define __compiletime_error_fallback(condition) \ + do { ((void)sizeof(char[1 - 2 * condition])); } while (0) +#endif /* __GNUC__ */ +#else /* __compiletime_error */ +#define __compiletime_error_fallback(condition) do { } while (0) +#endif /* __compiletime_error */ +#define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + bool __cond = !(condition); \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (__cond) \ + prefix ## suffix(); \ + __compiletime_error_fallback(__cond); \ + } while (0) + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else /* __OPTIMIZE__ */ +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, 
"BUILD_BUG_ON failed: " #condition) +#endif /* __OPTIMIZE__ */ +#endif /* __CHECKER__ */ + +#undef hlist_entry +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#undef hlist_entry_safe +#define hlist_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? hlist_entry(____ptr, type, member) : NULL; \ + }) + +#undef hlist_for_each_entry +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ + pos && ({ n = pos->member.next; 1; }); \ + pos = hlist_entry_safe(n, typeof(*pos), member)) + +#undef hlist_for_each_entry_continue +#define hlist_for_each_entry_continue(pos, member) \ + for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_from +#define hlist_for_each_entry_from(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hash_for_each +#define hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry(obj, &name[bkt], member) + +#undef hash_for_each_safe +#define hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +#undef hash_for_each_possible +#define hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) + +#undef hash_for_each_possible_safe +#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ + 
hlist_for_each_entry_safe(obj, tmp,\ + &name[hash_min(key, HASH_BITS(name))], member) + +#ifdef CONFIG_XPS +extern int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16); +#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) +#else /* CONFIG_XPS */ +#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) +#endif /* CONFIG_XPS */ + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define _kc_hashrnd 0xd631614b /* not so random hash salt */ +extern u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); +#define __netdev_pick_tx __kc_netdev_pick_tx +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else +#define HAVE_BRIDGE_FILTER +#define HAVE_FDB_DEL_NLATTR +#endif /* < 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifndef NAPI_POLL_WEIGHT +#define NAPI_POLL_WEIGHT 64 +#endif +#ifdef CONFIG_PCI_IOV +extern int __kc_pci_vfs_assigned(struct pci_dev *dev); +#else +static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) +{ + return 0; +} +#endif +#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) + +#ifndef list_first_entry_or_null +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) +#endif + +#ifndef VLAN_TX_COOKIE_MAGIC +static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ +#ifdef VLAN_TAG_PRESENT + vlan_tci |= VLAN_TAG_PRESENT; +#endif + skb->vlan_tci = vlan_tci; + return skb; +} +#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ + __kc__vlan_hwaccel_put_tag(skb, vlan_tci) +#endif + +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +extern int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 flags); +#ifdef HAVE_FDB_DEL_NLATTR +extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr); +#else +extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr); +#endif +#else +extern int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags); +extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr); +#endif +#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add +#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del +#endif /* HAVE_FDB_OPS */ + +#ifndef PCI_DEVID +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) +#endif + +/* The definitions for these functions when CONFIG_OF_NET is defined are + * pulled in from . For kernels older than 3.5 we already have + * backports for when CONFIG_OF_NET is true. These are separated and + * duplicated in order to cover all cases so that all kernels get either the + * real definitions (when CONFIG_OF_NET is defined) or the stub definitions + * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real + * definitions). 
+ */ +#ifndef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif + +#else /* >= 3.10.0 */ +#define HAVE_ENCAP_TSO_OFFLOAD +#define USE_DEFAULT_FDB_DEL_DUMP +#define HAVE_SKB_INNER_NETWORK_HEADER +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0))) +#define HAVE_RHEL7_PCI_DRIVER_RH +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#define HAVE_RHEL7_PCI_RESET_NOTIFY +#endif /* RHEL >= 7.2 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_RHEL7_NET_DEVICE_OPS_EXT +#define HAVE_GENEVE_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif +#ifdef ETHTOOL_GLINKSETTINGS +#define HAVE_ETHTOOL_25G_BITS +#endif /* ETHTOOL_GLINKSETTINGS */ +#endif /* RHEL >= 7.3 */ + +/* new hooks added to net_device_ops_extended in RHEL7.4 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL +#define HAVE_UDP_ENC_RX_OFFLOAD +#endif /* RHEL >= 7.4 */ + +#endif /* RHEL >= 7.0 && RHEL < 8.0 */ +#endif /* >= 3.10.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) ) +#define netdev_notifier_info_to_dev(ptr) ptr +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) +#define HAVE_NDO_SET_VF_LINK_STATE +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#else /* >= 3.11.0 */ +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_SKB_INNER_PROTOCOL +#define HAVE_MPLS_FEATURES +#endif /* >= 3.11.0 */ + 
+/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) +extern int __kc_pcie_get_minimum_link(struct pci_dev *dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width); +#ifndef pcie_get_minimum_link +#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w) +#endif +#else /* >= 3.12.0 */ +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) +#define HAVE_VXLAN_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NDO_GET_PHYS_PORT_ID +#endif /* >= 3.12.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) +#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m) +extern int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); +#ifndef u64_stats_init +#define u64_stats_init(a) do { } while(0) +#endif +#ifndef BIT_ULL +#define BIT_ULL(n) (1ULL << (n)) +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0)) +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#endif +#ifndef list_next_entry +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) +#endif +#ifndef list_prev_entry +#define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, typeof(*(pos)), member) +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20) ) +#define devm_kcalloc(dev, cnt, size, flags) \ + devm_kzalloc(dev, cnt * size, flags) +#endif /* > 2.6.20 */ + +#else /* >= 3.13.0 */ +#define HAVE_VXLAN_CHECKS +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#else +#define 
HAVE_NDO_SELECT_QUEUE_ACCEL +#endif +#define HAVE_NET_GET_RANDOM_ONCE +#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS +#endif + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) + +#ifndef U16_MAX +#define U16_MAX ((u16)~0U) +#endif + +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif + +#define dev_consume_skb_any(x) dev_kfree_skb_any(x) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) + +/* it isn't expected that this would be a #define unless we made it so */ +#ifndef skb_set_hash + +#define PKT_HASH_TYPE_NONE 0 +#define PKT_HASH_TYPE_L2 1 +#define PKT_HASH_TYPE_L3 2 +#define PKT_HASH_TYPE_L4 3 + +enum _kc_pkt_hash_types { + _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE, + _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2, + _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3, + _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4, +}; +#define pkt_hash_types _kc_pkt_hash_types + +#define skb_set_hash __kc_skb_set_hash +static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, + u32 __maybe_unused hash, + int __maybe_unused type) +{ +#ifdef HAVE_SKB_L4_RXHASH + skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); +#endif +#ifdef NETIF_F_RXHASH + skb->rxhash = hash; +#endif +} +#endif /* !skb_set_hash */ + +#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ + +#ifndef HAVE_VXLAN_RX_OFFLOAD +#define HAVE_VXLAN_RX_OFFLOAD +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif + +#ifndef HAVE_VXLAN_CHECKS +#define HAVE_VXLAN_CHECKS +#endif /* HAVE_VXLAN_CHECKS */ +#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ + +#ifndef pci_enable_msix_range +extern int __kc_pci_enable_msix_range(struct pci_dev *dev, + struct msix_entry *entries, + int minvec, int maxvec); +#define pci_enable_msix_range 
__kc_pci_enable_msix_range +#endif + +#ifndef ether_addr_copy +#define ether_addr_copy __kc_ether_addr_copy +static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + *(u32 *)dst = *(const u32 *)src; + *(u16 *)(dst + 4) = *(const u16 *)(src + 4); +#else + u16 *a = (u16 *)dst; + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; +#endif +} +#endif /* ether_addr_copy */ + +#else /* >= 3.14.0 */ + +/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */ +#ifndef HAVE_NDO_DFWD_OPS +#define HAVE_NDO_DFWD_OPS +#endif +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif /* 3.14.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \ + !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30))) +#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh +#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh +#endif + +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); +#define devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp) + +#else +#define HAVE_PTP_1588_CLOCK_PINS +#define HAVE_NETDEV_PORT +#endif /* 3.15.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() +#endif +#ifndef __dev_uc_sync +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct 
net_device *, const unsigned char *)); +#endif +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#endif /* HAVE_SET_RX_MODE */ + +static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_UNICAST + return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, + dev, sync, unsync); +#else + return 0; +#endif +} +#define __dev_uc_sync __kc_dev_uc_sync + +static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST + __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_uc_unsync __kc_dev_uc_unsync + +static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_MULTICAST + return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, + dev, sync, unsync); +#else + return 0; +#endif + +} +#define 
__dev_mc_sync __kc_dev_mc_sync + +static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_MULTICAST + __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_mc_unsync __kc_dev_mc_unsync +#endif /* __dev_uc_sync */ + +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif + +#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM +/* if someone backports this, hopefully they backport as a #define. + * declare it as zero on older kernels so that if it get's or'd in + * it won't effect anything, therefore preventing core driver changes + */ +#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 +#define SKB_GSO_UDP_TUNNEL_CSUM 0 +#endif +extern void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + unsigned int gfp); +#define devm_kmemdup __kc_devm_kmemdup + +#else +#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif /* 3.16.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) ) +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#ifndef timespec64 +#define timespec64 timespec +static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) +{ + return ts; +} +static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) +{ + return ts64; +} +#define timespec64_equal timespec_equal +#define timespec64_compare timespec_compare +#define set_normalized_timespec64 set_normalized_timespec +#define 
timespec64_add_safe timespec_add_safe +#define timespec64_add timespec_add +#define timespec64_sub timespec_sub +#define timespec64_valid timespec_valid +#define timespec64_valid_strict timespec_valid_strict +#define timespec64_to_ns timespec_to_ns +#define ns_to_timespec64 ns_to_timespec +#define ktime_to_timespec64 ktime_to_timespec +#define timespec64_add_ns timespec_add_ns +#endif /* timespec64 */ +#endif /* !(RHEL6.8= RHEL_RELEASE_VERSION(7,4)) +#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a) +#endif + +#else +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#include +#endif /* 3.17.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) +#ifndef NO_PTP_SUPPORT +#include +extern struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb); +extern void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps); +#define skb_clone_sk __kc_skb_clone_sk +#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp +#endif +extern unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len); +#define eth_get_headlen __kc_eth_get_headlen +#ifndef ETH_P_XDSA +#define ETH_P_XDSA 0x00F8 +#endif +/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1)) +#define HAVE_SKBUFF_CSUM_LEVEL +#endif /* >= RH 7.1 */ + +#undef GENMASK +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#undef GENMASK_ULL +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#else /* 3.18.0 */ +#define HAVE_SKBUFF_CSUM_LEVEL +#define HAVE_SKB_XMIT_MORE +#define HAVE_SKB_INNER_PROTOCOL_TYPE +#endif /* 3.18.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) ) +#else +#define HAVE_NDO_FEATURES_CHECK +#endif /* 3.18.4 */ 
+ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +/* netdev_phys_port_id renamed to netdev_phys_item_id */ +#define netdev_phys_item_id netdev_phys_port_id + +static inline void _kc_napi_complete_done(struct napi_struct *napi, + int __always_unused work_done) { + napi_complete(napi); +} +#define napi_complete_done _kc_napi_complete_done + +#ifndef NETDEV_RSS_KEY_LEN +#define NETDEV_RSS_KEY_LEN (13 * 4) +#endif +#if ( !(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) ) +#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) +#endif /* RHEL_RELEASE_CODE */ +extern void __kc_netdev_rss_key_fill(void *buffer, size_t len); +#define SPEED_20000 20000 +#define SPEED_40000 40000 +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif +#ifndef dev_alloc_pages +#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order)) +#endif +#ifndef dev_alloc_page +#define dev_alloc_page() dev_alloc_pages(0) +#endif +#if !defined(eth_skb_pad) && !defined(skb_put_padto) +/** + * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. 
+ */ +static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} +#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) + +static inline int __kc_eth_skb_pad(struct sk_buff *skb) +{ + return __kc_skb_put_padto(skb, ETH_ZLEN); +} +#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) +#endif /* eth_skb_pad && skb_put_padto */ + +#ifndef SKB_ALLOC_NAPI +/* RHEL 7.2 backported napi_alloc_skb and friends */ +static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) +{ + return netdev_alloc_skb_ip_align(napi->dev, length); +} +#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len) +#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len) +#endif /* SKB_ALLOC_NAPI */ +#define HAVE_CONFIG_PM_RUNTIME +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RXFH_HASHFUNC +#endif /* 6.7 < RHEL < 7.0 */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* RHEL > 7.1 */ +#ifndef napi_schedule_irqoff +#define napi_schedule_irqoff napi_schedule +#endif +#ifndef READ_ONCE +#define READ_ONCE(_x) ACCESS_ONCE(_x) +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_FDB_ADD_VID +#endif +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif +#ifndef ETH_MODULE_SFF_8636_LEN +#define ETH_MODULE_SFF_8636_LEN 256 +#endif +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif +#ifndef ETH_MODULE_SFF_8436_LEN +#define ETH_MODULE_SFF_8436_LEN 256 +#endif +#else /* 3.19.0 */ +#define HAVE_NDO_FDB_ADD_VID +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* 
3.19.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) ) +/* vlan_tx_xx functions got renamed to skb_vlan */ +#ifndef skb_vlan_tag_get +#define skb_vlan_tag_get vlan_tx_tag_get +#endif +#ifndef skb_vlan_tag_present +#define skb_vlan_tag_present vlan_tx_tag_present +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif +#else +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif /* 3.20.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) ) +/* Definition for CONFIG_OF was introduced earlier */ +#if !defined(CONFIG_OF) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +static inline struct device_node * +pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; } +#endif /* !CONFIG_OF && RHEL < 7.3 */ +#endif /* < 4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#ifndef NO_PTP_SUPPORT +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include +#else +#include +#endif +static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta) +{ + tc->nsec += delta; +} + +static inline struct net_device * +of_find_net_device_by_node(struct device_node __always_unused *np) +{ + return NULL; +} + +#define timecounter_adjtime __kc_timecounter_adjtime +#endif +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))) +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) 
+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#endif +#else /* >= 4,1,0 */ +#define HAVE_PTP_CLOCK_INFO_GETTIME64 +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_PASSTHRU_FEATURES_CHECK +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#endif /* 4,1,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9)) +#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11,3,0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0,47,71))) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11,4,0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65,0,0))) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) +static inline bool page_is_pfmemalloc(struct page __maybe_unused *page) +{ +#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC + return page->pfmemalloc; +#else + return false; +#endif +} +#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) & !SLES12sp1+ */ +#else +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* 4.1.9 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) +#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 +static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; +}; + +static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; +}; +#endif /* ! RHEL >= 7.2 && ! 
SLES >= 12.1 */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#endif +#else +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#endif /* 4.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_NDO_SET_VF_TRUST +#endif /* (RHEL_RELEASE >= 7.3) */ +#ifndef CONFIG_64BIT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) +#include /* 32-bit readq/writeq */ +#else /* 3.3.0 => 4.3.x */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) +#include +#endif /* 2.6.26 => 3.3.0 */ +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#define readq readq +#endif + +#ifndef writeq +static inline void writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#define writeq writeq +#endif +#endif /* < 3.3.0 */ +#endif /* !CONFIG_64BIT */ +#else /* < 4.4.0 */ +#define HAVE_NDO_SET_VF_TRUST + +#ifndef CONFIG_64BIT +#include /* 32-bit readq/writeq */ +#endif /* !CONFIG_64BIT */ +#endif /* 4.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)) +/* protect against a likely backport */ +#ifndef NETIF_F_CSUM_MASK +#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM +#endif /* NETIF_F_CSUM_MASK */ +#ifndef NETIF_F_SCTP_CRC +#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM +#endif /* NETIF_F_SCTP_CRC */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address +extern int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused); +#endif /* 
!(RHEL_RELEASE >= 7.3) */ +#else /* 4.5.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) +#define HAVE_GENEVE_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#endif /* 4.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) +#if !(UBUNTU_VERSION_CODE && \ + UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) && \ + !(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +static inline void napi_consume_skb(struct sk_buff *skb, + int __always_unused budget) +{ + dev_consume_skb_any(skb); +} + +#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */ +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + * sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} +#endif + +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) +static inline void page_ref_inc(struct page *page) +{ + get_page(page); +} +#else +#define HAVE_PAGE_COUNT_BULK_UPDATE +#endif + +#else /* 4.6.0 */ +#define HAVE_PAGE_COUNT_BULK_UPDATE +#endif /* 4.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)) +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_NETIF_TRANS_UPDATE +#endif +#else /* 4.7.0 */ +#define HAVE_NETIF_TRANS_UPDATE +#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#ifdef ETHTOOL_GLINKSETTINGS +#define HAVE_ETHTOOL_25G_BITS +#endif /* ETHTOOL_GLINKSETTINGS */ +#endif /* 4.7.0 */ + 
+/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN, + UDP_TUNNEL_TYPE_GENEVE, +}; +struct udp_tunnel_info { + unsigned short type; + sa_family_t sa_family; + __be16 port; +}; +#endif + +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) &&\ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_io_regions(struct pci_dev *pdev, char *name) +#else +pci_request_io_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO), name); +} + +static inline void +pci_release_io_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO)); +} + +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_mem_regions(struct pci_dev *pdev, char *name) +#else +pci_request_mem_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), name); +} + +static inline void +pci_release_mem_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +} +#endif /* !SLE_VERSION(12,3,0) */ +#else +#define HAVE_UDP_ENC_RX_OFFLOAD +#endif /* 4.8.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) +#else +#endif /* 4.9.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_STRUCT_DMA_ATTRS +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif + +#if 
!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#ifndef dma_map_page_attrs +#define dma_map_page_attrs __kc_dma_map_page_attrs +static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + return dma_map_page(dev, page, offset, size, dir); +} +#endif + +#ifndef dma_unmap_page_attrs +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs +static inline void __kc_dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} +#endif + +static inline void __page_frag_cache_drain(struct page *page, + unsigned int count) +{ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); +#else + BUG_ON(count > 1); + if (!count) + return; +#endif + __free_pages(page, compound_order(page)); +} +#endif /* !SLE_VERSION(12,3,0) */ +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif /* ETH_MIN_MTU */ +#else +#define HAVE_NETDEVICE_MIN_MAX_MTU +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#define HAVE_NETDEV_TC_RESETS_XPS +#endif /* 4.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +#ifdef CONFIG_NET_RX_BUSY_POLL +#define HAVE_NDO_BUSY_POLL +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_VOID_NDO_GET_STATS64 +#endif +#endif +#else /* > 4.11 */ +#define HAVE_VOID_NDO_GET_STATS64 +#endif /* 4.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#else /* > 4.13 */ +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#endif /* 4.13.0 */ + +#endif /* _KCOMPAT_H_ */ diff --git 
a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat_ethtool.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat_ethtool.c new file mode 100644 index 000000000000..16fbd7475720 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat_ethtool.c @@ -0,0 +1,1169 @@ +/******************************************************************************* + + Intel(R) 10GbE PCI Express Linux Network Driver + Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * net/core/ethtool.c - Ethtool ioctl handler + * Copyright (c) 2003 Matthew Wilcox + * + * This file is where we call all the ethtool_ops commands to get + * the information ethtool needs. We fall back to calling do_ioctl() + * for drivers which haven't been converted to ethtool_ops yet. + * + * It's GPL, stupid. + * + * Modification by sfeldma@pobox.com to work as backward compat + * solution for pre-ethtool_ops kernels. 
+ * - copied struct ethtool_ops from ethtool.h + * - defined SET_ETHTOOL_OPS + * - put in some #ifndef NETIF_F_xxx wrappers + * - changes refs to dev->ethtool_ops to ethtool_ops + * - changed dev_ethtool to ethtool_ioctl + * - remove EXPORT_SYMBOL()s + * - added _kc_ prefix in built-in ethtool_op_xxx ops. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "kcompat.h" + +#undef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full (1 << 12) +#undef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full (1 << 12) +#undef SPEED_10000 +#define SPEED_10000 10000 + +#undef ethtool_ops +#define ethtool_ops _kc_ethtool_ops + +struct _kc_ethtool_ops { + int (*get_settings)(struct net_device *, struct ethtool_cmd *); + int (*set_settings)(struct net_device *, struct ethtool_cmd *); + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); + int (*get_regs_len)(struct net_device *); + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); + void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); + u32 (*get_msglevel)(struct net_device *); + void (*set_msglevel)(struct net_device *, u32); + int (*nway_reset)(struct net_device *); + u32 (*get_link)(struct net_device *); + int (*get_eeprom_len)(struct net_device *); + int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); + void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); + int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); + void (*get_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + int (*set_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + u32 (*get_rx_csum)(struct 
net_device *); + int (*set_rx_csum)(struct net_device *, u32); + u32 (*get_tx_csum)(struct net_device *); + int (*set_tx_csum)(struct net_device *, u32); + u32 (*get_sg)(struct net_device *); + int (*set_sg)(struct net_device *, u32); + u32 (*get_tso)(struct net_device *); + int (*set_tso)(struct net_device *, u32); + int (*self_test_count)(struct net_device *); + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); + void (*get_strings)(struct net_device *, u32 stringset, u8 *); + int (*phys_id)(struct net_device *, u32); + int (*get_stats_count)(struct net_device *); + void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, + u64 *); +} *ethtool_ops = NULL; + +#undef SET_ETHTOOL_OPS +#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops)) + +/* + * Some useful ethtool_ops methods that are device independent. If we find that + * all drivers want to do the same thing here, we can turn these into dev_() + * function calls. + */ + +#undef ethtool_op_get_link +#define ethtool_op_get_link _kc_ethtool_op_get_link +u32 _kc_ethtool_op_get_link(struct net_device *dev) +{ + return netif_carrier_ok(dev) ? 
1 : 0; +} + +#undef ethtool_op_get_tx_csum +#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum +u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev) +{ +#ifdef NETIF_F_IP_CSUM + return (dev->features & NETIF_F_IP_CSUM) != 0; +#else + return 0; +#endif +} + +#undef ethtool_op_set_tx_csum +#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum +int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) +{ +#ifdef NETIF_F_IP_CSUM + if (data) +#ifdef NETIF_F_IPV6_CSUM + dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + else + dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); +#else + dev->features |= NETIF_F_IP_CSUM; + else + dev->features &= ~NETIF_F_IP_CSUM; +#endif +#endif + + return 0; +} + +#undef ethtool_op_get_sg +#define ethtool_op_get_sg _kc_ethtool_op_get_sg +u32 _kc_ethtool_op_get_sg(struct net_device *dev) +{ +#ifdef NETIF_F_SG + return (dev->features & NETIF_F_SG) != 0; +#else + return 0; +#endif +} + +#undef ethtool_op_set_sg +#define ethtool_op_set_sg _kc_ethtool_op_set_sg +int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data) +{ +#ifdef NETIF_F_SG + if (data) + dev->features |= NETIF_F_SG; + else + dev->features &= ~NETIF_F_SG; +#endif + + return 0; +} + +#undef ethtool_op_get_tso +#define ethtool_op_get_tso _kc_ethtool_op_get_tso +u32 _kc_ethtool_op_get_tso(struct net_device *dev) +{ +#ifdef NETIF_F_TSO + return (dev->features & NETIF_F_TSO) != 0; +#else + return 0; +#endif +} + +#undef ethtool_op_set_tso +#define ethtool_op_set_tso _kc_ethtool_op_set_tso +int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data) +{ +#ifdef NETIF_F_TSO + if (data) + dev->features |= NETIF_F_TSO; + else + dev->features &= ~NETIF_F_TSO; +#endif + + return 0; +} + +/* Handlers for each ethtool command */ + +static int ethtool_get_settings(struct net_device *dev, void *useraddr) +{ + struct ethtool_cmd cmd = { ETHTOOL_GSET }; + int err; + + if (!ethtool_ops->get_settings) + return -EOPNOTSUPP; + + err = 
ethtool_ops->get_settings(dev, &cmd); + if (err < 0) + return err; + + if (copy_to_user(useraddr, &cmd, sizeof(cmd))) + return -EFAULT; + return 0; +} + +static int ethtool_set_settings(struct net_device *dev, void *useraddr) +{ + struct ethtool_cmd cmd; + + if (!ethtool_ops->set_settings) + return -EOPNOTSUPP; + + if (copy_from_user(&cmd, useraddr, sizeof(cmd))) + return -EFAULT; + + return ethtool_ops->set_settings(dev, &cmd); +} + +static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr) +{ + struct ethtool_drvinfo info; + struct ethtool_ops *ops = ethtool_ops; + + if (!ops->get_drvinfo) + return -EOPNOTSUPP; + + memset(&info, 0, sizeof(info)); + info.cmd = ETHTOOL_GDRVINFO; + ops->get_drvinfo(dev, &info); + + if (ops->self_test_count) + info.testinfo_len = ops->self_test_count(dev); + if (ops->get_stats_count) + info.n_stats = ops->get_stats_count(dev); + if (ops->get_regs_len) + info.regdump_len = ops->get_regs_len(dev); + if (ops->get_eeprom_len) + info.eedump_len = ops->get_eeprom_len(dev); + + if (copy_to_user(useraddr, &info, sizeof(info))) + return -EFAULT; + return 0; +} + +static int ethtool_get_regs(struct net_device *dev, char *useraddr) +{ + struct ethtool_regs regs; + struct ethtool_ops *ops = ethtool_ops; + void *regbuf; + int reglen, ret; + + if (!ops->get_regs || !ops->get_regs_len) + return -EOPNOTSUPP; + + if (copy_from_user(®s, useraddr, sizeof(regs))) + return -EFAULT; + + reglen = ops->get_regs_len(dev); + if (regs.len > reglen) + regs.len = reglen; + + regbuf = kmalloc(reglen, GFP_USER); + if (!regbuf) + return -ENOMEM; + + ops->get_regs(dev, ®s, regbuf); + + ret = -EFAULT; + if (copy_to_user(useraddr, ®s, sizeof(regs))) + goto out; + useraddr += offsetof(struct ethtool_regs, data); + if (copy_to_user(useraddr, regbuf, reglen)) + goto out; + ret = 0; + +out: + kfree(regbuf); + return ret; +} + +static int ethtool_get_wol(struct net_device *dev, char *useraddr) +{ + struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; + + if 
(!ethtool_ops->get_wol) + return -EOPNOTSUPP; + + ethtool_ops->get_wol(dev, &wol); + + if (copy_to_user(useraddr, &wol, sizeof(wol))) + return -EFAULT; + return 0; +} + +static int ethtool_set_wol(struct net_device *dev, char *useraddr) +{ + struct ethtool_wolinfo wol; + + if (!ethtool_ops->set_wol) + return -EOPNOTSUPP; + + if (copy_from_user(&wol, useraddr, sizeof(wol))) + return -EFAULT; + + return ethtool_ops->set_wol(dev, &wol); +} + +static int ethtool_get_msglevel(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GMSGLVL }; + + if (!ethtool_ops->get_msglevel) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_msglevel(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_msglevel(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_msglevel) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + ethtool_ops->set_msglevel(dev, edata.data); + return 0; +} + +static int ethtool_nway_reset(struct net_device *dev) +{ + if (!ethtool_ops->nway_reset) + return -EOPNOTSUPP; + + return ethtool_ops->nway_reset(dev); +} + +static int ethtool_get_link(struct net_device *dev, void *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GLINK }; + + if (!ethtool_ops->get_link) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_link(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_get_eeprom(struct net_device *dev, void *useraddr) +{ + struct ethtool_eeprom eeprom; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->get_eeprom || !ops->get_eeprom_len) + return -EOPNOTSUPP; + + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) + return -EFAULT; + + /* Check for wrap and zero */ + if (eeprom.offset + eeprom.len <= eeprom.offset) + return -EINVAL; + + /* Check for exceeding total 
eeprom len */ + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) + return -EINVAL; + + data = kmalloc(eeprom.len, GFP_USER); + if (!data) + return -ENOMEM; + + ret = -EFAULT; + if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) + goto out; + + ret = ops->get_eeprom(dev, &eeprom, data); + if (ret) + goto out; + + ret = -EFAULT; + if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) + goto out; + if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_set_eeprom(struct net_device *dev, void *useraddr) +{ + struct ethtool_eeprom eeprom; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->set_eeprom || !ops->get_eeprom_len) + return -EOPNOTSUPP; + + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) + return -EFAULT; + + /* Check for wrap and zero */ + if (eeprom.offset + eeprom.len <= eeprom.offset) + return -EINVAL; + + /* Check for exceeding total eeprom len */ + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) + return -EINVAL; + + data = kmalloc(eeprom.len, GFP_USER); + if (!data) + return -ENOMEM; + + ret = -EFAULT; + if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) + goto out; + + ret = ops->set_eeprom(dev, &eeprom, data); + if (ret) + goto out; + + if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) + ret = -EFAULT; + +out: + kfree(data); + return ret; +} + +static int ethtool_get_coalesce(struct net_device *dev, void *useraddr) +{ + struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; + + if (!ethtool_ops->get_coalesce) + return -EOPNOTSUPP; + + ethtool_ops->get_coalesce(dev, &coalesce); + + if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) + return -EFAULT; + return 0; +} + +static int ethtool_set_coalesce(struct net_device *dev, void *useraddr) +{ + struct ethtool_coalesce coalesce; + + if (!ethtool_ops->get_coalesce) + return -EOPNOTSUPP; + + if 
(copy_from_user(&coalesce, useraddr, sizeof(coalesce))) + return -EFAULT; + + return ethtool_ops->set_coalesce(dev, &coalesce); +} + +static int ethtool_get_ringparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; + + if (!ethtool_ops->get_ringparam) + return -EOPNOTSUPP; + + ethtool_ops->get_ringparam(dev, &ringparam); + + if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) + return -EFAULT; + return 0; +} + +static int ethtool_set_ringparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_ringparam ringparam; + + if (!ethtool_ops->get_ringparam) + return -EOPNOTSUPP; + + if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) + return -EFAULT; + + return ethtool_ops->set_ringparam(dev, &ringparam); +} + +static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; + + if (!ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + ethtool_ops->get_pauseparam(dev, &pauseparam); + + if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) + return -EFAULT; + return 0; +} + +static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_pauseparam pauseparam; + + if (!ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) + return -EFAULT; + + return ethtool_ops->set_pauseparam(dev, &pauseparam); +} + +static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GRXCSUM }; + + if (!ethtool_ops->get_rx_csum) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_rx_csum(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_rx_csum) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, 
useraddr, sizeof(edata))) + return -EFAULT; + + ethtool_ops->set_rx_csum(dev, edata.data); + return 0; +} + +static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GTXCSUM }; + + if (!ethtool_ops->get_tx_csum) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_tx_csum(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_tx_csum) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_tx_csum(dev, edata.data); +} + +static int ethtool_get_sg(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GSG }; + + if (!ethtool_ops->get_sg) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_sg(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_sg(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_sg) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_sg(dev, edata.data); +} + +static int ethtool_get_tso(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GTSO }; + + if (!ethtool_ops->get_tso) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_tso(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_tso(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_tso) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_tso(dev, edata.data); +} + +static int ethtool_self_test(struct net_device *dev, char *useraddr) +{ + struct ethtool_test 
test; + struct ethtool_ops *ops = ethtool_ops; + u64 *data; + int ret; + + if (!ops->self_test || !ops->self_test_count) + return -EOPNOTSUPP; + + if (copy_from_user(&test, useraddr, sizeof(test))) + return -EFAULT; + + test.len = ops->self_test_count(dev); + data = kmalloc(test.len * sizeof(u64), GFP_USER); + if (!data) + return -ENOMEM; + + ops->self_test(dev, &test, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &test, sizeof(test))) + goto out; + useraddr += sizeof(test); + if (copy_to_user(useraddr, data, test.len * sizeof(u64))) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_get_strings(struct net_device *dev, void *useraddr) +{ + struct ethtool_gstrings gstrings; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->get_strings) + return -EOPNOTSUPP; + + if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) + return -EFAULT; + + switch (gstrings.string_set) { + case ETH_SS_TEST: + if (!ops->self_test_count) + return -EOPNOTSUPP; + gstrings.len = ops->self_test_count(dev); + break; + case ETH_SS_STATS: + if (!ops->get_stats_count) + return -EOPNOTSUPP; + gstrings.len = ops->get_stats_count(dev); + break; + default: + return -EINVAL; + } + + data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); + if (!data) + return -ENOMEM; + + ops->get_strings(dev, gstrings.string_set, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) + goto out; + useraddr += sizeof(gstrings); + if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_phys_id(struct net_device *dev, void *useraddr) +{ + struct ethtool_value id; + + if (!ethtool_ops->phys_id) + return -EOPNOTSUPP; + + if (copy_from_user(&id, useraddr, sizeof(id))) + return -EFAULT; + + return ethtool_ops->phys_id(dev, id.data); +} + +static int ethtool_get_stats(struct net_device *dev, void *useraddr) +{ + struct 
ethtool_stats stats; + struct ethtool_ops *ops = ethtool_ops; + u64 *data; + int ret; + + if (!ops->get_ethtool_stats || !ops->get_stats_count) + return -EOPNOTSUPP; + + if (copy_from_user(&stats, useraddr, sizeof(stats))) + return -EFAULT; + + stats.n_stats = ops->get_stats_count(dev); + data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER); + if (!data) + return -ENOMEM; + + ops->get_ethtool_stats(dev, &stats, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &stats, sizeof(stats))) + goto out; + useraddr += sizeof(stats); + if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +/* The main entry point in this file. Called from net/core/dev.c */ + +#define ETHTOOL_OPS_COMPAT +int ethtool_ioctl(struct ifreq *ifr) +{ + struct net_device *dev = __dev_get_by_name(ifr->ifr_name); + void *useraddr = (void *) ifr->ifr_data; + u32 ethcmd; + + /* + * XXX: This can be pushed down into the ethtool_* handlers that + * need it. Keep existing behavior for the moment. 
+ */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (!dev || !netif_device_present(dev)) + return -ENODEV; + + if (copy_from_user(ðcmd, useraddr, sizeof (ethcmd))) + return -EFAULT; + + switch (ethcmd) { + case ETHTOOL_GSET: + return ethtool_get_settings(dev, useraddr); + case ETHTOOL_SSET: + return ethtool_set_settings(dev, useraddr); + case ETHTOOL_GDRVINFO: + return ethtool_get_drvinfo(dev, useraddr); + case ETHTOOL_GREGS: + return ethtool_get_regs(dev, useraddr); + case ETHTOOL_GWOL: + return ethtool_get_wol(dev, useraddr); + case ETHTOOL_SWOL: + return ethtool_set_wol(dev, useraddr); + case ETHTOOL_GMSGLVL: + return ethtool_get_msglevel(dev, useraddr); + case ETHTOOL_SMSGLVL: + return ethtool_set_msglevel(dev, useraddr); + case ETHTOOL_NWAY_RST: + return ethtool_nway_reset(dev); + case ETHTOOL_GLINK: + return ethtool_get_link(dev, useraddr); + case ETHTOOL_GEEPROM: + return ethtool_get_eeprom(dev, useraddr); + case ETHTOOL_SEEPROM: + return ethtool_set_eeprom(dev, useraddr); + case ETHTOOL_GCOALESCE: + return ethtool_get_coalesce(dev, useraddr); + case ETHTOOL_SCOALESCE: + return ethtool_set_coalesce(dev, useraddr); + case ETHTOOL_GRINGPARAM: + return ethtool_get_ringparam(dev, useraddr); + case ETHTOOL_SRINGPARAM: + return ethtool_set_ringparam(dev, useraddr); + case ETHTOOL_GPAUSEPARAM: + return ethtool_get_pauseparam(dev, useraddr); + case ETHTOOL_SPAUSEPARAM: + return ethtool_set_pauseparam(dev, useraddr); + case ETHTOOL_GRXCSUM: + return ethtool_get_rx_csum(dev, useraddr); + case ETHTOOL_SRXCSUM: + return ethtool_set_rx_csum(dev, useraddr); + case ETHTOOL_GTXCSUM: + return ethtool_get_tx_csum(dev, useraddr); + case ETHTOOL_STXCSUM: + return ethtool_set_tx_csum(dev, useraddr); + case ETHTOOL_GSG: + return ethtool_get_sg(dev, useraddr); + case ETHTOOL_SSG: + return ethtool_set_sg(dev, useraddr); + case ETHTOOL_GTSO: + return ethtool_get_tso(dev, useraddr); + case ETHTOOL_STSO: + return ethtool_set_tso(dev, useraddr); + case ETHTOOL_TEST: + return 
ethtool_self_test(dev, useraddr); + case ETHTOOL_GSTRINGS: + return ethtool_get_strings(dev, useraddr); + case ETHTOOL_PHYS_ID: + return ethtool_phys_id(dev, useraddr); + case ETHTOOL_GSTATS: + return ethtool_get_stats(dev, useraddr); + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +#define mii_if_info _kc_mii_if_info +struct _kc_mii_if_info { + int phy_id; + int advertising; + int phy_id_mask; + int reg_num_mask; + + unsigned int full_duplex : 1; /* is full duplex? */ + unsigned int force_media : 1; /* is autoneg. disabled? */ + + struct net_device *dev; + int (*mdio_read) (struct net_device *dev, int phy_id, int location); + void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val); +}; + +struct ethtool_cmd; +struct mii_ioctl_data; + +#undef mii_link_ok +#define mii_link_ok _kc_mii_link_ok +#undef mii_nway_restart +#define mii_nway_restart _kc_mii_nway_restart +#undef mii_ethtool_gset +#define mii_ethtool_gset _kc_mii_ethtool_gset +#undef mii_ethtool_sset +#define mii_ethtool_sset _kc_mii_ethtool_sset +#undef mii_check_link +#define mii_check_link _kc_mii_check_link +extern int _kc_mii_link_ok (struct mii_if_info *mii); +extern int _kc_mii_nway_restart (struct mii_if_info *mii); +extern int _kc_mii_ethtool_gset(struct mii_if_info *mii, + struct ethtool_cmd *ecmd); +extern int _kc_mii_ethtool_sset(struct mii_if_info *mii, + struct ethtool_cmd *ecmd); +extern void _kc_mii_check_link (struct mii_if_info *mii); +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) +#undef generic_mii_ioctl +#define generic_mii_ioctl _kc_generic_mii_ioctl +extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, + struct mii_ioctl_data *mii_data, int cmd, + unsigned int *duplex_changed); +#endif /* > 2.4.6 */ + + +struct _kc_pci_dev_ext { + struct pci_dev *dev; + void *pci_drvdata; + struct pci_driver *driver; +}; + +struct _kc_net_dev_ext { + struct net_device *dev; + unsigned int carrier; +}; + + +/**************************************/ +/* 
mii support */ + +int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) +{ + struct net_device *dev = mii->dev; + u32 advert, bmcr, lpa, nego; + + ecmd->supported = + (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); + + /* only supports twisted-pair */ + ecmd->port = PORT_MII; + + /* only supports internal transceiver */ + ecmd->transceiver = XCVR_INTERNAL; + + /* this isn't fully supported at higher layers */ + ecmd->phy_address = mii->phy_id; + + ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII; + advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); + if (advert & ADVERTISE_10HALF) + ecmd->advertising |= ADVERTISED_10baseT_Half; + if (advert & ADVERTISE_10FULL) + ecmd->advertising |= ADVERTISED_10baseT_Full; + if (advert & ADVERTISE_100HALF) + ecmd->advertising |= ADVERTISED_100baseT_Half; + if (advert & ADVERTISE_100FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); + lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA); + if (bmcr & BMCR_ANENABLE) { + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->autoneg = AUTONEG_ENABLE; + + nego = mii_nway_result(advert & lpa); + if (nego == LPA_100FULL || nego == LPA_100HALF) + ecmd->speed = SPEED_100; + else + ecmd->speed = SPEED_10; + if (nego == LPA_100FULL || nego == LPA_10FULL) { + ecmd->duplex = DUPLEX_FULL; + mii->full_duplex = 1; + } else { + ecmd->duplex = DUPLEX_HALF; + mii->full_duplex = 0; + } + } else { + ecmd->autoneg = AUTONEG_DISABLE; + + ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; + ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; + } + + /* ignore maxtxpkt, maxrxpkt for now */ + + return 0; +} + +int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) +{ + struct net_device *dev = mii->dev; + + if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) + return -EINVAL; + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + return -EINVAL; + if (ecmd->port != PORT_MII) + return -EINVAL; + if (ecmd->transceiver != XCVR_INTERNAL) + return -EINVAL; + if (ecmd->phy_address != mii->phy_id) + return -EINVAL; + if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + /* ignore supported, maxtxpkt, maxrxpkt */ + + if (ecmd->autoneg == AUTONEG_ENABLE) { + u32 bmcr, advert, tmp; + + if ((ecmd->advertising & (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full)) == 0) + return -EINVAL; + + /* advertise only what has been requested */ + advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); + tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); + if (ADVERTISED_10baseT_Half) + tmp |= ADVERTISE_10HALF; + if (ADVERTISED_10baseT_Full) + tmp |= ADVERTISE_10FULL; + if (ADVERTISED_100baseT_Half) + tmp |= ADVERTISE_100HALF; + if (ADVERTISED_100baseT_Full) + tmp |= ADVERTISE_100FULL; + if (advert != tmp) { + mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); + mii->advertising = tmp; + } + + /* turn on autonegotiation, and force a renegotiate */ + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); + + mii->force_media = 0; + } else { + u32 bmcr, tmp; + + /* turn off auto negotiation, set speed and duplexity */ + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); + tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX); + if (ecmd->speed == SPEED_100) + tmp |= BMCR_SPEED100; + if (ecmd->duplex == DUPLEX_FULL) { + tmp |= BMCR_FULLDPLX; + 
mii->full_duplex = 1; + } else + mii->full_duplex = 0; + if (bmcr != tmp) + mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); + + mii->force_media = 1; + } + return 0; +} + +int _kc_mii_link_ok (struct mii_if_info *mii) +{ + /* first, a dummy read, needed to latch some MII phys */ + mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); + if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS) + return 1; + return 0; +} + +int _kc_mii_nway_restart (struct mii_if_info *mii) +{ + int bmcr; + int r = -EINVAL; + + /* if autoneg is off, it's an error */ + bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); + + if (bmcr & BMCR_ANENABLE) { + bmcr |= BMCR_ANRESTART; + mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr); + r = 0; + } + + return r; +} + +void _kc_mii_check_link (struct mii_if_info *mii) +{ + int cur_link = mii_link_ok(mii); + int prev_link = netif_carrier_ok(mii->dev); + + if (cur_link && !prev_link) + netif_carrier_on(mii->dev); + else if (prev_link && !cur_link) + netif_carrier_off(mii->dev); +} + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) +int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, + struct mii_ioctl_data *mii_data, int cmd, + unsigned int *duplex_chg_out) +{ + int rc = 0; + unsigned int duplex_changed = 0; + + if (duplex_chg_out) + *duplex_chg_out = 0; + + mii_data->phy_id &= mii_if->phy_id_mask; + mii_data->reg_num &= mii_if->reg_num_mask; + + switch(cmd) { + case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */ + case SIOCGMIIPHY: + mii_data->phy_id = mii_if->phy_id; + /* fall through */ + + case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */ + case SIOCGMIIREG: + mii_data->val_out = + mii_if->mdio_read(mii_if->dev, mii_data->phy_id, + mii_data->reg_num); + break; + + case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */ + case SIOCSMIIREG: { + u16 val = mii_data->val_in; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (mii_data->phy_id == mii_if->phy_id) { + switch(mii_data->reg_num) { + case 
MII_BMCR: { + unsigned int new_duplex = 0; + if (val & (BMCR_RESET|BMCR_ANENABLE)) + mii_if->force_media = 0; + else + mii_if->force_media = 1; + if (mii_if->force_media && + (val & BMCR_FULLDPLX)) + new_duplex = 1; + if (mii_if->full_duplex != new_duplex) { + duplex_changed = 1; + mii_if->full_duplex = new_duplex; + } + break; + } + case MII_ADVERTISE: + mii_if->advertising = val; + break; + default: + /* do nothing */ + break; + } + } + + mii_if->mdio_write(mii_if->dev, mii_data->phy_id, + mii_data->reg_num, val); + break; + } + + default: + rc = -EOPNOTSUPP; + break; + } + + if ((rc == 0) && (duplex_chg_out) && (duplex_changed)) + *duplex_chg_out = 1; + + return rc; +} +#endif /* > 2.4.6 */ + From 5f1f014eddcf4aec400c5747da1e7cbb221bcf36 Mon Sep 17 00:00:00 2001 From: PeterLin Date: Thu, 8 Aug 2019 14:26:58 +0800 Subject: [PATCH 16/20] 1. remove ixgbe driver (use github to download and patch it) 2. add Pegatron project back to one-image 3. modify Pegatron project utility --- platform/nephos/one-image.mk | 8 +- platform/nephos/platform-modules-pegatron.mk | 2 +- .../debian/control | 4 +- .../debian/rules | 16 +- ...-Intel-ixgbe-driver-for-fn-6254-dn-f.patch | 382 + .../pegatron_fn_6254_dn_f_ixgbe/ixgbe.h | 1287 -- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.c | 1399 -- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.h | 43 - .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.c | 2614 ---- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.h | 55 - .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.c | 1624 --- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.h | 213 - .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.c | 168 - .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.h | 31 - .../ixgbe_common.c | 5274 ------- .../ixgbe_common.h | 171 - .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.c | 718 - .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.h | 165 - .../ixgbe_dcb_82598.c | 350 - .../ixgbe_dcb_82598.h | 90 - .../ixgbe_dcb_82599.c | 584 - .../ixgbe_dcb_82599.h | 118 - .../ixgbe_dcb_nl.c | 898 -- .../ixgbe_debugfs.c | 
281 - .../ixgbe_ethtool.c | 4429 ------ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.c | 1043 -- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.h | 93 - .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.c | 210 - .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.h | 51 - .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_lib.c | 1311 -- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_main.c | 11946 ---------------- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.c | 760 - .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.h | 155 - .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep.h | 200 - .../ixgbe_osdep2.h | 68 - .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_param.c | 1256 -- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.c | 2685 ---- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.h | 209 - .../ixgbe_procfs.c | 938 -- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_ptp.c | 1437 -- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.c | 1881 --- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.h | 92 - .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_sysfs.c | 257 - .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_type.h | 4337 ------ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.c | 1048 -- .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.h | 58 - .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.c | 4711 ------ .../pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.h | 115 - .../pegatron_fn_6254_dn_f_ixgbe/kcompat.c | 2375 --- .../pegatron_fn_6254_dn_f_ixgbe/kcompat.h | 5610 -------- .../kcompat_ethtool.c | 1169 -- .../fn-6254-dn-f/pegaProcess/__init__.py | 0 .../fn-6254-dn-f/pegaProcess/common.py | 87 + .../fn-6254-dn-f/pegaProcess/device.py | 370 + .../fn-6254-dn-f/pegaProcess/main.py | 205 + .../fn-6254-dn-f/scripts/sensors | 4 +- .../fn_6254_dn_f-platform-init.service | 4 +- .../fn_6254_dn_f-platform-status.service | 12 - .../utils/pegatron_fn_6254_dn_f_sensors.py | 141 - .../utils/pegatron_fn_6254_dn_f_status.py | 164 - .../utils/pegatron_fn_6254_dn_f_util.py | 265 +- 61 files changed, 1093 insertions(+), 65098 deletions(-) create mode 100644 
platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/0001-modify-Intel-ixgbe-driver-for-fn-6254-dn-f.patch delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.c delete mode 100644 
platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_nl.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_debugfs.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ethtool.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_lib.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_main.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep.h delete mode 100644 
platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep2.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_param.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_procfs.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ptp.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sysfs.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_type.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.c delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat.c delete mode 100644 
platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat.h delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat_ethtool.c create mode 100755 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/__init__.py create mode 100755 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/common.py create mode 100755 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/device.py create mode 100755 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/main.py delete mode 100644 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-status.service delete mode 100755 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_sensors.py delete mode 100755 platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_status.py diff --git a/platform/nephos/one-image.mk b/platform/nephos/one-image.mk index 250448ed2c1d..0500e8315268 100644 --- a/platform/nephos/one-image.mk +++ b/platform/nephos/one-image.mk @@ -8,13 +8,9 @@ $(SONIC_ONE_IMAGE)_INSTALLS += $(SYSTEMD_SONIC_GENERATOR) $(SONIC_ONE_IMAGE)_LAZY_INSTALLS += $(INGRASYS_S9130_32X_PLATFORM_MODULE) \ $(INGRASYS_S9230_64X_PLATFORM_MODULE) \ $(ACCTON_AS7116_54X_PLATFORM_MODULE) \ -<<<<<<< HEAD $(CIG_CS6436_56P_PLATFORM_MODULE) \ - $(PEGATRON_PORSCHE_PLATFORM_MODULE) \ - $(PEGATRON_FN_6254_DN_F_PLATFORM_MODULE) -$(SONIC_ONE_IMAGE)_DOCKERS += $(SONIC_INSTALL_DOCKER_IMAGES) -======= - $(CIG_CS6436_56P_PLATFORM_MODULE) + $(PEGATRON_PORSCHE_PLATFORM_MODULE) \ + $(PEGATRON_FN_6254_DN_F_PLATFORM_MODULE) ifeq ($(INSTALL_DEBUG_TOOLS),y) $(SONIC_ONE_IMAGE)_DOCKERS += $(SONIC_INSTALL_DOCKER_DBG_IMAGES) $(SONIC_ONE_IMAGE)_DOCKERS += $(filter-out $(patsubst %-$(DBG_IMAGE_MARK).gz,%.gz, $(SONIC_INSTALL_DOCKER_DBG_IMAGES)), $(SONIC_INSTALL_DOCKER_IMAGES)) 
diff --git a/platform/nephos/platform-modules-pegatron.mk b/platform/nephos/platform-modules-pegatron.mk index ef32c9cbc0e8..7602a7fe42e0 100755 --- a/platform/nephos/platform-modules-pegatron.mk +++ b/platform/nephos/platform-modules-pegatron.mk @@ -1,6 +1,6 @@ # Pegatron Platform modules -PEGATRON_PORSCHE_PLATFORM_MODULE_VERSION = 0.0.1 +PEGATRON_PORSCHE_PLATFORM_MODULE_VERSION = 1.0.0 PEGATRON_FN_6254_DN_F_PLATFORM_MODULE_VERSION = 1.0.0 export PEGATRON_PORSCHE_PLATFORM_MODULE_VERSION diff --git a/platform/nephos/sonic-platform-modules-pegatron/debian/control b/platform/nephos/sonic-platform-modules-pegatron/debian/control index c24275f6b94e..37c0a423cf50 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/debian/control +++ b/platform/nephos/sonic-platform-modules-pegatron/debian/control @@ -7,10 +7,10 @@ Standards-Version: 3.9.3 Package: sonic-platform-pegatron-porsche Architecture: amd64 -Depends: linux-image-4.9.0-8-amd64 +Depends: linux-image-4.9.0-9-amd64 Description: kernel modules for platform devices such as fan, led, sfp Package: sonic-platform-pegatron-fn-6254-dn-f Architecture: amd64 -Depends: linux-image-4.9.0-8-amd64 +Depends: linux-image-4.9.0-9-amd64 Description: kernel modules for platform devices such as fan, led, sfp diff --git a/platform/nephos/sonic-platform-modules-pegatron/debian/rules b/platform/nephos/sonic-platform-modules-pegatron/debian/rules index 906bbc9ad981..11f0ca097d7c 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/debian/rules +++ b/platform/nephos/sonic-platform-modules-pegatron/debian/rules @@ -25,6 +25,7 @@ UTILS_DIR := utils SERVICE_DIR := service SCRIPTS_DIR := scripts CONF_DIR := conf +PROCESS_DIR := pegaProcess %: dh $@ --with systemd,python2,python3 --buildsystem=pybuild @@ -35,6 +36,11 @@ clean: dh_clean build: + # For fn-6254-dn-f ixgbe driver + git clone https://github.com/Peter5Lin/kernelDriver + git --git-dir=kernelDriver/.git/ --work-tree=kernelDriver am 
../fn-6254-dn-f/$(MODULE_DIR)/pegatron_fn_6254_dn_f_ixgbe/0001-modify-Intel-ixgbe-driver-for-fn-6254-dn-f.patch + cp kernelDriver/* $(MOD_SRC_DIR)/fn-6254-dn-f/$(MODULE_DIR)/pegatron_fn_6254_dn_f_ixgbe/ + rm -rf kernelDriver (for mod in $(MODULE_DIRS); do \ make modules -C $(KERNEL_SRC)/build M=$(MOD_SRC_DIR)/$${mod}/modules; \ done) @@ -57,14 +63,16 @@ binary-indep: # Custom package commands (for mod in $(MODULE_DIRS); do \ - dh_installdirs -p$(PACKAGE_PRE_NAME)-$${mod}/$(KERNEL_SRC)/$(INSTALL_MOD_DIR); \ - dh_installdirs -p$(PACKAGE_PRE_NAME)-$${mod}/usr/local/bin; \ - dh_installdirs -p$(PACKAGE_PRE_NAME)-$${mod}/usr/bin; \ - dh_installdirs -p$(PACKAGE_PRE_NAME)-$${mod}/lib/systemd/system; \ + dh_installdirs -p$(PACKAGE_PRE_NAME)-$${mod} /$(KERNEL_SRC)/$(INSTALL_MOD_DIR); \ + dh_installdirs -p$(PACKAGE_PRE_NAME)-$${mod} /usr/local/bin; \ + dh_installdirs -p$(PACKAGE_PRE_NAME)-$${mod} /usr/local/bin/${PROCESS_DIR}; \ + dh_installdirs -p$(PACKAGE_PRE_NAME)-$${mod} /usr/bin; \ + dh_installdirs -p$(PACKAGE_PRE_NAME)-$${mod} /lib/systemd/system; \ cp $(MOD_SRC_DIR)/$${mod}/$(MODULE_DIR)/*.ko debian/$(PACKAGE_PRE_NAME)-$${mod}/$(KERNEL_SRC)/$(INSTALL_MOD_DIR); \ cp $(MOD_SRC_DIR)/$${mod}/$(UTILS_DIR)/* debian/$(PACKAGE_PRE_NAME)-$${mod}/usr/local/bin/; \ cp $(MOD_SRC_DIR)/$${mod}/$(SERVICE_DIR)/*.service debian/$(PACKAGE_PRE_NAME)-$${mod}/lib/systemd/system/; \ cp $(MOD_SRC_DIR)/$${mod}/$(SCRIPTS_DIR)/* debian/$(PACKAGE_PRE_NAME)-$${mod}/usr/bin/; \ + cp $(MOD_SRC_DIR)/$${mod}/${PROCESS_DIR}/* debian/$(PACKAGE_PRE_NAME)-$${mod}/usr/local/bin/${PROCESS_DIR}; \ done) # For fn-6254-dn-f ixgbe driver diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/0001-modify-Intel-ixgbe-driver-for-fn-6254-dn-f.patch b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/0001-modify-Intel-ixgbe-driver-for-fn-6254-dn-f.patch new file mode 100644 index 000000000000..1a76e80666ed --- 
/dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/0001-modify-Intel-ixgbe-driver-for-fn-6254-dn-f.patch @@ -0,0 +1,382 @@ +From 004c2ebd62f10d3a4c56ebef4056a90d063f8ea0 Mon Sep 17 00:00:00 2001 +From: PeterLin +Date: Wed, 7 Aug 2019 13:56:41 +0800 +Subject: [PATCH] modify Intel ixgbe driver for fn-6254-dn-f + +--- + ixgbe_common.c | 165 +++++++++++++++++++++++++++++++++++++++++++++++++------- + ixgbe_ethtool.c | 82 +++++++++++++++++++++++++++- + ixgbe_main.c | 10 +++- + ixgbe_x550.c | 11 ++-- + 4 files changed, 244 insertions(+), 24 deletions(-) + +diff --git a/ixgbe_common.c b/ixgbe_common.c +index b725de4..04369ba 100644 +--- a/ixgbe_common.c ++++ b/ixgbe_common.c +@@ -382,6 +382,8 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) + s32 ret_val; + u32 ctrl_ext; + u16 device_caps; ++ s32 rc; ++ u16 regVal=0; + + DEBUGFUNC("ixgbe_start_hw_generic"); + +@@ -428,6 +430,67 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + ++#if 1 /* To modify speed LED polarity and configure led on only for speed 1G in M88E1512 ++ * for Porsche2 platform. 
++ * From 88E1512 datasheet: ++ * Page register: 0x16 ++ * LED functon control register: 0x10 in page 3 ++ * LED polarity control register: 0x11 in page 3 ++ */ ++ ++ if (hw->mac.type == ixgbe_mac_X550EM_a && ++ (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { ++ /* For M88E1512, to select page 3 in register 0x16 */ ++ regVal = 0x03; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++#if 0 //for debug ++ /* For M88E1512, read from register 0x16 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x16, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "phy register read failed, rc:%x\n", rc); ++ } ++ hw_err(hw, "####read phy register 0x16 again, value:%x\n", regVal); ++#endif ++ /* For M88E1512, read from page 3, register 0x11 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x11, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "led polarity register read failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write to page 3 register 0x11 with polarity bit set */ ++ regVal |= 0x01; ++ rc = hw->phy.ops.write_reg(hw, 0x11, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "led polarity register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, read from page 3, register 16 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "led function control register read failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write to page 3 register 16 with only 1000M led on */ ++ regVal = (regVal & 0xFFF0) | 0x0007; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write page 22 back to default 0 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ } ++#endif + 
return IXGBE_SUCCESS; + } + +@@ -965,17 +1028,50 @@ s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) + s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) + { + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ++ s32 rc; ++ u16 regVal; + + DEBUGFUNC("ixgbe_led_on_generic"); +- +- if (index > 3) +- return IXGBE_ERR_PARAM; +- +- /* To turn on the LED, set mode to ON. */ +- led_reg &= ~IXGBE_LED_MODE_MASK(index); +- led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); +- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); +- IXGBE_WRITE_FLUSH(hw); ++ if (hw->mac.type == ixgbe_mac_X550EM_a) { ++ /* For M88E1512, to select page 3 in register 22 */ ++ regVal = 0x03; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, read from page 3, register 16 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "led function control register read failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write to page 3 register 16 with force led on */ ++ regVal = (regVal & 0xFF00) | 0x0099; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write page 22 back to default 0 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ } ++ else ++ { ++ if (index > 3) ++ return IXGBE_ERR_PARAM; ++ ++ /* To turn on the LED, set mode to ON. 
*/ ++ led_reg &= ~IXGBE_LED_MODE_MASK(index); ++ led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); ++ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); ++ IXGBE_WRITE_FLUSH(hw); ++ } + + return IXGBE_SUCCESS; + } +@@ -988,18 +1084,51 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) + s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) + { + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ++ s32 rc; ++ u16 regVal; + + DEBUGFUNC("ixgbe_led_off_generic"); + +- if (index > 3) +- return IXGBE_ERR_PARAM; +- +- /* To turn off the LED, set mode to OFF. */ +- led_reg &= ~IXGBE_LED_MODE_MASK(index); +- led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); +- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); +- IXGBE_WRITE_FLUSH(hw); +- ++ if (hw->mac.type == ixgbe_mac_X550EM_a) { ++ /* For M88E1512, to select page 3 in register 22 */ ++ regVal = 0x03; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, read from page 3, register 16 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "led function control register read failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write to page 3 register 16 with force led on */ ++ regVal = (regVal & 0xFF00) | 0x0088; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write page 22 back to default 0 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ } ++ else ++ { ++ if (index > 3) ++ return IXGBE_ERR_PARAM; ++ ++ /* To turn off the LED, set mode to OFF. 
*/ ++ led_reg &= ~IXGBE_LED_MODE_MASK(index); ++ led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); ++ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); ++ IXGBE_WRITE_FLUSH(hw); ++ } + return IXGBE_SUCCESS; + } + +diff --git a/ixgbe_ethtool.c b/ixgbe_ethtool.c +index 1e8762c..a446f22 100644 +--- a/ixgbe_ethtool.c ++++ b/ixgbe_ethtool.c +@@ -839,7 +839,55 @@ static u32 ixgbe_get_msglevel(struct net_device *netdev) + static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ u16 regVal; ++ s32 rc; ++ + adapter->msg_enable = data; ++ ++ regVal = 0x03; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ ++ /* For M88E1512, read from (page 3, register 16)[LED Function Control Register] */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); ++ /*hw_err(hw, "[Pega Debug] : current register value = 0x%x\n", regVal);*/ ++ if (rc) ++ hw_err(hw, "led function control register read failed, rc:%x\n", rc); ++ ++ if (data == 0) /* Turn off OOB LED. */ ++ { ++ /* For M88E1512, write to (page 3, register 16) with force led off */ ++ regVal = (regVal & 0xFF00) | 0x0088; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ else if (data == 1) /* Turn on OOB LED. */ ++ { ++ /* For M88E1512, write to (page 3, register 16) with force led on */ ++ regVal = (regVal & 0xFF00) | 0x0099; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ else /* Switch OOB LED back to normal. */ ++ { ++ /* For M88E1512, set led back to nornmal in (page 3, register 16). 
*/ ++ regVal = (regVal & 0xFF00) | 0x0017; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write 0 in (page 0, register 22) to back to page 0 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ + } + + static int ixgbe_get_regs_len(struct net_device __always_unused *netdev) +@@ -2607,6 +2655,8 @@ static int ixgbe_set_phys_id(struct net_device *netdev, + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; ++ s32 rc; ++ u16 regVal; + + if (!hw->mac.ops.led_on || !hw->mac.ops.led_off) + return -EOPNOTSUPP; +@@ -2628,7 +2678,37 @@ static int ixgbe_set_phys_id(struct net_device *netdev, + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ +- IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); ++ if (hw->mac.type == ixgbe_mac_X550EM_a) { ++ /* For M88E1512, to select page 3 in register 22 */ ++ regVal = 0x03; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, read from page 3, register 16 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); ++ if (rc) { ++ hw_err(hw, "led function control register read failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write to page 3 register 16 with force led on */ ++ regVal = (regVal & 0xFF00) | 0x0017; ++ rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "led function control register write failed, rc:%x\n", rc); ++ } ++ ++ /* For M88E1512, write page 22 back to default 0 */ ++ regVal = 0x00; ++ rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); ++ if (rc) { ++ hw_err(hw, "page register write failed, rc:%x\n", rc); ++ } ++ } ++ else ++ 
IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); + break; + } + +diff --git a/ixgbe_main.c b/ixgbe_main.c +index c7a1499..613c692 100644 +--- a/ixgbe_main.c ++++ b/ixgbe_main.c +@@ -11236,7 +11236,15 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, + if (hw->mac.ops.get_bus_info) + hw->mac.ops.get_bus_info(hw); + +- strcpy(netdev->name, "eth%d"); ++ if(!strcmp("0000:03:00.0", pci_name(pdev))) ++ strcpy(netdev->name, "eth0"); ++ else if(!strcmp("0000:03:00.1", pci_name(pdev))) ++ strcpy(netdev->name, "eth1"); ++ else if(!strcmp("0000:02:00.0", pci_name(pdev))) ++ strcpy(netdev->name, "eth2"); ++ else if(!strcmp("0000:02:00.1", pci_name(pdev))) ++ strcpy(netdev->name, "eth3"); ++ + err = register_netdev(netdev); + if (err) + goto err_register; +diff --git a/ixgbe_x550.c b/ixgbe_x550.c +index 7c8b72f..c401159 100644 +--- a/ixgbe_x550.c ++++ b/ixgbe_x550.c +@@ -566,8 +566,8 @@ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) + phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK; + hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK; + hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; +- if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) +- return IXGBE_ERR_PHY_ADDR_INVALID; ++ /*if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) ++ return IXGBE_ERR_PHY_ADDR_INVALID;*/ + return IXGBE_SUCCESS; + } + +@@ -585,8 +585,8 @@ static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + + hw->phy.type = ixgbe_phy_fw; +- hw->phy.ops.read_reg = NULL; +- hw->phy.ops.write_reg = NULL; ++ /*hw->phy.ops.read_reg = NULL; ++ hw->phy.ops.write_reg = NULL;*/ + return ixgbe_get_phy_id_fw(hw); + } + +@@ -2334,6 +2334,9 @@ STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; + } ++ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { ++ hw->phy.addr = (hw->bus.lan_id == 0) ? 
(1) : (0); ++ } + + return IXGBE_SUCCESS; + } +-- +2.7.4 + diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe.h deleted file mode 100644 index 33be88cc7eea..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe.h +++ /dev/null @@ -1,1287 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_H_ -#define _IXGBE_H_ - -#include - -#include -#include -#include - -#ifdef SIOCETHTOOL -#include -#endif -#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) -#include -#endif -/* Can't use IS_ENABLED until after kcompat is loaded */ -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) -#define IXGBE_DCA -#include -#endif -#include "ixgbe_dcb.h" - -#include "kcompat.h" - -#ifdef CONFIG_NET_RX_BUSY_POLL -#include -#ifdef HAVE_NDO_BUSY_POLL -#define BP_EXTENDED_STATS -#endif -#endif /* CONFIG_NET_RX_BUSY_POLL */ - -#ifdef HAVE_SCTP -#include -#endif - -#ifdef HAVE_INCLUDE_LINUX_MDIO_H -#include -#endif - -#if IS_ENABLED(CONFIG_FCOE) -#include "ixgbe_fcoe.h" -#endif /* CONFIG_FCOE */ - -#include "ixgbe_api.h" - -#include "ixgbe_common.h" - -#define PFX "ixgbe: " -#define DPRINTK(nlevel, klevel, fmt, args...) \ - ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ - printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ - __func__ , ## args))) - -#ifdef HAVE_PTP_1588_CLOCK -#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H -#include -#endif /* HAVE_INCLUDE_TIMECOUNTER_H */ -#include -#include -#include -#endif - -/* TX/RX descriptor defines */ -#define IXGBE_DEFAULT_TXD 512 -#define IXGBE_DEFAULT_TX_WORK 256 -#define IXGBE_MAX_TXD 4096 -#define IXGBE_MIN_TXD 64 - -#define IXGBE_DEFAULT_RXD 512 -#define IXGBE_DEFAULT_RX_WORK 256 -#define IXGBE_MAX_RXD 4096 -#define IXGBE_MIN_RXD 64 - -#define IXGBE_ETH_P_LLDP 0x88CC - -/* flow control */ -#define IXGBE_MIN_FCRTL 0x40 -#define IXGBE_MAX_FCRTL 0x7FF80 -#define IXGBE_MIN_FCRTH 0x600 -#define IXGBE_MAX_FCRTH 0x7FFF0 -#define IXGBE_DEFAULT_FCPAUSE 0xFFFF -#define IXGBE_MIN_FCPAUSE 0 -#define IXGBE_MAX_FCPAUSE 0xFFFF - -/* Supported Rx Buffer Sizes */ -#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ -#define IXGBE_RXBUFFER_1536 1536 -#define 
IXGBE_RXBUFFER_2K 2048 -#define IXGBE_RXBUFFER_3K 3072 -#define IXGBE_RXBUFFER_4K 4096 -#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT -#define IXGBE_RXBUFFER_7K 7168 -#define IXGBE_RXBUFFER_8K 8192 -#define IXGBE_RXBUFFER_15K 15360 -#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ -#define IXGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */ - -/* Attempt to maximize the headroom available for incoming frames. We - * use a 2K buffer for receives and need 1536/1534 to store the data for - * the frame. This leaves us with 512 bytes of room. From that we need - * to deduct the space needed for the shared info and the padding needed - * to IP align the frame. - * - * Note: For cache line sizes 256 or larger this value is going to end - * up negative. In these cases we should fall back to the 3K - * buffers. - */ -#if (PAGE_SIZE < 8192) -#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN) -#define IXGBE_2K_TOO_SMALL_WITH_PADDING \ -((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K)) - -static inline int ixgbe_compute_pad(int rx_buf_len) -{ - int page_size, pad_size; - - page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); - pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; - - return pad_size; -} - -static inline int ixgbe_skb_pad(void) -{ - int rx_buf_len; - - /* If a 2K buffer cannot handle a standard Ethernet frame then - * optimize padding for a 3K buffer instead of a 1.5K buffer. - * - * For a 3K buffer we need to add enough padding to allow for - * tailroom due to NET_IP_ALIGN possibly shifting us out of - * cache-line alignment. 
- */ - if (IXGBE_2K_TOO_SMALL_WITH_PADDING) - rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN); - else - rx_buf_len = IXGBE_RXBUFFER_1536; - - /* if needed make room for NET_IP_ALIGN */ - rx_buf_len -= NET_IP_ALIGN; - - return ixgbe_compute_pad(rx_buf_len); -} - -#define IXGBE_SKB_PAD ixgbe_skb_pad() -#else -#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) -#endif - -/* - * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we - * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, - * this adds up to 448 bytes of extra data. - * - * Since netdev_alloc_skb now allocates a page fragment we can use a value - * of 256 and the resultant skb will have a truesize of 960 or less. - */ -#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 - -#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) - -/* How many Rx Buffers do we bundle into one write to the hardware ? */ -#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ - -#ifdef HAVE_STRUCT_DMA_ATTRS -#define IXGBE_RX_DMA_ATTR NULL -#else -#define IXGBE_RX_DMA_ATTR \ - (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) -#endif - -/* assume the kernel supports 8021p to avoid stripping vlan tags */ -#ifdef IXGBE_DISABLE_8021P_SUPPORT -#ifndef HAVE_8021P_SUPPORT -#define HAVE_8021P_SUPPORT -#endif -#endif /* IXGBE_DISABLE_8021P_SUPPORT */ - -enum ixgbe_tx_flags { - /* cmd_type flags */ - IXGBE_TX_FLAGS_HW_VLAN = 0x01, - IXGBE_TX_FLAGS_TSO = 0x02, - IXGBE_TX_FLAGS_TSTAMP = 0x04, - - /* olinfo flags */ - IXGBE_TX_FLAGS_CC = 0x08, - IXGBE_TX_FLAGS_IPV4 = 0x10, - IXGBE_TX_FLAGS_CSUM = 0x20, - - /* software defined flags */ - IXGBE_TX_FLAGS_SW_VLAN = 0x40, - IXGBE_TX_FLAGS_FCOE = 0x80, -}; - -/* VLAN info */ -#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 -#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 -#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 -#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 - -#define IXGBE_MAX_RX_DESC_POLL 10 - -#define IXGBE_MAX_VF_MC_ENTRIES 30 -#define 
IXGBE_MAX_VF_FUNCTIONS 64 -#define IXGBE_MAX_VFTA_ENTRIES 128 -#define MAX_EMULATION_MAC_ADDRS 16 -#define IXGBE_MAX_PF_MACVLANS 15 - -/* must account for pools assigned to VFs. */ -#ifdef CONFIG_PCI_IOV -#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) -#else -#define VMDQ_P(p) (p) -#endif - -#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ - { \ - u32 current_counter = IXGBE_READ_REG(hw, reg); \ - if (current_counter < last_counter) \ - counter += 0x100000000LL; \ - last_counter = current_counter; \ - counter &= 0xFFFFFFFF00000000LL; \ - counter |= current_counter; \ - } - -#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ - { \ - u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ - u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ - u64 current_counter = (current_counter_msb << 32) | \ - current_counter_lsb; \ - if (current_counter < last_counter) \ - counter += 0x1000000000LL; \ - last_counter = current_counter; \ - counter &= 0xFFFFFFF000000000LL; \ - counter |= current_counter; \ - } - -struct vf_stats { - u64 gprc; - u64 gorc; - u64 gptc; - u64 gotc; - u64 mprc; -}; -struct vf_data_storage { - struct pci_dev *vfdev; - unsigned char vf_mac_addresses[ETH_ALEN]; - u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; - u16 num_vf_mc_hashes; - bool clear_to_send; - struct vf_stats vfstats; - struct vf_stats last_vfstats; - struct vf_stats saved_rst_vfstats; - bool pf_set_mac; - u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ - u16 pf_qos; - u16 tx_rate; - u8 spoofchk_enabled; -#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN - bool rss_query_enabled; -#endif - u8 trusted; - int xcast_mode; - unsigned int vf_api; -}; - -struct vf_macvlans { - struct list_head l; - int vf; - bool free; - bool is_macvlan; - u8 vf_macvlan[ETH_ALEN]; -}; - -#define IXGBE_MAX_TXD_PWR 14 -#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) - -/* Tx Descriptors needed, worst case */ -#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) -#ifndef MAX_SKB_FRAGS -#define DESC_NEEDED 4 -#elif (MAX_SKB_FRAGS < 16) -#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) -#else -#define DESC_NEEDED (MAX_SKB_FRAGS + 4) -#endif - -/* wrapper around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer */ -struct ixgbe_tx_buffer { - union ixgbe_adv_tx_desc *next_to_watch; - unsigned long time_stamp; - struct sk_buff *skb; - unsigned int bytecount; - unsigned short gso_segs; - __be16 protocol; - DEFINE_DMA_UNMAP_ADDR(dma); - DEFINE_DMA_UNMAP_LEN(len); - u32 tx_flags; -}; - -struct ixgbe_rx_buffer { - struct sk_buff *skb; - dma_addr_t dma; -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - struct page *page; -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 page_offset; -#else - __u16 page_offset; -#endif - __u16 pagecnt_bias; -#endif -}; - -struct ixgbe_queue_stats { - u64 packets; - u64 bytes; -#ifdef BP_EXTENDED_STATS - u64 yields; - u64 misses; - u64 cleaned; -#endif /* BP_EXTENDED_STATS */ -}; - -struct ixgbe_tx_queue_stats { - u64 restart_queue; - u64 tx_busy; - u64 tx_done_old; -}; - -struct ixgbe_rx_queue_stats { - u64 rsc_count; - u64 rsc_flush; - u64 non_eop_descs; - u64 alloc_rx_page_failed; - u64 alloc_rx_buff_failed; - u64 csum_err; -}; - -#define IXGBE_TS_HDR_LEN 8 -enum ixgbe_ring_state_t { -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - __IXGBE_RX_3K_BUFFER, - __IXGBE_RX_BUILD_SKB_ENABLED, -#endif - __IXGBE_RX_RSC_ENABLED, - __IXGBE_RX_CSUM_UDP_ZERO_ERR, 
-#if IS_ENABLED(CONFIG_FCOE) - __IXGBE_RX_FCOE, -#endif - __IXGBE_TX_FDIR_INIT_DONE, - __IXGBE_TX_XPS_INIT_DONE, - __IXGBE_TX_DETECT_HANG, - __IXGBE_HANG_CHECK_ARMED, -}; - -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT -#define ring_uses_build_skb(ring) \ - test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) -#endif -#define check_for_tx_hang(ring) \ - test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) -#define set_check_for_tx_hang(ring) \ - set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) -#define clear_check_for_tx_hang(ring) \ - clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) -#define ring_is_rsc_enabled(ring) \ - test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) -#define set_ring_rsc_enabled(ring) \ - set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) -#define clear_ring_rsc_enabled(ring) \ - clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) -#define netdev_ring(ring) (ring->netdev) -#define ring_queue_index(ring) (ring->queue_index) - - -struct ixgbe_ring { - struct ixgbe_ring *next; /* pointer to next ring in q_vector */ - struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */ - struct net_device *netdev; /* netdev ring belongs to */ - struct device *dev; /* device for DMA mapping */ - void *desc; /* descriptor ring memory */ - union { - struct ixgbe_tx_buffer *tx_buffer_info; - struct ixgbe_rx_buffer *rx_buffer_info; - }; - unsigned long state; - u8 __iomem *tail; - dma_addr_t dma; /* phys. 
address of descriptor ring */ - unsigned int size; /* length in bytes */ - - u16 count; /* amount of descriptors */ - - u8 queue_index; /* needed for multiqueue queue management */ - u8 reg_idx; /* holds the special value that gets - * the hardware register offset - * associated with this ring, which is - * different for DCB and RSS modes - */ - u16 next_to_use; - u16 next_to_clean; - -#ifdef HAVE_PTP_1588_CLOCK - unsigned long last_rx_timestamp; - -#endif - union { -#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - u16 rx_buf_len; -#else - u16 next_to_alloc; -#endif - struct { - u8 atr_sample_rate; - u8 atr_count; - }; - }; - - u8 dcb_tc; - struct ixgbe_queue_stats stats; -#ifdef HAVE_NDO_GET_STATS64 - struct u64_stats_sync syncp; -#endif - union { - struct ixgbe_tx_queue_stats tx_stats; - struct ixgbe_rx_queue_stats rx_stats; - }; -} ____cacheline_internodealigned_in_smp; - -enum ixgbe_ring_f_enum { - RING_F_NONE = 0, - RING_F_VMDQ, /* SR-IOV uses the same ring feature */ - RING_F_RSS, - RING_F_FDIR, -#if IS_ENABLED(CONFIG_FCOE) - RING_F_FCOE, -#endif /* CONFIG_FCOE */ - RING_F_ARRAY_SIZE /* must be last in enum set */ -}; - -#define IXGBE_MAX_DCB_INDICES 8 -#define IXGBE_MAX_RSS_INDICES 16 -#define IXGBE_MAX_RSS_INDICES_X550 63 -#define IXGBE_MAX_VMDQ_INDICES 64 -#define IXGBE_MAX_FDIR_INDICES 63 -#if IS_ENABLED(CONFIG_FCOE) -#define IXGBE_MAX_FCOE_INDICES 8 -#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) -#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) -#else -#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) -#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) -#endif /* CONFIG_FCOE */ -struct ixgbe_ring_feature { - u16 limit; /* upper limit on feature indices */ - u16 indices; /* current value of indices */ - u16 mask; /* Mask used for feature to ring mapping */ - u16 offset; /* offset to start of feature */ -}; - -#define IXGBE_82599_VMDQ_8Q_MASK 0x78 -#define IXGBE_82599_VMDQ_4Q_MASK 0x7C -#define 
IXGBE_82599_VMDQ_2Q_MASK 0x7E - -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT -/* - * FCoE requires that all Rx buffers be over 2200 bytes in length. Since - * this is twice the size of a half page we need to double the page order - * for FCoE enabled Rx queues. - */ -static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring __maybe_unused *ring) -{ -#if MAX_SKB_FRAGS < 8 - return ALIGN(IXGBE_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024); -#else - if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) - return IXGBE_RXBUFFER_3K; -#if (PAGE_SIZE < 8192) - if (ring_uses_build_skb(ring)) - return IXGBE_MAX_2K_FRAME_BUILD_SKB; -#endif - return IXGBE_RXBUFFER_2K; -#endif -} - -static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring __maybe_unused *ring) -{ -#if (PAGE_SIZE < 8192) - if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) - return 1; -#endif - return 0; -} -#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring)) - -#endif -struct ixgbe_ring_container { - struct ixgbe_ring *ring; /* pointer to linked list of rings */ - unsigned int total_bytes; /* total bytes processed this int */ - unsigned int total_packets; /* total packets processed this int */ - u16 work_limit; /* total work allowed per interrupt */ - u8 count; /* total number of rings in vector */ - u8 itr; /* current ITR setting for ring */ -}; - -/* iterator for handling rings in ring container */ -#define ixgbe_for_each_ring(pos, head) \ - for (pos = (head).ring; pos != NULL; pos = pos->next) - -#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ - ? 8 : 1) -#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS - -/* MAX_MSIX_Q_VECTORS of these are allocated, - * but we only use one per queue-specific vector. 
- */ -struct ixgbe_q_vector { - struct ixgbe_adapter *adapter; - int cpu; /* CPU for DCA */ - u16 v_idx; /* index of q_vector within array, also used for - * finding the bit in EICR and friends that - * represents the vector for this ring */ - u16 itr; /* Interrupt throttle rate written to EITR */ - struct ixgbe_ring_container rx, tx; - - struct napi_struct napi; -#ifndef HAVE_NETDEV_NAPI_LIST - struct net_device poll_dev; -#endif -#ifdef HAVE_IRQ_AFFINITY_HINT - cpumask_t affinity_mask; -#endif - int numa_node; - struct rcu_head rcu; /* to avoid race with update stats on free */ - char name[IFNAMSIZ + 9]; - bool netpoll_rx; - -#ifdef HAVE_NDO_BUSY_POLL - atomic_t state; -#endif /* HAVE_NDO_BUSY_POLL */ - - /* for dynamic allocation of rings associated with this q_vector */ - struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; -}; - -#ifdef HAVE_NDO_BUSY_POLL -enum ixgbe_qv_state_t { - IXGBE_QV_STATE_IDLE = 0, - IXGBE_QV_STATE_NAPI, - IXGBE_QV_STATE_POLL, - IXGBE_QV_STATE_DISABLE -}; - -static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) -{ - /* reset state to idle */ - atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); -} - -/* called from the device poll routine to get ownership of a q_vector */ -static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) -{ - int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, - IXGBE_QV_STATE_NAPI); -#ifdef BP_EXTENDED_STATS - if (rc != IXGBE_QV_STATE_IDLE) - q_vector->tx.ring->stats.yields++; -#endif - - return rc == IXGBE_QV_STATE_IDLE; -} - -/* returns true is someone tried to get the qv while napi had it */ -static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) -{ - WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI); - - /* flush any outstanding Rx frames */ - if (q_vector->napi.gro_list) - napi_gro_flush(&q_vector->napi, false); - - /* reset state to idle */ - atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); -} - -/* called from 
ixgbe_low_latency_poll() */ -static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) -{ - int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, - IXGBE_QV_STATE_POLL); -#ifdef BP_EXTENDED_STATS - if (rc != IXGBE_QV_STATE_IDLE) - q_vector->tx.ring->stats.yields++; -#endif - return rc == IXGBE_QV_STATE_IDLE; -} - -/* returns true if someone tried to get the qv while it was locked */ -static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) -{ - WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL); - - /* reset state to idle */ - atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); -} - -/* true if a socket is polling, even if it did not get the lock */ -static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) -{ - return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL; -} - -/* false if QV is currently owned */ -static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) -{ - int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, - IXGBE_QV_STATE_DISABLE); - - return rc == IXGBE_QV_STATE_IDLE; -} - -#endif /* HAVE_NDO_BUSY_POLL */ -#ifdef IXGBE_HWMON - -#define IXGBE_HWMON_TYPE_LOC 0 -#define IXGBE_HWMON_TYPE_TEMP 1 -#define IXGBE_HWMON_TYPE_CAUTION 2 -#define IXGBE_HWMON_TYPE_MAX 3 - -struct hwmon_attr { - struct device_attribute dev_attr; - struct ixgbe_hw *hw; - struct ixgbe_thermal_diode_data *sensor; - char name[12]; -}; - -struct hwmon_buff { - struct device *device; - struct hwmon_attr *hwmon_list; - unsigned int n_hwmon; -}; -#endif /* IXGBE_HWMON */ - -/* - * microsecond values for various ITR rates shifted by 2 to fit itr register - * with the first 3 bits reserved 0 - */ -#define IXGBE_MIN_RSC_ITR 24 -#define IXGBE_100K_ITR 40 -#define IXGBE_20K_ITR 200 -#define IXGBE_16K_ITR 248 -#define IXGBE_12K_ITR 336 - -/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */ -static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc, - const u32 
stat_err_bits) -{ - return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); -} - -/* ixgbe_desc_unused - calculate if we have unused descriptors */ -static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) -{ - u16 ntc = ring->next_to_clean; - u16 ntu = ring->next_to_use; - - return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; -} - -#define IXGBE_RX_DESC(R, i) \ - (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) -#define IXGBE_TX_DESC(R, i) \ - (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) -#define IXGBE_TX_CTXTDESC(R, i) \ - (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) - -#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 -#if IS_ENABLED(CONFIG_FCOE) -/* use 3K as the baby jumbo frame size for FCoE */ -#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 -#endif /* CONFIG_FCOE */ - -#define TCP_TIMER_VECTOR 0 -#define OTHER_VECTOR 1 -#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR) - -#define IXGBE_MAX_MSIX_Q_VECTORS_82599 64 -#define IXGBE_MAX_MSIX_Q_VECTORS_82598 16 - -struct ixgbe_mac_addr { - u8 addr[ETH_ALEN]; - u16 pool; - u16 state; /* bitmask */ -}; - -#define IXGBE_MAC_STATE_DEFAULT 0x1 -#define IXGBE_MAC_STATE_MODIFIED 0x2 -#define IXGBE_MAC_STATE_IN_USE 0x4 - -#ifdef IXGBE_PROCFS -struct ixgbe_therm_proc_data { - struct ixgbe_hw *hw; - struct ixgbe_thermal_diode_data *sensor_data; -}; - -#endif /* IXGBE_PROCFS */ -/* - * Only for array allocations in our adapter struct. On 82598, there will be - * unused entries in the array, but that's not a big deal. Also, in 82599, - * we can actually assign 64 queue vectors based on our extended-extended - * interrupt registers. This is different than 82598, which is limited to 16. 
- */ -#define MAX_MSIX_Q_VECTORS IXGBE_MAX_MSIX_Q_VECTORS_82599 -#define MAX_MSIX_COUNT IXGBE_MAX_MSIX_VECTORS_82599 - -#define MIN_MSIX_Q_VECTORS 1 -#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) - -/* default to trying for four seconds */ -#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) -#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ - -/* board specific private data structure */ -struct ixgbe_adapter { -#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) -#ifdef HAVE_VLAN_RX_REGISTER - struct vlan_group *vlgrp; /* must be first, see ixgbe_receive_skb */ -#else - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; -#endif -#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ - /* OS defined structs */ - struct net_device *netdev; - struct pci_dev *pdev; - - unsigned long state; - - /* Some features need tri-state capability, - * thus the additional *_CAPABLE flags. - */ - u32 flags; -#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0) -#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) -#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2) -#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) -#ifndef IXGBE_NO_LLI -#define IXGBE_FLAG_LLI_PUSH (u32)(1 << 4) -#endif - -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) -#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 6) -#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 7) -#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)(1 << 8) -#else -#define IXGBE_FLAG_DCA_ENABLED (u32)0 -#define IXGBE_FLAG_DCA_CAPABLE (u32)0 -#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)0 -#endif -#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 9) -#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 10) -#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 11) -#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 12) -#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 13) -#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 14) -#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 15) -#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 16) -#if IS_ENABLED(CONFIG_FCOE) -#define 
IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 17) -#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 18) -#endif /* CONFIG_FCOE */ -#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 19) -#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 20) -#define IXGBE_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 21) -#define IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 22) -#define IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE (u32)(1 << 23) -#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED (u32)(1 << 24) -#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE (u32)(1 << 25) -#define IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE (u32)(1 << 26) -#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER (u32)(1 << 27) -#define IXGBE_FLAG_MDD_ENABLED (u32)(1 << 29) -#define IXGBE_FLAG_DCB_CAPABLE (u32)(1 << 30) -#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE BIT(31) - -/* preset defaults */ -#define IXGBE_FLAGS_82598_INIT (IXGBE_FLAG_MSI_CAPABLE | \ - IXGBE_FLAG_MSIX_CAPABLE | \ - IXGBE_FLAG_MQ_CAPABLE) - -#define IXGBE_FLAGS_82599_INIT (IXGBE_FLAGS_82598_INIT | \ - IXGBE_FLAG_SRIOV_CAPABLE) - -#define IXGBE_FLAGS_X540_INIT IXGBE_FLAGS_82599_INIT - -#define IXGBE_FLAGS_X550_INIT (IXGBE_FLAGS_82599_INIT | \ - IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE) - - u32 flags2; -#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0) -#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) -#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 3) -#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 4) -#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 5) -#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 6) -#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 8) -#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 9) -#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 10) -#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11) -#define IXGBE_FLAG2_EEE_CAPABLE (u32)(1 << 14) -#define IXGBE_FLAG2_EEE_ENABLED (u32)(1 << 15) -#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED (u32)(1 << 16) -#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 17) -#define IXGBE_FLAG2_VLAN_PROMISC (u32)(1 << 18) -#define IXGBE_FLAG2_RX_LEGACY (u32)(1 << 19) - - /* Tx fast path data 
*/ - int num_tx_queues; - u16 tx_itr_setting; - u16 tx_work_limit; - -#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) - __be16 vxlan_port; -#endif /* HAVE_UDP_ENC_RX_OFFLAD || HAVE_VXLAN_RX_OFFLOAD */ -#ifdef HAVE_UDP_ENC_RX_OFFLOAD - __be16 geneve_port; -#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ - - /* Rx fast path data */ - int num_rx_queues; - u16 rx_itr_setting; - u16 rx_work_limit; - - /* TX */ - struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; - - u64 restart_queue; - u64 lsc_int; - u32 tx_timeout_count; - - /* RX */ - struct ixgbe_ring *rx_ring[MAX_RX_QUEUES]; - int num_rx_pools; /* does not include pools assigned to VFs */ - int num_rx_queues_per_pool; - u64 hw_csum_rx_error; - u64 hw_rx_no_dma_resources; - u64 rsc_total_count; - u64 rsc_total_flush; - u64 non_eop_descs; - u32 alloc_rx_page_failed; - u32 alloc_rx_buff_failed; - - struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; - -#ifdef HAVE_DCBNL_IEEE - struct ieee_pfc *ixgbe_ieee_pfc; - struct ieee_ets *ixgbe_ieee_ets; -#endif - struct ixgbe_dcb_config dcb_cfg; - struct ixgbe_dcb_config temp_dcb_cfg; - u8 dcb_set_bitmap; - u8 dcbx_cap; -#ifndef HAVE_MQPRIO - u8 dcb_tc; -#endif - enum ixgbe_fc_mode last_lfc_mode; - - int num_q_vectors; /* current number of q_vectors for device */ - int max_q_vectors; /* upper limit of q_vectors for device */ - struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; - struct msix_entry *msix_entries; - -#ifndef HAVE_NETDEV_STATS_IN_NETDEV - struct net_device_stats net_stats; -#endif - -#ifdef ETHTOOL_TEST - u32 test_icr; - struct ixgbe_ring test_tx_ring; - struct ixgbe_ring test_rx_ring; -#endif - - /* structs defined in ixgbe_hw.h */ - struct ixgbe_hw hw; - u16 msg_enable; - struct ixgbe_hw_stats stats; -#ifndef IXGBE_NO_LLI - u32 lli_port; - u32 lli_size; - u32 lli_etype; - u32 lli_vlan_pri; -#endif /* IXGBE_NO_LLI */ - - u32 *config_space; - u64 tx_busy; - unsigned int tx_ring_count; - unsigned int rx_ring_count; - - u32 
link_speed; - bool link_up; - - bool cloud_mode; - - unsigned long sfp_poll_time; - unsigned long link_check_timeout; - - struct timer_list service_timer; - struct work_struct service_task; - - struct hlist_head fdir_filter_list; - unsigned long fdir_overflow; /* number of times ATR was backed off */ - union ixgbe_atr_input fdir_mask; - int fdir_filter_count; - u32 fdir_pballoc; - u32 atr_sample_rate; - spinlock_t fdir_perfect_lock; - -#if IS_ENABLED(CONFIG_FCOE) - struct ixgbe_fcoe fcoe; -#endif /* CONFIG_FCOE */ - u8 __iomem *io_addr; /* Mainly for iounmap use */ - u32 wol; - - u16 bd_number; - -#ifdef HAVE_BRIDGE_ATTRIBS - u16 bridge_mode; -#endif - - char eeprom_id[32]; - u16 eeprom_cap; - bool netdev_registered; - u32 interrupt_event; -#ifdef HAVE_ETHTOOL_SET_PHYS_ID - u32 led_reg; -#endif - -#ifdef HAVE_PTP_1588_CLOCK - struct ptp_clock *ptp_clock; - struct ptp_clock_info ptp_caps; - struct work_struct ptp_tx_work; - struct sk_buff *ptp_tx_skb; - struct hwtstamp_config tstamp_config; - unsigned long ptp_tx_start; - unsigned long last_overflow_check; - unsigned long last_rx_ptp_check; - spinlock_t tmreg_lock; - struct cyclecounter hw_cc; - struct timecounter hw_tc; - u32 base_incval; - u32 tx_hwtstamp_timeouts; - u32 tx_hwtstamp_skipped; - u32 rx_hwtstamp_cleared; - void (*ptp_setup_sdp) (struct ixgbe_adapter *); -#endif /* HAVE_PTP_1588_CLOCK */ - - DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); - unsigned int num_vfs; - unsigned int max_vfs; - struct vf_data_storage *vfinfo; - int vf_rate_link_speed; - struct vf_macvlans vf_mvs; - struct vf_macvlans *mv_list; -#ifdef CONFIG_PCI_IOV - u32 timer_event_accumulator; - u32 vferr_refcount; -#endif - struct ixgbe_mac_addr *mac_table; -#ifdef IXGBE_SYSFS -#ifdef IXGBE_HWMON - struct hwmon_buff ixgbe_hwmon_buff; -#endif /* IXGBE_HWMON */ -#else /* IXGBE_SYSFS */ -#ifdef IXGBE_PROCFS - struct proc_dir_entry *eth_dir; - struct proc_dir_entry *info_dir; - u64 old_lsc; - struct proc_dir_entry 
*therm_dir[IXGBE_MAX_SENSORS]; - struct ixgbe_therm_proc_data therm_data[IXGBE_MAX_SENSORS]; -#endif /* IXGBE_PROCFS */ -#endif /* IXGBE_SYSFS */ - -#ifdef HAVE_IXGBE_DEBUG_FS - struct dentry *ixgbe_dbg_adapter; -#endif /*HAVE_IXGBE_DEBUG_FS*/ - u8 default_up; - -/* maximum number of RETA entries among all devices supported by ixgbe - * driver: currently it's x550 device in non-SRIOV mode - */ -#define IXGBE_MAX_RETA_ENTRIES 512 - u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES]; - -#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ - u32 *rss_key; - -#ifdef HAVE_TX_MQ -#ifndef HAVE_NETDEV_SELECT_QUEUE - unsigned int indices; -#endif -#endif - bool need_crosstalk_fix; -}; - -static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) -{ - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - return IXGBE_MAX_RSS_INDICES; - break; - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - return IXGBE_MAX_RSS_INDICES_X550; - break; - default: - return 0; - break; - } -} - -struct ixgbe_fdir_filter { - struct hlist_node fdir_node; - union ixgbe_atr_input filter; - u16 sw_idx; - u64 action; -}; - -enum ixgbe_state_t { - __IXGBE_TESTING, - __IXGBE_RESETTING, - __IXGBE_DOWN, - __IXGBE_DISABLED, - __IXGBE_REMOVE, - __IXGBE_SERVICE_SCHED, - __IXGBE_SERVICE_INITED, - __IXGBE_IN_SFP_INIT, -#ifdef HAVE_PTP_1588_CLOCK - __IXGBE_PTP_RUNNING, - __IXGBE_PTP_TX_IN_PROGRESS, -#endif - __IXGBE_RESET_REQUESTED, -}; - -struct ixgbe_cb { -#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - union { /* Union defining head/tail partner */ - struct sk_buff *head; - struct sk_buff *tail; - }; -#endif - dma_addr_t dma; -#ifdef HAVE_VLAN_RX_REGISTER - u16 vid; /* VLAN tag */ -#endif - u16 append_cnt; /* number of skb's appended */ -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - bool page_released; -#endif -}; -#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb) - -/* ESX ixgbe CIM IOCTL definition */ - -#ifdef 
IXGBE_SYSFS -void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter); -int ixgbe_sysfs_init(struct ixgbe_adapter *adapter); -#endif /* IXGBE_SYSFS */ -#ifdef IXGBE_PROCFS -void ixgbe_procfs_exit(struct ixgbe_adapter *adapter); -int ixgbe_procfs_init(struct ixgbe_adapter *adapter); -int ixgbe_procfs_topdir_init(void); -void ixgbe_procfs_topdir_exit(void); -#endif /* IXGBE_PROCFS */ - -extern struct dcbnl_rtnl_ops ixgbe_dcbnl_ops; -int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max); - -u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index); - -/* needed by ixgbe_main.c */ -int ixgbe_validate_mac_addr(u8 *mc_addr); -void ixgbe_check_options(struct ixgbe_adapter *adapter); -void ixgbe_assign_netdev_ops(struct net_device *netdev); - -/* needed by ixgbe_ethtool.c */ -#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME -extern char ixgbe_driver_name[]; -#else -extern const char ixgbe_driver_name[]; -#endif -extern const char ixgbe_driver_version[]; - -void ixgbe_up(struct ixgbe_adapter *adapter); -void ixgbe_down(struct ixgbe_adapter *adapter); -void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); -void ixgbe_reset(struct ixgbe_adapter *adapter); -void ixgbe_set_ethtool_ops(struct net_device *netdev); -int ixgbe_setup_rx_resources(struct ixgbe_ring *); -int ixgbe_setup_tx_resources(struct ixgbe_ring *); -void ixgbe_free_rx_resources(struct ixgbe_ring *); -void ixgbe_free_tx_resources(struct ixgbe_ring *); -void ixgbe_configure_rx_ring(struct ixgbe_adapter *, - struct ixgbe_ring *); -void ixgbe_configure_tx_ring(struct ixgbe_adapter *, - struct ixgbe_ring *); -void ixgbe_update_stats(struct ixgbe_adapter *adapter); -int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); -void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter); -void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter); -void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); -bool ixgbe_is_ixgbe(struct pci_dev *pcidev); -netdev_tx_t 
ixgbe_xmit_frame_ring(struct sk_buff *, - struct ixgbe_adapter *, - struct ixgbe_ring *); -void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, - struct ixgbe_tx_buffer *); -void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); -void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, - struct ixgbe_ring *); -void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, - struct ixgbe_ring *); -#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) -void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *, u32); -#endif -void ixgbe_set_rx_mode(struct net_device *netdev); -int ixgbe_write_mc_addr_list(struct net_device *netdev); -int ixgbe_setup_tc(struct net_device *dev, u8 tc); -void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); -void ixgbe_do_reset(struct net_device *netdev); -void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector); -int ixgbe_poll(struct napi_struct *napi, int budget); -void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, - struct ixgbe_ring *); -void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter); -void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter); -#ifdef ETHTOOL_OPS_COMPAT -int ethtool_ioctl(struct ifreq *ifr); -#endif - -#if IS_ENABLED(CONFIG_FCOE) -void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); -int ixgbe_fso(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *first, - u8 *hdr_len); -int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb); -int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc); -#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET -int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc); -#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */ -int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); -int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter); -void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter 
*adapter); -#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE -int ixgbe_fcoe_enable(struct net_device *netdev); -int ixgbe_fcoe_disable(struct net_device *netdev); -#else -int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter); -void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter); -#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ -#if IS_ENABLED(CONFIG_DCB) -#ifdef HAVE_DCBNL_OPS_GETAPP -u8 ixgbe_fcoe_getapp(struct net_device *netdev); -#endif /* HAVE_DCBNL_OPS_GETAPP */ -u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); -#endif /* CONFIG_DCB */ -u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter); -#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN -int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); -#endif -#endif /* CONFIG_FCOE */ - -#ifdef HAVE_IXGBE_DEBUG_FS -void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter); -void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter); -void ixgbe_dbg_init(void); -void ixgbe_dbg_exit(void); -#endif /* HAVE_IXGBE_DEBUG_FS */ - -#if IS_ENABLED(CONFIG_BQL) || defined(HAVE_SKB_XMIT_MORE) -static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) -{ - return netdev_get_tx_queue(ring->netdev, ring->queue_index); -} -#endif - -#if IS_ENABLED(CONFIG_DCB) -#ifdef HAVE_DCBNL_IEEE -s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame); -#endif /* HAVE_DCBNL_IEEE */ -#endif /* CONFIG_DCB */ - -bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, - u16 subdevice_id); -void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring); -int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn); -void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); -int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, - const u8 *addr, u16 queue); -int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, - const u8 *addr, u16 queue); -int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool); -void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid); 
-#ifndef HAVE_VLAN_RX_REGISTER -void ixgbe_vlan_mode(struct net_device *, u32); -#else -#ifdef CONFIG_PCI_IOV -int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan); -#endif -#endif - -#ifdef HAVE_PTP_1588_CLOCK -void ixgbe_ptp_init(struct ixgbe_adapter *adapter); -void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); -void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter); -void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); -void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); -void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter); -void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb); -void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb); -static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) { - ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb); - return; - } - - if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) - return; - - ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb); - - /* Update the last_rx_timestamp timer in order to enable watchdog check - * for error case of latched timestamp on a dropped packet. 
- */ - rx_ring->last_rx_timestamp = jiffies; -} - -int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); -int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); -void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); -void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); -void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter); -#endif /* HAVE_PTP_1588_CLOCK */ -#ifdef CONFIG_PCI_IOV -void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); -#endif -u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); -void ixgbe_store_key(struct ixgbe_adapter *adapter); -void ixgbe_store_reta(struct ixgbe_adapter *adapter); - -void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); -#endif /* _IXGBE_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.c deleted file mode 100644 index 8b7fc593427e..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.c +++ /dev/null @@ -1,1399 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". 
- - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe_type.h" -#include "ixgbe_82598.h" -#include "ixgbe_api.h" -#include "ixgbe_common.h" -#include "ixgbe_phy.h" - -#define IXGBE_82598_MAX_TX_QUEUES 32 -#define IXGBE_82598_MAX_RX_QUEUES 64 -#define IXGBE_82598_RAR_ENTRIES 16 -#define IXGBE_82598_MC_TBL_SIZE 128 -#define IXGBE_82598_VFT_TBL_SIZE 128 -#define IXGBE_82598_RX_PB_SIZE 512 - -STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg); -STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); -STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, - bool autoneg_wait_to_complete); -STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, bool *link_up, - bool link_up_wait_to_complete); -STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); -STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); -STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw); -STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, - u32 headroom, int strategy); -STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, - u8 *sff8472_data); -/** - * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout - * @hw: pointer to the HW structure - * - * The defaults for 82598 should be in the range of 50us to 50ms, - * however the hardware default for these parts is 500us to 1ms which is less - * than the 10ms recommended by the pci-e spec. 
To address this we need to - * increase the value to either 10ms to 250ms for capability version 1 config, - * or 16ms to 55ms for version 2. - **/ -void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) -{ - u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); - u16 pcie_devctl2; - - /* only take action if timeout value is defaulted to 0 */ - if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) - goto out; - - /* - * if capababilities version is type 1 we can write the - * timeout of 10ms to 250ms through the GCR register - */ - if (!(gcr & IXGBE_GCR_CAP_VER2)) { - gcr |= IXGBE_GCR_CMPL_TMOUT_10ms; - goto out; - } - - /* - * for version 2 capabilities we need to write the config space - * directly in order to set the completion timeout value for - * 16ms to 55ms - */ - pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); - pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; - IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); -out: - /* disable completion timeout resend */ - gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; - IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); -} - -/** - * ixgbe_init_ops_82598 - Inits func ptrs and MAC type - * @hw: pointer to hardware structure - * - * Initialize the function pointers and assign the MAC type for 82598. - * Does not touch the hardware. 
- **/ -s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_phy_info *phy = &hw->phy; - s32 ret_val; - - DEBUGFUNC("ixgbe_init_ops_82598"); - - ret_val = ixgbe_init_phy_ops_generic(hw); - ret_val = ixgbe_init_ops_generic(hw); - - /* PHY */ - phy->ops.init = ixgbe_init_phy_ops_82598; - - /* MAC */ - mac->ops.start_hw = ixgbe_start_hw_82598; - mac->ops.reset_hw = ixgbe_reset_hw_82598; - mac->ops.get_media_type = ixgbe_get_media_type_82598; - mac->ops.get_supported_physical_layer = - ixgbe_get_supported_physical_layer_82598; - mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598; - mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598; - mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598; - mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598; - - /* RAR, Multicast, VLAN */ - mac->ops.set_vmdq = ixgbe_set_vmdq_82598; - mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598; - mac->ops.set_vfta = ixgbe_set_vfta_82598; - mac->ops.set_vlvf = NULL; - mac->ops.clear_vfta = ixgbe_clear_vfta_82598; - - /* Flow Control */ - mac->ops.fc_enable = ixgbe_fc_enable_82598; - - mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; - mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; - mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; - mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE; - mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; - mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; - mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); - - /* SFP+ Module */ - phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598; - phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598; - - /* Link */ - mac->ops.check_link = ixgbe_check_mac_link_82598; - mac->ops.setup_link = ixgbe_setup_mac_link_82598; - mac->ops.flap_tx_laser = NULL; - mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598; - mac->ops.setup_rxpba = ixgbe_set_rxpba_82598; - - /* Manageability interface */ - mac->ops.set_fw_drv_ver = NULL; - - mac->ops.get_rtrup2tc = NULL; - - 
return ret_val; -} - -/** - * ixgbe_init_phy_ops_82598 - PHY/SFP specific init - * @hw: pointer to hardware structure - * - * Initialize any function pointers that were not able to be - * set during init_shared_code because the PHY/SFP type was - * not known. Perform the SFP init if necessary. - * - **/ -s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_phy_info *phy = &hw->phy; - s32 ret_val = IXGBE_SUCCESS; - u16 list_offset, data_offset; - - DEBUGFUNC("ixgbe_init_phy_ops_82598"); - - /* Identify the PHY */ - phy->ops.identify(hw); - - /* Overwrite the link function pointers if copper PHY */ - if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { - mac->ops.setup_link = ixgbe_setup_copper_link_82598; - mac->ops.get_link_capabilities = - ixgbe_get_copper_link_capabilities_generic; - } - - switch (hw->phy.type) { - case ixgbe_phy_tn: - phy->ops.setup_link = ixgbe_setup_phy_link_tnx; - phy->ops.check_link = ixgbe_check_phy_link_tnx; - phy->ops.get_firmware_version = - ixgbe_get_phy_firmware_version_tnx; - break; - case ixgbe_phy_nl: - phy->ops.reset = ixgbe_reset_phy_nl; - - /* Call SFP+ identify routine to get the SFP+ module type */ - ret_val = phy->ops.identify_sfp(hw); - if (ret_val != IXGBE_SUCCESS) - goto out; - else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) { - ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; - goto out; - } - - /* Check to see if SFP+ module is supported */ - ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, - &list_offset, - &data_offset); - if (ret_val != IXGBE_SUCCESS) { - ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; - goto out; - } - break; - default: - break; - } - -out: - return ret_val; -} - -/** - * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx - * @hw: pointer to hardware structure - * - * Starts the hardware using the generic start_hw function. 
- * Disables relaxed ordering Then set pcie completion timeout - * - **/ -s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) -{ - u32 regval; - u32 i; - s32 ret_val = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_start_hw_82598"); - - ret_val = ixgbe_start_hw_generic(hw); - if (ret_val) - return ret_val; - - /* Disable relaxed ordering */ - for (i = 0; ((i < hw->mac.max_tx_queues) && - (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { - regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); - regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); - } - - for (i = 0; ((i < hw->mac.max_rx_queues) && - (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { - regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); - regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | - IXGBE_DCA_RXCTRL_HEAD_WRO_EN); - IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); - } - - /* set the completion timeout for interface */ - ixgbe_set_pcie_completion_timeout(hw); - - return ret_val; -} - -/** - * ixgbe_get_link_capabilities_82598 - Determines link capabilities - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: boolean auto-negotiation value - * - * Determines the link capabilities by reading the AUTOC register. - **/ -STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) -{ - s32 status = IXGBE_SUCCESS; - u32 autoc = 0; - - DEBUGFUNC("ixgbe_get_link_capabilities_82598"); - - /* - * Determine link capabilities based on the stored value of AUTOC, - * which represents EEPROM defaults. If AUTOC value has not been - * stored, use the current register value. 
- */ - if (hw->mac.orig_link_settings_stored) - autoc = hw->mac.orig_autoc; - else - autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - - switch (autoc & IXGBE_AUTOC_LMS_MASK) { - case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: - *speed = IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = false; - break; - - case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: - *speed = IXGBE_LINK_SPEED_10GB_FULL; - *autoneg = false; - break; - - case IXGBE_AUTOC_LMS_1G_AN: - *speed = IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = true; - break; - - case IXGBE_AUTOC_LMS_KX4_AN: - case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: - *speed = IXGBE_LINK_SPEED_UNKNOWN; - if (autoc & IXGBE_AUTOC_KX4_SUPP) - *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (autoc & IXGBE_AUTOC_KX_SUPP) - *speed |= IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = true; - break; - - default: - status = IXGBE_ERR_LINK_SETUP; - break; - } - - return status; -} - -/** - * ixgbe_get_media_type_82598 - Determines media type - * @hw: pointer to hardware structure - * - * Returns the media type (fiber, copper, backplane) - **/ -STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) -{ - enum ixgbe_media_type media_type; - - DEBUGFUNC("ixgbe_get_media_type_82598"); - - /* Detect if there is a copper PHY attached. 
*/ - switch (hw->phy.type) { - case ixgbe_phy_cu_unknown: - case ixgbe_phy_tn: - media_type = ixgbe_media_type_copper; - goto out; - default: - break; - } - - /* Media type for I82598 is based on device ID */ - switch (hw->device_id) { - case IXGBE_DEV_ID_82598: - case IXGBE_DEV_ID_82598_BX: - /* Default device ID is mezzanine card KX/KX4 */ - media_type = ixgbe_media_type_backplane; - break; - case IXGBE_DEV_ID_82598AF_DUAL_PORT: - case IXGBE_DEV_ID_82598AF_SINGLE_PORT: - case IXGBE_DEV_ID_82598_DA_DUAL_PORT: - case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: - case IXGBE_DEV_ID_82598EB_XF_LR: - case IXGBE_DEV_ID_82598EB_SFP_LOM: - media_type = ixgbe_media_type_fiber; - break; - case IXGBE_DEV_ID_82598EB_CX4: - case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: - media_type = ixgbe_media_type_cx4; - break; - case IXGBE_DEV_ID_82598AT: - case IXGBE_DEV_ID_82598AT2: - media_type = ixgbe_media_type_copper; - break; - default: - media_type = ixgbe_media_type_unknown; - break; - } -out: - return media_type; -} - -/** - * ixgbe_fc_enable_82598 - Enable flow control - * @hw: pointer to hardware structure - * - * Enable flow control according to the current settings. 
- **/ -s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_SUCCESS; - u32 fctrl_reg; - u32 rmcs_reg; - u32 reg; - u32 fcrtl, fcrth; - u32 link_speed = 0; - int i; - bool link_up; - - DEBUGFUNC("ixgbe_fc_enable_82598"); - - /* Validate the water mark configuration */ - if (!hw->fc.pause_time) { - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; - } - - /* Low water mark of zero causes XOFF floods */ - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && - hw->fc.high_water[i]) { - if (!hw->fc.low_water[i] || - hw->fc.low_water[i] >= hw->fc.high_water[i]) { - DEBUGOUT("Invalid water mark configuration\n"); - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; - } - } - } - - /* - * On 82598 having Rx FC on causes resets while doing 1G - * so if it's on turn it off once we know link_speed. For - * more details see 82598 Specification update. - */ - hw->mac.ops.check_link(hw, &link_speed, &link_up, false); - if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { - switch (hw->fc.requested_mode) { - case ixgbe_fc_full: - hw->fc.requested_mode = ixgbe_fc_tx_pause; - break; - case ixgbe_fc_rx_pause: - hw->fc.requested_mode = ixgbe_fc_none; - break; - default: - /* no change */ - break; - } - } - - /* Negotiate the fc mode to use */ - ixgbe_fc_autoneg(hw); - - /* Disable any previous flow control settings */ - fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); - fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); - - rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); - rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); - - /* - * The possible values of fc.current_mode are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames, - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but - * we do not support receiving pause frames). - * 3: Both Rx and Tx flow control (symmetric) are enabled. 
- * other: Invalid. - */ - switch (hw->fc.current_mode) { - case ixgbe_fc_none: - /* - * Flow control is disabled by software override or autoneg. - * The code below will actually disable it in the HW. - */ - break; - case ixgbe_fc_rx_pause: - /* - * Rx Flow control is enabled and Tx Flow control is - * disabled by software override. Since there really - * isn't a way to advertise that we are capable of RX - * Pause ONLY, we will advertise that we support both - * symmetric and asymmetric Rx PAUSE. Later, we will - * disable the adapter's ability to send PAUSE frames. - */ - fctrl_reg |= IXGBE_FCTRL_RFCE; - break; - case ixgbe_fc_tx_pause: - /* - * Tx Flow control is enabled, and Rx Flow control is - * disabled by software override. - */ - rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; - break; - case ixgbe_fc_full: - /* Flow control (both Rx and Tx) is enabled by SW override. */ - fctrl_reg |= IXGBE_FCTRL_RFCE; - rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; - break; - default: - DEBUGOUT("Flow control param set incorrectly\n"); - ret_val = IXGBE_ERR_CONFIG; - goto out; - break; - } - - /* Set 802.3x based flow control settings. */ - fctrl_reg |= IXGBE_FCTRL_DPF; - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); - IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); - - /* Set up and enable Rx high/low water mark thresholds, enable XON. 
*/ - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && - hw->fc.high_water[i]) { - fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; - fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); - IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth); - } else { - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); - } - - } - - /* Configure pause time (2 TCs per register) */ - reg = hw->fc.pause_time * 0x00010001; - for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); - - /* Configure flow control refresh threshold value */ - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - -out: - return ret_val; -} - -/** - * ixgbe_start_mac_link_82598 - Configures MAC link settings - * @hw: pointer to hardware structure - * - * Configures link settings based on values in the ixgbe_hw struct. - * Restarts the link. Performs autonegotiation if needed. 
- **/ -STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, - bool autoneg_wait_to_complete) -{ - u32 autoc_reg; - u32 links_reg; - u32 i; - s32 status = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_start_mac_link_82598"); - - /* Restart link */ - autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - autoc_reg |= IXGBE_AUTOC_AN_RESTART; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); - - /* Only poll for autoneg to complete if specified to do so */ - if (autoneg_wait_to_complete) { - if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == - IXGBE_AUTOC_LMS_KX4_AN || - (autoc_reg & IXGBE_AUTOC_LMS_MASK) == - IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { - links_reg = 0; /* Just in case Autoneg time = 0 */ - for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { - links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); - if (links_reg & IXGBE_LINKS_KX_AN_COMP) - break; - msec_delay(100); - } - if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { - status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; - DEBUGOUT("Autonegotiation did not complete.\n"); - } - } - } - - /* Add delay to filter out noises during initial link setup */ - msec_delay(50); - - return status; -} - -/** - * ixgbe_validate_link_ready - Function looks for phy link - * @hw: pointer to hardware structure - * - * Function indicates success when phy link is available. If phy is not ready - * within 5 seconds of MAC indicating link, the function returns error. 
- **/ -STATIC s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) -{ - u32 timeout; - u16 an_reg; - - if (hw->device_id != IXGBE_DEV_ID_82598AT2) - return IXGBE_SUCCESS; - - for (timeout = 0; - timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { - hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg); - - if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) && - (an_reg & IXGBE_MII_AUTONEG_LINK_UP)) - break; - - msec_delay(100); - } - - if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { - DEBUGOUT("Link was indicated but link is down\n"); - return IXGBE_ERR_LINK_SETUP; - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_check_mac_link_82598 - Get link/speed status - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @link_up: true is link is up, false otherwise - * @link_up_wait_to_complete: bool used to wait for link up or not - * - * Reads the links register to determine if link is up and the current speed - **/ -STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, bool *link_up, - bool link_up_wait_to_complete) -{ - u32 links_reg; - u32 i; - u16 link_reg, adapt_comp_reg; - - DEBUGFUNC("ixgbe_check_mac_link_82598"); - - /* - * SERDES PHY requires us to read link status from undocumented - * register 0xC79F. Bit 0 set indicates link is up/ready; clear - * indicates link down. OxC00C is read to check that the XAUI lanes - * are active. Bit 0 clear indicates active; set indicates inactive. 
- */ - if (hw->phy.type == ixgbe_phy_nl) { - hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); - hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); - hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV, - &adapt_comp_reg); - if (link_up_wait_to_complete) { - for (i = 0; i < hw->mac.max_link_up_time; i++) { - if ((link_reg & 1) && - ((adapt_comp_reg & 1) == 0)) { - *link_up = true; - break; - } else { - *link_up = false; - } - msec_delay(100); - hw->phy.ops.read_reg(hw, 0xC79F, - IXGBE_TWINAX_DEV, - &link_reg); - hw->phy.ops.read_reg(hw, 0xC00C, - IXGBE_TWINAX_DEV, - &adapt_comp_reg); - } - } else { - if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) - *link_up = true; - else - *link_up = false; - } - - if (*link_up == false) - goto out; - } - - links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); - if (link_up_wait_to_complete) { - for (i = 0; i < hw->mac.max_link_up_time; i++) { - if (links_reg & IXGBE_LINKS_UP) { - *link_up = true; - break; - } else { - *link_up = false; - } - msec_delay(100); - links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); - } - } else { - if (links_reg & IXGBE_LINKS_UP) - *link_up = true; - else - *link_up = false; - } - - if (links_reg & IXGBE_LINKS_SPEED) - *speed = IXGBE_LINK_SPEED_10GB_FULL; - else - *speed = IXGBE_LINK_SPEED_1GB_FULL; - - if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) && - (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS)) - *link_up = false; - -out: - return IXGBE_SUCCESS; -} - -/** - * ixgbe_setup_mac_link_82598 - Set MAC link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true when waiting for completion is needed - * - * Set the link speed in the AUTOC register and restarts link. 
- **/ -STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) -{ - bool autoneg = false; - s32 status = IXGBE_SUCCESS; - ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; - u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - u32 autoc = curr_autoc; - u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; - - DEBUGFUNC("ixgbe_setup_mac_link_82598"); - - /* Check to see if speed passed in is supported. */ - ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); - speed &= link_capabilities; - - if (speed == IXGBE_LINK_SPEED_UNKNOWN) - status = IXGBE_ERR_LINK_SETUP; - - /* Set KX4/KX support according to speed requested */ - else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || - link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { - autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - autoc |= IXGBE_AUTOC_KX4_SUPP; - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - autoc |= IXGBE_AUTOC_KX_SUPP; - if (autoc != curr_autoc) - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); - } - - if (status == IXGBE_SUCCESS) { - /* - * Setup and restart the link based on the new values in - * ixgbe_hw This will write the AUTOC register based on the new - * stored values - */ - status = ixgbe_start_mac_link_82598(hw, - autoneg_wait_to_complete); - } - - return status; -} - - -/** - * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true if waiting is needed to complete - * - * Sets the link speed in the AUTOC register in the MAC and restarts link. 
- **/ -STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) -{ - s32 status; - - DEBUGFUNC("ixgbe_setup_copper_link_82598"); - - /* Setup the PHY according to input speed */ - status = hw->phy.ops.setup_link_speed(hw, speed, - autoneg_wait_to_complete); - /* Set up MAC */ - ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); - - return status; -} - -/** - * ixgbe_reset_hw_82598 - Performs hardware reset - * @hw: pointer to hardware structure - * - * Resets the hardware by resetting the transmit and receive units, masks and - * clears all interrupts, performing a PHY reset, and performing a link (MAC) - * reset. - **/ -STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_SUCCESS; - s32 phy_status = IXGBE_SUCCESS; - u32 ctrl; - u32 gheccr; - u32 i; - u32 autoc; - u8 analog_val; - - DEBUGFUNC("ixgbe_reset_hw_82598"); - - /* Call adapter stop to disable tx/rx and clear interrupts */ - status = hw->mac.ops.stop_adapter(hw); - if (status != IXGBE_SUCCESS) - goto reset_hw_out; - - /* - * Power up the Atlas Tx lanes if they are currently powered down. - * Atlas Tx lanes are powered down for MAC loopback tests, but - * they are not automatically restored on reset. 
- */ - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); - if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { - /* Enable Tx Atlas so packets can be transmitted again */ - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, - &analog_val); - analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; - hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, - analog_val); - - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, - &analog_val); - analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; - hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, - analog_val); - - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, - &analog_val); - analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; - hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, - analog_val); - - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, - &analog_val); - analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; - hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, - analog_val); - } - - /* Reset PHY */ - if (hw->phy.reset_disable == false) { - /* PHY ops must be identified and initialized prior to reset */ - - /* Init PHY and function pointers, perform SFP setup */ - phy_status = hw->phy.ops.init(hw); - if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) - goto reset_hw_out; - if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) - goto mac_reset_top; - - hw->phy.ops.reset(hw); - } - -mac_reset_top: - /* - * Issue global reset to the MAC. This needs to be a SW reset. 
- * If link reset is used, it might reset the MAC when mng is using it - */ - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST; - IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); - IXGBE_WRITE_FLUSH(hw); - - /* Poll for reset bit to self-clear indicating reset is complete */ - for (i = 0; i < 10; i++) { - usec_delay(1); - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - if (!(ctrl & IXGBE_CTRL_RST)) - break; - } - if (ctrl & IXGBE_CTRL_RST) { - status = IXGBE_ERR_RESET_FAILED; - DEBUGOUT("Reset polling failed to complete.\n"); - } - - msec_delay(50); - - /* - * Double resets are required for recovery from certain error - * conditions. Between resets, it is necessary to stall to allow time - * for any pending HW events to complete. - */ - if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { - hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; - goto mac_reset_top; - } - - gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); - gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); - IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); - - /* - * Store the original AUTOC value if it has not been - * stored off yet. Otherwise restore the stored original - * AUTOC value since the reset operation sets back to deaults. 
- */ - autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - if (hw->mac.orig_link_settings_stored == false) { - hw->mac.orig_autoc = autoc; - hw->mac.orig_link_settings_stored = true; - } else if (autoc != hw->mac.orig_autoc) { - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); - } - - /* Store the permanent mac address */ - hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); - - /* - * Store MAC address from RAR0, clear receive address registers, and - * clear the multicast table - */ - hw->mac.ops.init_rx_addrs(hw); - -reset_hw_out: - if (phy_status != IXGBE_SUCCESS) - status = phy_status; - - return status; -} - -/** - * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address - * @hw: pointer to hardware struct - * @rar: receive address register index to associate with a VMDq index - * @vmdq: VMDq set index - **/ -s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) -{ - u32 rar_high; - u32 rar_entries = hw->mac.num_rar_entries; - - DEBUGFUNC("ixgbe_set_vmdq_82598"); - - /* Make sure we are using a valid rar index range */ - if (rar >= rar_entries) { - DEBUGOUT1("RAR index %d is out of range.\n", rar); - return IXGBE_ERR_INVALID_ARGUMENT; - } - - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); - rar_high &= ~IXGBE_RAH_VIND_MASK; - rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); - IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); - return IXGBE_SUCCESS; -} - -/** - * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address - * @hw: pointer to hardware struct - * @rar: receive address register index to associate with a VMDq index - * @vmdq: VMDq clear index (not used in 82598, but elsewhere) - **/ -STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) -{ - u32 rar_high; - u32 rar_entries = hw->mac.num_rar_entries; - - UNREFERENCED_1PARAMETER(vmdq); - - /* Make sure we are using a valid rar index range */ - if (rar >= rar_entries) { - DEBUGOUT1("RAR index %d is out of range.\n", rar); - return 
IXGBE_ERR_INVALID_ARGUMENT; - } - - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); - if (rar_high & IXGBE_RAH_VIND_MASK) { - rar_high &= ~IXGBE_RAH_VIND_MASK; - IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_set_vfta_82598 - Set VLAN filter table - * @hw: pointer to hardware structure - * @vlan: VLAN id to write to VLAN filter - * @vind: VMDq output index that maps queue to VLAN id in VFTA - * @vlan_on: boolean flag to turn on/off VLAN in VFTA - * @vlvf_bypass: boolean flag - unused - * - * Turn on/off specified VLAN in the VLAN filter table. - **/ -s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on, bool vlvf_bypass) -{ - u32 regindex; - u32 bitindex; - u32 bits; - u32 vftabyte; - - UNREFERENCED_1PARAMETER(vlvf_bypass); - - DEBUGFUNC("ixgbe_set_vfta_82598"); - - if (vlan > 4095) - return IXGBE_ERR_PARAM; - - /* Determine 32-bit word position in array */ - regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ - - /* Determine the location of the (VMD) queue index */ - vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ - bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ - - /* Set the nibble for VMD queue index */ - bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); - bits &= (~(0x0F << bitindex)); - bits |= (vind << bitindex); - IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); - - /* Determine the location of the bit for this VLAN id */ - bitindex = vlan & 0x1F; /* lower five bits */ - - bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); - if (vlan_on) - /* Turn on this VLAN id */ - bits |= (1 << bitindex); - else - /* Turn off this VLAN id */ - bits &= ~(1 << bitindex); - IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_clear_vfta_82598 - Clear VLAN filter table - * @hw: pointer to hardware structure - * - * Clears the VLAN filer table, and the VMDq index associated with the filter 
- **/ -STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) -{ - u32 offset; - u32 vlanbyte; - - DEBUGFUNC("ixgbe_clear_vfta_82598"); - - for (offset = 0; offset < hw->mac.vft_size; offset++) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); - - for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) - for (offset = 0; offset < hw->mac.vft_size; offset++) - IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), - 0); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register - * @hw: pointer to hardware structure - * @reg: analog register to read - * @val: read value - * - * Performs read operation to Atlas analog register specified. - **/ -s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) -{ - u32 atlas_ctl; - - DEBUGFUNC("ixgbe_read_analog_reg8_82598"); - - IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, - IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); - IXGBE_WRITE_FLUSH(hw); - usec_delay(10); - atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); - *val = (u8)atlas_ctl; - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register - * @hw: pointer to hardware structure - * @reg: atlas register to write - * @val: value to write - * - * Performs write operation to Atlas analog register specified. - **/ -s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) -{ - u32 atlas_ctl; - - DEBUGFUNC("ixgbe_write_analog_reg8_82598"); - - atlas_ctl = (reg << 8) | val; - IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); - IXGBE_WRITE_FLUSH(hw); - usec_delay(10); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface. - * @hw: pointer to hardware structure - * @dev_addr: address to read from - * @byte_offset: byte offset to read from dev_addr - * @eeprom_data: value read - * - * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. 
- **/ -STATIC s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, - u8 byte_offset, u8 *eeprom_data) -{ - s32 status = IXGBE_SUCCESS; - u16 sfp_addr = 0; - u16 sfp_data = 0; - u16 sfp_stat = 0; - u16 gssr; - u32 i; - - DEBUGFUNC("ixgbe_read_i2c_phy_82598"); - - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - gssr = IXGBE_GSSR_PHY1_SM; - else - gssr = IXGBE_GSSR_PHY0_SM; - - if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS) - return IXGBE_ERR_SWFW_SYNC; - - if (hw->phy.type == ixgbe_phy_nl) { - /* - * NetLogic phy SDA/SCL registers are at addresses 0xC30A to - * 0xC30D. These registers are used to talk to the SFP+ - * module's EEPROM through the SDA/SCL (I2C) interface. - */ - sfp_addr = (dev_addr << 8) + byte_offset; - sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); - hw->phy.ops.write_reg_mdi(hw, - IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - sfp_addr); - - /* Poll status */ - for (i = 0; i < 100; i++) { - hw->phy.ops.read_reg_mdi(hw, - IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - &sfp_stat); - sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; - if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) - break; - msec_delay(10); - } - - if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { - DEBUGOUT("EEPROM read did not pass.\n"); - status = IXGBE_ERR_SFP_NOT_PRESENT; - goto out; - } - - /* Read data */ - hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data); - - *eeprom_data = (u8)(sfp_data >> 8); - } else { - status = IXGBE_ERR_PHY; - } - -out: - hw->mac.ops.release_swfw_sync(hw, gssr); - return status; -} - -/** - * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. - * @hw: pointer to hardware structure - * @byte_offset: EEPROM byte offset to read - * @eeprom_data: value read - * - * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. 
- **/ -s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, - u8 *eeprom_data) -{ - return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR, - byte_offset, eeprom_data); -} - -/** - * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface. - * @hw: pointer to hardware structure - * @byte_offset: byte offset at address 0xA2 - * @eeprom_data: value read - * - * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C - **/ -STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, - u8 *sff8472_data) -{ - return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2, - byte_offset, sff8472_data); -} - -/** - * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type - * @hw: pointer to hardware structure - * - * Determines physical layer capabilities of the current configuration. - **/ -u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) -{ - u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; - u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; - u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; - u16 ext_ability = 0; - - DEBUGFUNC("ixgbe_get_supported_physical_layer_82598"); - - hw->phy.ops.identify(hw); - - /* Copper PHY must be checked before AUTOC LMS to determine correct - * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ - switch (hw->phy.type) { - case ixgbe_phy_tn: - case ixgbe_phy_cu_unknown: - hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); - if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) - physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; - if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) - physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; - if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) - physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; - goto out; - default: - break; - } - - switch (autoc & IXGBE_AUTOC_LMS_MASK) { - case 
IXGBE_AUTOC_LMS_1G_AN: - case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: - if (pma_pmd_1g == IXGBE_AUTOC_1G_KX) - physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; - else - physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; - break; - case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: - if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; - else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; - else /* XAUI */ - physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; - break; - case IXGBE_AUTOC_LMS_KX4_AN: - case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: - if (autoc & IXGBE_AUTOC_KX_SUPP) - physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; - if (autoc & IXGBE_AUTOC_KX4_SUPP) - physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; - break; - default: - break; - } - - if (hw->phy.type == ixgbe_phy_nl) { - hw->phy.ops.identify_sfp(hw); - - switch (hw->phy.sfp_type) { - case ixgbe_sfp_type_da_cu: - physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; - break; - case ixgbe_sfp_type_sr: - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; - break; - case ixgbe_sfp_type_lr: - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; - break; - default: - physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; - break; - } - } - - switch (hw->device_id) { - case IXGBE_DEV_ID_82598_DA_DUAL_PORT: - physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; - break; - case IXGBE_DEV_ID_82598AF_DUAL_PORT: - case IXGBE_DEV_ID_82598AF_SINGLE_PORT: - case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; - break; - case IXGBE_DEV_ID_82598EB_XF_LR: - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; - break; - default: - break; - } - -out: - return physical_layer; -} - -/** - * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple - * port devices. - * @hw: pointer to the HW structure - * - * Calls common function and corrects issue with some single port devices - * that enable LAN1 but not LAN0. 
- **/ -void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) -{ - struct ixgbe_bus_info *bus = &hw->bus; - u16 pci_gen = 0; - u16 pci_ctrl2 = 0; - - DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598"); - - ixgbe_set_lan_id_multi_port_pcie(hw); - - /* check if LAN0 is disabled */ - hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen); - if ((pci_gen != 0) && (pci_gen != 0xFFFF)) { - - hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2); - - /* if LAN0 is completely disabled force function to 0 */ - if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) && - !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) && - !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) { - - bus->func = 0; - } - } -} - -/** - * ixgbe_set_rxpba_82598 - Initialize RX packet buffer - * @hw: pointer to hardware structure - * @num_pb: number of packet buffers to allocate - * @headroom: reserve n KB of headroom - * @strategy: packet buffer allocation strategy - **/ -STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, - u32 headroom, int strategy) -{ - u32 rxpktsize = IXGBE_RXPBSIZE_64KB; - u8 i = 0; - UNREFERENCED_1PARAMETER(headroom); - - if (!num_pb) - return; - - /* Setup Rx packet buffer sizes */ - switch (strategy) { - case PBA_STRATEGY_WEIGHTED: - /* Setup the first four at 80KB */ - rxpktsize = IXGBE_RXPBSIZE_80KB; - for (; i < 4; i++) - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); - /* Setup the last four at 48KB...don't re-init i */ - rxpktsize = IXGBE_RXPBSIZE_48KB; - /* Fall Through */ - case PBA_STRATEGY_EQUAL: - default: - /* Divide the remaining Rx packet buffer evenly among the TCs */ - for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); - break; - } - - /* Setup Tx packet buffer sizes */ - for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) - IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); -} - -/** - * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit - * @hw: pointer to hardware structure - * 
@regval: register value to write to RXCTRL - * - * Enables the Rx DMA unit - **/ -s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval) -{ - DEBUGFUNC("ixgbe_enable_rx_dma_82598"); - - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); - - return IXGBE_SUCCESS; -} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.h deleted file mode 100644 index 1e0c15a2776f..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82598.h +++ /dev/null @@ -1,43 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_82598_H_ -#define _IXGBE_82598_H_ - -u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw); -s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw); -s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw); -s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); -s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, - bool vlvf_bypass); -s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val); -s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val); -s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, - u8 *eeprom_data); -u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw); -s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw); -void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw); -void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw); -s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval); -#endif /* _IXGBE_82598_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.c deleted file mode 100644 index 0164233b7b10..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.c +++ /dev/null @@ -1,2614 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. 
- - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe_type.h" -#include "ixgbe_82599.h" -#include "ixgbe_api.h" -#include "ixgbe_common.h" -#include "ixgbe_phy.h" - -#define IXGBE_82599_MAX_TX_QUEUES 128 -#define IXGBE_82599_MAX_RX_QUEUES 128 -#define IXGBE_82599_RAR_ENTRIES 128 -#define IXGBE_82599_MC_TBL_SIZE 128 -#define IXGBE_82599_VFT_TBL_SIZE 128 -#define IXGBE_82599_RX_PB_SIZE 512 - -STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); -STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, - u16 offset, u16 *data); -STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data); -STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data); -STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data); - -void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - - DEBUGFUNC("ixgbe_init_mac_link_ops_82599"); - - /* - * enable the laser control functions for SFP+ fiber - * and MNG not enabled - */ - if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && - !ixgbe_mng_enabled(hw)) { - mac->ops.disable_tx_laser = - ixgbe_disable_tx_laser_multispeed_fiber; - mac->ops.enable_tx_laser = - ixgbe_enable_tx_laser_multispeed_fiber; - 
mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber; - - } else { - mac->ops.disable_tx_laser = NULL; - mac->ops.enable_tx_laser = NULL; - mac->ops.flap_tx_laser = NULL; - } - - if (hw->phy.multispeed_fiber) { - /* Set up dual speed SFP+ support */ - mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; - mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599; - mac->ops.set_rate_select_speed = - ixgbe_set_hard_rate_select_speed; - } else { - if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) && - (hw->phy.smart_speed == ixgbe_smart_speed_auto || - hw->phy.smart_speed == ixgbe_smart_speed_on) && - !ixgbe_verify_lesm_fw_enabled_82599(hw)) { - mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed; - } else { - mac->ops.setup_link = ixgbe_setup_mac_link_82599; - } - } -} - -/** - * ixgbe_init_phy_ops_82599 - PHY/SFP specific init - * @hw: pointer to hardware structure - * - * Initialize any function pointers that were not able to be - * set during init_shared_code because the PHY/SFP type was - * not known. Perform the SFP init if necessary. - * - **/ -s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_phy_info *phy = &hw->phy; - s32 ret_val = IXGBE_SUCCESS; - u32 esdp; - - DEBUGFUNC("ixgbe_init_phy_ops_82599"); - - if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { - /* Store flag indicating I2C bus access control unit. 
*/ - hw->phy.qsfp_shared_i2c_bus = TRUE; - - /* Initialize access to QSFP+ I2C bus */ - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - esdp |= IXGBE_ESDP_SDP0_DIR; - esdp &= ~IXGBE_ESDP_SDP1_DIR; - esdp &= ~IXGBE_ESDP_SDP0; - esdp &= ~IXGBE_ESDP_SDP0_NATIVE; - esdp &= ~IXGBE_ESDP_SDP1_NATIVE; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_FLUSH(hw); - - phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599; - phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599; - } - /* Identify the PHY or SFP module */ - ret_val = phy->ops.identify(hw); - if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) - goto init_phy_ops_out; - - /* Setup function pointers based on detected SFP module and speeds */ - ixgbe_init_mac_link_ops_82599(hw); - if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) - hw->phy.ops.reset = NULL; - - /* If copper media, overwrite with copper function pointers */ - if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { - mac->ops.setup_link = ixgbe_setup_copper_link_82599; - mac->ops.get_link_capabilities = - ixgbe_get_copper_link_capabilities_generic; - } - - /* Set necessary function pointers based on PHY type */ - switch (hw->phy.type) { - case ixgbe_phy_tn: - phy->ops.setup_link = ixgbe_setup_phy_link_tnx; - phy->ops.check_link = ixgbe_check_phy_link_tnx; - phy->ops.get_firmware_version = - ixgbe_get_phy_firmware_version_tnx; - break; - default: - break; - } -init_phy_ops_out: - return ret_val; -} - -s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_SUCCESS; - u16 list_offset, data_offset, data_value; - - DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); - - if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { - ixgbe_init_mac_link_ops_82599(hw); - - hw->phy.ops.reset = NULL; - - ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, - &data_offset); - if (ret_val != IXGBE_SUCCESS) - goto setup_sfp_out; - - /* PHY config will finish before releasing the semaphore */ - ret_val = hw->mac.ops.acquire_swfw_sync(hw, - 
IXGBE_GSSR_MAC_CSR_SM); - if (ret_val != IXGBE_SUCCESS) { - ret_val = IXGBE_ERR_SWFW_SYNC; - goto setup_sfp_out; - } - - if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) - goto setup_sfp_err; - while (data_value != 0xffff) { - IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); - IXGBE_WRITE_FLUSH(hw); - if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) - goto setup_sfp_err; - } - - /* Release the semaphore */ - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); - /* Delay obtaining semaphore again to allow FW access - * prot_autoc_write uses the semaphore too. - */ - msec_delay(hw->eeprom.semaphore_delay); - - /* Restart DSP and set SFI mode */ - ret_val = hw->mac.ops.prot_autoc_write(hw, - hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL, - false); - - if (ret_val) { - DEBUGOUT("sfp module setup not complete\n"); - ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; - goto setup_sfp_out; - } - - } - -setup_sfp_out: - return ret_val; - -setup_sfp_err: - /* Release the semaphore */ - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); - /* Delay obtaining semaphore again to allow FW access */ - msec_delay(hw->eeprom.semaphore_delay); - ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, - "eeprom read at offset %d failed", data_offset); - return IXGBE_ERR_PHY; -} - -/** - * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read - * @hw: pointer to hardware structure - * @locked: Return the if we locked for this read. - * @reg_val: Value we read from AUTOC - * - * For this part (82599) we need to wrap read-modify-writes with a possible - * FW/SW lock. It is assumed this lock will be freed with the next - * prot_autoc_write_82599(). - */ -s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) -{ - s32 ret_val; - - *locked = false; - /* If LESM is on then we need to hold the SW/FW semaphore. 
*/ - if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { - ret_val = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (ret_val != IXGBE_SUCCESS) - return IXGBE_ERR_SWFW_SYNC; - - *locked = true; - } - - *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); - return IXGBE_SUCCESS; -} - -/** - * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write - * @hw: pointer to hardware structure - * @reg_val: value to write to AUTOC - * @locked: bool to indicate whether the SW/FW lock was already taken by - * previous proc_autoc_read_82599. - * - * This part (82599) may need to hold the SW/FW lock around all writes to - * AUTOC. Likewise after a write we need to do a pipeline reset. - */ -s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) -{ - s32 ret_val = IXGBE_SUCCESS; - - /* Blocked by MNG FW so bail */ - if (ixgbe_check_reset_blocked(hw)) - goto out; - - /* We only need to get the lock if: - * - We didn't do it already (in the read part of a read-modify-write) - * - LESM is enabled. - */ - if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) { - ret_val = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (ret_val != IXGBE_SUCCESS) - return IXGBE_ERR_SWFW_SYNC; - - locked = true; - } - - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); - ret_val = ixgbe_reset_pipeline_82599(hw); - -out: - /* Free the SW/FW semaphore as we either grabbed it here or - * already had it when this function was called. - */ - if (locked) - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); - - return ret_val; -} - -/** - * ixgbe_init_ops_82599 - Inits func ptrs and MAC type - * @hw: pointer to hardware structure - * - * Initialize the function pointers and assign the MAC type for 82599. - * Does not touch the hardware. 
- **/ - -s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_phy_info *phy = &hw->phy; - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - s32 ret_val; - - DEBUGFUNC("ixgbe_init_ops_82599"); - - ixgbe_init_phy_ops_generic(hw); - ret_val = ixgbe_init_ops_generic(hw); - - /* PHY */ - phy->ops.identify = ixgbe_identify_phy_82599; - phy->ops.init = ixgbe_init_phy_ops_82599; - - /* MAC */ - mac->ops.reset_hw = ixgbe_reset_hw_82599; - mac->ops.get_media_type = ixgbe_get_media_type_82599; - mac->ops.get_supported_physical_layer = - ixgbe_get_supported_physical_layer_82599; - mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; - mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; - mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599; - mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599; - mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599; - mac->ops.start_hw = ixgbe_start_hw_82599; - mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; - mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; - mac->ops.get_device_caps = ixgbe_get_device_caps_generic; - mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; - mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; - mac->ops.prot_autoc_read = prot_autoc_read_82599; - mac->ops.prot_autoc_write = prot_autoc_write_82599; - - /* RAR, Multicast, VLAN */ - mac->ops.set_vmdq = ixgbe_set_vmdq_generic; - mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic; - mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; - mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; - mac->rar_highwater = 1; - mac->ops.set_vfta = ixgbe_set_vfta_generic; - mac->ops.set_vlvf = ixgbe_set_vlvf_generic; - mac->ops.clear_vfta = ixgbe_clear_vfta_generic; - mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; - mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599; - mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; - 
mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; - - /* Link */ - mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599; - mac->ops.check_link = ixgbe_check_mac_link_generic; - mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; - ixgbe_init_mac_link_ops_82599(hw); - - mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; - mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; - mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; - mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; - mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; - mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; - mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); - - mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)) - & IXGBE_FWSM_MODE_MASK); - - hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; - - /* EEPROM */ - eeprom->ops.read = ixgbe_read_eeprom_82599; - eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599; - - /* Manageability interface */ - mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; - - mac->ops.get_thermal_sensor_data = - ixgbe_get_thermal_sensor_data_generic; - mac->ops.init_thermal_sensor_thresh = - ixgbe_init_thermal_sensor_thresh_generic; - - mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; - - return ret_val; -} - -/** - * ixgbe_get_link_capabilities_82599 - Determines link capabilities - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: true when autoneg or autotry is enabled - * - * Determines the link capabilities by reading the AUTOC register. - **/ -s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) -{ - s32 status = IXGBE_SUCCESS; - u32 autoc = 0; - - DEBUGFUNC("ixgbe_get_link_capabilities_82599"); - - - /* Check if 1G SFP module. 
*/ - if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { - *speed = IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = true; - goto out; - } - - /* - * Determine link capabilities based on the stored value of AUTOC, - * which represents EEPROM defaults. If AUTOC value has not - * been stored, use the current register values. - */ - if (hw->mac.orig_link_settings_stored) - autoc = hw->mac.orig_autoc; - else - autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - - switch (autoc & IXGBE_AUTOC_LMS_MASK) { - case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: - *speed = IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = false; - break; - - case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: - *speed = IXGBE_LINK_SPEED_10GB_FULL; - *autoneg = false; - break; - - case IXGBE_AUTOC_LMS_1G_AN: - *speed = IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = true; - break; - - case IXGBE_AUTOC_LMS_10G_SERIAL: - *speed = IXGBE_LINK_SPEED_10GB_FULL; - *autoneg = false; - break; - - case IXGBE_AUTOC_LMS_KX4_KX_KR: - case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: - *speed = IXGBE_LINK_SPEED_UNKNOWN; - if (autoc & IXGBE_AUTOC_KR_SUPP) - *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (autoc & IXGBE_AUTOC_KX4_SUPP) - *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (autoc & IXGBE_AUTOC_KX_SUPP) - *speed |= IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = true; - break; - - case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: - *speed = IXGBE_LINK_SPEED_100_FULL; - if (autoc & IXGBE_AUTOC_KR_SUPP) - *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (autoc & IXGBE_AUTOC_KX4_SUPP) - *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (autoc & IXGBE_AUTOC_KX_SUPP) - *speed |= IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = true; - break; - - case IXGBE_AUTOC_LMS_SGMII_1G_100M: - *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; - *autoneg = false; - break; 
- - default: - status = IXGBE_ERR_LINK_SETUP; - goto out; - break; - } - - if (hw->phy.multispeed_fiber) { - *speed |= IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - - /* QSFP must not enable full auto-negotiation - * Limited autoneg is enabled at 1G - */ - if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp) - *autoneg = false; - else - *autoneg = true; - } - -out: - return status; -} - -/** - * ixgbe_get_media_type_82599 - Get media type - * @hw: pointer to hardware structure - * - * Returns the media type (fiber, copper, backplane) - **/ -enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) -{ - enum ixgbe_media_type media_type; - - DEBUGFUNC("ixgbe_get_media_type_82599"); - - /* Detect if there is a copper PHY attached. */ - switch (hw->phy.type) { - case ixgbe_phy_cu_unknown: - case ixgbe_phy_tn: - media_type = ixgbe_media_type_copper; - goto out; - default: - break; - } - - switch (hw->device_id) { - case IXGBE_DEV_ID_82599_KX4: - case IXGBE_DEV_ID_82599_KX4_MEZZ: - case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: - case IXGBE_DEV_ID_82599_KR: - case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: - case IXGBE_DEV_ID_82599_XAUI_LOM: - /* Default device ID is mezzanine card KX/KX4 */ - media_type = ixgbe_media_type_backplane; - break; - case IXGBE_DEV_ID_82599_SFP: - case IXGBE_DEV_ID_82599_SFP_FCOE: - case IXGBE_DEV_ID_82599_SFP_EM: - case IXGBE_DEV_ID_82599_SFP_SF2: - case IXGBE_DEV_ID_82599_SFP_SF_QP: - case IXGBE_DEV_ID_82599EN_SFP: - media_type = ixgbe_media_type_fiber; - break; - case IXGBE_DEV_ID_82599_CX4: - media_type = ixgbe_media_type_cx4; - break; - case IXGBE_DEV_ID_82599_T3_LOM: - media_type = ixgbe_media_type_copper; - break; - case IXGBE_DEV_ID_82599_LS: - media_type = ixgbe_media_type_fiber_lco; - break; - case IXGBE_DEV_ID_82599_QSFP_SF_QP: - media_type = ixgbe_media_type_fiber_qsfp; - break; - default: - media_type = ixgbe_media_type_unknown; - break; - } -out: - return media_type; -} - -/** - * ixgbe_stop_mac_link_on_d3_82599 - 
Disables link on D3 - * @hw: pointer to hardware structure - * - * Disables link during D3 power down sequence. - * - **/ -void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) -{ - u32 autoc2_reg; - u16 ee_ctrl_2 = 0; - - DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599"); - ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); - - if (!ixgbe_mng_present(hw) && !hw->wol_enabled && - ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { - autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); - autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); - } -} - -/** - * ixgbe_start_mac_link_82599 - Setup MAC link settings - * @hw: pointer to hardware structure - * @autoneg_wait_to_complete: true when waiting for completion is needed - * - * Configures link settings based on values in the ixgbe_hw struct. - * Restarts the link. Performs autonegotiation if needed. - **/ -s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, - bool autoneg_wait_to_complete) -{ - u32 autoc_reg; - u32 links_reg; - u32 i; - s32 status = IXGBE_SUCCESS; - bool got_lock = false; - - DEBUGFUNC("ixgbe_start_mac_link_82599"); - - /* reset_pipeline requires us to hold this lock as it writes to - * AUTOC. 
- */ - if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { - status = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (status != IXGBE_SUCCESS) - goto out; - - got_lock = true; - } - - /* Restart link */ - ixgbe_reset_pipeline_82599(hw); - - if (got_lock) - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); - - /* Only poll for autoneg to complete if specified to do so */ - if (autoneg_wait_to_complete) { - autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == - IXGBE_AUTOC_LMS_KX4_KX_KR || - (autoc_reg & IXGBE_AUTOC_LMS_MASK) == - IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || - (autoc_reg & IXGBE_AUTOC_LMS_MASK) == - IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { - links_reg = 0; /* Just in case Autoneg time = 0 */ - for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { - links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); - if (links_reg & IXGBE_LINKS_KX_AN_COMP) - break; - msec_delay(100); - } - if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { - status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; - DEBUGOUT("Autoneg did not complete.\n"); - } - } - } - - /* Add delay to filter out noises during initial link setup */ - msec_delay(50); - -out: - return status; -} - -/** - * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser - * @hw: pointer to hardware structure - * - * The base drivers may require better control over SFP+ module - * PHY states. This includes selectively shutting down the Tx - * laser on the PHY, effectively halting physical link. 
- **/ -void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) -{ - u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); - - /* Blocked by MNG FW so bail */ - if (ixgbe_check_reset_blocked(hw)) - return; - - /* Disable Tx laser; allow 100us to go dark per spec */ - esdp_reg |= IXGBE_ESDP_SDP3; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); - usec_delay(100); -} - -/** - * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser - * @hw: pointer to hardware structure - * - * The base drivers may require better control over SFP+ module - * PHY states. This includes selectively turning on the Tx - * laser on the PHY, effectively starting physical link. - **/ -void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) -{ - u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); - - /* Enable Tx laser; allow 100ms to light up */ - esdp_reg &= ~IXGBE_ESDP_SDP3; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); - msec_delay(100); -} - -/** - * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser - * @hw: pointer to hardware structure - * - * When the driver changes the link speeds that it can support, - * it sets autotry_restart to true to indicate that we need to - * initiate a new autotry session with the link partner. To do - * so, we set the speed then disable and re-enable the Tx laser, to - * alert the link partner that it also needs to restart autotry on its - * end. This is consistent with true clause 37 autoneg, which also - * involves a loss of signal. 
- **/ -void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) -{ - DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber"); - - /* Blocked by MNG FW so bail */ - if (ixgbe_check_reset_blocked(hw)) - return; - - if (hw->mac.autotry_restart) { - ixgbe_disable_tx_laser_multispeed_fiber(hw); - ixgbe_enable_tx_laser_multispeed_fiber(hw); - hw->mac.autotry_restart = false; - } -} - -/** - * ixgbe_set_hard_rate_select_speed - Set module link speed - * @hw: pointer to hardware structure - * @speed: link speed to set - * - * Set module link speed via RS0/RS1 rate select pins. - */ -void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, - ixgbe_link_speed speed) -{ - u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); - - switch (speed) { - case IXGBE_LINK_SPEED_10GB_FULL: - esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); - break; - case IXGBE_LINK_SPEED_1GB_FULL: - esdp_reg &= ~IXGBE_ESDP_SDP5; - esdp_reg |= IXGBE_ESDP_SDP5_DIR; - break; - default: - DEBUGOUT("Invalid fixed module speed\n"); - return; - } - - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); -} - -/** - * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true when waiting for completion is needed - * - * Implements the Intel SmartSpeed algorithm. 
- **/ -s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) -{ - s32 status = IXGBE_SUCCESS; - ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; - s32 i, j; - bool link_up = false; - u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - - DEBUGFUNC("ixgbe_setup_mac_link_smartspeed"); - - /* Set autoneg_advertised value based on input link speed */ - hw->phy.autoneg_advertised = 0; - - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; - - if (speed & IXGBE_LINK_SPEED_100_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; - - /* - * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the - * autoneg advertisement if link is unable to be established at the - * highest negotiated rate. This can sometimes happen due to integrity - * issues with the physical media connection. - */ - - /* First, try to get link with full advertisement */ - hw->phy.smart_speed_active = false; - for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { - status = ixgbe_setup_mac_link_82599(hw, speed, - autoneg_wait_to_complete); - if (status != IXGBE_SUCCESS) - goto out; - - /* - * Wait for the controller to acquire link. Per IEEE 802.3ap, - * Section 73.10.2, we may have to wait up to 500ms if KR is - * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per - * Table 9 in the AN MAS. - */ - for (i = 0; i < 5; i++) { - msec_delay(100); - - /* If we have link, just jump out */ - status = ixgbe_check_link(hw, &link_speed, &link_up, - false); - if (status != IXGBE_SUCCESS) - goto out; - - if (link_up) - goto out; - } - } - - /* - * We didn't get link. If we advertised KR plus one of KX4/KX - * (or BX4/BX), then disable KR and try again. 
- */ - if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || - ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) - goto out; - - /* Turn SmartSpeed on to disable KR support */ - hw->phy.smart_speed_active = true; - status = ixgbe_setup_mac_link_82599(hw, speed, - autoneg_wait_to_complete); - if (status != IXGBE_SUCCESS) - goto out; - - /* - * Wait for the controller to acquire link. 600ms will allow for - * the AN link_fail_inhibit_timer as well for multiple cycles of - * parallel detect, both 10g and 1g. This allows for the maximum - * connect attempts as defined in the AN MAS table 73-7. - */ - for (i = 0; i < 6; i++) { - msec_delay(100); - - /* If we have link, just jump out */ - status = ixgbe_check_link(hw, &link_speed, &link_up, false); - if (status != IXGBE_SUCCESS) - goto out; - - if (link_up) - goto out; - } - - /* We didn't get link. Turn SmartSpeed back off. */ - hw->phy.smart_speed_active = false; - status = ixgbe_setup_mac_link_82599(hw, speed, - autoneg_wait_to_complete); - -out: - if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) - DEBUGOUT("Smartspeed has downgraded the link speed " - "from the maximum advertised\n"); - return status; -} - -/** - * ixgbe_setup_mac_link_82599 - Set MAC link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true when waiting for completion is needed - * - * Set the link speed in the AUTOC register and restarts link. 
- **/ -s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) -{ - bool autoneg = false; - s32 status = IXGBE_SUCCESS; - u32 pma_pmd_1g, link_mode; - u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */ - u32 orig_autoc = 0; /* holds the cached value of AUTOC register */ - u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */ - u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); - u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; - u32 links_reg; - u32 i; - ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; - - DEBUGFUNC("ixgbe_setup_mac_link_82599"); - - /* Check to see if speed passed in is supported. */ - status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); - if (status) - goto out; - - speed &= link_capabilities; - - if (speed == IXGBE_LINK_SPEED_UNKNOWN) { - status = IXGBE_ERR_LINK_SETUP; - goto out; - } - - /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ - if (hw->mac.orig_link_settings_stored) - orig_autoc = hw->mac.orig_autoc; - else - orig_autoc = autoc; - - link_mode = autoc & IXGBE_AUTOC_LMS_MASK; - pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; - - if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || - link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || - link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { - /* Set KX4/KX/KR support according to speed requested */ - autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); - if (speed & IXGBE_LINK_SPEED_10GB_FULL) { - if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) - autoc |= IXGBE_AUTOC_KX4_SUPP; - if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && - (hw->phy.smart_speed_active == false)) - autoc |= IXGBE_AUTOC_KR_SUPP; - } - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - autoc |= IXGBE_AUTOC_KX_SUPP; - } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && - (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || - link_mode 
== IXGBE_AUTOC_LMS_1G_AN)) { - /* Switch from 1G SFI to 10G SFI if requested */ - if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && - (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { - autoc &= ~IXGBE_AUTOC_LMS_MASK; - autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; - } - } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && - (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { - /* Switch from 10G SFI to 1G SFI if requested */ - if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && - (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { - autoc &= ~IXGBE_AUTOC_LMS_MASK; - if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel) - autoc |= IXGBE_AUTOC_LMS_1G_AN; - else - autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; - } - } - - if (autoc != current_autoc) { - /* Restart link */ - status = hw->mac.ops.prot_autoc_write(hw, autoc, false); - if (status != IXGBE_SUCCESS) - goto out; - - /* Only poll for autoneg to complete if specified to do so */ - if (autoneg_wait_to_complete) { - if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || - link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || - link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { - links_reg = 0; /*Just in case Autoneg time=0*/ - for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { - links_reg = - IXGBE_READ_REG(hw, IXGBE_LINKS); - if (links_reg & IXGBE_LINKS_KX_AN_COMP) - break; - msec_delay(100); - } - if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { - status = - IXGBE_ERR_AUTONEG_NOT_COMPLETE; - DEBUGOUT("Autoneg did not complete.\n"); - } - } - } - - /* Add delay to filter out noises during initial link setup */ - msec_delay(50); - } - -out: - return status; -} - -/** - * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true if waiting is needed to complete - * - * Restarts link on PHY and MAC based on settings passed in. 
- **/ -STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) -{ - s32 status; - - DEBUGFUNC("ixgbe_setup_copper_link_82599"); - - /* Setup the PHY according to input speed */ - status = hw->phy.ops.setup_link_speed(hw, speed, - autoneg_wait_to_complete); - /* Set up MAC */ - ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); - - return status; -} - -/** - * ixgbe_reset_hw_82599 - Perform hardware reset - * @hw: pointer to hardware structure - * - * Resets the hardware by resetting the transmit and receive units, masks - * and clears all interrupts, perform a PHY reset, and perform a link (MAC) - * reset. - **/ -s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) -{ - ixgbe_link_speed link_speed; - s32 status; - u32 ctrl = 0; - u32 i, autoc, autoc2; - u32 curr_lms; - bool link_up = false; - - DEBUGFUNC("ixgbe_reset_hw_82599"); - - /* Call adapter stop to disable tx/rx and clear interrupts */ - status = hw->mac.ops.stop_adapter(hw); - if (status != IXGBE_SUCCESS) - goto reset_hw_out; - - /* flush pending Tx transactions */ - ixgbe_clear_tx_pending(hw); - - /* PHY ops must be identified and initialized prior to reset */ - - /* Identify PHY and related function pointers */ - status = hw->phy.ops.init(hw); - - if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) - goto reset_hw_out; - - /* Setup SFP module if there is one present. */ - if (hw->phy.sfp_setup_needed) { - status = hw->mac.ops.setup_sfp(hw); - hw->phy.sfp_setup_needed = false; - } - - if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) - goto reset_hw_out; - - /* Reset PHY */ - if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) - hw->phy.ops.reset(hw); - - /* remember AUTOC from before we reset */ - curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK; - -mac_reset_top: - /* - * Issue global reset to the MAC. Needs to be SW reset if link is up. 
- * If link reset is used when link is up, it might reset the PHY when - * mng is using it. If link is down or the flag to force full link - * reset is set, then perform link reset. - */ - ctrl = IXGBE_CTRL_LNK_RST; - if (!hw->force_full_reset) { - hw->mac.ops.check_link(hw, &link_speed, &link_up, false); - if (link_up) - ctrl = IXGBE_CTRL_RST; - } - - ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); - IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); - IXGBE_WRITE_FLUSH(hw); - - /* Poll for reset bit to self-clear meaning reset is complete */ - for (i = 0; i < 10; i++) { - usec_delay(1); - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - if (!(ctrl & IXGBE_CTRL_RST_MASK)) - break; - } - - if (ctrl & IXGBE_CTRL_RST_MASK) { - status = IXGBE_ERR_RESET_FAILED; - DEBUGOUT("Reset polling failed to complete.\n"); - } - - msec_delay(50); - - /* - * Double resets are required for recovery from certain error - * conditions. Between resets, it is necessary to stall to - * allow time for any pending HW events to complete. - */ - if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { - hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; - goto mac_reset_top; - } - - /* - * Store the original AUTOC/AUTOC2 values if they have not been - * stored off yet. Otherwise restore the stored original - * values since the reset operation sets back to defaults. - */ - autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); - - /* Enable link if disabled in NVM */ - if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) { - autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); - IXGBE_WRITE_FLUSH(hw); - } - - if (hw->mac.orig_link_settings_stored == false) { - hw->mac.orig_autoc = autoc; - hw->mac.orig_autoc2 = autoc2; - hw->mac.orig_link_settings_stored = true; - } else { - - /* If MNG FW is running on a multi-speed device that - * doesn't autoneg with out driver support we need to - * leave LMS in the state it was before we MAC reset. 
- * Likewise if we support WoL we don't want change the - * LMS state. - */ - if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) || - hw->wol_enabled) - hw->mac.orig_autoc = - (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) | - curr_lms; - - if (autoc != hw->mac.orig_autoc) { - status = hw->mac.ops.prot_autoc_write(hw, - hw->mac.orig_autoc, - false); - if (status != IXGBE_SUCCESS) - goto reset_hw_out; - } - - if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != - (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { - autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; - autoc2 |= (hw->mac.orig_autoc2 & - IXGBE_AUTOC2_UPPER_MASK); - IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); - } - } - - /* Store the permanent mac address */ - hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); - - /* - * Store MAC address from RAR0, clear receive address registers, and - * clear the multicast table. Also reset num_rar_entries to 128, - * since we modify this value when programming the SAN MAC address. - */ - hw->mac.num_rar_entries = 128; - hw->mac.ops.init_rx_addrs(hw); - - /* Store the permanent SAN mac address */ - hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); - - /* Add the SAN MAC address to the RAR only if it's a valid address */ - if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { - /* Save the SAN MAC RAR index */ - hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; - - hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, - hw->mac.san_addr, 0, IXGBE_RAH_AV); - - /* clear VMDq pool/queue selection for this RAR */ - hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, - IXGBE_CLEAR_VMDQ_ALL); - - /* Reserve the last RAR for the SAN MAC address */ - hw->mac.num_rar_entries--; - } - - /* Store the alternative WWNN/WWPN prefix */ - hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, - &hw->mac.wwpn_prefix); - -reset_hw_out: - return status; -} - -/** - * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete - * @hw: pointer to hardware structure - * @fdircmd: current value 
of FDIRCMD register - */ -STATIC s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) -{ - int i; - - for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { - *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); - if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK)) - return IXGBE_SUCCESS; - usec_delay(10); - } - - return IXGBE_ERR_FDIR_CMD_INCOMPLETE; -} - -/** - * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. - * @hw: pointer to hardware structure - **/ -s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) -{ - s32 err; - int i; - u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); - u32 fdircmd; - fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; - - DEBUGFUNC("ixgbe_reinit_fdir_tables_82599"); - - /* - * Before starting reinitialization process, - * FDIRCMD.CMD must be zero. - */ - err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); - if (err) { - DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n"); - return err; - } - - IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); - IXGBE_WRITE_FLUSH(hw); - /* - * 82599 adapters flow director init flow cannot be restarted, - * Workaround 82599 silicon errata by performing the following steps - * before re-writing the FDIRCTRL control register with the same value. - * - write 1 to bit 8 of FDIRCMD register & - * - write 0 to bit 8 of FDIRCMD register - */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, - (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | - IXGBE_FDIRCMD_CLEARHT)); - IXGBE_WRITE_FLUSH(hw); - IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, - (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & - ~IXGBE_FDIRCMD_CLEARHT)); - IXGBE_WRITE_FLUSH(hw); - /* - * Clear FDIR Hash register to clear any leftover hashes - * waiting to be programmed. 
- */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00); - IXGBE_WRITE_FLUSH(hw); - - IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); - IXGBE_WRITE_FLUSH(hw); - - /* Poll init-done after we write FDIRCTRL register */ - for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { - if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & - IXGBE_FDIRCTRL_INIT_DONE) - break; - msec_delay(1); - } - if (i >= IXGBE_FDIR_INIT_DONE_POLL) { - DEBUGOUT("Flow Director Signature poll time exceeded!\n"); - return IXGBE_ERR_FDIR_REINIT_FAILED; - } - - /* Clear FDIR statistics registers (read to clear) */ - IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); - IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); - IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); - IXGBE_READ_REG(hw, IXGBE_FDIRMISS); - IXGBE_READ_REG(hw, IXGBE_FDIRLEN); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers - * @hw: pointer to hardware structure - * @fdirctrl: value to write to flow director control register - **/ -STATIC void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) -{ - int i; - - DEBUGFUNC("ixgbe_fdir_enable_82599"); - - /* Prime the keys for hashing */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); - IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); - - /* - * Poll init-done after we write the register. Estimated times: - * 10G: PBALLOC = 11b, timing is 60us - * 1G: PBALLOC = 11b, timing is 600us - * 100M: PBALLOC = 11b, timing is 6ms - * - * Multiple these timings by 4 if under full Rx load - * - * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for - * 1 msec per poll time. If we're at line rate and drop to 100M, then - * this might not finish in our poll time, but we can live with that - * for now. 
- */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); - IXGBE_WRITE_FLUSH(hw); - for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { - if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & - IXGBE_FDIRCTRL_INIT_DONE) - break; - msec_delay(1); - } - - if (i >= IXGBE_FDIR_INIT_DONE_POLL) - DEBUGOUT("Flow Director poll time exceeded!\n"); -} - -/** - * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters - * @hw: pointer to hardware structure - * @fdirctrl: value to write to flow director control register, initially - * contains just the value of the Rx packet buffer allocation - **/ -s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) -{ - DEBUGFUNC("ixgbe_init_fdir_signature_82599"); - - /* - * Continue setup of fdirctrl register bits: - * Move the flexible bytes to use the ethertype - shift 6 words - * Set the maximum length per hash bucket to 0xA filters - * Send interrupt when 64 filters are left - */ - fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | - (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | - (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); - - /* write hashes and fdirctrl register, poll for completion */ - ixgbe_fdir_enable_82599(hw, fdirctrl); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters - * @hw: pointer to hardware structure - * @fdirctrl: value to write to flow director control register, initially - * contains just the value of the Rx packet buffer allocation - * @cloud_mode: true - cloud mode, false - other mode - **/ -s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl, - bool cloud_mode) -{ - DEBUGFUNC("ixgbe_init_fdir_perfect_82599"); - - /* - * Continue setup of fdirctrl register bits: - * Turn perfect match filtering on - * Report hash in RSS field of Rx wb descriptor - * Initialize the drop queue to queue 127 - * Move the flexible bytes to use the ethertype - shift 6 words - * Set the maximum length per hash bucket to 0xA filters - * Send 
interrupt when 64 (0x4 * 16) filters are left - */ - fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | - IXGBE_FDIRCTRL_REPORT_STATUS | - (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | - (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | - (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | - (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); - - if (cloud_mode) - fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD << - IXGBE_FDIRCTRL_FILTERMODE_SHIFT); - - /* write hashes and fdirctrl register, poll for completion */ - ixgbe_fdir_enable_82599(hw, fdirctrl); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue - * @hw: pointer to hardware structure - * @dropqueue: Rx queue index used for the dropped packets - **/ -void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue) -{ - u32 fdirctrl; - - DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599"); - /* Clear init done bit and drop queue field */ - fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); - fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE); - - /* Set drop queue */ - fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT); - if ((hw->mac.type == ixgbe_mac_X550) || - (hw->mac.type == ixgbe_mac_X550EM_x) || - (hw->mac.type == ixgbe_mac_X550EM_a)) - fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH; - - IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, - (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | - IXGBE_FDIRCMD_CLEARHT)); - IXGBE_WRITE_FLUSH(hw); - IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, - (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & - ~IXGBE_FDIRCMD_CLEARHT)); - IXGBE_WRITE_FLUSH(hw); - - /* write hashes and fdirctrl register, poll for completion */ - ixgbe_fdir_enable_82599(hw, fdirctrl); -} - -/* - * These defines allow us to quickly generate all of the necessary instructions - * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION - * for values 0 through 15 - */ -#define IXGBE_ATR_COMMON_HASH_KEY \ - (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) -#define 
IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ -do { \ - u32 n = (_n); \ - if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ - common_hash ^= lo_hash_dword >> n; \ - else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ - bucket_hash ^= lo_hash_dword >> n; \ - else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ - sig_hash ^= lo_hash_dword << (16 - n); \ - if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ - common_hash ^= hi_hash_dword >> n; \ - else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ - bucket_hash ^= hi_hash_dword >> n; \ - else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ - sig_hash ^= hi_hash_dword << (16 - n); \ -} while (0) - -/** - * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash - * @stream: input bitstream to compute the hash on - * - * This function is almost identical to the function above but contains - * several optimizations such as unwinding all of the loops, letting the - * compiler work out all of the conditional ifs since the keys are static - * defines, and computing two keys at once since the hashed dword stream - * will be the same for both keys. 
- **/ -u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, - union ixgbe_atr_hash_dword common) -{ - u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; - u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; - - /* record the flow_vm_vlan bits as they are a key part to the hash */ - flow_vm_vlan = IXGBE_NTOHL(input.dword); - - /* generate common hash dword */ - hi_hash_dword = IXGBE_NTOHL(common.dword); - - /* low dword is word swapped version of common */ - lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); - - /* apply flow ID/VM pool/VLAN ID bits to hash words */ - hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); - - /* Process bits 0 and 16 */ - IXGBE_COMPUTE_SIG_HASH_ITERATION(0); - - /* - * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to - * delay this because bit 0 of the stream should not be processed - * so we do not add the VLAN until after bit 0 was processed - */ - lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); - - /* Process remaining 30 bit of the key */ - IXGBE_COMPUTE_SIG_HASH_ITERATION(1); - IXGBE_COMPUTE_SIG_HASH_ITERATION(2); - IXGBE_COMPUTE_SIG_HASH_ITERATION(3); - IXGBE_COMPUTE_SIG_HASH_ITERATION(4); - IXGBE_COMPUTE_SIG_HASH_ITERATION(5); - IXGBE_COMPUTE_SIG_HASH_ITERATION(6); - IXGBE_COMPUTE_SIG_HASH_ITERATION(7); - IXGBE_COMPUTE_SIG_HASH_ITERATION(8); - IXGBE_COMPUTE_SIG_HASH_ITERATION(9); - IXGBE_COMPUTE_SIG_HASH_ITERATION(10); - IXGBE_COMPUTE_SIG_HASH_ITERATION(11); - IXGBE_COMPUTE_SIG_HASH_ITERATION(12); - IXGBE_COMPUTE_SIG_HASH_ITERATION(13); - IXGBE_COMPUTE_SIG_HASH_ITERATION(14); - IXGBE_COMPUTE_SIG_HASH_ITERATION(15); - - /* combine common_hash result with signature and bucket hashes */ - bucket_hash ^= common_hash; - bucket_hash &= IXGBE_ATR_HASH_MASK; - - sig_hash ^= common_hash << 16; - sig_hash &= IXGBE_ATR_HASH_MASK << 16; - - /* return completed signature hash */ - return sig_hash ^ bucket_hash; -} - -/** - * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter - 
* @hw: pointer to hardware structure - * @input: unique input dword - * @common: compressed common input dword - * @queue: queue index to direct traffic to - * - * Note that the tunnel bit in input must not be set when the hardware - * tunneling support does not exist. - **/ -void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_hash_dword input, - union ixgbe_atr_hash_dword common, - u8 queue) -{ - u64 fdirhashcmd; - u8 flow_type; - bool tunnel; - u32 fdircmd; - - DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599"); - - /* - * Get the flow_type in order to program FDIRCMD properly - * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 - * fifth is FDIRCMD.TUNNEL_FILTER - */ - tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK); - flow_type = input.formatted.flow_type & - (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1); - switch (flow_type) { - case IXGBE_ATR_FLOW_TYPE_TCPV4: - case IXGBE_ATR_FLOW_TYPE_UDPV4: - case IXGBE_ATR_FLOW_TYPE_SCTPV4: - case IXGBE_ATR_FLOW_TYPE_TCPV6: - case IXGBE_ATR_FLOW_TYPE_UDPV6: - case IXGBE_ATR_FLOW_TYPE_SCTPV6: - break; - default: - DEBUGOUT(" Error on flow type input\n"); - return; - } - - /* configure FDIRCMD register */ - fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | - IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; - fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; - fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; - if (tunnel) - fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; - - /* - * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits - * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. 
- */ - fdirhashcmd = (u64)fdircmd << 32; - fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); - IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); - - DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); - - return; -} - -#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ -do { \ - u32 n = (_n); \ - if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ - bucket_hash ^= lo_hash_dword >> n; \ - if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ - bucket_hash ^= hi_hash_dword >> n; \ -} while (0) - -/** - * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash - * @atr_input: input bitstream to compute the hash on - * @input_mask: mask for the input bitstream - * - * This function serves two main purposes. First it applies the input_mask - * to the atr_input resulting in a cleaned up atr_input data stream. - * Secondly it computes the hash and stores it in the bkt_hash field at - * the end of the input byte stream. This way it will be available for - * future use without needing to recompute the hash. 
- **/ -void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, - union ixgbe_atr_input *input_mask) -{ - - u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; - u32 bucket_hash = 0; - u32 hi_dword = 0; - u32 i = 0; - - /* Apply masks to input data */ - for (i = 0; i < 14; i++) - input->dword_stream[i] &= input_mask->dword_stream[i]; - - /* record the flow_vm_vlan bits as they are a key part to the hash */ - flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]); - - /* generate common hash dword */ - for (i = 1; i <= 13; i++) - hi_dword ^= input->dword_stream[i]; - hi_hash_dword = IXGBE_NTOHL(hi_dword); - - /* low dword is word swapped version of common */ - lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); - - /* apply flow ID/VM pool/VLAN ID bits to hash words */ - hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); - - /* Process bits 0 and 16 */ - IXGBE_COMPUTE_BKT_HASH_ITERATION(0); - - /* - * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to - * delay this because bit 0 of the stream should not be processed - * so we do not add the VLAN until after bit 0 was processed - */ - lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); - - /* Process remaining 30 bit of the key */ - for (i = 1; i <= 15; i++) - IXGBE_COMPUTE_BKT_HASH_ITERATION(i); - - /* - * Limit hash to 13 bits since max bucket count is 8K. - * Store result at the end of the input stream. - */ - input->formatted.bkt_hash = bucket_hash & 0x1FFF; -} - -/** - * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks - * @input_mask: mask to be bit swapped - * - * The source and destination port masks for flow director are bit swapped - * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to - * generate a correctly swapped value we need to bit swap the mask and that - * is what is accomplished by this function. 
- **/ -STATIC u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) -{ - u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port); - mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; - mask |= IXGBE_NTOHS(input_mask->formatted.src_port); - mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); - mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); - mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); - return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); -} - -/* - * These two macros are meant to address the fact that we have registers - * that are either all or in part big-endian. As a result on big-endian - * systems we will end up byte swapping the value to little-endian before - * it is byte swapped again and written to the hardware in the original - * big-endian format. - */ -#define IXGBE_STORE_AS_BE32(_value) \ - (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ - (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) - -#define IXGBE_WRITE_REG_BE32(a, reg, value) \ - IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value))) - -#define IXGBE_STORE_AS_BE16(_value) \ - IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8)) - -s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input_mask, bool cloud_mode) -{ - /* mask IPv6 since it is currently not supported */ - u32 fdirm = IXGBE_FDIRM_DIPv6; - u32 fdirtcpm; - u32 fdirip6m; - DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599"); - - /* - * Program the relevant mask registers. If src/dst_port or src/dst_addr - * are zero, then assume a full mask for that field. Also assume that - * a VLAN of 0 is unspecified, so mask that out as well. L4type - * cannot be masked out in this implementation. - * - * This also assumes IPv4 only. IPv6 masking isn't supported at this - * point in time. 
- */ - - /* verify bucket hash is cleared on hash generation */ - if (input_mask->formatted.bkt_hash) - DEBUGOUT(" bucket hash should always be 0 in mask\n"); - - /* Program FDIRM and verify partial masks */ - switch (input_mask->formatted.vm_pool & 0x7F) { - case 0x0: - fdirm |= IXGBE_FDIRM_POOL; - case 0x7F: - break; - default: - DEBUGOUT(" Error on vm pool mask\n"); - return IXGBE_ERR_CONFIG; - } - - switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) { - case 0x0: - fdirm |= IXGBE_FDIRM_L4P; - if (input_mask->formatted.dst_port || - input_mask->formatted.src_port) { - DEBUGOUT(" Error on src/dst port mask\n"); - return IXGBE_ERR_CONFIG; - } - case IXGBE_ATR_L4TYPE_MASK: - break; - default: - DEBUGOUT(" Error on flow type mask\n"); - return IXGBE_ERR_CONFIG; - } - - switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) { - case 0x0000: - /* mask VLAN ID */ - fdirm |= IXGBE_FDIRM_VLANID; - /* fall through */ - case 0x0FFF: - /* mask VLAN priority */ - fdirm |= IXGBE_FDIRM_VLANP; - break; - case 0xE000: - /* mask VLAN ID only */ - fdirm |= IXGBE_FDIRM_VLANID; - /* fall through */ - case 0xEFFF: - /* no VLAN fields masked */ - break; - default: - DEBUGOUT(" Error on VLAN mask\n"); - return IXGBE_ERR_CONFIG; - } - - switch (input_mask->formatted.flex_bytes & 0xFFFF) { - case 0x0000: - /* Mask Flex Bytes */ - fdirm |= IXGBE_FDIRM_FLEX; - /* fall through */ - case 0xFFFF: - break; - default: - DEBUGOUT(" Error on flexible byte mask\n"); - return IXGBE_ERR_CONFIG; - } - - if (cloud_mode) { - fdirm |= IXGBE_FDIRM_L3P; - fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT); - fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK; - - switch (input_mask->formatted.inner_mac[0] & 0xFF) { - case 0x00: - /* Mask inner MAC, fall through */ - fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC; - case 0xFF: - break; - default: - DEBUGOUT(" Error on inner_mac byte mask\n"); - return IXGBE_ERR_CONFIG; - } - - switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) { - case 0x0: - /* Mask 
vxlan id */ - fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI; - break; - case 0x00FFFFFF: - fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24; - break; - case 0xFFFFFFFF: - break; - default: - DEBUGOUT(" Error on TNI/VNI byte mask\n"); - return IXGBE_ERR_CONFIG; - } - - switch (input_mask->formatted.tunnel_type & 0xFFFF) { - case 0x0: - /* Mask turnnel type, fall through */ - fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE; - case 0xFFFF: - break; - default: - DEBUGOUT(" Error on tunnel type byte mask\n"); - return IXGBE_ERR_CONFIG; - } - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m); - - /* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM, - * FDIRSIP4M and FDIRDIP4M in cloud mode to allow - * L3/L3 packets to tunnel. - */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF); - IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF); - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF); - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF); - switch (hw->mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF); - break; - default: - break; - } - } - - /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); - - if (!cloud_mode) { - /* store the TCP/UDP port masks, bit reversed from port - * layout */ - fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); - - /* write both the same so that UDP and TCP use the same mask */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); - IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); - /* also use it for SCTP */ - switch (hw->mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); - break; - default: - break; - } - - /* store source and destination IP masks (big-enian) */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, - ~input_mask->formatted.src_ip[0]); - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, - ~input_mask->formatted.dst_ip[0]); - } - return IXGBE_SUCCESS; -} 
- -s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input, - u16 soft_id, u8 queue, bool cloud_mode) -{ - u32 fdirport, fdirvlan, fdirhash, fdircmd; - u32 addr_low, addr_high; - u32 cloud_type = 0; - s32 err; - - DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599"); - if (!cloud_mode) { - /* currently IPv6 is not supported, must be programmed with 0 */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), - input->formatted.src_ip[0]); - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), - input->formatted.src_ip[1]); - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), - input->formatted.src_ip[2]); - - /* record the source address (big-endian) */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, - input->formatted.src_ip[0]); - - /* record the first 32 bits of the destination address - * (big-endian) */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, - input->formatted.dst_ip[0]); - - /* record source and destination port (little-endian)*/ - fdirport = IXGBE_NTOHS(input->formatted.dst_port); - fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; - fdirport |= IXGBE_NTOHS(input->formatted.src_port); - IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); - } - - /* record VLAN (little-endian) and flex_bytes(big-endian) */ - fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); - fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; - fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id); - IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); - - if (cloud_mode) { - if (input->formatted.tunnel_type != 0) - cloud_type = 0x80000000; - - addr_low = ((u32)input->formatted.inner_mac[0] | - ((u32)input->formatted.inner_mac[1] << 8) | - ((u32)input->formatted.inner_mac[2] << 16) | - ((u32)input->formatted.inner_mac[3] << 24)); - addr_high = ((u32)input->formatted.inner_mac[4] | - ((u32)input->formatted.inner_mac[5] << 8)); - cloud_type |= addr_high; - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low); - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type); - IXGBE_WRITE_REG_BE32(hw, 
IXGBE_FDIRSIPv6(2), input->formatted.tni_vni); - } - - /* configure FDIRHASH register */ - fdirhash = input->formatted.bkt_hash; - fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; - IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); - - /* - * flush all previous writes to make certain registers are - * programmed prior to issuing the command - */ - IXGBE_WRITE_FLUSH(hw); - - /* configure FDIRCMD register */ - fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | - IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; - if (queue == IXGBE_FDIR_DROP_QUEUE) - fdircmd |= IXGBE_FDIRCMD_DROP; - if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK) - fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; - fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; - fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; - fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; - - IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); - err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); - if (err) { - DEBUGOUT("Flow Director command did not complete!\n"); - return err; - } - - return IXGBE_SUCCESS; -} - -s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input, - u16 soft_id) -{ - u32 fdirhash; - u32 fdircmd; - s32 err; - - /* configure FDIRHASH register */ - fdirhash = input->formatted.bkt_hash; - fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; - IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); - - /* flush hash to HW */ - IXGBE_WRITE_FLUSH(hw); - - /* Query if filter is present */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); - - err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); - if (err) { - DEBUGOUT("Flow Director command did not complete!\n"); - return err; - } - - /* if filter exists in hardware then remove it */ - if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { - IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); - IXGBE_WRITE_FLUSH(hw); - IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, - 
IXGBE_FDIRCMD_CMD_REMOVE_FLOW); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter - * @hw: pointer to hardware structure - * @input: input bitstream - * @input_mask: mask for the input bitstream - * @soft_id: software index for the filters - * @queue: queue index to direct traffic to - * - * Note that the caller to this function must lock before calling, since the - * hardware writes must be protected from one another. - **/ -s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input, - union ixgbe_atr_input *input_mask, - u16 soft_id, u8 queue, bool cloud_mode) -{ - s32 err = IXGBE_ERR_CONFIG; - - DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599"); - - /* - * Check flow_type formatting, and bail out before we touch the hardware - * if there's a configuration issue - */ - switch (input->formatted.flow_type) { - case IXGBE_ATR_FLOW_TYPE_IPV4: - case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4: - input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK; - if (input->formatted.dst_port || input->formatted.src_port) { - DEBUGOUT(" Error on src/dst port\n"); - return IXGBE_ERR_CONFIG; - } - break; - case IXGBE_ATR_FLOW_TYPE_SCTPV4: - case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4: - if (input->formatted.dst_port || input->formatted.src_port) { - DEBUGOUT(" Error on src/dst port\n"); - return IXGBE_ERR_CONFIG; - } - /* fall through */ - case IXGBE_ATR_FLOW_TYPE_TCPV4: - case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4: - case IXGBE_ATR_FLOW_TYPE_UDPV4: - case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4: - input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | - IXGBE_ATR_L4TYPE_MASK; - break; - default: - DEBUGOUT(" Error on flow type input\n"); - return err; - } - - /* program input mask into the HW */ - err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode); - if (err) - return err; - - /* apply mask and compute/store hash */ - ixgbe_atr_compute_perfect_hash_82599(input, input_mask); - - /* 
program filters to filter memory */ - return ixgbe_fdir_write_perfect_filter_82599(hw, input, - soft_id, queue, cloud_mode); -} - -/** - * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register - * @hw: pointer to hardware structure - * @reg: analog register to read - * @val: read value - * - * Performs read operation to Omer analog register specified. - **/ -s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) -{ - u32 core_ctl; - - DEBUGFUNC("ixgbe_read_analog_reg8_82599"); - - IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | - (reg << 8)); - IXGBE_WRITE_FLUSH(hw); - usec_delay(10); - core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); - *val = (u8)core_ctl; - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register - * @hw: pointer to hardware structure - * @reg: atlas register to write - * @val: value to write - * - * Performs write operation to Omer analog register specified. - **/ -s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) -{ - u32 core_ctl; - - DEBUGFUNC("ixgbe_write_analog_reg8_82599"); - - core_ctl = (reg << 8) | val; - IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); - IXGBE_WRITE_FLUSH(hw); - usec_delay(10); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx - * @hw: pointer to hardware structure - * - * Starts the hardware using the generic start_hw function - * and the generation start_hw function. - * Then performs revision-specific operations, if any. 
- **/ -s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_start_hw_82599"); - - ret_val = ixgbe_start_hw_generic(hw); - if (ret_val != IXGBE_SUCCESS) - goto out; - - ret_val = ixgbe_start_hw_gen2(hw); - if (ret_val != IXGBE_SUCCESS) - goto out; - - /* We need to run link autotry after the driver loads */ - hw->mac.autotry_restart = true; - - if (ret_val == IXGBE_SUCCESS) - ret_val = ixgbe_verify_fw_version_82599(hw); -out: - return ret_val; -} - -/** - * ixgbe_identify_phy_82599 - Get physical layer module - * @hw: pointer to hardware structure - * - * Determines the physical layer module found on the current adapter. - * If PHY already detected, maintains current PHY type in hw struct, - * otherwise executes the PHY detection routine. - **/ -s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) -{ - s32 status; - - DEBUGFUNC("ixgbe_identify_phy_82599"); - - /* Detect PHY if not unknown - returns success if already detected. */ - status = ixgbe_identify_phy_generic(hw); - if (status != IXGBE_SUCCESS) { - /* 82599 10GBASE-T requires an external PHY */ - if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) - return status; - else - status = ixgbe_identify_module_generic(hw); - } - - /* Set PHY type none if no PHY detected */ - if (hw->phy.type == ixgbe_phy_unknown) { - hw->phy.type = ixgbe_phy_none; - return IXGBE_SUCCESS; - } - - /* Return error if SFP module has been detected but is not supported */ - if (hw->phy.type == ixgbe_phy_sfp_unsupported) - return IXGBE_ERR_SFP_NOT_SUPPORTED; - - return status; -} - -/** - * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type - * @hw: pointer to hardware structure - * - * Determines physical layer capabilities of the current configuration. 
- **/ -u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) -{ - u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; - u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); - u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; - u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; - u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; - u16 ext_ability = 0; - - DEBUGFUNC("ixgbe_get_support_physical_layer_82599"); - - hw->phy.ops.identify(hw); - - switch (hw->phy.type) { - case ixgbe_phy_tn: - case ixgbe_phy_cu_unknown: - hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); - if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) - physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; - if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) - physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; - if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) - physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; - goto out; - default: - break; - } - - switch (autoc & IXGBE_AUTOC_LMS_MASK) { - case IXGBE_AUTOC_LMS_1G_AN: - case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: - if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { - physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | - IXGBE_PHYSICAL_LAYER_1000BASE_BX; - goto out; - } else - /* SFI mode so read SFP module */ - goto sfp_check; - break; - case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: - if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; - else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; - else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; - goto out; - break; - case IXGBE_AUTOC_LMS_10G_SERIAL: - if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; - goto out; - } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) - goto 
sfp_check; - break; - case IXGBE_AUTOC_LMS_KX4_KX_KR: - case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: - if (autoc & IXGBE_AUTOC_KX_SUPP) - physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; - if (autoc & IXGBE_AUTOC_KX4_SUPP) - physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; - if (autoc & IXGBE_AUTOC_KR_SUPP) - physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; - goto out; - break; - default: - goto out; - break; - } - -sfp_check: - /* SFP check must be done last since DA modules are sometimes used to - * test KR mode - we need to id KR mode correctly before SFP module. - * Call identify_sfp because the pluggable module may have changed */ - physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw); -out: - return physical_layer; -} - -/** - * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 - * @hw: pointer to hardware structure - * @regval: register value to write to RXCTRL - * - * Enables the Rx DMA unit for 82599 - **/ -s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) -{ - - DEBUGFUNC("ixgbe_enable_rx_dma_82599"); - - /* - * Workaround for 82599 silicon errata when enabling the Rx datapath. - * If traffic is incoming before we enable the Rx unit, it could hang - * the Rx DMA unit. Therefore, make sure the security engine is - * completely disabled prior to enabling the Rx unit. - */ - - hw->mac.ops.disable_sec_rx_path(hw); - - if (regval & IXGBE_RXCTRL_RXEN) - ixgbe_enable_rx(hw); - else - ixgbe_disable_rx(hw); - - hw->mac.ops.enable_sec_rx_path(hw); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_verify_fw_version_82599 - verify FW version for 82599 - * @hw: pointer to hardware structure - * - * Verifies that installed the firmware version is 0.6 or higher - * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. - * - * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or - * if the FW version is not supported. 
- **/ -STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_ERR_EEPROM_VERSION; - u16 fw_offset, fw_ptp_cfg_offset; - u16 fw_version; - - DEBUGFUNC("ixgbe_verify_fw_version_82599"); - - /* firmware check is only necessary for SFI devices */ - if (hw->phy.media_type != ixgbe_media_type_fiber) { - status = IXGBE_SUCCESS; - goto fw_version_out; - } - - /* get the offset to the Firmware Module block */ - if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) { - ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, - "eeprom read at offset %d failed", IXGBE_FW_PTR); - return IXGBE_ERR_EEPROM_VERSION; - } - - if ((fw_offset == 0) || (fw_offset == 0xFFFF)) - goto fw_version_out; - - /* get the offset to the Pass Through Patch Configuration block */ - if (hw->eeprom.ops.read(hw, (fw_offset + - IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), - &fw_ptp_cfg_offset)) { - ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, - "eeprom read at offset %d failed", - fw_offset + - IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR); - return IXGBE_ERR_EEPROM_VERSION; - } - - if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) - goto fw_version_out; - - /* get the firmware version */ - if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + - IXGBE_FW_PATCH_VERSION_4), &fw_version)) { - ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, - "eeprom read at offset %d failed", - fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4); - return IXGBE_ERR_EEPROM_VERSION; - } - - if (fw_version > 0x5) - status = IXGBE_SUCCESS; - -fw_version_out: - return status; -} - -/** - * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. - * @hw: pointer to hardware structure - * - * Returns true if the LESM FW module is present and enabled. Otherwise - * returns false. Smart Speed must be disabled if LESM FW module is enabled. 
- **/ -bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) -{ - bool lesm_enabled = false; - u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; - s32 status; - - DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599"); - - /* get the offset to the Firmware Module block */ - status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); - - if ((status != IXGBE_SUCCESS) || - (fw_offset == 0) || (fw_offset == 0xFFFF)) - goto out; - - /* get the offset to the LESM Parameters block */ - status = hw->eeprom.ops.read(hw, (fw_offset + - IXGBE_FW_LESM_PARAMETERS_PTR), - &fw_lesm_param_offset); - - if ((status != IXGBE_SUCCESS) || - (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) - goto out; - - /* get the LESM state word */ - status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + - IXGBE_FW_LESM_STATE_1), - &fw_lesm_state); - - if ((status == IXGBE_SUCCESS) && - (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) - lesm_enabled = true; - -out: - return lesm_enabled; -} - -/** - * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using - * fastest available method - * - * @hw: pointer to hardware structure - * @offset: offset of word in EEPROM to read - * @words: number of words - * @data: word(s) read from the EEPROM - * - * Retrieves 16 bit word(s) read from EEPROM - **/ -STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data) -{ - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - s32 ret_val = IXGBE_ERR_CONFIG; - - DEBUGFUNC("ixgbe_read_eeprom_buffer_82599"); - - /* - * If EEPROM is detected and can be addressed using 14 bits, - * use EERD otherwise use bit bang - */ - if ((eeprom->type == ixgbe_eeprom_spi) && - (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)) - ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words, - data); - else - ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, - words, - data); - - return ret_val; -} - -/** - * ixgbe_read_eeprom_82599 - Read EEPROM word using - * fastest 
available method - * - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM - **/ -STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, - u16 offset, u16 *data) -{ - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - s32 ret_val = IXGBE_ERR_CONFIG; - - DEBUGFUNC("ixgbe_read_eeprom_82599"); - - /* - * If EEPROM is detected and can be addressed using 14 bits, - * use EERD otherwise use bit bang - */ - if ((eeprom->type == ixgbe_eeprom_spi) && - (offset <= IXGBE_EERD_MAX_ADDR)) - ret_val = ixgbe_read_eerd_generic(hw, offset, data); - else - ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); - - return ret_val; -} - -/** - * ixgbe_reset_pipeline_82599 - perform pipeline reset - * - * @hw: pointer to hardware structure - * - * Reset pipeline by asserting Restart_AN together with LMS change to ensure - * full pipeline reset. This function assumes the SW/FW lock is held. 
- **/ -s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) -{ - s32 ret_val; - u32 anlp1_reg = 0; - u32 i, autoc_reg, autoc2_reg; - - /* Enable link if disabled in NVM */ - autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); - if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) { - autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); - IXGBE_WRITE_FLUSH(hw); - } - - autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - autoc_reg |= IXGBE_AUTOC_AN_RESTART; - /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, - autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); - /* Wait for AN to leave state 0 */ - for (i = 0; i < 10; i++) { - msec_delay(4); - anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); - if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) - break; - } - - if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) { - DEBUGOUT("auto negotiation not completed\n"); - ret_val = IXGBE_ERR_RESET_FAILED; - goto reset_pipeline_out; - } - - ret_val = IXGBE_SUCCESS; - -reset_pipeline_out: - /* Write AUTOC register with original LMS field and Restart_AN */ - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); - IXGBE_WRITE_FLUSH(hw); - - return ret_val; -} - -/** - * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to read - * @data: value read - * - * Performs byte read operation to SFP module's EEPROM over I2C interface at - * a specified device address. - **/ -STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data) -{ - u32 esdp; - s32 status; - s32 timeout = 200; - - DEBUGFUNC("ixgbe_read_i2c_byte_82599"); - - if (hw->phy.qsfp_shared_i2c_bus == TRUE) { - /* Acquire I2C bus ownership. 
*/ - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - esdp |= IXGBE_ESDP_SDP0; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_FLUSH(hw); - - while (timeout) { - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - if (esdp & IXGBE_ESDP_SDP1) - break; - - msec_delay(5); - timeout--; - } - - if (!timeout) { - DEBUGOUT("Driver can't access resource," - " acquiring I2C bus timeout.\n"); - status = IXGBE_ERR_I2C; - goto release_i2c_access; - } - } - - status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data); - -release_i2c_access: - - if (hw->phy.qsfp_shared_i2c_bus == TRUE) { - /* Release I2C bus ownership. */ - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - esdp &= ~IXGBE_ESDP_SDP0; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_FLUSH(hw); - } - - return status; -} - -/** - * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to write - * @data: value to write - * - * Performs byte write operation to SFP module's EEPROM over I2C interface at - * a specified device address. - **/ -STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data) -{ - u32 esdp; - s32 status; - s32 timeout = 200; - - DEBUGFUNC("ixgbe_write_i2c_byte_82599"); - - if (hw->phy.qsfp_shared_i2c_bus == TRUE) { - /* Acquire I2C bus ownership. */ - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - esdp |= IXGBE_ESDP_SDP0; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_FLUSH(hw); - - while (timeout) { - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - if (esdp & IXGBE_ESDP_SDP1) - break; - - msec_delay(5); - timeout--; - } - - if (!timeout) { - DEBUGOUT("Driver can't access resource," - " acquiring I2C bus timeout.\n"); - status = IXGBE_ERR_I2C; - goto release_i2c_access; - } - } - - status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data); - -release_i2c_access: - - if (hw->phy.qsfp_shared_i2c_bus == TRUE) { - /* Release I2C bus ownership. 
*/ - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - esdp &= ~IXGBE_ESDP_SDP0; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_FLUSH(hw); - } - - return status; -} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.h deleted file mode 100644 index 7d928b87bdca..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_82599.h +++ /dev/null @@ -1,55 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_82599_H_ -#define _IXGBE_82599_H_ - -s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, bool *autoneg); -enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw); -void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); -void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); -void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); -void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, - ixgbe_link_speed speed); -s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, - bool autoneg_wait_to_complete); -s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw); -void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw); -s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw); -s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); -s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); -s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw); -s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw); -s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw); -u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); -s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval); -s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val); -s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 reg_val, bool locked); -#endif /* _IXGBE_82599_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.c deleted file mode 100644 
index 3251a7125723..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.c +++ /dev/null @@ -1,1624 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe_api.h" -#include "ixgbe_common.h" - -#define IXGBE_EMPTY_PARAM - -static const u32 ixgbe_mvals_base[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(IXGBE_EMPTY_PARAM) -}; - -static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(_X540) -}; - -static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(_X550) -}; - -static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(_X550EM_x) -}; - -static const u32 ixgbe_mvals_X550EM_a[IXGBE_MVALS_IDX_LIMIT] = { - IXGBE_MVALS_INIT(_X550EM_a) -}; - -/** - * ixgbe_dcb_get_rtrup2tc - read rtrup2tc reg - * @hw: pointer to hardware structure - * @map: pointer to u8 arr for returning map - * - * Read the rtrup2tc HW register and resolve its content into map - **/ -void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map) -{ - if (hw->mac.ops.get_rtrup2tc) - hw->mac.ops.get_rtrup2tc(hw, map); -} - -/** - * ixgbe_init_shared_code - Initialize the shared code - * @hw: pointer to hardware structure - * - * This will assign function pointers and assign the MAC type and PHY code. - * Does not touch the hardware. This function must be called prior to any - * other function in the shared code. The ixgbe_hw structure should be - * memset to 0 prior to calling this function. 
The following fields in - * hw structure should be filled in prior to calling this function: - * hw_addr, back, device_id, vendor_id, subsystem_device_id, - * subsystem_vendor_id, and revision_id - **/ -s32 ixgbe_init_shared_code(struct ixgbe_hw *hw) -{ - s32 status; - - DEBUGFUNC("ixgbe_init_shared_code"); - - /* - * Set the mac type - */ - ixgbe_set_mac_type(hw); - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - status = ixgbe_init_ops_82598(hw); - break; - case ixgbe_mac_82599EB: - status = ixgbe_init_ops_82599(hw); - break; - case ixgbe_mac_X540: - status = ixgbe_init_ops_X540(hw); - break; - case ixgbe_mac_X550: - status = ixgbe_init_ops_X550(hw); - break; - case ixgbe_mac_X550EM_x: - status = ixgbe_init_ops_X550EM_x(hw); - break; - case ixgbe_mac_X550EM_a: - status = ixgbe_init_ops_X550EM_a(hw); - break; - default: - status = IXGBE_ERR_DEVICE_NOT_SUPPORTED; - break; - } - hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME; - - return status; -} - -/** - * ixgbe_set_mac_type - Sets MAC type - * @hw: pointer to the HW structure - * - * This function sets the mac type of the adapter based on the - * vendor ID and device ID stored in the hw structure. 
- **/ -s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_set_mac_type\n"); - - if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID) { - ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, - "Unsupported vendor id: %x", hw->vendor_id); - return IXGBE_ERR_DEVICE_NOT_SUPPORTED; - } - - hw->mvals = ixgbe_mvals_base; - - switch (hw->device_id) { - case IXGBE_DEV_ID_82598: - case IXGBE_DEV_ID_82598_BX: - case IXGBE_DEV_ID_82598AF_SINGLE_PORT: - case IXGBE_DEV_ID_82598AF_DUAL_PORT: - case IXGBE_DEV_ID_82598AT: - case IXGBE_DEV_ID_82598AT2: - case IXGBE_DEV_ID_82598EB_CX4: - case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: - case IXGBE_DEV_ID_82598_DA_DUAL_PORT: - case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: - case IXGBE_DEV_ID_82598EB_XF_LR: - case IXGBE_DEV_ID_82598EB_SFP_LOM: - hw->mac.type = ixgbe_mac_82598EB; - break; - case IXGBE_DEV_ID_82599_KX4: - case IXGBE_DEV_ID_82599_KX4_MEZZ: - case IXGBE_DEV_ID_82599_XAUI_LOM: - case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: - case IXGBE_DEV_ID_82599_KR: - case IXGBE_DEV_ID_82599_SFP: - case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: - case IXGBE_DEV_ID_82599_SFP_FCOE: - case IXGBE_DEV_ID_82599_SFP_EM: - case IXGBE_DEV_ID_82599_SFP_SF2: - case IXGBE_DEV_ID_82599_SFP_SF_QP: - case IXGBE_DEV_ID_82599_QSFP_SF_QP: - case IXGBE_DEV_ID_82599EN_SFP: - case IXGBE_DEV_ID_82599_CX4: - case IXGBE_DEV_ID_82599_LS: - case IXGBE_DEV_ID_82599_T3_LOM: - hw->mac.type = ixgbe_mac_82599EB; - break; - case IXGBE_DEV_ID_X540T: - case IXGBE_DEV_ID_X540T1: - hw->mac.type = ixgbe_mac_X540; - hw->mvals = ixgbe_mvals_X540; - break; - case IXGBE_DEV_ID_X550T: - case IXGBE_DEV_ID_X550T1: - hw->mac.type = ixgbe_mac_X550; - hw->mvals = ixgbe_mvals_X550; - break; - case IXGBE_DEV_ID_X550EM_X_KX4: - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_X_10G_T: - case IXGBE_DEV_ID_X550EM_X_1G_T: - case IXGBE_DEV_ID_X550EM_X_SFP: - case IXGBE_DEV_ID_X550EM_X_XFI: - hw->mac.type = ixgbe_mac_X550EM_x; - hw->mvals = ixgbe_mvals_X550EM_x; - break; - case 
IXGBE_DEV_ID_X550EM_A_KR: - case IXGBE_DEV_ID_X550EM_A_KR_L: - case IXGBE_DEV_ID_X550EM_A_SFP_N: - case IXGBE_DEV_ID_X550EM_A_SGMII: - case IXGBE_DEV_ID_X550EM_A_SGMII_L: - case IXGBE_DEV_ID_X550EM_A_1G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T_L: - case IXGBE_DEV_ID_X550EM_A_10G_T: - case IXGBE_DEV_ID_X550EM_A_QSFP: - case IXGBE_DEV_ID_X550EM_A_QSFP_N: - case IXGBE_DEV_ID_X550EM_A_SFP: - hw->mac.type = ixgbe_mac_X550EM_a; - hw->mvals = ixgbe_mvals_X550EM_a; - break; - default: - ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED; - ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, - "Unsupported device id: %x", - hw->device_id); - break; - } - - DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n", - hw->mac.type, ret_val); - return ret_val; -} - -/** - * ixgbe_init_hw - Initialize the hardware - * @hw: pointer to hardware structure - * - * Initialize the hardware by resetting and then starting the hardware - **/ -s32 ixgbe_init_hw(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_reset_hw - Performs a hardware reset - * @hw: pointer to hardware structure - * - * Resets the hardware by resetting the transmit and receive units, masks and - * clears all interrupts, performs a PHY reset, and performs a MAC reset - **/ -s32 ixgbe_reset_hw(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_start_hw - Prepares hardware for Rx/Tx - * @hw: pointer to hardware structure - * - * Starts the hardware by filling the bus info structure and media type, - * clears all on chip counters, initializes receive address registers, - * multicast table, VLAN filter table, calls routine to setup link and - * flow control settings, and leaves transmit and receive units disabled - * and uninitialized. 
- **/ -s32 ixgbe_start_hw(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_clear_hw_cntrs - Clear hardware counters - * @hw: pointer to hardware structure - * - * Clears all hardware statistics counters by reading them from the hardware - * Statistics counters are clear on read. - **/ -s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_get_media_type - Get media type - * @hw: pointer to hardware structure - * - * Returns the media type (fiber, copper, backplane) - **/ -enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw), - ixgbe_media_type_unknown); -} - -/** - * ixgbe_get_mac_addr - Get MAC address - * @hw: pointer to hardware structure - * @mac_addr: Adapter MAC address - * - * Reads the adapter's MAC address from the first Receive Address Register - * (RAR0) A reset of the adapter must have been performed prior to calling - * this function in order for the MAC address to have been loaded from the - * EEPROM into RAR0 - **/ -s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) -{ - return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr, - (hw, mac_addr), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_get_san_mac_addr - Get SAN MAC address - * @hw: pointer to hardware structure - * @san_mac_addr: SAN MAC address - * - * Reads the SAN MAC address from the EEPROM, if it's available. This is - * per-port, so set_lan_id() must be called before reading the addresses. 
- **/ -s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) -{ - return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr, - (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_set_san_mac_addr - Write a SAN MAC address - * @hw: pointer to hardware structure - * @san_mac_addr: SAN MAC address - * - * Writes A SAN MAC address to the EEPROM. - **/ -s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) -{ - return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr, - (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_get_device_caps - Get additional device capabilities - * @hw: pointer to hardware structure - * @device_caps: the EEPROM word for device capabilities - * - * Reads the extra device capabilities from the EEPROM - **/ -s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps) -{ - return ixgbe_call_func(hw, hw->mac.ops.get_device_caps, - (hw, device_caps), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM - * @hw: pointer to hardware structure - * @wwnn_prefix: the alternative WWNN prefix - * @wwpn_prefix: the alternative WWPN prefix - * - * This function will read the EEPROM from the alternative SAN MAC address - * block to check the support for the alternative WWNN/WWPN prefix support. 
- **/ -s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, - u16 *wwpn_prefix) -{ - return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix, - (hw, wwnn_prefix, wwpn_prefix), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_get_fcoe_boot_status - Get FCOE boot status from EEPROM - * @hw: pointer to hardware structure - * @bs: the fcoe boot status - * - * This function will read the FCOE boot status from the iSCSI FCOE block - **/ -s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs) -{ - return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status, - (hw, bs), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_get_bus_info - Set PCI bus info - * @hw: pointer to hardware structure - * - * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure - **/ -s32 ixgbe_get_bus_info(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_get_num_of_tx_queues - Get Tx queues - * @hw: pointer to hardware structure - * - * Returns the number of transmit queues for the given adapter. - **/ -u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw) -{ - return hw->mac.max_tx_queues; -} - -/** - * ixgbe_get_num_of_rx_queues - Get Rx queues - * @hw: pointer to hardware structure - * - * Returns the number of receive queues for the given adapter. - **/ -u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw) -{ - return hw->mac.max_rx_queues; -} - -/** - * ixgbe_stop_adapter - Disable Rx/Tx units - * @hw: pointer to hardware structure - * - * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, - * disables transmit and receive units. The adapter_stopped flag is used by - * the shared code and drivers to determine if the adapter is in a stopped - * state and should not touch the hardware. 
- **/ -s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_read_pba_string - Reads part number string from EEPROM - * @hw: pointer to hardware structure - * @pba_num: stores the part number string from the EEPROM - * @pba_num_size: part number string buffer length - * - * Reads the part number string from the EEPROM. - **/ -s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size) -{ - return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size); -} - -/** - * ixgbe_identify_phy - Get PHY type - * @hw: pointer to hardware structure - * - * Determines the physical layer module found on the current adapter. - **/ -s32 ixgbe_identify_phy(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_SUCCESS; - - if (hw->phy.type == ixgbe_phy_unknown) { - status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw), - IXGBE_NOT_IMPLEMENTED); - } - - return status; -} - -/** - * ixgbe_reset_phy - Perform a PHY reset - * @hw: pointer to hardware structure - **/ -s32 ixgbe_reset_phy(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_SUCCESS; - - if (hw->phy.type == ixgbe_phy_unknown) { - if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS) - status = IXGBE_ERR_PHY; - } - - if (status == IXGBE_SUCCESS) { - status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw), - IXGBE_NOT_IMPLEMENTED); - } - return status; -} - -/** - * ixgbe_get_phy_firmware_version - - * @hw: pointer to hardware structure - * @firmware_version: pointer to firmware version - **/ -s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version) -{ - s32 status = IXGBE_SUCCESS; - - status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version, - (hw, firmware_version), - IXGBE_NOT_IMPLEMENTED); - return status; -} - -/** - * ixgbe_read_phy_reg - Read PHY register - * @hw: pointer to hardware structure - * @reg_addr: 32 bit address of PHY register to read - * @phy_data: Pointer to read data from PHY register 
- * - * Reads a value from a specified PHY register - **/ -s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, - u16 *phy_data) -{ - if (hw->phy.id == 0) - ixgbe_identify_phy(hw); - - return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr, - device_type, phy_data), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_write_phy_reg - Write PHY register - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @phy_data: Data to write to the PHY register - * - * Writes a value to specified PHY register - **/ -s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, - u16 phy_data) -{ - if (hw->phy.id == 0) - ixgbe_identify_phy(hw); - - return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr, - device_type, phy_data), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_setup_phy_link - Restart PHY autoneg - * @hw: pointer to hardware structure - * - * Restart autonegotiation and PHY and waits for completion. - **/ -s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_setup_internal_phy - Configure integrated PHY - * @hw: pointer to hardware structure - * - * Reconfigure the integrated PHY in order to enable talk to the external PHY. - * Returns success if not implemented, since nothing needs to be done in this - * case. - */ -s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->phy.ops.setup_internal_link, (hw), - IXGBE_SUCCESS); -} - -/** - * ixgbe_check_phy_link - Determine link and speed status - * @hw: pointer to hardware structure - * - * Reads a PHY register to determine if link is up and the current speed for - * the PHY. 
- **/ -s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up) -{ - return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed, - link_up), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_setup_phy_link_speed - Set auto advertise - * @hw: pointer to hardware structure - * @speed: new link speed - * - * Sets the auto advertised capabilities - **/ -s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg_wait_to_complete) -{ - return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed, - autoneg_wait_to_complete), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_set_phy_power - Control the phy power state - * @hw: pointer to hardware structure - * @on: true for on, false for off - */ -s32 ixgbe_set_phy_power(struct ixgbe_hw *hw, bool on) -{ - return ixgbe_call_func(hw, hw->phy.ops.set_phy_power, (hw, on), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_check_link - Get link and speed status - * @hw: pointer to hardware structure - * - * Reads the links register to determine if link is up and the current speed - **/ -s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up, bool link_up_wait_to_complete) -{ - return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed, - link_up, link_up_wait_to_complete), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_disable_tx_laser - Disable Tx laser - * @hw: pointer to hardware structure - * - * If the driver needs to disable the laser on SFI optics. - **/ -void ixgbe_disable_tx_laser(struct ixgbe_hw *hw) -{ - if (hw->mac.ops.disable_tx_laser) - hw->mac.ops.disable_tx_laser(hw); -} - -/** - * ixgbe_enable_tx_laser - Enable Tx laser - * @hw: pointer to hardware structure - * - * If the driver needs to enable the laser on SFI optics. 
- **/ -void ixgbe_enable_tx_laser(struct ixgbe_hw *hw) -{ - if (hw->mac.ops.enable_tx_laser) - hw->mac.ops.enable_tx_laser(hw); -} - -/** - * ixgbe_flap_tx_laser - flap Tx laser to start autotry process - * @hw: pointer to hardware structure - * - * When the driver changes the link speeds that it can support then - * flap the tx laser to alert the link partner to start autotry - * process on its end. - **/ -void ixgbe_flap_tx_laser(struct ixgbe_hw *hw) -{ - if (hw->mac.ops.flap_tx_laser) - hw->mac.ops.flap_tx_laser(hw); -} - -/** - * ixgbe_setup_link - Set link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * - * Configures link settings. Restarts the link. - * Performs autonegotiation if needed. - **/ -s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg_wait_to_complete) -{ - return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed, - autoneg_wait_to_complete), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_setup_mac_link - Set link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * - * Configures link settings. Restarts the link. - * Performs autonegotiation if needed. - **/ -s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg_wait_to_complete) -{ - return ixgbe_call_func(hw, hw->mac.ops.setup_mac_link, (hw, speed, - autoneg_wait_to_complete), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_get_link_capabilities - Returns link capabilities - * @hw: pointer to hardware structure - * - * Determines the link capabilities of the current configuration. - **/ -s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *autoneg) -{ - return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw, - speed, autoneg), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_led_on - Turn on LEDs - * @hw: pointer to hardware structure - * @index: led number to turn on - * - * Turns on the software controllable LEDs. 
- **/ -s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) -{ - return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_led_off - Turn off LEDs - * @hw: pointer to hardware structure - * @index: led number to turn off - * - * Turns off the software controllable LEDs. - **/ -s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) -{ - return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_blink_led_start - Blink LEDs - * @hw: pointer to hardware structure - * @index: led number to blink - * - * Blink LED based on index. - **/ -s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index) -{ - return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_blink_led_stop - Stop blinking LEDs - * @hw: pointer to hardware structure - * - * Stop blinking LED based on index. - **/ -s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index) -{ - return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_init_eeprom_params - Initialize EEPROM parameters - * @hw: pointer to hardware structure - * - * Initializes the EEPROM parameters ixgbe_eeprom_info within the - * ixgbe_hw struct in order to set up EEPROM access. - **/ -s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw), - IXGBE_NOT_IMPLEMENTED); -} - - -/** - * ixgbe_write_eeprom - Write word to EEPROM - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be written to - * @data: 16 bit word to be written to the EEPROM - * - * Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not - * called after this function, the EEPROM will most likely contain an - * invalid checksum. 
- **/ -s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data) -{ - return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_write_eeprom_buffer - Write word(s) to EEPROM - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be written to - * @data: 16 bit word(s) to be written to the EEPROM - * @words: number of words - * - * Writes 16 bit word(s) to EEPROM. If ixgbe_eeprom_update_checksum is not - * called after this function, the EEPROM will most likely contain an - * invalid checksum. - **/ -s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words, - u16 *data) -{ - return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer, - (hw, offset, words, data), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_read_eeprom - Read word from EEPROM - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be read - * @data: read 16 bit value from EEPROM - * - * Reads 16 bit value from EEPROM - **/ -s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) -{ - return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_read_eeprom_buffer - Read word(s) from EEPROM - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be read - * @data: read 16 bit word(s) from EEPROM - * @words: number of words - * - * Reads 16 bit word(s) from EEPROM - **/ -s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data) -{ - return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer, - (hw, offset, words, data), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum - * @hw: pointer to hardware structure - * @checksum_val: calculated checksum - * - * Performs checksum calculation and validates the EEPROM checksum - **/ -s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) -{ - return 
ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum, - (hw, checksum_val), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_eeprom_update_checksum - Updates the EEPROM checksum - * @hw: pointer to hardware structure - **/ -s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_insert_mac_addr - Find a RAR for this mac address - * @hw: pointer to hardware structure - * @addr: Address to put into receive address register - * @vmdq: VMDq pool to assign - * - * Puts an ethernet address into a receive address register, or - * finds the rar that it is aleady in; adds to the pool list - **/ -s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) -{ - return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr, - (hw, addr, vmdq), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_set_rar - Set Rx address register - * @hw: pointer to hardware structure - * @index: Receive address register to write - * @addr: Address to put into receive address register - * @vmdq: VMDq "set" - * @enable_addr: set flag that address is active - * - * Puts an ethernet address into a receive address register. - **/ -s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, - u32 enable_addr) -{ - return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq, - enable_addr), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_clear_rar - Clear Rx address register - * @hw: pointer to hardware structure - * @index: Receive address register to write - * - * Puts an ethernet address into a receive address register. 
- **/ -s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index) -{ - return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_set_vmdq - Associate a VMDq index with a receive address - * @hw: pointer to hardware structure - * @rar: receive address register index to associate with VMDq index - * @vmdq: VMDq set or pool index - **/ -s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) -{ - return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq), - IXGBE_NOT_IMPLEMENTED); - -} - -/** - * ixgbe_set_vmdq_san_mac - Associate VMDq index 127 with a receive address - * @hw: pointer to hardware structure - * @vmdq: VMDq default pool index - **/ -s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq) -{ - return ixgbe_call_func(hw, hw->mac.ops.set_vmdq_san_mac, - (hw, vmdq), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address - * @hw: pointer to hardware structure - * @rar: receive address register index to disassociate with VMDq index - * @vmdq: VMDq set or pool index - **/ -s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) -{ - return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_init_rx_addrs - Initializes receive address filters. - * @hw: pointer to hardware structure - * - * Places the MAC address in receive address register 0 and clears the rest - * of the receive address registers. Clears the multicast table. Assumes - * the receiver is in reset when the routine is called. - **/ -s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_get_num_rx_addrs - Returns the number of RAR entries. 
- * @hw: pointer to hardware structure - **/ -u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw) -{ - return hw->mac.num_rar_entries; -} - -/** - * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses - * @hw: pointer to hardware structure - * @addr_list: the list of new multicast addresses - * @addr_count: number of addresses - * @func: iterator function to walk the multicast address list - * - * The given list replaces any existing list. Clears the secondary addrs from - * receive address registers. Uses unused receive address registers for the - * first secondary addresses, and falls back to promiscuous mode as needed. - **/ -s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, - u32 addr_count, ixgbe_mc_addr_itr func) -{ - return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw, - addr_list, addr_count, func), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses - * @hw: pointer to hardware structure - * @mc_addr_list: the list of new multicast addresses - * @mc_addr_count: number of addresses - * @func: iterator function to walk the multicast address list - * - * The given list replaces any existing list. Clears the MC addrs from receive - * address registers and the multicast table. Uses unused receive address - * registers for the first multicast addresses, and hashes the rest into the - * multicast table. - **/ -s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, ixgbe_mc_addr_itr func, - bool clear) -{ - return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw, - mc_addr_list, mc_addr_count, func, clear), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_enable_mc - Enable multicast address in RAR - * @hw: pointer to hardware structure - * - * Enables multicast address in RAR and the use of the multicast hash table. 
- **/ -s32 ixgbe_enable_mc(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_disable_mc - Disable multicast address in RAR - * @hw: pointer to hardware structure - * - * Disables multicast address in RAR and the use of the multicast hash table. - **/ -s32 ixgbe_disable_mc(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_clear_vfta - Clear VLAN filter table - * @hw: pointer to hardware structure - * - * Clears the VLAN filer table, and the VMDq index associated with the filter - **/ -s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_set_vfta - Set VLAN filter table - * @hw: pointer to hardware structure - * @vlan: VLAN id to write to VLAN filter - * @vind: VMDq output index that maps queue to VLAN id in VLVFB - * @vlan_on: boolean flag to turn on/off VLAN - * @vlvf_bypass: boolean flag indicating updating the default pool is okay - * - * Turn on/off specified VLAN in the VLAN filter table. - **/ -s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, - bool vlvf_bypass) -{ - return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind, - vlan_on, vlvf_bypass), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_set_vlvf - Set VLAN Pool Filter - * @hw: pointer to hardware structure - * @vlan: VLAN id to write to VLAN filter - * @vind: VMDq output index that maps queue to VLAN id in VLVFB - * @vlan_on: boolean flag to turn on/off VLAN in VLVF - * @vfta_delta: pointer to the difference between the current value of VFTA - * and the desired value - * @vfta: the desired value of the VFTA - * @vlvf_bypass: boolean flag indicating updating the default pool is okay - * - * Turn on/off specified bit in VLVF table. 
- **/ -s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, - u32 *vfta_delta, u32 vfta, bool vlvf_bypass) -{ - return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind, - vlan_on, vfta_delta, vfta, vlvf_bypass), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_fc_enable - Enable flow control - * @hw: pointer to hardware structure - * - * Configures the flow control settings based on SW configuration. - **/ -s32 ixgbe_fc_enable(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_setup_fc - Set up flow control - * @hw: pointer to hardware structure - * - * Called at init time to set up flow control. - **/ -s32 ixgbe_setup_fc(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_set_fw_drv_ver - Try to send the driver version number FW - * @hw: pointer to hardware structure - * @maj: driver major number to be sent to firmware - * @min: driver minor number to be sent to firmware - * @build: driver build number to be sent to firmware - * @ver: driver version number to be sent to firmware - * @len: length of driver_ver string - * @driver_ver: driver string - **/ -s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, - u8 ver, u16 len, char *driver_ver) -{ - return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min, - build, ver, len, driver_ver), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data - * @hw: pointer to hardware structure - * - * Updates the temperatures in mac.thermal_sensor_data - **/ -s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.get_thermal_sensor_data, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds - * @hw: pointer to hardware structure - * - * Inits the thermal sensor thresholds 
according to the NVM map - **/ -s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.init_thermal_sensor_thresh, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_dmac_config - Configure DMA Coalescing registers. - * @hw: pointer to hardware structure - * - * Configure DMA coalescing. If enabling dmac, dmac is activated. - * When disabling dmac, dmac enable dmac bit is cleared. - **/ -s32 ixgbe_dmac_config(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.dmac_config, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_dmac_update_tcs - Configure DMA Coalescing registers. - * @hw: pointer to hardware structure - * - * Disables dmac, updates per TC settings, and then enable dmac. - **/ -s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.dmac_update_tcs, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_dmac_config_tcs - Configure DMA Coalescing registers. - * @hw: pointer to hardware structure - * - * Configure DMA coalescing threshold per TC and set high priority bit for - * FCOE TC. The dmac enable bit must be cleared before configuring. - **/ -s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.dmac_config_tcs, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_setup_eee - Enable/disable EEE support - * @hw: pointer to the HW structure - * @enable_eee: boolean flag to enable EEE - * - * Enable/disable EEE based on enable_ee flag. - * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C - * are modified. 
- * - **/ -s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee) -{ - return ixgbe_call_func(hw, hw->mac.ops.setup_eee, (hw, enable_eee), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_set_source_address_pruning - Enable/Disable source address pruning - * @hw: pointer to hardware structure - * @enbale: enable or disable source address pruning - * @pool: Rx pool - Rx pool to toggle source address pruning - **/ -void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable, - unsigned int pool) -{ - if (hw->mac.ops.set_source_address_pruning) - hw->mac.ops.set_source_address_pruning(hw, enable, pool); -} - -/** - * ixgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing - * @hw: pointer to hardware structure - * @enable: enable or disable switch for Ethertype anti-spoofing - * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing - * - **/ -void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) -{ - if (hw->mac.ops.set_ethertype_anti_spoofing) - hw->mac.ops.set_ethertype_anti_spoofing(hw, enable, vf); -} - -/** - * ixgbe_read_iosf_sb_reg - Read 32 bit PHY register - * @hw: pointer to hardware structure - * @reg_addr: 32 bit address of PHY register to read - * @device_type: type of device you want to communicate with - * @phy_data: Pointer to read data from PHY register - * - * Reads a value from a specified PHY register - **/ -s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u32 *phy_data) -{ - return ixgbe_call_func(hw, hw->mac.ops.read_iosf_sb_reg, (hw, reg_addr, - device_type, phy_data), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_write_iosf_sb_reg - Write 32 bit register through IOSF Sideband - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: type of device you want to communicate with - * @phy_data: Data to write to the PHY register - * - * Writes a value to specified PHY register - **/ -s32 
ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u32 phy_data) -{ - return ixgbe_call_func(hw, hw->mac.ops.write_iosf_sb_reg, (hw, reg_addr, - device_type, phy_data), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_disable_mdd - Disable malicious driver detection - * @hw: pointer to hardware structure - * - **/ -void ixgbe_disable_mdd(struct ixgbe_hw *hw) -{ - if (hw->mac.ops.disable_mdd) - hw->mac.ops.disable_mdd(hw); -} - -/** - * ixgbe_enable_mdd - Enable malicious driver detection - * @hw: pointer to hardware structure - * - **/ -void ixgbe_enable_mdd(struct ixgbe_hw *hw) -{ - if (hw->mac.ops.enable_mdd) - hw->mac.ops.enable_mdd(hw); -} - -/** - * ixgbe_mdd_event - Handle malicious driver detection event - * @hw: pointer to hardware structure - * @vf_bitmap: vf bitmap of malicious vfs - * - **/ -void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap) -{ - if (hw->mac.ops.mdd_event) - hw->mac.ops.mdd_event(hw, vf_bitmap); -} - -/** - * ixgbe_restore_mdd_vf - Restore VF that was disabled during malicious driver - * detection event - * @hw: pointer to hardware structure - * @vf: vf index - * - **/ -void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf) -{ - if (hw->mac.ops.restore_mdd_vf) - hw->mac.ops.restore_mdd_vf(hw, vf); -} - -/** - * ixgbe_enter_lplu - Transition to low power states - * @hw: pointer to hardware structure - * - * Configures Low Power Link Up on transition to low power states - * (from D0 to non-D0). - **/ -s32 ixgbe_enter_lplu(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->phy.ops.enter_lplu, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_handle_lasi - Handle external Base T PHY interrupt - * @hw: pointer to hardware structure - * - * Handle external Base T PHY interrupt. 
If high temperature - * failure alarm then return error, else if link status change - * then setup internal/external PHY link - * - * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature - * failure alarm, else return PHY access status. - */ -s32 ixgbe_handle_lasi(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->phy.ops.handle_lasi, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_read_analog_reg8 - Reads 8 bit analog register - * @hw: pointer to hardware structure - * @reg: analog register to read - * @val: read value - * - * Performs write operation to analog register specified. - **/ -s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val) -{ - return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg, - val), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_write_analog_reg8 - Writes 8 bit analog register - * @hw: pointer to hardware structure - * @reg: analog register to write - * @val: value to write - * - * Performs write operation to Atlas analog register specified. - **/ -s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val) -{ - return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg, - val), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_init_uta_tables - Initializes Unicast Table Arrays. - * @hw: pointer to hardware structure - * - * Initializes the Unicast Table Arrays to zero on device load. This - * is part of the Rx init addr execution path. - **/ -s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address - * @hw: pointer to hardware structure - * @byte_offset: byte offset to read - * @dev_addr: I2C bus address to read from - * @data: value read - * - * Performs byte read operation to SFP module's EEPROM over I2C interface. 
- **/ -s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, - u8 *data) -{ - return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset, - dev_addr, data), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_read_i2c_byte_unlocked - Reads 8 bit word via I2C from device address - * @hw: pointer to hardware structure - * @byte_offset: byte offset to read - * @dev_addr: I2C bus address to read from - * @data: value read - * - * Performs byte read operation to SFP module's EEPROM over I2C interface. - **/ -s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data) -{ - return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte_unlocked, - (hw, byte_offset, dev_addr, data), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_read_link - Perform read operation on link device - * @hw: pointer to the hardware structure - * @addr: bus address to read from - * @reg: device register to read from - * @val: pointer to location to receive read value - * - * Returns an error code on error. - */ -s32 ixgbe_read_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) -{ - return ixgbe_call_func(hw, hw->link.ops.read_link, (hw, addr, - reg, val), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_read_link_unlocked - Perform read operation on link device - * @hw: pointer to the hardware structure - * @addr: bus address to read from - * @reg: device register to read from - * @val: pointer to location to receive read value - * - * Returns an error code on error. 
- **/ -s32 ixgbe_read_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) -{ - return ixgbe_call_func(hw, hw->link.ops.read_link_unlocked, - (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_write_i2c_byte - Writes 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to write - * @dev_addr: I2C bus address to write to - * @data: value to write - * - * Performs byte write operation to SFP module's EEPROM over I2C interface - * at a specified device address. - **/ -s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, - u8 data) -{ - return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset, - dev_addr, data), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_write_i2c_byte_unlocked - Writes 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to write - * @dev_addr: I2C bus address to write to - * @data: value to write - * - * Performs byte write operation to SFP module's EEPROM over I2C interface - * at a specified device address. - **/ -s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data) -{ - return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte_unlocked, - (hw, byte_offset, dev_addr, data), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_write_link - Perform write operation on link device - * @hw: pointer to the hardware structure - * @addr: bus address to write to - * @reg: device register to write to - * @val: value to write - * - * Returns an error code on error. 
- */ -s32 ixgbe_write_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) -{ - return ixgbe_call_func(hw, hw->link.ops.write_link, - (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_write_link_unlocked - Perform write operation on link device - * @hw: pointer to the hardware structure - * @addr: bus address to write to - * @reg: device register to write to - * @val: value to write - * - * Returns an error code on error. - **/ -s32 ixgbe_write_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) -{ - return ixgbe_call_func(hw, hw->link.ops.write_link_unlocked, - (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface - * @hw: pointer to hardware structure - * @byte_offset: EEPROM byte offset to write - * @eeprom_data: value to write - * - * Performs byte write operation to SFP module's EEPROM over I2C interface. - **/ -s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, - u8 byte_offset, u8 eeprom_data) -{ - return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom, - (hw, byte_offset, eeprom_data), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface - * @hw: pointer to hardware structure - * @byte_offset: EEPROM byte offset to read - * @eeprom_data: value read - * - * Performs byte read operation to SFP module's EEPROM over I2C interface. - **/ -s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) -{ - return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom, - (hw, byte_offset, eeprom_data), - IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_get_supported_physical_layer - Returns physical layer type - * @hw: pointer to hardware structure - * - * Determines physical layer capabilities of the current configuration. 
- **/ -u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer, - (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN); -} - -/** - * ixgbe_enable_rx_dma - Enables Rx DMA unit, dependent on device specifics - * @hw: pointer to hardware structure - * @regval: bitfield to write to the Rx DMA register - * - * Enables the Rx DMA unit of the device. - **/ -s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval) -{ - return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma, - (hw, regval), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_disable_sec_rx_path - Stops the receive data path - * @hw: pointer to hardware structure - * - * Stops the receive data path. - **/ -s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path, - (hw), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_enable_sec_rx_path - Enables the receive data path - * @hw: pointer to hardware structure - * - * Enables the receive data path. 
- **/ -s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw) -{ - return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path, - (hw), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to acquire - * - * Acquires the SWFW semaphore through SW_FW_SYNC register for the specified - * function (CSR, PHY0, PHY1, EEPROM, Flash) - **/ -s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) -{ - return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync, - (hw, mask), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_release_swfw_semaphore - Release SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to release - * - * Releases the SWFW semaphore through SW_FW_SYNC register for the specified - * function (CSR, PHY0, PHY1, EEPROM, Flash) - **/ -void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) -{ - if (hw->mac.ops.release_swfw_sync) - hw->mac.ops.release_swfw_sync(hw, mask); -} - -/** - * ixgbe_init_swfw_semaphore - Clean up SWFW semaphore - * @hw: pointer to hardware structure - * - * Attempts to acquire the SWFW semaphore through SW_FW_SYNC register. - * Regardless of whether is succeeds or not it then release the semaphore. - * This is function is called to recover from catastrophic failures that - * may have left the semaphore locked. - **/ -void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw) -{ - if (hw->mac.ops.init_swfw_sync) - hw->mac.ops.init_swfw_sync(hw); -} - -void ixgbe_disable_rx(struct ixgbe_hw *hw) -{ - if (hw->mac.ops.disable_rx) - hw->mac.ops.disable_rx(hw); -} - -void ixgbe_enable_rx(struct ixgbe_hw *hw) -{ - if (hw->mac.ops.enable_rx) - hw->mac.ops.enable_rx(hw); -} - -/** - * ixgbe_set_rate_select_speed - Set module link speed - * @hw: pointer to hardware structure - * @speed: link speed to set - * - * Set module link speed via the rate select. 
- */ -void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) -{ - if (hw->mac.ops.set_rate_select_speed) - hw->mac.ops.set_rate_select_speed(hw, speed); -} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.h deleted file mode 100644 index 8016a49f2974..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_api.h +++ /dev/null @@ -1,213 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_API_H_ -#define _IXGBE_API_H_ - -#include "ixgbe_type.h" - -void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map); - -s32 ixgbe_init_shared_code(struct ixgbe_hw *hw); - -extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw); -extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw); -extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw); -extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw); -extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw); -extern s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw); -extern s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw); - -s32 ixgbe_set_mac_type(struct ixgbe_hw *hw); -s32 ixgbe_init_hw(struct ixgbe_hw *hw); -s32 ixgbe_reset_hw(struct ixgbe_hw *hw); -s32 ixgbe_start_hw(struct ixgbe_hw *hw); -s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw); -enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw); -s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr); -s32 ixgbe_get_bus_info(struct ixgbe_hw *hw); -u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw); -u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw); -s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); -s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size); - -s32 ixgbe_identify_phy(struct ixgbe_hw *hw); -s32 ixgbe_reset_phy(struct ixgbe_hw *hw); -s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, - u16 *phy_data); -s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, - u16 phy_data); - -s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); -s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw); -s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up); -s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -s32 ixgbe_set_phy_power(struct ixgbe_hw *, bool on); -void 
ixgbe_disable_tx_laser(struct ixgbe_hw *hw); -void ixgbe_enable_tx_laser(struct ixgbe_hw *hw); -void ixgbe_flap_tx_laser(struct ixgbe_hw *hw); -s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up, bool link_up_wait_to_complete); -s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *autoneg); -s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index); - -s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw); -s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data); -s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data); -s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data); -s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data); - -s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val); -s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw); - -s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); -s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, - u32 enable_addr); -s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); -s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq); -s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); -s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw); -u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw); -s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, - u32 addr_count, ixgbe_mc_addr_itr func); -s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, 
ixgbe_mc_addr_itr func, - bool clear); -void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq); -s32 ixgbe_enable_mc(struct ixgbe_hw *hw); -s32 ixgbe_disable_mc(struct ixgbe_hw *hw); -s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); -s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, - u32 vind, bool vlan_on, bool vlvf_bypass); -s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on, u32 *vfta_delta, u32 vfta, - bool vlvf_bypass); -s32 ixgbe_fc_enable(struct ixgbe_hw *hw); -s32 ixgbe_setup_fc(struct ixgbe_hw *hw); -s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, - u8 ver, u16 len, char *driver_ver); -s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw); -s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw); -void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr); -s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, - u16 *firmware_version); -s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); -s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); -s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw); -s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); -u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw); -s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval); -s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw); -s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw); -s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw); -s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); -s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); -s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl, - bool cloud_mode); -void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_hash_dword input, - union ixgbe_atr_hash_dword common, - u8 queue); -s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input_mask, bool cloud_mode); -s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw 
*hw, - union ixgbe_atr_input *input, - u16 soft_id, u8 queue, bool cloud_mode); -s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input, - u16 soft_id); -s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input, - union ixgbe_atr_input *mask, - u16 soft_id, - u8 queue, - bool cloud_mode); -void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, - union ixgbe_atr_input *mask); -u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, - union ixgbe_atr_hash_dword common); -bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); -s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, - u8 *data); -s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data); -s32 ixgbe_read_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val); -s32 ixgbe_read_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val); -s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, - u8 data); -void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue); -s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data); -s32 ixgbe_write_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val); -s32 ixgbe_write_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val); -s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); -s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); -s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); -s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps); -s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask); -void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask); -void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw); -s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, - u16 *wwpn_prefix); -s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, 
u16 *bs); -s32 ixgbe_dmac_config(struct ixgbe_hw *hw); -s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw); -s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw); -s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee); -void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable, - unsigned int vf); -void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, - int vf); -s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u32 *phy_data); -s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u32 phy_data); -void ixgbe_disable_mdd(struct ixgbe_hw *hw); -void ixgbe_enable_mdd(struct ixgbe_hw *hw); -void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap); -void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf); -s32 ixgbe_enter_lplu(struct ixgbe_hw *hw); -s32 ixgbe_handle_lasi(struct ixgbe_hw *hw); -void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed); -void ixgbe_disable_rx(struct ixgbe_hw *hw); -void ixgbe_enable_rx(struct ixgbe_hw *hw); -s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, - u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); - -#endif /* _IXGBE_API_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.c deleted file mode 100644 index 5f516296ac8e..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.c +++ /dev/null @@ -1,168 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. 
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - - -#include "ixgbe.h" -#include "ixgbe_cna.h" -#include "ixgbe_vmdq.h" - -static int ixgbe_cna_open(struct net_device *cnadev) -{ - struct ixgbe_adapter *adapter = netdev_priv(cnadev); - strcpy(cnadev->name, adapter->netdev->name); - DPRINTK(PROBE, INFO, "CNA pseudo device opened %s\n", cnadev->name); - return 0; -} - -static int ixgbe_cna_close(struct net_device *cnadev) -{ - struct ixgbe_adapter *adapter = netdev_priv(cnadev); - - DPRINTK(PROBE, INFO, "CNA pseudo device closed %s\n", cnadev->name); - return 0; -} - -static int ixgbe_cna_change_mtu(struct net_device *cnadev, int new_mtu) -{ - struct ixgbe_adapter *adapter = netdev_priv(cnadev); - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - - /* MTU < 68 is an error and causes problems on some kernels */ - if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) - return -EINVAL; - - DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", - cnadev->mtu, new_mtu); - /* must set new MTU before calling down or up */ - cnadev->mtu = new_mtu; - - return 0; -} - -int ixgbe_cna_enable(struct ixgbe_adapter *adapter) -{ - struct net_device *cnadev; - struct net_device *netdev; - int err; - u64 wwpn; - u64 wwnn; - - netdev = adapter->netdev; - /* - * 
Oppositely to regular net device, CNA device doesn't have - * a private allocated region as we don't want to duplicate - * ixgbe_adapter information. Though, the CNA device still need - * to access the ixgbe_adapter while allocating queues or such. Thereby, - * cnadev->priv needs to point to netdev->priv. - */ - cnadev = alloc_etherdev_mq(0, MAX_TX_QUEUES); - if (!cnadev) { - err = -ENOMEM; - goto err_alloc_etherdev; - } - adapter->cnadev = cnadev; - SET_MODULE_OWNER(cnadev); - - cnadev->priv = adapter; - - cnadev->open = &ixgbe_cna_open; - cnadev->stop = &ixgbe_cna_close; - cnadev->change_mtu = &ixgbe_cna_change_mtu; - cnadev->do_ioctl = netdev->do_ioctl; - cnadev->hard_start_xmit = netdev->hard_start_xmit; -#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) - cnadev->vlan_rx_register = netdev->vlan_rx_register; - cnadev->vlan_rx_add_vid = netdev->vlan_rx_add_vid; - cnadev->vlan_rx_kill_vid = netdev->vlan_rx_kill_vid; -#endif - ixgbe_set_ethtool_ops(cnadev); - -#if IS_ENABLED(CONFIG_DCB) - cnadev->dcbnl_ops = netdev->dcbnl_ops; -#endif /* CONFIG_DCB */ - - cnadev->mtu = netdev->mtu; - cnadev->pdev = netdev->pdev; - cnadev->gso_max_size = GSO_MAX_SIZE; - cnadev->features = netdev->features | NETIF_F_CNA | NETIF_F_HW_VLAN_FILTER; - - /* set the MAC address to SAN mac address */ - if (ixgbe_validate_mac_addr(adapter->hw.mac.san_addr) == 0) - memcpy(cnadev->dev_addr, - adapter->hw.mac.san_addr, - cnadev->addr_len); - - cnadev->features |= NETIF_F_FCOE_CRC | - NETIF_F_FCOE_MTU | - NETIF_F_FSO; - - cnadev->ndo_fcoe_ddp_setup = &ixgbe_fcoe_ddp_get; - cnadev->ndo_fcoe_ddp_done = &ixgbe_fcoe_ddp_put; - cnadev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; - - netif_carrier_off(cnadev); - netif_tx_stop_all_queues(cnadev); - - VMKNETDDI_REGISTER_QUEUEOPS(cnadev, ixgbe_netqueue_ops); - - err = register_netdev(cnadev); - if (err) - goto err_register; - - DPRINTK(PROBE, INFO, "CNA pseudo device registered %s\n", netdev->name); - - return err; - -err_register: - 
DPRINTK(PROBE, INFO, "CNA pseudo device cannot be registered %s\n", - netdev->name); - free_netdev(cnadev); -err_alloc_etherdev: - DPRINTK(PROBE, INFO, "CNA cannot be enabled on %s\n", netdev->name); - adapter->flags2 &= ~IXGBE_FLAG2_CNA_ENABLED; - adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; - adapter->ring_feature[RING_F_FCOE].indices = 0; - return err; -} - -void ixgbe_cna_disable(struct ixgbe_adapter *adapter) -{ - if (!(adapter->flags2 & IXGBE_FLAG2_CNA_ENABLED)) - return; - - adapter->flags2 &= ~IXGBE_FLAG2_CNA_ENABLED; - adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; - adapter->ring_feature[RING_F_FCOE].indices = 0; - - if (adapter->cnadev) { - unregister_netdev(adapter->cnadev); - DPRINTK(PROBE, INFO, "CNA pseudo device unregistered %s\n", - adapter->cnadev->name); - - free_netdev(adapter->cnadev); - adapter->cnadev = NULL; - } -} - -/* ixgbe_cna.c */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.h deleted file mode 100644 index ee40480b1f44..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_cna.h +++ /dev/null @@ -1,31 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
- - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_CNA_H_ -#define _IXGBE_CNA_H_ - -int ixgbe_cna_enable(struct ixgbe_adapter *adapter); -void ixgbe_cna_disable(struct ixgbe_adapter *adapter); - -#endif /* _IXGBE_CNA_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.c deleted file mode 100644 index 04369ba69f20..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.c +++ /dev/null @@ -1,5274 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe_common.h" -#include "ixgbe_phy.h" -#include "ixgbe_dcb.h" -#include "ixgbe_dcb_82599.h" -#include "ixgbe_api.h" - -STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); -STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); -STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); -STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); -STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw); -STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, - u16 count); -STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); -STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); -STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); -STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw); - -STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); -STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, - u16 *san_mac_offset); -STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data); -STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data); -STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, - u16 offset); - -/** - * ixgbe_init_ops_generic - Inits function ptrs - * @hw: pointer to the hardware structure - * - * Initialize the function pointers. 
- **/ -s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw) -{ - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - struct ixgbe_mac_info *mac = &hw->mac; - u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); - - DEBUGFUNC("ixgbe_init_ops_generic"); - - /* EEPROM */ - eeprom->ops.init_params = ixgbe_init_eeprom_params_generic; - /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */ - if (eec & IXGBE_EEC_PRES) { - eeprom->ops.read = ixgbe_read_eerd_generic; - eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic; - } else { - eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic; - eeprom->ops.read_buffer = - ixgbe_read_eeprom_buffer_bit_bang_generic; - } - eeprom->ops.write = ixgbe_write_eeprom_generic; - eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic; - eeprom->ops.validate_checksum = - ixgbe_validate_eeprom_checksum_generic; - eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic; - eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic; - - /* MAC */ - mac->ops.init_hw = ixgbe_init_hw_generic; - mac->ops.reset_hw = NULL; - mac->ops.start_hw = ixgbe_start_hw_generic; - mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic; - mac->ops.get_media_type = NULL; - mac->ops.get_supported_physical_layer = NULL; - mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic; - mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic; - mac->ops.stop_adapter = ixgbe_stop_adapter_generic; - mac->ops.get_bus_info = ixgbe_get_bus_info_generic; - mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie; - mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync; - mac->ops.release_swfw_sync = ixgbe_release_swfw_sync; - mac->ops.prot_autoc_read = prot_autoc_read_generic; - mac->ops.prot_autoc_write = prot_autoc_write_generic; - - /* LEDs */ - mac->ops.led_on = ixgbe_led_on_generic; - mac->ops.led_off = ixgbe_led_off_generic; - mac->ops.blink_led_start = ixgbe_blink_led_start_generic; - mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic; 
- mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic; - - /* RAR, Multicast, VLAN */ - mac->ops.set_rar = ixgbe_set_rar_generic; - mac->ops.clear_rar = ixgbe_clear_rar_generic; - mac->ops.insert_mac_addr = NULL; - mac->ops.set_vmdq = NULL; - mac->ops.clear_vmdq = NULL; - mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic; - mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic; - mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic; - mac->ops.enable_mc = ixgbe_enable_mc_generic; - mac->ops.disable_mc = ixgbe_disable_mc_generic; - mac->ops.clear_vfta = NULL; - mac->ops.set_vfta = NULL; - mac->ops.set_vlvf = NULL; - mac->ops.init_uta_tables = NULL; - mac->ops.enable_rx = ixgbe_enable_rx_generic; - mac->ops.disable_rx = ixgbe_disable_rx_generic; - - /* Flow Control */ - mac->ops.fc_enable = ixgbe_fc_enable_generic; - mac->ops.setup_fc = ixgbe_setup_fc_generic; - mac->ops.fc_autoneg = ixgbe_fc_autoneg; - - /* Link */ - mac->ops.get_link_capabilities = NULL; - mac->ops.setup_link = NULL; - mac->ops.check_link = NULL; - mac->ops.dmac_config = NULL; - mac->ops.dmac_update_tcs = NULL; - mac->ops.dmac_config_tcs = NULL; - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation - * of flow control - * @hw: pointer to hardware structure - * - * This function returns true if the device supports flow control - * autonegotiation, and false if it does not. 
- * - **/ -bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) -{ - bool supported = false; - ixgbe_link_speed speed; - bool link_up; - - DEBUGFUNC("ixgbe_device_supports_autoneg_fc"); - - switch (hw->phy.media_type) { - case ixgbe_media_type_fiber_qsfp: - case ixgbe_media_type_fiber: - /* flow control autoneg black list */ - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_A_SFP: - case IXGBE_DEV_ID_X550EM_A_SFP_N: - case IXGBE_DEV_ID_X550EM_A_QSFP: - case IXGBE_DEV_ID_X550EM_A_QSFP_N: - supported = false; - break; - default: - hw->mac.ops.check_link(hw, &speed, &link_up, false); - /* if link is down, assume supported */ - if (link_up) - supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? - true : false; - else - supported = true; - } - - break; - case ixgbe_media_type_backplane: - if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) - supported = false; - else - supported = true; - break; - case ixgbe_media_type_copper: - /* only some copper devices support flow control autoneg */ - switch (hw->device_id) { - case IXGBE_DEV_ID_82599_T3_LOM: - case IXGBE_DEV_ID_X540T: - case IXGBE_DEV_ID_X540T1: - case IXGBE_DEV_ID_X550T: - case IXGBE_DEV_ID_X550T1: - case IXGBE_DEV_ID_X550EM_X_10G_T: - case IXGBE_DEV_ID_X550EM_A_10G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T_L: - supported = true; - break; - default: - supported = false; - } - default: - break; - } - - if (!supported) - ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, - "Device %x does not support flow control autoneg", - hw->device_id); - return supported; -} - -/** - * ixgbe_setup_fc_generic - Set up flow control - * @hw: pointer to hardware structure - * - * Called at init time to set up flow control. 
- **/ -s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_SUCCESS; - u32 reg = 0, reg_bp = 0; - u16 reg_cu = 0; - bool locked = false; - - DEBUGFUNC("ixgbe_setup_fc_generic"); - - /* Validate the requested mode */ - if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { - ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, - "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; - } - - /* - * 10gig parts do not have a word in the EEPROM to determine the - * default flow control setting, so we explicitly set it to full. - */ - if (hw->fc.requested_mode == ixgbe_fc_default) - hw->fc.requested_mode = ixgbe_fc_full; - - /* - * Set up the 1G and 10G flow control advertisement registers so the - * HW will be able to do fc autoneg once the cable is plugged in. If - * we link at 10G, the 1G advertisement is harmless and vice versa. - */ - switch (hw->phy.media_type) { - case ixgbe_media_type_backplane: - /* some MAC's need RMW protection on AUTOC */ - ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp); - if (ret_val != IXGBE_SUCCESS) - goto out; - - /* fall through - only backplane uses autoc */ - case ixgbe_media_type_fiber_qsfp: - case ixgbe_media_type_fiber: - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - - break; - case ixgbe_media_type_copper: - hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu); - break; - default: - break; - } - - /* - * The possible values of fc.requested_mode are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames, - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but - * we do not support receiving pause frames). - * 3: Both Rx and Tx flow control (symmetric) are enabled. - * other: Invalid. - */ - switch (hw->fc.requested_mode) { - case ixgbe_fc_none: - /* Flow control completely disabled by software override. 
*/ - reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); - if (hw->phy.media_type == ixgbe_media_type_backplane) - reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | - IXGBE_AUTOC_ASM_PAUSE); - else if (hw->phy.media_type == ixgbe_media_type_copper) - reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); - break; - case ixgbe_fc_tx_pause: - /* - * Tx Flow control is enabled, and Rx Flow control is - * disabled by software override. - */ - reg |= IXGBE_PCS1GANA_ASM_PAUSE; - reg &= ~IXGBE_PCS1GANA_SYM_PAUSE; - if (hw->phy.media_type == ixgbe_media_type_backplane) { - reg_bp |= IXGBE_AUTOC_ASM_PAUSE; - reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE; - } else if (hw->phy.media_type == ixgbe_media_type_copper) { - reg_cu |= IXGBE_TAF_ASM_PAUSE; - reg_cu &= ~IXGBE_TAF_SYM_PAUSE; - } - break; - case ixgbe_fc_rx_pause: - /* - * Rx Flow control is enabled and Tx Flow control is - * disabled by software override. Since there really - * isn't a way to advertise that we are capable of RX - * Pause ONLY, we will advertise that we support both - * symmetric and asymmetric Rx PAUSE, as such we fall - * through to the fc_full statement. Later, we will - * disable the adapter's ability to send PAUSE frames. - */ - case ixgbe_fc_full: - /* Flow control (both Rx and Tx) is enabled by SW override. */ - reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE; - if (hw->phy.media_type == ixgbe_media_type_backplane) - reg_bp |= IXGBE_AUTOC_SYM_PAUSE | - IXGBE_AUTOC_ASM_PAUSE; - else if (hw->phy.media_type == ixgbe_media_type_copper) - reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE; - break; - default: - ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, - "Flow control param set incorrectly\n"); - ret_val = IXGBE_ERR_CONFIG; - goto out; - break; - } - - if (hw->mac.type < ixgbe_mac_X540) { - /* - * Enable auto-negotiation between the MAC & PHY; - * the MAC will advertise clause 37 flow control. 
- */ - IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); - - /* Disable AN timeout */ - if (hw->fc.strict_ieee) - reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; - - IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); - DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); - } - - /* - * AUTOC restart handles negotiation of 1G and 10G on backplane - * and copper. There is no need to set the PCS1GCTL register. - * - */ - if (hw->phy.media_type == ixgbe_media_type_backplane) { - reg_bp |= IXGBE_AUTOC_AN_RESTART; - ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked); - if (ret_val) - goto out; - } else if ((hw->phy.media_type == ixgbe_media_type_copper) && - (ixgbe_device_supports_autoneg_fc(hw))) { - hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu); - } - - DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); -out: - return ret_val; -} - -/** - * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx - * @hw: pointer to hardware structure - * - * Starts the hardware by filling the bus info structure and media type, clears - * all on chip counters, initializes receive address registers, multicast - * table, VLAN filter table, calls routine to set up link and flow control - * settings, and leaves transmit and receive units disabled and uninitialized - **/ -s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) -{ - s32 ret_val; - u32 ctrl_ext; - u16 device_caps; - s32 rc; - u16 regVal=0; - - DEBUGFUNC("ixgbe_start_hw_generic"); - - /* Set the media type */ - hw->phy.media_type = hw->mac.ops.get_media_type(hw); - - /* PHY ops initialization must be done in reset_hw() */ - - /* Clear the VLAN filter table */ - hw->mac.ops.clear_vfta(hw); - - /* Clear statistics registers */ - hw->mac.ops.clear_hw_cntrs(hw); - - /* Set No Snoop Disable */ - ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); - ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; - IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); - IXGBE_WRITE_FLUSH(hw); - - /* Setup flow 
control */ - ret_val = ixgbe_setup_fc(hw); - if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) { - DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val); - return ret_val; - } - - /* Cache bit indicating need for crosstalk fix */ - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - hw->mac.ops.get_device_caps(hw, &device_caps); - if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) - hw->need_crosstalk_fix = false; - else - hw->need_crosstalk_fix = true; - break; - default: - hw->need_crosstalk_fix = false; - break; - } - - /* Clear adapter stopped flag */ - hw->adapter_stopped = false; - -#if 1 /* To modify speed LED polarity and configure led on only for speed 1G in M88E1512 - * for Porsche2 platform. - * From 88E1512 datasheet: - * Page register: 0x16 - * LED functon control register: 0x10 in page 3 - * LED polarity control register: 0x11 in page 3 - */ - - if (hw->mac.type == ixgbe_mac_X550EM_a && - (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { - /* For M88E1512, to select page 3 in register 0x16 */ - regVal = 0x03; - rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); - if (rc) { - hw_err(hw, "page register write failed, rc:%x\n", rc); - } -#if 0 //for debug - /* For M88E1512, read from register 0x16 */ - regVal = 0x00; - rc = hw->phy.ops.read_reg(hw, 0x16, MDIO_MMD_PMAPMD, ®Val); - if (rc) { - hw_err(hw, "phy register read failed, rc:%x\n", rc); - } - hw_err(hw, "####read phy register 0x16 again, value:%x\n", regVal); -#endif - /* For M88E1512, read from page 3, register 0x11 */ - regVal = 0x00; - rc = hw->phy.ops.read_reg(hw, 0x11, MDIO_MMD_PMAPMD, ®Val); - if (rc) { - hw_err(hw, "led polarity register read failed, rc:%x\n", rc); - } - - /* For M88E1512, write to page 3 register 0x11 with polarity bit set */ - regVal |= 0x01; - rc = hw->phy.ops.write_reg(hw, 0x11, MDIO_MMD_PMAPMD, regVal); - if (rc) { - hw_err(hw, "led polarity register write failed, 
rc:%x\n", rc); - } - - /* For M88E1512, read from page 3, register 16 */ - regVal = 0x00; - rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); - if (rc) { - hw_err(hw, "led function control register read failed, rc:%x\n", rc); - } - - /* For M88E1512, write to page 3 register 16 with only 1000M led on */ - regVal = (regVal & 0xFFF0) | 0x0007; - rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); - if (rc) { - hw_err(hw, "led function control register write failed, rc:%x\n", rc); - } - - /* For M88E1512, write page 22 back to default 0 */ - regVal = 0x00; - rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); - if (rc) { - hw_err(hw, "page register write failed, rc:%x\n", rc); - } - } -#endif - return IXGBE_SUCCESS; -} - -/** - * ixgbe_start_hw_gen2 - Init sequence for common device family - * @hw: pointer to hw structure - * - * Performs the init sequence common to the second generation - * of 10 GbE devices. - * Devices in the second generation: - * 82599 - * X540 - **/ -s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) -{ - u32 i; - u32 regval; - - /* Clear the rate limiters */ - for (i = 0; i < hw->mac.max_tx_queues; i++) { - IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); - } - IXGBE_WRITE_FLUSH(hw); - - /* Disable relaxed ordering */ - for (i = 0; i < hw->mac.max_tx_queues; i++) { - regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); - regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); - } - - for (i = 0; i < hw->mac.max_rx_queues; i++) { - regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); - regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | - IXGBE_DCA_RXCTRL_HEAD_WRO_EN); - IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_init_hw_generic - Generic hardware initialization - * @hw: pointer to hardware structure - * - * Initialize the hardware by resetting the hardware, filling the bus info - * structure and 
media type, clears all on chip counters, initializes receive - * address registers, multicast table, VLAN filter table, calls routine to set - * up link and flow control settings, and leaves transmit and receive units - * disabled and uninitialized - **/ -s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) -{ - s32 status; - - DEBUGFUNC("ixgbe_init_hw_generic"); - - /* Reset the hardware */ - status = hw->mac.ops.reset_hw(hw); - - if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) { - /* Start the HW */ - status = hw->mac.ops.start_hw(hw); - } - - /* Initialize the LED link active for LED blink support */ - if (hw->mac.ops.init_led_link_act) - hw->mac.ops.init_led_link_act(hw); - - if (status != IXGBE_SUCCESS) - DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status); - - return status; -} - -/** - * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters - * @hw: pointer to hardware structure - * - * Clears all hardware statistics counters by reading them from the hardware - * Statistics counters are clear on read. 
- **/ -s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) -{ - u16 i = 0; - - DEBUGFUNC("ixgbe_clear_hw_cntrs_generic"); - - IXGBE_READ_REG(hw, IXGBE_CRCERRS); - IXGBE_READ_REG(hw, IXGBE_ILLERRC); - IXGBE_READ_REG(hw, IXGBE_ERRBC); - IXGBE_READ_REG(hw, IXGBE_MSPDC); - for (i = 0; i < 8; i++) - IXGBE_READ_REG(hw, IXGBE_MPC(i)); - - IXGBE_READ_REG(hw, IXGBE_MLFC); - IXGBE_READ_REG(hw, IXGBE_MRFC); - IXGBE_READ_REG(hw, IXGBE_RLEC); - IXGBE_READ_REG(hw, IXGBE_LXONTXC); - IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); - if (hw->mac.type >= ixgbe_mac_82599EB) { - IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); - IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); - } else { - IXGBE_READ_REG(hw, IXGBE_LXONRXC); - IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); - } - - for (i = 0; i < 8; i++) { - IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); - IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); - if (hw->mac.type >= ixgbe_mac_82599EB) { - IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); - IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); - } else { - IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); - IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); - } - } - if (hw->mac.type >= ixgbe_mac_82599EB) - for (i = 0; i < 8; i++) - IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); - IXGBE_READ_REG(hw, IXGBE_PRC64); - IXGBE_READ_REG(hw, IXGBE_PRC127); - IXGBE_READ_REG(hw, IXGBE_PRC255); - IXGBE_READ_REG(hw, IXGBE_PRC511); - IXGBE_READ_REG(hw, IXGBE_PRC1023); - IXGBE_READ_REG(hw, IXGBE_PRC1522); - IXGBE_READ_REG(hw, IXGBE_GPRC); - IXGBE_READ_REG(hw, IXGBE_BPRC); - IXGBE_READ_REG(hw, IXGBE_MPRC); - IXGBE_READ_REG(hw, IXGBE_GPTC); - IXGBE_READ_REG(hw, IXGBE_GORCL); - IXGBE_READ_REG(hw, IXGBE_GORCH); - IXGBE_READ_REG(hw, IXGBE_GOTCL); - IXGBE_READ_REG(hw, IXGBE_GOTCH); - if (hw->mac.type == ixgbe_mac_82598EB) - for (i = 0; i < 8; i++) - IXGBE_READ_REG(hw, IXGBE_RNBC(i)); - IXGBE_READ_REG(hw, IXGBE_RUC); - IXGBE_READ_REG(hw, IXGBE_RFC); - IXGBE_READ_REG(hw, IXGBE_ROC); - IXGBE_READ_REG(hw, IXGBE_RJC); - IXGBE_READ_REG(hw, IXGBE_MNGPRC); - IXGBE_READ_REG(hw, IXGBE_MNGPDC); - IXGBE_READ_REG(hw, 
IXGBE_MNGPTC); - IXGBE_READ_REG(hw, IXGBE_TORL); - IXGBE_READ_REG(hw, IXGBE_TORH); - IXGBE_READ_REG(hw, IXGBE_TPR); - IXGBE_READ_REG(hw, IXGBE_TPT); - IXGBE_READ_REG(hw, IXGBE_PTC64); - IXGBE_READ_REG(hw, IXGBE_PTC127); - IXGBE_READ_REG(hw, IXGBE_PTC255); - IXGBE_READ_REG(hw, IXGBE_PTC511); - IXGBE_READ_REG(hw, IXGBE_PTC1023); - IXGBE_READ_REG(hw, IXGBE_PTC1522); - IXGBE_READ_REG(hw, IXGBE_MPTC); - IXGBE_READ_REG(hw, IXGBE_BPTC); - for (i = 0; i < 16; i++) { - IXGBE_READ_REG(hw, IXGBE_QPRC(i)); - IXGBE_READ_REG(hw, IXGBE_QPTC(i)); - if (hw->mac.type >= ixgbe_mac_82599EB) { - IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); - IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); - IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); - IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); - IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); - } else { - IXGBE_READ_REG(hw, IXGBE_QBRC(i)); - IXGBE_READ_REG(hw, IXGBE_QBTC(i)); - } - } - - if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { - if (hw->phy.id == 0) - ixgbe_identify_phy(hw); - hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, - IXGBE_MDIO_PCS_DEV_TYPE, &i); - hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, - IXGBE_MDIO_PCS_DEV_TYPE, &i); - hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, - IXGBE_MDIO_PCS_DEV_TYPE, &i); - hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, - IXGBE_MDIO_PCS_DEV_TYPE, &i); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_read_pba_string_generic - Reads part number string from EEPROM - * @hw: pointer to hardware structure - * @pba_num: stores the part number string from the EEPROM - * @pba_num_size: part number string buffer length - * - * Reads the part number string from the EEPROM. 
- **/ -s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, - u32 pba_num_size) -{ - s32 ret_val; - u16 data; - u16 pba_ptr; - u16 offset; - u16 length; - - DEBUGFUNC("ixgbe_read_pba_string_generic"); - - if (pba_num == NULL) { - DEBUGOUT("PBA string buffer was null\n"); - return IXGBE_ERR_INVALID_ARGUMENT; - } - - ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); - if (ret_val) { - DEBUGOUT("NVM Read Error\n"); - return ret_val; - } - - ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); - if (ret_val) { - DEBUGOUT("NVM Read Error\n"); - return ret_val; - } - - /* - * if data is not ptr guard the PBA must be in legacy format which - * means pba_ptr is actually our second data word for the PBA number - * and we can decode it into an ascii string - */ - if (data != IXGBE_PBANUM_PTR_GUARD) { - DEBUGOUT("NVM PBA number is not stored as string\n"); - - /* we will need 11 characters to store the PBA */ - if (pba_num_size < 11) { - DEBUGOUT("PBA string buffer too small\n"); - return IXGBE_ERR_NO_SPACE; - } - - /* extract hex string from data and pba_ptr */ - pba_num[0] = (data >> 12) & 0xF; - pba_num[1] = (data >> 8) & 0xF; - pba_num[2] = (data >> 4) & 0xF; - pba_num[3] = data & 0xF; - pba_num[4] = (pba_ptr >> 12) & 0xF; - pba_num[5] = (pba_ptr >> 8) & 0xF; - pba_num[6] = '-'; - pba_num[7] = 0; - pba_num[8] = (pba_ptr >> 4) & 0xF; - pba_num[9] = pba_ptr & 0xF; - - /* put a null character on the end of our string */ - pba_num[10] = '\0'; - - /* switch all the data but the '-' to hex char */ - for (offset = 0; offset < 10; offset++) { - if (pba_num[offset] < 0xA) - pba_num[offset] += '0'; - else if (pba_num[offset] < 0x10) - pba_num[offset] += 'A' - 0xA; - } - - return IXGBE_SUCCESS; - } - - ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); - if (ret_val) { - DEBUGOUT("NVM Read Error\n"); - return ret_val; - } - - if (length == 0xFFFF || length == 0) { - DEBUGOUT("NVM PBA number section invalid length\n"); - return 
IXGBE_ERR_PBA_SECTION; - } - - /* check if pba_num buffer is big enough */ - if (pba_num_size < (((u32)length * 2) - 1)) { - DEBUGOUT("PBA string buffer too small\n"); - return IXGBE_ERR_NO_SPACE; - } - - /* trim pba length from start of string */ - pba_ptr++; - length--; - - for (offset = 0; offset < length; offset++) { - ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); - if (ret_val) { - DEBUGOUT("NVM Read Error\n"); - return ret_val; - } - pba_num[offset * 2] = (u8)(data >> 8); - pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); - } - pba_num[offset * 2] = '\0'; - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_get_mac_addr_generic - Generic get MAC address - * @hw: pointer to hardware structure - * @mac_addr: Adapter MAC address - * - * Reads the adapter's MAC address from first Receive Address Register (RAR0) - * A reset of the adapter must be performed prior to calling this function - * in order for the MAC address to have been loaded from the EEPROM into RAR0 - **/ -s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) -{ - u32 rar_high; - u32 rar_low; - u16 i; - - DEBUGFUNC("ixgbe_get_mac_addr_generic"); - - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); - rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); - - for (i = 0; i < 4; i++) - mac_addr[i] = (u8)(rar_low >> (i*8)); - - for (i = 0; i < 2; i++) - mac_addr[i+4] = (u8)(rar_high >> (i*8)); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_set_pci_config_data_generic - Generic store PCI bus info - * @hw: pointer to hardware structure - * @link_status: the link status returned by the PCI config space - * - * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure - **/ -void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status) -{ - struct ixgbe_mac_info *mac = &hw->mac; - - if (hw->bus.type == ixgbe_bus_type_unknown) - hw->bus.type = ixgbe_bus_type_pci_express; - - switch (link_status & IXGBE_PCI_LINK_WIDTH) { - case IXGBE_PCI_LINK_WIDTH_1: - hw->bus.width = 
ixgbe_bus_width_pcie_x1; - break; - case IXGBE_PCI_LINK_WIDTH_2: - hw->bus.width = ixgbe_bus_width_pcie_x2; - break; - case IXGBE_PCI_LINK_WIDTH_4: - hw->bus.width = ixgbe_bus_width_pcie_x4; - break; - case IXGBE_PCI_LINK_WIDTH_8: - hw->bus.width = ixgbe_bus_width_pcie_x8; - break; - default: - hw->bus.width = ixgbe_bus_width_unknown; - break; - } - - switch (link_status & IXGBE_PCI_LINK_SPEED) { - case IXGBE_PCI_LINK_SPEED_2500: - hw->bus.speed = ixgbe_bus_speed_2500; - break; - case IXGBE_PCI_LINK_SPEED_5000: - hw->bus.speed = ixgbe_bus_speed_5000; - break; - case IXGBE_PCI_LINK_SPEED_8000: - hw->bus.speed = ixgbe_bus_speed_8000; - break; - default: - hw->bus.speed = ixgbe_bus_speed_unknown; - break; - } - - mac->ops.set_lan_id(hw); -} - -/** - * ixgbe_get_bus_info_generic - Generic set PCI bus info - * @hw: pointer to hardware structure - * - * Gets the PCI bus info (speed, width, type) then calls helper function to - * store this data within the ixgbe_hw structure. - **/ -s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) -{ - u16 link_status; - - DEBUGFUNC("ixgbe_get_bus_info_generic"); - - /* Get the negotiated link width and speed from PCI config space */ - link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); - - ixgbe_set_pci_config_data_generic(hw, link_status); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices - * @hw: pointer to the HW structure - * - * Determines the LAN function id by reading memory-mapped registers and swaps - * the port value if requested, and set MAC instance for devices that share - * CS4227. 
- **/ -void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) -{ - struct ixgbe_bus_info *bus = &hw->bus; - u32 reg; - u16 ee_ctrl_4; - - DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie"); - - reg = IXGBE_READ_REG(hw, IXGBE_STATUS); - bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; - bus->lan_id = (u8)bus->func; - - /* check for a port swap */ - reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); - if (reg & IXGBE_FACTPS_LFS) - bus->func ^= 0x1; - - /* Get MAC instance from EEPROM for configuring CS4227 */ - if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) { - hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4); - bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >> - IXGBE_EE_CTRL_4_INST_ID_SHIFT; - } -} - -/** - * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units - * @hw: pointer to hardware structure - * - * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, - * disables transmit and receive units. The adapter_stopped flag is used by - * the shared code and drivers to determine if the adapter is in a stopped - * state and should not touch the hardware. - **/ -s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) -{ - u32 reg_val; - u16 i; - - DEBUGFUNC("ixgbe_stop_adapter_generic"); - - /* - * Set the adapter_stopped flag so other driver functions stop touching - * the hardware - */ - hw->adapter_stopped = true; - - /* Disable the receive unit */ - ixgbe_disable_rx(hw); - - /* Clear interrupt mask to stop interrupts from being generated */ - IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); - - /* Clear any pending interrupts, flush previous writes */ - IXGBE_READ_REG(hw, IXGBE_EICR); - - /* Disable the transmit unit. Each queue must be disabled. 
*/ - for (i = 0; i < hw->mac.max_tx_queues; i++) - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH); - - /* Disable the receive unit by stopping each queue */ - for (i = 0; i < hw->mac.max_rx_queues; i++) { - reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); - reg_val &= ~IXGBE_RXDCTL_ENABLE; - reg_val |= IXGBE_RXDCTL_SWFLSH; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); - } - - /* flush all queues disables */ - IXGBE_WRITE_FLUSH(hw); - msec_delay(2); - - /* - * Prevent the PCI-E bus from hanging by disabling PCI-E master - * access and verify no pending requests - */ - return ixgbe_disable_pcie_master(hw); -} - -/** - * ixgbe_init_led_link_act_generic - Store the LED index link/activity. - * @hw: pointer to hardware structure - * - * Store the index for the link active LED. This will be used to support - * blinking the LED. - **/ -s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - u32 led_reg, led_mode; - u8 i; - - led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - - /* Get LED link active from the LEDCTL register */ - for (i = 0; i < 4; i++) { - led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i); - - if ((led_mode & IXGBE_LED_MODE_MASK_BASE) == - IXGBE_LED_LINK_ACTIVE) { - mac->led_link_act = i; - return IXGBE_SUCCESS; - } - } - - /* - * If LEDCTL register does not have the LED link active set, then use - * known MAC defaults. - */ - switch (hw->mac.type) { - case ixgbe_mac_X550EM_a: - case ixgbe_mac_X550EM_x: - mac->led_link_act = 1; - break; - default: - mac->led_link_act = 2; - } - return IXGBE_SUCCESS; -} - -/** - * ixgbe_led_on_generic - Turns on the software controllable LEDs. 
- * @hw: pointer to hardware structure - * @index: led number to turn on - **/ -s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) -{ - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - s32 rc; - u16 regVal; - - DEBUGFUNC("ixgbe_led_on_generic"); - if (hw->mac.type == ixgbe_mac_X550EM_a) { - /* For M88E1512, to select page 3 in register 22 */ - regVal = 0x03; - rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); - if (rc) { - hw_err(hw, "page register write failed, rc:%x\n", rc); - } - - /* For M88E1512, read from page 3, register 16 */ - regVal = 0x00; - rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); - if (rc) { - hw_err(hw, "led function control register read failed, rc:%x\n", rc); - } - - /* For M88E1512, write to page 3 register 16 with force led on */ - regVal = (regVal & 0xFF00) | 0x0099; - rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); - if (rc) { - hw_err(hw, "led function control register write failed, rc:%x\n", rc); - } - - /* For M88E1512, write page 22 back to default 0 */ - regVal = 0x00; - rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); - if (rc) { - hw_err(hw, "page register write failed, rc:%x\n", rc); - } - } - else - { - if (index > 3) - return IXGBE_ERR_PARAM; - - /* To turn on the LED, set mode to ON. */ - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_led_off_generic - Turns off the software controllable LEDs. 
- * @hw: pointer to hardware structure - * @index: led number to turn off - **/ -s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) -{ - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - s32 rc; - u16 regVal; - - DEBUGFUNC("ixgbe_led_off_generic"); - - if (hw->mac.type == ixgbe_mac_X550EM_a) { - /* For M88E1512, to select page 3 in register 22 */ - regVal = 0x03; - rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); - if (rc) { - hw_err(hw, "page register write failed, rc:%x\n", rc); - } - - /* For M88E1512, read from page 3, register 16 */ - regVal = 0x00; - rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); - if (rc) { - hw_err(hw, "led function control register read failed, rc:%x\n", rc); - } - - /* For M88E1512, write to page 3 register 16 with force led on */ - regVal = (regVal & 0xFF00) | 0x0088; - rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); - if (rc) { - hw_err(hw, "led function control register write failed, rc:%x\n", rc); - } - - /* For M88E1512, write page 22 back to default 0 */ - regVal = 0x00; - rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); - if (rc) { - hw_err(hw, "page register write failed, rc:%x\n", rc); - } - } - else - { - if (index > 3) - return IXGBE_ERR_PARAM; - - /* To turn off the LED, set mode to OFF. */ - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - } - return IXGBE_SUCCESS; -} - -/** - * ixgbe_init_eeprom_params_generic - Initialize EEPROM params - * @hw: pointer to hardware structure - * - * Initializes the EEPROM parameters ixgbe_eeprom_info within the - * ixgbe_hw struct in order to set up EEPROM access. 
- **/ -s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) -{ - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - u32 eec; - u16 eeprom_size; - - DEBUGFUNC("ixgbe_init_eeprom_params_generic"); - - if (eeprom->type == ixgbe_eeprom_uninitialized) { - eeprom->type = ixgbe_eeprom_none; - /* Set default semaphore delay to 10ms which is a well - * tested value */ - eeprom->semaphore_delay = 10; - /* Clear EEPROM page size, it will be initialized as needed */ - eeprom->word_page_size = 0; - - /* - * Check for EEPROM present first. - * If not present leave as none - */ - eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); - if (eec & IXGBE_EEC_PRES) { - eeprom->type = ixgbe_eeprom_spi; - - /* - * SPI EEPROM is assumed here. This code would need to - * change if a future EEPROM is not SPI. - */ - eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> - IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = 1 << (eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); - } - - if (eec & IXGBE_EEC_ADDR_SIZE) - eeprom->address_bits = 16; - else - eeprom->address_bits = 8; - DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: " - "%d\n", eeprom->type, eeprom->word_size, - eeprom->address_bits); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to write - * @words: number of word(s) - * @data: 16 bit word(s) to write to EEPROM - * - * Reads 16 bit word(s) from EEPROM through bit-bang method - **/ -s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data) -{ - s32 status = IXGBE_SUCCESS; - u16 i, count; - - DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic"); - - hw->eeprom.ops.init_params(hw); - - if (words == 0) { - status = IXGBE_ERR_INVALID_ARGUMENT; - goto out; - } - - if (offset + words > hw->eeprom.word_size) { - status = IXGBE_ERR_EEPROM; - goto out; - } - - /* - * The EEPROM page size cannot be 
queried from the chip. We do lazy - * initialization. It is worth to do that when we write large buffer. - */ - if ((hw->eeprom.word_page_size == 0) && - (words > IXGBE_EEPROM_PAGE_SIZE_MAX)) - ixgbe_detect_eeprom_page_size_generic(hw, offset); - - /* - * We cannot hold synchronization semaphores for too long - * to avoid other entity starvation. However it is more efficient - * to read in bursts than synchronizing access for each word. - */ - for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { - count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? - IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); - status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, - count, &data[i]); - - if (status != IXGBE_SUCCESS) - break; - } - -out: - return status; -} - -/** - * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be written to - * @words: number of word(s) - * @data: 16 bit word(s) to be written to the EEPROM - * - * If ixgbe_eeprom_update_checksum is not called after this function, the - * EEPROM will most likely contain an invalid checksum. 
- **/ -STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data) -{ - s32 status; - u16 word; - u16 page_size; - u16 i; - u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; - - DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang"); - - /* Prepare the EEPROM for writing */ - status = ixgbe_acquire_eeprom(hw); - - if (status == IXGBE_SUCCESS) { - if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { - ixgbe_release_eeprom(hw); - status = IXGBE_ERR_EEPROM; - } - } - - if (status == IXGBE_SUCCESS) { - for (i = 0; i < words; i++) { - ixgbe_standby_eeprom(hw); - - /* Send the WRITE ENABLE command (8 bit opcode ) */ - ixgbe_shift_out_eeprom_bits(hw, - IXGBE_EEPROM_WREN_OPCODE_SPI, - IXGBE_EEPROM_OPCODE_BITS); - - ixgbe_standby_eeprom(hw); - - /* - * Some SPI eeproms use the 8th address bit embedded - * in the opcode - */ - if ((hw->eeprom.address_bits == 8) && - ((offset + i) >= 128)) - write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; - - /* Send the Write command (8-bit opcode + addr) */ - ixgbe_shift_out_eeprom_bits(hw, write_opcode, - IXGBE_EEPROM_OPCODE_BITS); - ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), - hw->eeprom.address_bits); - - page_size = hw->eeprom.word_page_size; - - /* Send the data in burst via SPI*/ - do { - word = data[i]; - word = (word >> 8) | (word << 8); - ixgbe_shift_out_eeprom_bits(hw, word, 16); - - if (page_size == 0) - break; - - /* do not wrap around page */ - if (((offset + i) & (page_size - 1)) == - (page_size - 1)) - break; - } while (++i < words); - - ixgbe_standby_eeprom(hw); - msec_delay(10); - } - /* Done with writing - release the EEPROM */ - ixgbe_release_eeprom(hw); - } - - return status; -} - -/** - * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be written to - * @data: 16 bit word to be written to the EEPROM - * - * If ixgbe_eeprom_update_checksum is not called after this function, the - * EEPROM will 
most likely contain an invalid checksum. - **/ -s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) -{ - s32 status; - - DEBUGFUNC("ixgbe_write_eeprom_generic"); - - hw->eeprom.ops.init_params(hw); - - if (offset >= hw->eeprom.word_size) { - status = IXGBE_ERR_EEPROM; - goto out; - } - - status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); - -out: - return status; -} - -/** - * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be read - * @data: read 16 bit words(s) from EEPROM - * @words: number of word(s) - * - * Reads 16 bit word(s) from EEPROM through bit-bang method - **/ -s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data) -{ - s32 status = IXGBE_SUCCESS; - u16 i, count; - - DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic"); - - hw->eeprom.ops.init_params(hw); - - if (words == 0) { - status = IXGBE_ERR_INVALID_ARGUMENT; - goto out; - } - - if (offset + words > hw->eeprom.word_size) { - status = IXGBE_ERR_EEPROM; - goto out; - } - - /* - * We cannot hold synchronization semaphores for too long - * to avoid other entity starvation. However it is more efficient - * to read in bursts than synchronizing access for each word. - */ - for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { - count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 
- IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); - - status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, - count, &data[i]); - - if (status != IXGBE_SUCCESS) - break; - } - -out: - return status; -} - -/** - * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be read - * @words: number of word(s) - * @data: read 16 bit word(s) from EEPROM - * - * Reads 16 bit word(s) from EEPROM through bit-bang method - **/ -STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data) -{ - s32 status; - u16 word_in; - u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; - u16 i; - - DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang"); - - /* Prepare the EEPROM for reading */ - status = ixgbe_acquire_eeprom(hw); - - if (status == IXGBE_SUCCESS) { - if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { - ixgbe_release_eeprom(hw); - status = IXGBE_ERR_EEPROM; - } - } - - if (status == IXGBE_SUCCESS) { - for (i = 0; i < words; i++) { - ixgbe_standby_eeprom(hw); - /* - * Some SPI eeproms use the 8th address bit embedded - * in the opcode - */ - if ((hw->eeprom.address_bits == 8) && - ((offset + i) >= 128)) - read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; - - /* Send the READ command (opcode + addr) */ - ixgbe_shift_out_eeprom_bits(hw, read_opcode, - IXGBE_EEPROM_OPCODE_BITS); - ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), - hw->eeprom.address_bits); - - /* Read the data. 
*/ - word_in = ixgbe_shift_in_eeprom_bits(hw, 16); - data[i] = (word_in >> 8) | (word_in << 8); - } - - /* End this read operation */ - ixgbe_release_eeprom(hw); - } - - return status; -} - -/** - * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be read - * @data: read 16 bit value from EEPROM - * - * Reads 16 bit value from EEPROM through bit-bang method - **/ -s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, - u16 *data) -{ - s32 status; - - DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic"); - - hw->eeprom.ops.init_params(hw); - - if (offset >= hw->eeprom.word_size) { - status = IXGBE_ERR_EEPROM; - goto out; - } - - status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); - -out: - return status; -} - -/** - * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @words: number of word(s) - * @data: 16 bit word(s) from the EEPROM - * - * Reads a 16 bit word(s) from the EEPROM using the EERD register. 
- **/ -s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data) -{ - u32 eerd; - s32 status = IXGBE_SUCCESS; - u32 i; - - DEBUGFUNC("ixgbe_read_eerd_buffer_generic"); - - hw->eeprom.ops.init_params(hw); - - if (words == 0) { - status = IXGBE_ERR_INVALID_ARGUMENT; - ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); - goto out; - } - - if (offset >= hw->eeprom.word_size) { - status = IXGBE_ERR_EEPROM; - ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); - goto out; - } - - for (i = 0; i < words; i++) { - eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | - IXGBE_EEPROM_RW_REG_START; - - IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); - status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); - - if (status == IXGBE_SUCCESS) { - data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> - IXGBE_EEPROM_RW_REG_DATA); - } else { - DEBUGOUT("Eeprom read timed out\n"); - goto out; - } - } -out: - return status; -} - -/** - * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be used as a scratch pad - * - * Discover EEPROM page size by writing marching data at given offset. - * This function is called only when we are writing a new large buffer - * at given offset so the data would be overwritten anyway. 
- **/ -STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, - u16 offset) -{ - u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; - s32 status = IXGBE_SUCCESS; - u16 i; - - DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic"); - - for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) - data[i] = i; - - hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX; - status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, - IXGBE_EEPROM_PAGE_SIZE_MAX, data); - hw->eeprom.word_page_size = 0; - if (status != IXGBE_SUCCESS) - goto out; - - status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); - if (status != IXGBE_SUCCESS) - goto out; - - /* - * When writing in burst more than the actual page size - * EEPROM address wraps around current page. - */ - hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; - - DEBUGOUT1("Detected EEPROM page size = %d words.", - hw->eeprom.word_page_size); -out: - return status; -} - -/** - * ixgbe_read_eerd_generic - Read EEPROM word using EERD - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM using the EERD register. - **/ -s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) -{ - return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data); -} - -/** - * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @words: number of word(s) - * @data: word(s) write to the EEPROM - * - * Write a 16 bit word(s) to the EEPROM using the EEWR register. 
- **/ -s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data) -{ - u32 eewr; - s32 status = IXGBE_SUCCESS; - u16 i; - - DEBUGFUNC("ixgbe_write_eewr_generic"); - - hw->eeprom.ops.init_params(hw); - - if (words == 0) { - status = IXGBE_ERR_INVALID_ARGUMENT; - ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); - goto out; - } - - if (offset >= hw->eeprom.word_size) { - status = IXGBE_ERR_EEPROM; - ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); - goto out; - } - - for (i = 0; i < words; i++) { - eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | - (data[i] << IXGBE_EEPROM_RW_REG_DATA) | - IXGBE_EEPROM_RW_REG_START; - - status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); - if (status != IXGBE_SUCCESS) { - DEBUGOUT("Eeprom write EEWR timed out\n"); - goto out; - } - - IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); - - status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); - if (status != IXGBE_SUCCESS) { - DEBUGOUT("Eeprom write EEWR timed out\n"); - goto out; - } - } - -out: - return status; -} - -/** - * ixgbe_write_eewr_generic - Write EEPROM word using EEWR - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @data: word write to the EEPROM - * - * Write a 16 bit word to the EEPROM using the EEWR register. - **/ -s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) -{ - return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); -} - -/** - * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status - * @hw: pointer to hardware structure - * @ee_reg: EEPROM flag for polling - * - * Polls the status bit (bit 1) of the EERD or EEWR to determine when the - * read or write is done respectively. 
- **/ -s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) -{ - u32 i; - u32 reg; - s32 status = IXGBE_ERR_EEPROM; - - DEBUGFUNC("ixgbe_poll_eerd_eewr_done"); - - for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { - if (ee_reg == IXGBE_NVM_POLL_READ) - reg = IXGBE_READ_REG(hw, IXGBE_EERD); - else - reg = IXGBE_READ_REG(hw, IXGBE_EEWR); - - if (reg & IXGBE_EEPROM_RW_REG_DONE) { - status = IXGBE_SUCCESS; - break; - } - usec_delay(5); - } - - if (i == IXGBE_EERD_EEWR_ATTEMPTS) - ERROR_REPORT1(IXGBE_ERROR_POLLING, - "EEPROM read/write done polling timed out"); - - return status; -} - -/** - * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang - * @hw: pointer to hardware structure - * - * Prepares EEPROM for access using bit-bang method. This function should - * be called before issuing a command to the EEPROM. - **/ -STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_SUCCESS; - u32 eec; - u32 i; - - DEBUGFUNC("ixgbe_acquire_eeprom"); - - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) - != IXGBE_SUCCESS) - status = IXGBE_ERR_SWFW_SYNC; - - if (status == IXGBE_SUCCESS) { - eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); - - /* Request EEPROM Access */ - eec |= IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); - - for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { - eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); - if (eec & IXGBE_EEC_GNT) - break; - usec_delay(5); - } - - /* Release if grant not acquired */ - if (!(eec & IXGBE_EEC_GNT)) { - eec &= ~IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); - DEBUGOUT("Could not acquire EEPROM grant\n"); - - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - status = IXGBE_ERR_EEPROM; - } - - /* Setup EEPROM for Read/Write */ - if (status == IXGBE_SUCCESS) { - /* Clear CS and SK */ - eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); - IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); - IXGBE_WRITE_FLUSH(hw); - usec_delay(1); - } - } - return status; -} - -/** - 
* ixgbe_get_eeprom_semaphore - Get hardware semaphore - * @hw: pointer to hardware structure - * - * Sets the hardware semaphores so EEPROM access can occur for bit-bang method - **/ -STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_ERR_EEPROM; - u32 timeout = 2000; - u32 i; - u32 swsm; - - DEBUGFUNC("ixgbe_get_eeprom_semaphore"); - - /* Get SMBI software semaphore between device drivers first */ - for (i = 0; i < timeout; i++) { - /* - * If the SMBI bit is 0 when we read it, then the bit will be - * set and we have the semaphore - */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); - if (!(swsm & IXGBE_SWSM_SMBI)) { - status = IXGBE_SUCCESS; - break; - } - usec_delay(50); - } - - if (i == timeout) { - DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore " - "not granted.\n"); - /* - * this release is particularly important because our attempts - * above to get the semaphore may have succeeded, and if there - * was a timeout, we should unconditionally clear the semaphore - * bits to free the driver to make progress - */ - ixgbe_release_eeprom_semaphore(hw); - - usec_delay(50); - /* - * one last try - * If the SMBI bit is 0 when we read it, then the bit will be - * set and we have the semaphore - */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); - if (!(swsm & IXGBE_SWSM_SMBI)) - status = IXGBE_SUCCESS; - } - - /* Now get the semaphore between SW/FW through the SWESMBI bit */ - if (status == IXGBE_SUCCESS) { - for (i = 0; i < timeout; i++) { - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); - - /* Set the SW EEPROM semaphore bit to request access */ - swsm |= IXGBE_SWSM_SWESMBI; - IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); - - /* - * If we set the bit successfully then we got the - * semaphore. 
- */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); - if (swsm & IXGBE_SWSM_SWESMBI) - break; - - usec_delay(50); - } - - /* - * Release semaphores and return error if SW EEPROM semaphore - * was not granted because we don't have access to the EEPROM - */ - if (i >= timeout) { - ERROR_REPORT1(IXGBE_ERROR_POLLING, - "SWESMBI Software EEPROM semaphore not granted.\n"); - ixgbe_release_eeprom_semaphore(hw); - status = IXGBE_ERR_EEPROM; - } - } else { - ERROR_REPORT1(IXGBE_ERROR_POLLING, - "Software semaphore SMBI between device drivers " - "not granted.\n"); - } - - return status; -} - -/** - * ixgbe_release_eeprom_semaphore - Release hardware semaphore - * @hw: pointer to hardware structure - * - * This function clears hardware semaphore bits. - **/ -STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) -{ - u32 swsm; - - DEBUGFUNC("ixgbe_release_eeprom_semaphore"); - - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); - - /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ - swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); - IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); - IXGBE_WRITE_FLUSH(hw); -} - -/** - * ixgbe_ready_eeprom - Polls for EEPROM ready - * @hw: pointer to hardware structure - **/ -STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_SUCCESS; - u16 i; - u8 spi_stat_reg; - - DEBUGFUNC("ixgbe_ready_eeprom"); - - /* - * Read "Status Register" repeatedly until the LSB is cleared. The - * EEPROM will signal that the command has been completed by clearing - * bit 0 of the internal status register. If it's not cleared within - * 5 milliseconds, then error out. 
- */ - for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { - ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, - IXGBE_EEPROM_OPCODE_BITS); - spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); - if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) - break; - - usec_delay(5); - ixgbe_standby_eeprom(hw); - }; - - /* - * On some parts, SPI write time could vary from 0-20mSec on 3.3V - * devices (and only 0-5mSec on 5V devices) - */ - if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { - DEBUGOUT("SPI EEPROM Status error\n"); - status = IXGBE_ERR_EEPROM; - } - - return status; -} - -/** - * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state - * @hw: pointer to hardware structure - **/ -STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw) -{ - u32 eec; - - DEBUGFUNC("ixgbe_standby_eeprom"); - - eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); - - /* Toggle CS to flush commands */ - eec |= IXGBE_EEC_CS; - IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); - IXGBE_WRITE_FLUSH(hw); - usec_delay(1); - eec &= ~IXGBE_EEC_CS; - IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); - IXGBE_WRITE_FLUSH(hw); - usec_delay(1); -} - -/** - * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. - * @hw: pointer to hardware structure - * @data: data to send to the EEPROM - * @count: number of bits to shift out - **/ -STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, - u16 count) -{ - u32 eec; - u32 mask; - u32 i; - - DEBUGFUNC("ixgbe_shift_out_eeprom_bits"); - - eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); - - /* - * Mask is used to shift "count" bits of "data" out to the EEPROM - * one bit at a time. Determine the starting bit based on count - */ - mask = 0x01 << (count - 1); - - for (i = 0; i < count; i++) { - /* - * A "1" is shifted out to the EEPROM by setting bit "DI" to a - * "1", and then raising and then lowering the clock (the SK - * bit controls the clock input to the EEPROM). 
A "0" is - * shifted out to the EEPROM by setting "DI" to "0" and then - * raising and then lowering the clock. - */ - if (data & mask) - eec |= IXGBE_EEC_DI; - else - eec &= ~IXGBE_EEC_DI; - - IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); - IXGBE_WRITE_FLUSH(hw); - - usec_delay(1); - - ixgbe_raise_eeprom_clk(hw, &eec); - ixgbe_lower_eeprom_clk(hw, &eec); - - /* - * Shift mask to signify next bit of data to shift in to the - * EEPROM - */ - mask = mask >> 1; - }; - - /* We leave the "DI" bit set to "0" when we leave this routine. */ - eec &= ~IXGBE_EEC_DI; - IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); - IXGBE_WRITE_FLUSH(hw); -} - -/** - * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM - * @hw: pointer to hardware structure - **/ -STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) -{ - u32 eec; - u32 i; - u16 data = 0; - - DEBUGFUNC("ixgbe_shift_in_eeprom_bits"); - - /* - * In order to read a register from the EEPROM, we need to shift - * 'count' bits in from the EEPROM. Bits are "shifted in" by raising - * the clock input to the EEPROM (setting the SK bit), and then reading - * the value of the "DO" bit. During this "shifting in" process the - * "DI" bit should always be clear. - */ - eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); - - eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); - - for (i = 0; i < count; i++) { - data = data << 1; - ixgbe_raise_eeprom_clk(hw, &eec); - - eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); - - eec &= ~(IXGBE_EEC_DI); - if (eec & IXGBE_EEC_DO) - data |= 1; - - ixgbe_lower_eeprom_clk(hw, &eec); - } - - return data; -} - -/** - * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. 
- * @hw: pointer to hardware structure - * @eec: EEC register's current value - **/ -STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) -{ - DEBUGFUNC("ixgbe_raise_eeprom_clk"); - - /* - * Raise the clock input to the EEPROM - * (setting the SK bit), then delay - */ - *eec = *eec | IXGBE_EEC_SK; - IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec); - IXGBE_WRITE_FLUSH(hw); - usec_delay(1); -} - -/** - * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. - * @hw: pointer to hardware structure - * @eecd: EECD's current value - **/ -STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) -{ - DEBUGFUNC("ixgbe_lower_eeprom_clk"); - - /* - * Lower the clock input to the EEPROM (clearing the SK bit), then - * delay - */ - *eec = *eec & ~IXGBE_EEC_SK; - IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec); - IXGBE_WRITE_FLUSH(hw); - usec_delay(1); -} - -/** - * ixgbe_release_eeprom - Release EEPROM, release semaphores - * @hw: pointer to hardware structure - **/ -STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw) -{ - u32 eec; - - DEBUGFUNC("ixgbe_release_eeprom"); - - eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); - - eec |= IXGBE_EEC_CS; /* Pull CS high */ - eec &= ~IXGBE_EEC_SK; /* Lower SCK */ - - IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); - IXGBE_WRITE_FLUSH(hw); - - usec_delay(1); - - /* Stop requesting EEPROM access */ - eec &= ~IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); - - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - - /* Delay before attempt to obtain semaphore again to allow FW access */ - msec_delay(hw->eeprom.semaphore_delay); -} - -/** - * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum - * @hw: pointer to hardware structure - * - * Returns a negative error code on error, or the 16-bit checksum - **/ -s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) -{ - u16 i; - u16 j; - u16 checksum = 0; - u16 length = 0; - u16 pointer = 0; - u16 word = 0; - - 
DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic"); - - /* Include 0x0-0x3F in the checksum */ - for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { - if (hw->eeprom.ops.read(hw, i, &word)) { - DEBUGOUT("EEPROM read failed\n"); - return IXGBE_ERR_EEPROM; - } - checksum += word; - } - - /* Include all data from pointers except for the fw pointer */ - for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { - if (hw->eeprom.ops.read(hw, i, &pointer)) { - DEBUGOUT("EEPROM read failed\n"); - return IXGBE_ERR_EEPROM; - } - - /* If the pointer seems invalid */ - if (pointer == 0xFFFF || pointer == 0) - continue; - - if (hw->eeprom.ops.read(hw, pointer, &length)) { - DEBUGOUT("EEPROM read failed\n"); - return IXGBE_ERR_EEPROM; - } - - if (length == 0xFFFF || length == 0) - continue; - - for (j = pointer + 1; j <= pointer + length; j++) { - if (hw->eeprom.ops.read(hw, j, &word)) { - DEBUGOUT("EEPROM read failed\n"); - return IXGBE_ERR_EEPROM; - } - checksum += word; - } - } - - checksum = (u16)IXGBE_EEPROM_SUM - checksum; - - return (s32)checksum; -} - -/** - * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum - * @hw: pointer to hardware structure - * @checksum_val: calculated checksum - * - * Performs checksum calculation and validates the EEPROM checksum. If the - * caller does not need checksum_val, the value can be NULL. - **/ -s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, - u16 *checksum_val) -{ - s32 status; - u16 checksum; - u16 read_checksum = 0; - - DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic"); - - /* Read the first word from the EEPROM. 
If this times out or fails, do - * not continue or we could be in for a very long wait while every - * EEPROM read fails - */ - status = hw->eeprom.ops.read(hw, 0, &checksum); - if (status) { - DEBUGOUT("EEPROM read failed\n"); - return status; - } - - status = hw->eeprom.ops.calc_checksum(hw); - if (status < 0) - return status; - - checksum = (u16)(status & 0xffff); - - status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); - if (status) { - DEBUGOUT("EEPROM read failed\n"); - return status; - } - - /* Verify read checksum from EEPROM is the same as - * calculated checksum - */ - if (read_checksum != checksum) - status = IXGBE_ERR_EEPROM_CHECKSUM; - - /* If the user cares, return the calculated checksum */ - if (checksum_val) - *checksum_val = checksum; - - return status; -} - -/** - * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum - * @hw: pointer to hardware structure - **/ -s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) -{ - s32 status; - u16 checksum; - - DEBUGFUNC("ixgbe_update_eeprom_checksum_generic"); - - /* Read the first word from the EEPROM. If this times out or fails, do - * not continue or we could be in for a very long wait while every - * EEPROM read fails - */ - status = hw->eeprom.ops.read(hw, 0, &checksum); - if (status) { - DEBUGOUT("EEPROM read failed\n"); - return status; - } - - status = hw->eeprom.ops.calc_checksum(hw); - if (status < 0) - return status; - - checksum = (u16)(status & 0xffff); - - status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum); - - return status; -} - -/** - * ixgbe_validate_mac_addr - Validate MAC address - * @mac_addr: pointer to MAC address. - * - * Tests a MAC address to ensure it is a valid Individual Address. 
- **/ -s32 ixgbe_validate_mac_addr(u8 *mac_addr) -{ - s32 status = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_validate_mac_addr"); - - /* Make sure it is not a multicast address */ - if (IXGBE_IS_MULTICAST(mac_addr)) { - status = IXGBE_ERR_INVALID_MAC_ADDR; - /* Not a broadcast address */ - } else if (IXGBE_IS_BROADCAST(mac_addr)) { - status = IXGBE_ERR_INVALID_MAC_ADDR; - /* Reject the zero address */ - } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && - mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { - status = IXGBE_ERR_INVALID_MAC_ADDR; - } - return status; -} - -/** - * ixgbe_set_rar_generic - Set Rx address register - * @hw: pointer to hardware structure - * @index: Receive address register to write - * @addr: Address to put into receive address register - * @vmdq: VMDq "set" or "pool" index - * @enable_addr: set flag that address is active - * - * Puts an ethernet address into a receive address register. - **/ -s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, - u32 enable_addr) -{ - u32 rar_low, rar_high; - u32 rar_entries = hw->mac.num_rar_entries; - - DEBUGFUNC("ixgbe_set_rar_generic"); - - /* Make sure we are using a valid rar index range */ - if (index >= rar_entries) { - ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, - "RAR index %d is out of range.\n", index); - return IXGBE_ERR_INVALID_ARGUMENT; - } - - /* setup VMDq pool selection before this RAR gets enabled */ - hw->mac.ops.set_vmdq(hw, index, vmdq); - - /* - * HW expects these in little endian so we reverse the byte - * order from network order (big endian) to little endian - */ - rar_low = ((u32)addr[0] | - ((u32)addr[1] << 8) | - ((u32)addr[2] << 16) | - ((u32)addr[3] << 24)); - /* - * Some parts put the VMDq setting in the extra RAH bits, - * so save everything except the lower 16 bits that hold part - * of the address and the address valid bit. 
- */ - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); - rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); - rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); - - if (enable_addr != 0) - rar_high |= IXGBE_RAH_AV; - - IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); - IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_clear_rar_generic - Remove Rx address register - * @hw: pointer to hardware structure - * @index: Receive address register to write - * - * Clears an ethernet address from a receive address register. - **/ -s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) -{ - u32 rar_high; - u32 rar_entries = hw->mac.num_rar_entries; - - DEBUGFUNC("ixgbe_clear_rar_generic"); - - /* Make sure we are using a valid rar index range */ - if (index >= rar_entries) { - ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, - "RAR index %d is out of range.\n", index); - return IXGBE_ERR_INVALID_ARGUMENT; - } - - /* - * Some parts put the VMDq setting in the extra RAH bits, - * so save everything except the lower 16 bits that hold part - * of the address and the address valid bit. - */ - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); - rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); - - IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); - IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); - - /* clear VMDq pool/queue selection for this RAR */ - hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_init_rx_addrs_generic - Initializes receive address filters. - * @hw: pointer to hardware structure - * - * Places the MAC address in receive address register 0 and clears the rest - * of the receive address registers. Clears the multicast table. Assumes - * the receiver is in reset when the routine is called. 
- **/ -s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) -{ - u32 i; - u32 rar_entries = hw->mac.num_rar_entries; - - DEBUGFUNC("ixgbe_init_rx_addrs_generic"); - - /* - * If the current mac address is valid, assume it is a software override - * to the permanent address. - * Otherwise, use the permanent address from the eeprom. - */ - if (ixgbe_validate_mac_addr(hw->mac.addr) == - IXGBE_ERR_INVALID_MAC_ADDR) { - /* Get the MAC address from the RAR0 for later reference */ - hw->mac.ops.get_mac_addr(hw, hw->mac.addr); - - DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ", - hw->mac.addr[0], hw->mac.addr[1], - hw->mac.addr[2]); - DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], - hw->mac.addr[4], hw->mac.addr[5]); - } else { - /* Setup the receive address. */ - DEBUGOUT("Overriding MAC Address in RAR[0]\n"); - DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ", - hw->mac.addr[0], hw->mac.addr[1], - hw->mac.addr[2]); - DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], - hw->mac.addr[4], hw->mac.addr[5]); - - hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); - } - - /* clear VMDq pool/queue selection for RAR 0 */ - hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); - - hw->addr_ctrl.overflow_promisc = 0; - - hw->addr_ctrl.rar_used_count = 1; - - /* Zero out the other receive addresses. */ - DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1); - for (i = 1; i < rar_entries; i++) { - IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); - } - - /* Clear the MTA */ - hw->addr_ctrl.mta_in_use = 0; - IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); - - DEBUGOUT(" Clearing MTA\n"); - for (i = 0; i < hw->mac.mcft_size; i++) - IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); - - ixgbe_init_uta_tables(hw); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_add_uc_addr - Adds a secondary unicast address. - * @hw: pointer to hardware structure - * @addr: new address - * - * Adds it to unused receive address register or goes into promiscuous mode. 
- **/ -void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) -{ - u32 rar_entries = hw->mac.num_rar_entries; - u32 rar; - - DEBUGFUNC("ixgbe_add_uc_addr"); - - DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); - - /* - * Place this address in the RAR if there is room, - * else put the controller into promiscuous mode - */ - if (hw->addr_ctrl.rar_used_count < rar_entries) { - rar = hw->addr_ctrl.rar_used_count; - hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); - DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); - hw->addr_ctrl.rar_used_count++; - } else { - hw->addr_ctrl.overflow_promisc++; - } - - DEBUGOUT("ixgbe_add_uc_addr Complete\n"); -} - -/** - * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses - * @hw: pointer to hardware structure - * @addr_list: the list of new addresses - * @addr_count: number of addresses - * @next: iterator function to walk the address list - * - * The given list replaces any existing list. Clears the secondary addrs from - * receive address registers. Uses unused receive address registers for the - * first secondary addresses, and falls back to promiscuous mode as needed. - * - * Drivers using secondary unicast addresses must set user_set_promisc when - * manually putting the device into promiscuous mode. 
- **/ -s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, - u32 addr_count, ixgbe_mc_addr_itr next) -{ - u8 *addr; - u32 i; - u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; - u32 uc_addr_in_use; - u32 fctrl; - u32 vmdq; - - DEBUGFUNC("ixgbe_update_uc_addr_list_generic"); - - /* - * Clear accounting of old secondary address list, - * don't count RAR[0] - */ - uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; - hw->addr_ctrl.rar_used_count -= uc_addr_in_use; - hw->addr_ctrl.overflow_promisc = 0; - - /* Zero out the other receive addresses */ - DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1); - for (i = 0; i < uc_addr_in_use; i++) { - IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0); - IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0); - } - - /* Add the new addresses */ - for (i = 0; i < addr_count; i++) { - DEBUGOUT(" Adding the secondary addresses:\n"); - addr = next(hw, &addr_list, &vmdq); - ixgbe_add_uc_addr(hw, addr, vmdq); - } - - if (hw->addr_ctrl.overflow_promisc) { - /* enable promisc if not already in overflow or set by user */ - if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { - DEBUGOUT(" Entering address overflow promisc mode\n"); - fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - fctrl |= IXGBE_FCTRL_UPE; - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - } - } else { - /* only disable if set by overflow, not by user */ - if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { - DEBUGOUT(" Leaving address overflow promisc mode\n"); - fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - fctrl &= ~IXGBE_FCTRL_UPE; - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - } - } - - DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n"); - return IXGBE_SUCCESS; -} - -/** - * ixgbe_mta_vector - Determines bit-vector in multicast table to set - * @hw: pointer to hardware structure - * @mc_addr: the multicast address - * - * Extracts the 12 bits, from a multicast address, to determine which - * bit-vector to set in the multicast table. 
The hardware uses 12 bits, from - * incoming rx multicast addresses, to determine the bit-vector to check in - * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set - * by the MO field of the MCSTCTRL. The MO field is set during initialization - * to mc_filter_type. - **/ -STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) -{ - u32 vector = 0; - - DEBUGFUNC("ixgbe_mta_vector"); - - switch (hw->mac.mc_filter_type) { - case 0: /* use bits [47:36] of the address */ - vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); - break; - case 1: /* use bits [46:35] of the address */ - vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); - break; - case 2: /* use bits [45:34] of the address */ - vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); - break; - case 3: /* use bits [43:32] of the address */ - vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); - break; - default: /* Invalid mc_filter_type */ - DEBUGOUT("MC filter type param set incorrectly\n"); - ASSERT(0); - break; - } - - /* vector can only be 12-bits or boundary will be exceeded */ - vector &= 0xFFF; - return vector; -} - -/** - * ixgbe_set_mta - Set bit-vector in multicast table - * @hw: pointer to hardware structure - * @hash_value: Multicast address hash value - * - * Sets the bit-vector in the multicast table. - **/ -void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) -{ - u32 vector; - u32 vector_bit; - u32 vector_reg; - - DEBUGFUNC("ixgbe_set_mta"); - - hw->addr_ctrl.mta_in_use++; - - vector = ixgbe_mta_vector(hw, mc_addr); - DEBUGOUT1(" bit-vector = 0x%03X\n", vector); - - /* - * The MTA is a register array of 128 32-bit registers. It is treated - * like an array of 4096 bits. We want to set bit - * BitArray[vector_value]. So we figure out what register the bit is - * in, read it, OR in the new bit, then write back the new value. 
The - * register is determined by the upper 7 bits of the vector value and - * the bit within that register are determined by the lower 5 bits of - * the value. - */ - vector_reg = (vector >> 5) & 0x7F; - vector_bit = vector & 0x1F; - hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); -} - -/** - * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses - * @hw: pointer to hardware structure - * @mc_addr_list: the list of new multicast addresses - * @mc_addr_count: number of addresses - * @next: iterator function to walk the multicast address list - * @clear: flag, when set clears the table beforehand - * - * When the clear flag is set, the given list replaces any existing list. - * Hashes the given addresses into the multicast table. - **/ -s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, ixgbe_mc_addr_itr next, - bool clear) -{ - u32 i; - u32 vmdq; - - DEBUGFUNC("ixgbe_update_mc_addr_list_generic"); - - /* - * Set the new number of MC addresses that we are being requested to - * use. 
- */ - hw->addr_ctrl.num_mc_addrs = mc_addr_count; - hw->addr_ctrl.mta_in_use = 0; - - /* Clear mta_shadow */ - if (clear) { - DEBUGOUT(" Clearing MTA\n"); - memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); - } - - /* Update mta_shadow */ - for (i = 0; i < mc_addr_count; i++) { - DEBUGOUT(" Adding the multicast addresses:\n"); - ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); - } - - /* Enable mta */ - for (i = 0; i < hw->mac.mcft_size; i++) - IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, - hw->mac.mta_shadow[i]); - - if (hw->addr_ctrl.mta_in_use > 0) - IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, - IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); - - DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n"); - return IXGBE_SUCCESS; -} - -/** - * ixgbe_enable_mc_generic - Enable multicast address in RAR - * @hw: pointer to hardware structure - * - * Enables multicast address in RAR and the use of the multicast hash table. - **/ -s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) -{ - struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; - - DEBUGFUNC("ixgbe_enable_mc_generic"); - - if (a->mta_in_use > 0) - IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | - hw->mac.mc_filter_type); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_disable_mc_generic - Disable multicast address in RAR - * @hw: pointer to hardware structure - * - * Disables multicast address in RAR and the use of the multicast hash table. - **/ -s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) -{ - struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; - - DEBUGFUNC("ixgbe_disable_mc_generic"); - - if (a->mta_in_use > 0) - IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_fc_enable_generic - Enable flow control - * @hw: pointer to hardware structure - * - * Enable flow control according to the current settings. 
- **/ -s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_SUCCESS; - u32 mflcn_reg, fccfg_reg; - u32 reg; - u32 fcrtl, fcrth; - int i; - - DEBUGFUNC("ixgbe_fc_enable_generic"); - - /* Validate the water mark configuration */ - if (!hw->fc.pause_time) { - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; - } - - /* Low water mark of zero causes XOFF floods */ - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && - hw->fc.high_water[i]) { - if (!hw->fc.low_water[i] || - hw->fc.low_water[i] >= hw->fc.high_water[i]) { - DEBUGOUT("Invalid water mark configuration\n"); - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; - } - } - } - - /* Negotiate the fc mode to use */ - hw->mac.ops.fc_autoneg(hw); - - /* Disable any previous flow control settings */ - mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); - mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); - - fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); - fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); - - /* - * The possible values of fc.current_mode are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames, - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but - * we do not support receiving pause frames). - * 3: Both Rx and Tx flow control (symmetric) are enabled. - * other: Invalid. - */ - switch (hw->fc.current_mode) { - case ixgbe_fc_none: - /* - * Flow control is disabled by software override or autoneg. - * The code below will actually disable it in the HW. - */ - break; - case ixgbe_fc_rx_pause: - /* - * Rx Flow control is enabled and Tx Flow control is - * disabled by software override. Since there really - * isn't a way to advertise that we are capable of RX - * Pause ONLY, we will advertise that we support both - * symmetric and asymmetric Rx PAUSE. 
Later, we will - * disable the adapter's ability to send PAUSE frames. - */ - mflcn_reg |= IXGBE_MFLCN_RFCE; - break; - case ixgbe_fc_tx_pause: - /* - * Tx Flow control is enabled, and Rx Flow control is - * disabled by software override. - */ - fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; - break; - case ixgbe_fc_full: - /* Flow control (both Rx and Tx) is enabled by SW override. */ - mflcn_reg |= IXGBE_MFLCN_RFCE; - fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; - break; - default: - ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, - "Flow control param set incorrectly\n"); - ret_val = IXGBE_ERR_CONFIG; - goto out; - break; - } - - /* Set 802.3x based flow control settings. */ - mflcn_reg |= IXGBE_MFLCN_DPF; - IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); - IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); - - - /* Set up and enable Rx high/low water mark thresholds, enable XON. */ - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && - hw->fc.high_water[i]) { - fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); - fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; - } else { - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); - /* - * In order to prevent Tx hangs when the internal Tx - * switch is enabled we must set the high water mark - * to the Rx packet buffer size - 24KB. This allows - * the Tx switch to function even under heavy Rx - * workloads. 
- */ - fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; - } - - IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); - } - - /* Configure pause time (2 TCs per register) */ - reg = hw->fc.pause_time * 0x00010001; - for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); - - /* Configure flow control refresh threshold value */ - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - -out: - return ret_val; -} - -/** - * ixgbe_negotiate_fc - Negotiate flow control - * @hw: pointer to hardware structure - * @adv_reg: flow control advertised settings - * @lp_reg: link partner's flow control settings - * @adv_sym: symmetric pause bit in advertisement - * @adv_asm: asymmetric pause bit in advertisement - * @lp_sym: symmetric pause bit in link partner advertisement - * @lp_asm: asymmetric pause bit in link partner advertisement - * - * Find the intersection between advertised settings and link partner's - * advertised settings - **/ -s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, - u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) -{ - if ((!(adv_reg)) || (!(lp_reg))) { - ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED, - "Local or link partner's advertised flow control " - "settings are NULL. Local: %x, link partner: %x\n", - adv_reg, lp_reg); - return IXGBE_ERR_FC_NOT_NEGOTIATED; - } - - if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { - /* - * Now we need to check if the user selected Rx ONLY - * of pause frames. In this case, we had to advertise - * FULL flow control because we could not advertise RX - * ONLY. Hence, we must now check to see if we need to - * turn OFF the TRANSMISSION of PAUSE frames. 
- */ - if (hw->fc.requested_mode == ixgbe_fc_full) { - hw->fc.current_mode = ixgbe_fc_full; - DEBUGOUT("Flow Control = FULL.\n"); - } else { - hw->fc.current_mode = ixgbe_fc_rx_pause; - DEBUGOUT("Flow Control=RX PAUSE frames only\n"); - } - } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && - (lp_reg & lp_sym) && (lp_reg & lp_asm)) { - hw->fc.current_mode = ixgbe_fc_tx_pause; - DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); - } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && - !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { - hw->fc.current_mode = ixgbe_fc_rx_pause; - DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); - } else { - hw->fc.current_mode = ixgbe_fc_none; - DEBUGOUT("Flow Control = NONE.\n"); - } - return IXGBE_SUCCESS; -} - -/** - * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber - * @hw: pointer to hardware structure - * - * Enable flow control according on 1 gig fiber. - **/ -STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) -{ - u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; - s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; - - /* - * On multispeed fiber at 1g, bail out if - * - link is up but AN did not complete, or if - * - link is up and AN completed but timed out - */ - - linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); - if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || - (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { - DEBUGOUT("Auto-Negotiation did not complete or timed out\n"); - goto out; - } - - pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); - - ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, - pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, - IXGBE_PCS1GANA_ASM_PAUSE, - IXGBE_PCS1GANA_SYM_PAUSE, - IXGBE_PCS1GANA_ASM_PAUSE); - -out: - return ret_val; -} - -/** - * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 - * @hw: pointer to hardware structure - * - * Enable flow control according to IEEE clause 37. 
- **/ -STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) -{ - u32 links2, anlp1_reg, autoc_reg, links; - s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; - - /* - * On backplane, bail out if - * - backplane autoneg was not completed, or if - * - we are 82599 and link partner is not AN enabled - */ - links = IXGBE_READ_REG(hw, IXGBE_LINKS); - if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { - DEBUGOUT("Auto-Negotiation did not complete\n"); - goto out; - } - - if (hw->mac.type == ixgbe_mac_82599EB) { - links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); - if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { - DEBUGOUT("Link partner is not AN enabled\n"); - goto out; - } - } - /* - * Read the 10g AN autoc and LP ability registers and resolve - * local flow control settings accordingly - */ - autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); - - ret_val = ixgbe_negotiate_fc(hw, autoc_reg, - anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, - IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); - -out: - return ret_val; -} - -/** - * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 - * @hw: pointer to hardware structure - * - * Enable flow control according to IEEE clause 37. 
- **/ -STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) -{ - u16 technology_ability_reg = 0; - u16 lp_technology_ability_reg = 0; - - hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &technology_ability_reg); - hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &lp_technology_ability_reg); - - return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, - (u32)lp_technology_ability_reg, - IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, - IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); -} - -/** - * ixgbe_fc_autoneg - Configure flow control - * @hw: pointer to hardware structure - * - * Compares our advertised flow control capabilities to those advertised by - * our link partner, and determines the proper flow control mode to use. - **/ -void ixgbe_fc_autoneg(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; - ixgbe_link_speed speed; - bool link_up; - - DEBUGFUNC("ixgbe_fc_autoneg"); - - /* - * AN should have completed when the cable was plugged in. - * Look for reasons to bail out. Bail out if: - * - FC autoneg is disabled, or if - * - link is not up. 
- */ - if (hw->fc.disable_fc_autoneg) { - ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, - "Flow control autoneg is disabled"); - goto out; - } - - hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (!link_up) { - ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); - goto out; - } - - switch (hw->phy.media_type) { - /* Autoneg flow control on fiber adapters */ - case ixgbe_media_type_fiber_qsfp: - case ixgbe_media_type_fiber: - if (speed == IXGBE_LINK_SPEED_1GB_FULL) - ret_val = ixgbe_fc_autoneg_fiber(hw); - break; - - /* Autoneg flow control on backplane adapters */ - case ixgbe_media_type_backplane: - ret_val = ixgbe_fc_autoneg_backplane(hw); - break; - - /* Autoneg flow control on copper adapters */ - case ixgbe_media_type_copper: - if (ixgbe_device_supports_autoneg_fc(hw)) - ret_val = ixgbe_fc_autoneg_copper(hw); - break; - - default: - break; - } - -out: - if (ret_val == IXGBE_SUCCESS) { - hw->fc.fc_was_autonegged = true; - } else { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; - } -} - -/* - * ixgbe_pcie_timeout_poll - Return number of times to poll for completion - * @hw: pointer to hardware structure - * - * System-wide timeout range is encoded in PCIe Device Control2 register. - * - * Add 10% to specified maximum and return the number of times to poll for - * completion timeout, in units of 100 microsec. Never return less than - * 800 = 80 millisec. 
- */ -STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) -{ - s16 devctl2; - u32 pollcnt; - - devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); - devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; - - switch (devctl2) { - case IXGBE_PCIDEVCTRL2_65_130ms: - pollcnt = 1300; /* 130 millisec */ - break; - case IXGBE_PCIDEVCTRL2_260_520ms: - pollcnt = 5200; /* 520 millisec */ - break; - case IXGBE_PCIDEVCTRL2_1_2s: - pollcnt = 20000; /* 2 sec */ - break; - case IXGBE_PCIDEVCTRL2_4_8s: - pollcnt = 80000; /* 8 sec */ - break; - case IXGBE_PCIDEVCTRL2_17_34s: - pollcnt = 34000; /* 34 sec */ - break; - case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ - case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ - case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ - case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ - default: - pollcnt = 800; /* 80 millisec minimum */ - break; - } - - /* add 10% to spec maximum */ - return (pollcnt * 11) / 10; -} - -/** - * ixgbe_disable_pcie_master - Disable PCI-express master access - * @hw: pointer to hardware structure - * - * Disables PCI-Express master access and verifies there are no pending - * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable - * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS - * is returned signifying master requests disabled. 
- **/ -s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_SUCCESS; - u32 i, poll; - u16 value; - - DEBUGFUNC("ixgbe_disable_pcie_master"); - - /* Always set this bit to ensure any future transactions are blocked */ - IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); - - /* Exit if master requests are blocked */ - if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || - IXGBE_REMOVED(hw->hw_addr)) - goto out; - - /* Poll for master request bit to clear */ - for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { - usec_delay(100); - if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) - goto out; - } - - /* - * Two consecutive resets are required via CTRL.RST per datasheet - * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine - * of this need. The first reset prevents new master requests from - * being issued by our device. We then must wait 1usec or more for any - * remaining completions from the PCIe bus to trickle in, and then reset - * again to clear out any effects they may have had on our device. - */ - DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n"); - hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; - - if (hw->mac.type >= ixgbe_mac_X550) - goto out; - - /* - * Before proceeding, make sure that the PCIe block does not have - * transactions pending. 
- */ - poll = ixgbe_pcie_timeout_poll(hw); - for (i = 0; i < poll; i++) { - usec_delay(100); - value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); - if (IXGBE_REMOVED(hw->hw_addr)) - goto out; - if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) - goto out; - } - - ERROR_REPORT1(IXGBE_ERROR_POLLING, - "PCIe transaction pending bit also did not clear.\n"); - status = IXGBE_ERR_MASTER_REQUESTS_PENDING; - -out: - return status; -} - -/** - * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to acquire - * - * Acquires the SWFW semaphore through the GSSR register for the specified - * function (CSR, PHY0, PHY1, EEPROM, Flash) - **/ -s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) -{ - u32 gssr = 0; - u32 swmask = mask; - u32 fwmask = mask << 5; - u32 timeout = 200; - u32 i; - - DEBUGFUNC("ixgbe_acquire_swfw_sync"); - - for (i = 0; i < timeout; i++) { - /* - * SW NVM semaphore bit is used for access to all - * SW_FW_SYNC bits (not just NVM) - */ - if (ixgbe_get_eeprom_semaphore(hw)) - return IXGBE_ERR_SWFW_SYNC; - - gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); - if (!(gssr & (fwmask | swmask))) { - gssr |= swmask; - IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); - ixgbe_release_eeprom_semaphore(hw); - return IXGBE_SUCCESS; - } else { - /* Resource is currently in use by FW or SW */ - ixgbe_release_eeprom_semaphore(hw); - msec_delay(5); - } - } - - /* If time expired clear the bits holding the lock and retry */ - if (gssr & (fwmask | swmask)) - ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); - - msec_delay(5); - return IXGBE_ERR_SWFW_SYNC; -} - -/** - * ixgbe_release_swfw_sync - Release SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to release - * - * Releases the SWFW semaphore through the GSSR register for the specified - * function (CSR, PHY0, PHY1, EEPROM, Flash) - **/ -void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, 
u32 mask) -{ - u32 gssr; - u32 swmask = mask; - - DEBUGFUNC("ixgbe_release_swfw_sync"); - - ixgbe_get_eeprom_semaphore(hw); - - gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); - gssr &= ~swmask; - IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); - - ixgbe_release_eeprom_semaphore(hw); -} - -/** - * ixgbe_disable_sec_rx_path_generic - Stops the receive data path - * @hw: pointer to hardware structure - * - * Stops the receive data path and waits for the HW to internally empty - * the Rx security block - **/ -s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw) -{ -#define IXGBE_MAX_SECRX_POLL 40 - - int i; - int secrxreg; - - DEBUGFUNC("ixgbe_disable_sec_rx_path_generic"); - - secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); - secrxreg |= IXGBE_SECRXCTRL_RX_DIS; - IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); - for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { - secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); - if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) - break; - else - /* Use interrupt-safe sleep just in case */ - usec_delay(1000); - } - - /* For informational purposes only */ - if (i >= IXGBE_MAX_SECRX_POLL) - DEBUGOUT("Rx unit being enabled before security " - "path fully disabled. Continuing with init.\n"); - - return IXGBE_SUCCESS; -} - -/** - * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read - * @hw: pointer to hardware structure - * @reg_val: Value we read from AUTOC - * - * The default case requires no protection so just to the register read. - */ -s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) -{ - *locked = false; - *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); - return IXGBE_SUCCESS; -} - -/** - * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write - * @hw: pointer to hardware structure - * @reg_val: value to write to AUTOC - * @locked: bool to indicate whether the SW/FW lock was already taken by - * previous read. - * - * The default case requires no protection so just to the register write. 
- */ -s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) -{ - UNREFERENCED_1PARAMETER(locked); - - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); - return IXGBE_SUCCESS; -} - -/** - * ixgbe_enable_sec_rx_path_generic - Enables the receive data path - * @hw: pointer to hardware structure - * - * Enables the receive data path. - **/ -s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw) -{ - u32 secrxreg; - - DEBUGFUNC("ixgbe_enable_sec_rx_path_generic"); - - secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); - secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; - IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); - IXGBE_WRITE_FLUSH(hw); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit - * @hw: pointer to hardware structure - * @regval: register value to write to RXCTRL - * - * Enables the Rx DMA unit - **/ -s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) -{ - DEBUGFUNC("ixgbe_enable_rx_dma_generic"); - - if (regval & IXGBE_RXCTRL_RXEN) - ixgbe_enable_rx(hw); - else - ixgbe_disable_rx(hw); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_blink_led_start_generic - Blink LED based on index. - * @hw: pointer to hardware structure - * @index: led number to blink - **/ -s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) -{ - ixgbe_link_speed speed = 0; - bool link_up = 0; - u32 autoc_reg = 0; - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - s32 ret_val = IXGBE_SUCCESS; - bool locked = false; - - DEBUGFUNC("ixgbe_blink_led_start_generic"); - - if (index > 3) - return IXGBE_ERR_PARAM; - - /* - * Link must be up to auto-blink the LEDs; - * Force it if link is down. 
- */ - hw->mac.ops.check_link(hw, &speed, &link_up, false); - - if (!link_up) { - ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); - if (ret_val != IXGBE_SUCCESS) - goto out; - - autoc_reg |= IXGBE_AUTOC_AN_RESTART; - autoc_reg |= IXGBE_AUTOC_FLU; - - ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); - if (ret_val != IXGBE_SUCCESS) - goto out; - - IXGBE_WRITE_FLUSH(hw); - msec_delay(10); - } - - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg |= IXGBE_LED_BLINK(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - -out: - return ret_val; -} - -/** - * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. - * @hw: pointer to hardware structure - * @index: led number to stop blinking - **/ -s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) -{ - u32 autoc_reg = 0; - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - s32 ret_val = IXGBE_SUCCESS; - bool locked = false; - - DEBUGFUNC("ixgbe_blink_led_stop_generic"); - - if (index > 3) - return IXGBE_ERR_PARAM; - - ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); - if (ret_val != IXGBE_SUCCESS) - goto out; - - autoc_reg &= ~IXGBE_AUTOC_FLU; - autoc_reg |= IXGBE_AUTOC_AN_RESTART; - - ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); - if (ret_val != IXGBE_SUCCESS) - goto out; - - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg &= ~IXGBE_LED_BLINK(index); - led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - -out: - return ret_val; -} - -/** - * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM - * @hw: pointer to hardware structure - * @san_mac_offset: SAN MAC address offset - * - * This function will read the EEPROM location for the SAN MAC address - * pointer, and returns the value at that location. This is used in both - * get and set mac_addr routines. 
- **/ -STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, - u16 *san_mac_offset) -{ - s32 ret_val; - - DEBUGFUNC("ixgbe_get_san_mac_addr_offset"); - - /* - * First read the EEPROM pointer to see if the MAC addresses are - * available. - */ - ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, - san_mac_offset); - if (ret_val) { - ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, - "eeprom at offset %d failed", - IXGBE_SAN_MAC_ADDR_PTR); - } - - return ret_val; -} - -/** - * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM - * @hw: pointer to hardware structure - * @san_mac_addr: SAN MAC address - * - * Reads the SAN MAC address from the EEPROM, if it's available. This is - * per-port, so set_lan_id() must be called before reading the addresses. - * set_lan_id() is called by identify_sfp(), but this cannot be relied - * upon for non-SFP connections, so we must call it here. - **/ -s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) -{ - u16 san_mac_data, san_mac_offset; - u8 i; - s32 ret_val; - - DEBUGFUNC("ixgbe_get_san_mac_addr_generic"); - - /* - * First read the EEPROM pointer to see if the MAC addresses are - * available. If they're not, no point in calling set_lan_id() here. - */ - ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); - if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) - goto san_mac_addr_out; - - /* make sure we know which port we need to program */ - hw->mac.ops.set_lan_id(hw); - /* apply the port offset to the address offset */ - (hw->bus.func) ? 
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : - (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); - for (i = 0; i < 3; i++) { - ret_val = hw->eeprom.ops.read(hw, san_mac_offset, - &san_mac_data); - if (ret_val) { - ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, - "eeprom read at offset %d failed", - san_mac_offset); - goto san_mac_addr_out; - } - san_mac_addr[i * 2] = (u8)(san_mac_data); - san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); - san_mac_offset++; - } - return IXGBE_SUCCESS; - -san_mac_addr_out: - /* - * No addresses available in this EEPROM. It's not an - * error though, so just wipe the local address and return. - */ - for (i = 0; i < 6; i++) - san_mac_addr[i] = 0xFF; - return IXGBE_SUCCESS; -} - -/** - * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM - * @hw: pointer to hardware structure - * @san_mac_addr: SAN MAC address - * - * Write a SAN MAC address to the EEPROM. - **/ -s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) -{ - s32 ret_val; - u16 san_mac_data, san_mac_offset; - u8 i; - - DEBUGFUNC("ixgbe_set_san_mac_addr_generic"); - - /* Look for SAN mac address pointer. If not defined, return */ - ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); - if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) - return IXGBE_ERR_NO_SAN_ADDR_PTR; - - /* Make sure we know which port we need to write */ - hw->mac.ops.set_lan_id(hw); - /* Apply the port offset to the address offset */ - (hw->bus.func) ? 
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : - (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); - - for (i = 0; i < 3; i++) { - san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); - san_mac_data |= (u16)(san_mac_addr[i * 2]); - hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data); - san_mac_offset++; - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count - * @hw: pointer to hardware structure - * - * Read PCIe configuration space, and get the MSI-X vector count from - * the capabilities table. - **/ -u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) -{ - u16 msix_count = 1; - u16 max_msix_count; - u16 pcie_offset; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS; - max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598; - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; - max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; - break; - default: - return msix_count; - } - - DEBUGFUNC("ixgbe_get_pcie_msix_count_generic"); - msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset); - if (IXGBE_REMOVED(hw->hw_addr)) - msix_count = 0; - msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; - - /* MSI-X count is zero-based in HW */ - msix_count++; - - if (msix_count > max_msix_count) - msix_count = max_msix_count; - - return msix_count; -} - -/** - * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address - * @hw: pointer to hardware structure - * @addr: Address to put into receive address register - * @vmdq: VMDq pool to assign - * - * Puts an ethernet address into a receive address register, or - * finds the rar that it is aleady in; adds to the pool list - **/ -s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) -{ - static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; - u32 first_empty_rar = NO_EMPTY_RAR_FOUND; - u32 
rar; - u32 rar_low, rar_high; - u32 addr_low, addr_high; - - DEBUGFUNC("ixgbe_insert_mac_addr_generic"); - - /* swap bytes for HW little endian */ - addr_low = addr[0] | (addr[1] << 8) - | (addr[2] << 16) - | (addr[3] << 24); - addr_high = addr[4] | (addr[5] << 8); - - /* - * Either find the mac_id in rar or find the first empty space. - * rar_highwater points to just after the highest currently used - * rar in order to shorten the search. It grows when we add a new - * rar to the top. - */ - for (rar = 0; rar < hw->mac.rar_highwater; rar++) { - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); - - if (((IXGBE_RAH_AV & rar_high) == 0) - && first_empty_rar == NO_EMPTY_RAR_FOUND) { - first_empty_rar = rar; - } else if ((rar_high & 0xFFFF) == addr_high) { - rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar)); - if (rar_low == addr_low) - break; /* found it already in the rars */ - } - } - - if (rar < hw->mac.rar_highwater) { - /* already there so just add to the pool bits */ - ixgbe_set_vmdq(hw, rar, vmdq); - } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { - /* stick it into first empty RAR slot we found */ - rar = first_empty_rar; - ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); - } else if (rar == hw->mac.rar_highwater) { - /* add it to the top of the list and inc the highwater mark */ - ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); - hw->mac.rar_highwater++; - } else if (rar >= hw->mac.num_rar_entries) { - return IXGBE_ERR_INVALID_MAC_ADDR; - } - - /* - * If we found rar[0], make sure the default pool bit (we use pool 0) - * remains cleared to be sure default pool packets will get delivered - */ - if (rar == 0) - ixgbe_clear_vmdq(hw, rar, 0); - - return rar; -} - -/** - * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address - * @hw: pointer to hardware struct - * @rar: receive address register index to disassociate - * @vmdq: VMDq pool index to remove from the rar - **/ -s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) -{ 
- u32 mpsar_lo, mpsar_hi; - u32 rar_entries = hw->mac.num_rar_entries; - - DEBUGFUNC("ixgbe_clear_vmdq_generic"); - - /* Make sure we are using a valid rar index range */ - if (rar >= rar_entries) { - ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, - "RAR index %d is out of range.\n", rar); - return IXGBE_ERR_INVALID_ARGUMENT; - } - - mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); - mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); - - if (IXGBE_REMOVED(hw->hw_addr)) - goto done; - - if (!mpsar_lo && !mpsar_hi) - goto done; - - if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { - if (mpsar_lo) { - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); - mpsar_lo = 0; - } - if (mpsar_hi) { - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); - mpsar_hi = 0; - } - } else if (vmdq < 32) { - mpsar_lo &= ~(1 << vmdq); - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); - } else { - mpsar_hi &= ~(1 << (vmdq - 32)); - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); - } - - /* was that the last pool using this rar? */ - if (mpsar_lo == 0 && mpsar_hi == 0 && - rar != 0 && rar != hw->mac.san_mac_rar_index) - hw->mac.ops.clear_rar(hw, rar); -done: - return IXGBE_SUCCESS; -} - -/** - * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address - * @hw: pointer to hardware struct - * @rar: receive address register index to associate with a VMDq index - * @vmdq: VMDq pool index - **/ -s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) -{ - u32 mpsar; - u32 rar_entries = hw->mac.num_rar_entries; - - DEBUGFUNC("ixgbe_set_vmdq_generic"); - - /* Make sure we are using a valid rar index range */ - if (rar >= rar_entries) { - ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, - "RAR index %d is out of range.\n", rar); - return IXGBE_ERR_INVALID_ARGUMENT; - } - - if (vmdq < 32) { - mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); - mpsar |= 1 << vmdq; - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); - } else { - mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); - mpsar |= 1 << (vmdq - 32); - 
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); - } - return IXGBE_SUCCESS; -} - -/** - * This function should only be involved in the IOV mode. - * In IOV mode, Default pool is next pool after the number of - * VFs advertized and not 0. - * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] - * - * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address - * @hw: pointer to hardware struct - * @vmdq: VMDq pool index - **/ -s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) -{ - u32 rar = hw->mac.san_mac_rar_index; - - DEBUGFUNC("ixgbe_set_vmdq_san_mac"); - - if (vmdq < 32) { - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); - } else { - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array - * @hw: pointer to hardware structure - **/ -s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) -{ - int i; - - DEBUGFUNC("ixgbe_init_uta_tables_generic"); - DEBUGOUT(" Clearing UTA\n"); - - for (i = 0; i < 128; i++) - IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot - * @hw: pointer to hardware structure - * @vlan: VLAN id to write to VLAN filter - * - * return the VLVF index where this VLAN id should be placed - * - **/ -s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) -{ - s32 regindex, first_empty_slot; - u32 bits; - - /* short cut the special case */ - if (vlan == 0) - return 0; - - /* if vlvf_bypass is set we don't want to use an empty slot, we - * will simply bypass the VLVF if there are no entries present in the - * VLVF that contain our VLAN - */ - first_empty_slot = vlvf_bypass ? 
IXGBE_ERR_NO_SPACE : 0; - - /* add VLAN enable bit for comparison */ - vlan |= IXGBE_VLVF_VIEN; - - /* Search for the vlan id in the VLVF entries. Save off the first empty - * slot found along the way. - * - * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1 - */ - for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) { - bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); - if (bits == vlan) - return regindex; - if (!first_empty_slot && !bits) - first_empty_slot = regindex; - } - - /* If we are here then we didn't find the VLAN. Return first empty - * slot we found during our search, else error. - */ - if (!first_empty_slot) - ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n"); - - return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE; -} - -/** - * ixgbe_set_vfta_generic - Set VLAN filter table - * @hw: pointer to hardware structure - * @vlan: VLAN id to write to VLAN filter - * @vind: VMDq output index that maps queue to VLAN id in VLVFB - * @vlan_on: boolean flag to turn on/off VLAN - * @vlvf_bypass: boolean flag indicating updating default pool is okay - * - * Turn on/off specified VLAN in the VLAN filter table. - **/ -s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on, bool vlvf_bypass) -{ - u32 regidx, vfta_delta, vfta; - s32 ret_val; - - DEBUGFUNC("ixgbe_set_vfta_generic"); - - if (vlan > 4095 || vind > 63) - return IXGBE_ERR_PARAM; - - /* - * this is a 2 part operation - first the VFTA, then the - * VLVF and VLVFB if VT Mode is set - * We don't write the VFTA until we know the VLVF part succeeded. 
- */ - - /* Part 1 - * The VFTA is a bitstring made up of 128 32-bit registers - * that enable the particular VLAN id, much like the MTA: - * bits[11-5]: which register - * bits[4-0]: which bit in the register - */ - regidx = vlan / 32; - vfta_delta = 1 << (vlan % 32); - vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); - - /* - * vfta_delta represents the difference between the current value - * of vfta and the value we want in the register. Since the diff - * is an XOR mask we can just update the vfta using an XOR - */ - vfta_delta &= vlan_on ? ~vfta : vfta; - vfta ^= vfta_delta; - - /* Part 2 - * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF - */ - ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta, - vfta, vlvf_bypass); - if (ret_val != IXGBE_SUCCESS) { - if (vlvf_bypass) - goto vfta_update; - return ret_val; - } - -vfta_update: - /* Update VFTA now that we are ready for traffic */ - if (vfta_delta) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_set_vlvf_generic - Set VLAN Pool Filter - * @hw: pointer to hardware structure - * @vlan: VLAN id to write to VLAN filter - * @vind: VMDq output index that maps queue to VLAN id in VLVFB - * @vlan_on: boolean flag to turn on/off VLAN in VLVF - * @vfta_delta: pointer to the difference between the current value of VFTA - * and the desired value - * @vfta: the desired value of the VFTA - * @vlvf_bypass: boolean flag indicating updating default pool is okay - * - * Turn on/off specified bit in VLVF table. 
- **/ -s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on, u32 *vfta_delta, u32 vfta, - bool vlvf_bypass) -{ - u32 bits; - s32 vlvf_index; - - DEBUGFUNC("ixgbe_set_vlvf_generic"); - - if (vlan > 4095 || vind > 63) - return IXGBE_ERR_PARAM; - - /* If VT Mode is set - * Either vlan_on - * make sure the vlan is in VLVF - * set the vind bit in the matching VLVFB - * Or !vlan_on - * clear the pool bit and possibly the vind - */ - if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) - return IXGBE_SUCCESS; - - vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); - if (vlvf_index < 0) - return vlvf_index; - - bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); - - /* set the pool bit */ - bits |= 1 << (vind % 32); - if (vlan_on) - goto vlvf_update; - - /* clear the pool bit */ - bits ^= 1 << (vind % 32); - - if (!bits && - !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { - /* Clear VFTA first, then disable VLVF. Otherwise - * we run the risk of stray packets leaking into - * the PF via the default pool - */ - if (*vfta_delta) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta); - - /* disable VLVF and clear remaining bit from pool */ - IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); - IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); - - return IXGBE_SUCCESS; - } - - /* If there are still bits set in the VLVFB registers - * for the VLAN ID indicated we need to see if the - * caller is requesting that we clear the VFTA entry bit. - * If the caller has requested that we clear the VFTA - * entry bit but there are still pools/VFs using this VLAN - * ID entry then ignore the request. We're not worried - * about the case where we're turning the VFTA VLAN ID - * entry bit on, only when requested to turn it off as - * there may be multiple pools and/or VFs using the - * VLAN ID entry. 
In that case we cannot clear the - * VFTA bit until all pools/VFs using that VLAN ID have also - * been cleared. This will be indicated by "bits" being - * zero. - */ - *vfta_delta = 0; - -vlvf_update: - /* record pool change and enable VLAN ID if not already enabled */ - IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); - IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_clear_vfta_generic - Clear VLAN filter table - * @hw: pointer to hardware structure - * - * Clears the VLAN filer table, and the VMDq index associated with the filter - **/ -s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) -{ - u32 offset; - - DEBUGFUNC("ixgbe_clear_vfta_generic"); - - for (offset = 0; offset < hw->mac.vft_size; offset++) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); - - for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { - IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); - IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); - IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix - * @hw: pointer to hardware structure - * - * Contains the logic to identify if we need to verify link for the - * crosstalk fix - **/ -static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) -{ - - /* Does FW say we need the fix */ - if (!hw->need_crosstalk_fix) - return false; - - /* Only consider SFP+ PHYs i.e. 
media type fiber */ - switch (hw->mac.ops.get_media_type(hw)) { - case ixgbe_media_type_fiber: - case ixgbe_media_type_fiber_qsfp: - break; - default: - return false; - } - - return true; -} - -/** - * ixgbe_check_mac_link_generic - Determine link and speed status - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @link_up: true when link is up - * @link_up_wait_to_complete: bool used to wait for link up or not - * - * Reads the links register to determine if link is up and the current speed - **/ -s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up, bool link_up_wait_to_complete) -{ - u32 links_reg, links_orig; - u32 i; - - DEBUGFUNC("ixgbe_check_mac_link_generic"); - - /* If Crosstalk fix enabled do the sanity check of making sure - * the SFP+ cage is full. - */ - if (ixgbe_need_crosstalk_fix(hw)) { - u32 sfp_cage_full; - - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & - IXGBE_ESDP_SDP2; - break; - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & - IXGBE_ESDP_SDP0; - break; - default: - /* sanity check - No SFP+ devices here */ - sfp_cage_full = false; - break; - } - - if (!sfp_cage_full) { - *link_up = false; - *speed = IXGBE_LINK_SPEED_UNKNOWN; - return IXGBE_SUCCESS; - } - } - - /* clear the old state */ - links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); - - links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); - - if (links_orig != links_reg) { - DEBUGOUT2("LINKS changed from %08X to %08X\n", - links_orig, links_reg); - } - - if (link_up_wait_to_complete) { - for (i = 0; i < hw->mac.max_link_up_time; i++) { - if (links_reg & IXGBE_LINKS_UP) { - *link_up = true; - break; - } else { - *link_up = false; - } - msec_delay(100); - links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); - } - } else { - if (links_reg & IXGBE_LINKS_UP) - *link_up = true; - else - *link_up = false; - } - - switch (links_reg & 
IXGBE_LINKS_SPEED_82599) { - case IXGBE_LINKS_SPEED_10G_82599: - *speed = IXGBE_LINK_SPEED_10GB_FULL; - if (hw->mac.type >= ixgbe_mac_X550) { - if (links_reg & IXGBE_LINKS_SPEED_NON_STD) - *speed = IXGBE_LINK_SPEED_2_5GB_FULL; - } - break; - case IXGBE_LINKS_SPEED_1G_82599: - *speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - case IXGBE_LINKS_SPEED_100_82599: - *speed = IXGBE_LINK_SPEED_100_FULL; - if (hw->mac.type == ixgbe_mac_X550) { - if (links_reg & IXGBE_LINKS_SPEED_NON_STD) - *speed = IXGBE_LINK_SPEED_5GB_FULL; - } - break; - case IXGBE_LINKS_SPEED_10_X550EM_A: - *speed = IXGBE_LINK_SPEED_UNKNOWN; - if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || - hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { - *speed = IXGBE_LINK_SPEED_10_FULL; - } - break; - default: - *speed = IXGBE_LINK_SPEED_UNKNOWN; - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from - * the EEPROM - * @hw: pointer to hardware structure - * @wwnn_prefix: the alternative WWNN prefix - * @wwpn_prefix: the alternative WWPN prefix - * - * This function will read the EEPROM from the alternative SAN MAC address - * block to check the support for the alternative WWNN/WWPN prefix support. 
- **/ -s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, - u16 *wwpn_prefix) -{ - u16 offset, caps; - u16 alt_san_mac_blk_offset; - - DEBUGFUNC("ixgbe_get_wwn_prefix_generic"); - - /* clear output first */ - *wwnn_prefix = 0xFFFF; - *wwpn_prefix = 0xFFFF; - - /* check if alternative SAN MAC is supported */ - offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR; - if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset)) - goto wwn_prefix_err; - - if ((alt_san_mac_blk_offset == 0) || - (alt_san_mac_blk_offset == 0xFFFF)) - goto wwn_prefix_out; - - /* check capability in alternative san mac address block */ - offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; - if (hw->eeprom.ops.read(hw, offset, &caps)) - goto wwn_prefix_err; - if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) - goto wwn_prefix_out; - - /* get the corresponding prefix for WWNN/WWPN */ - offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; - if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) { - ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, - "eeprom read at offset %d failed", offset); - } - - offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; - if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) - goto wwn_prefix_err; - -wwn_prefix_out: - return IXGBE_SUCCESS; - -wwn_prefix_err: - ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, - "eeprom read at offset %d failed", offset); - return IXGBE_SUCCESS; -} - -/** - * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM - * @hw: pointer to hardware structure - * @bs: the fcoe boot status - * - * This function will read the FCOE boot status from the iSCSI FCOE block - **/ -s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs) -{ - u16 offset, caps, flags; - s32 status; - - DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic"); - - /* clear output first */ - *bs = ixgbe_fcoe_bootstatus_unavailable; - - /* check if FCOE IBA block is present */ - offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR; 
- status = hw->eeprom.ops.read(hw, offset, &caps); - if (status != IXGBE_SUCCESS) - goto out; - - if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE)) - goto out; - - /* check if iSCSI FCOE block is populated */ - status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset); - if (status != IXGBE_SUCCESS) - goto out; - - if ((offset == 0) || (offset == 0xFFFF)) - goto out; - - /* read fcoe flags in iSCSI FCOE block */ - offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET; - status = hw->eeprom.ops.read(hw, offset, &flags); - if (status != IXGBE_SUCCESS) - goto out; - - if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE) - *bs = ixgbe_fcoe_bootstatus_enabled; - else - *bs = ixgbe_fcoe_bootstatus_disabled; - -out: - return status; -} - -/** - * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing - * @hw: pointer to hardware structure - * @enable: enable or disable switch for MAC anti-spoofing - * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing - * - **/ -void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) -{ - int vf_target_reg = vf >> 3; - int vf_target_shift = vf % 8; - u32 pfvfspoof; - - if (hw->mac.type == ixgbe_mac_82598EB) - return; - - pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); - if (enable) - pfvfspoof |= (1 << vf_target_shift); - else - pfvfspoof &= ~(1 << vf_target_shift); - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); -} - -/** - * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing - * @hw: pointer to hardware structure - * @enable: enable or disable switch for VLAN anti-spoofing - * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing - * - **/ -void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) -{ - int vf_target_reg = vf >> 3; - int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; - u32 pfvfspoof; - - if (hw->mac.type == ixgbe_mac_82598EB) - return; - - pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); - if 
(enable) - pfvfspoof |= (1 << vf_target_shift); - else - pfvfspoof &= ~(1 << vf_target_shift); - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); -} - -/** - * ixgbe_get_device_caps_generic - Get additional device capabilities - * @hw: pointer to hardware structure - * @device_caps: the EEPROM word with the extra device capabilities - * - * This function will read the EEPROM location for the device capabilities, - * and return the word through device_caps. - **/ -s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) -{ - DEBUGFUNC("ixgbe_get_device_caps_generic"); - - hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_calculate_checksum - Calculate checksum for buffer - * @buffer: pointer to EEPROM - * @length: size of EEPROM to calculate a checksum for - * Calculates the checksum for some buffer on a specified length. The - * checksum calculated is returned. - **/ -u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) -{ - u32 i; - u8 sum = 0; - - DEBUGFUNC("ixgbe_calculate_checksum"); - - if (!buffer) - return 0; - - for (i = 0; i < length; i++) - sum += buffer[i]; - - return (u8) (0 - sum); -} - -/** - * ixgbe_hic_unlocked - Issue command to manageability block unlocked - * @hw: pointer to the HW structure - * @buffer: command to write and where the return status will be placed - * @length: length of buffer, must be multiple of 4 bytes - * @timeout: time in ms to wait for command completion - * - * Communicates with the manageability block. On success return IXGBE_SUCCESS - * else returns semaphore error when encountering an error acquiring - * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. - * - * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held - * by the caller. 
- **/ -s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, - u32 timeout) -{ - u32 hicr, i, fwsts; - u16 dword_len; - - DEBUGFUNC("ixgbe_hic_unlocked"); - - if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { - DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; - } - - /* Set bit 9 of FWSTS clearing FW reset indication */ - fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); - IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); - - /* Check that the host interface is enabled. */ - hicr = IXGBE_READ_REG(hw, IXGBE_HICR); - if (!(hicr & IXGBE_HICR_EN)) { - DEBUGOUT("IXGBE_HOST_EN bit disabled.\n"); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; - } - - /* Calculate length in DWORDs. We must be DWORD aligned */ - if (length % sizeof(u32)) { - DEBUGOUT("Buffer length failure, not aligned to dword"); - return IXGBE_ERR_INVALID_ARGUMENT; - } - - dword_len = length >> 2; - - /* The device driver writes the relevant command block - * into the ram area. - */ - for (i = 0; i < dword_len; i++) - IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, - i, IXGBE_CPU_TO_LE32(buffer[i])); - - /* Setting this bit tells the ARC that a new command is pending. 
*/ - IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); - - for (i = 0; i < timeout; i++) { - hicr = IXGBE_READ_REG(hw, IXGBE_HICR); - if (!(hicr & IXGBE_HICR_C)) - break; - msec_delay(1); - } - - /* Check command completion */ - if ((timeout && i == timeout) || - !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { - ERROR_REPORT1(IXGBE_ERROR_CAUTION, - "Command has failed with no status valid.\n"); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_host_interface_command - Issue command to manageability block - * @hw: pointer to the HW structure - * @buffer: contains the command to write and where the return status will - * be placed - * @length: length of buffer, must be multiple of 4 bytes - * @timeout: time in ms to wait for command completion - * @return_data: read and return data from the buffer (true) or not (false) - * Needed because FW structures are big endian and decoding of - * these fields can be 8 bit or 16 bit based on command. Decoding - * is not easily understood without making a table of commands. - * So we will leave this up to the caller to read back the data - * in these cases. - * - * Communicates with the manageability block. On success return IXGBE_SUCCESS - * else returns semaphore error when encountering an error acquiring - * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
- **/ -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, - u32 length, u32 timeout, bool return_data) -{ - u32 hdr_size = sizeof(struct ixgbe_hic_hdr); - u16 dword_len; - u16 buf_len; - s32 status; - u32 bi; - - DEBUGFUNC("ixgbe_host_interface_command"); - - if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { - DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; - } - - /* Take management host interface semaphore */ - status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); - if (status) - return status; - - status = ixgbe_hic_unlocked(hw, buffer, length, timeout); - if (status) - goto rel_out; - - if (!return_data) - goto rel_out; - - /* Calculate length in DWORDs */ - dword_len = hdr_size >> 2; - - /* first pull in the header so we know the buffer length */ - for (bi = 0; bi < dword_len; bi++) { - buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); - IXGBE_LE32_TO_CPUS(&buffer[bi]); - } - - /* If there is any thing in data position pull it in */ - buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len; - if (!buf_len) - goto rel_out; - - if (length < buf_len + hdr_size) { - DEBUGOUT("Buffer not large enough for reply message.\n"); - status = IXGBE_ERR_HOST_INTERFACE_COMMAND; - goto rel_out; - } - - /* Calculate length in DWORDs, add 3 for odd lengths */ - dword_len = (buf_len + 3) >> 2; - - /* Pull in the rest of the buffer (bi is where we left off) */ - for (; bi <= dword_len; bi++) { - buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); - IXGBE_LE32_TO_CPUS(&buffer[bi]); - } - -rel_out: - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); - - return status; -} - -/** - * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware - * @hw: pointer to the HW structure - * @maj: driver version major number - * @min: driver version minor number - * @build: driver version build number - * @sub: driver version sub build number - * - * Sends driver version 
number to firmware through the manageability - * block. On success return IXGBE_SUCCESS - * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring - * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. - **/ -s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 sub, u16 len, - const char *driver_ver) -{ - struct ixgbe_hic_drv_info fw_cmd; - int i; - s32 ret_val = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_set_fw_drv_ver_generic"); - UNREFERENCED_2PARAMETER(len, driver_ver); - - fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; - fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; - fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; - fw_cmd.port_num = (u8)hw->bus.func; - fw_cmd.ver_maj = maj; - fw_cmd.ver_min = min; - fw_cmd.ver_build = build; - fw_cmd.ver_sub = sub; - fw_cmd.hdr.checksum = 0; - fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, - (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); - fw_cmd.pad = 0; - fw_cmd.pad2 = 0; - - for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { - ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, - sizeof(fw_cmd), - IXGBE_HI_COMMAND_TIMEOUT, - true); - if (ret_val != IXGBE_SUCCESS) - continue; - - if (fw_cmd.hdr.cmd_or_resp.ret_status == - FW_CEM_RESP_STATUS_SUCCESS) - ret_val = IXGBE_SUCCESS; - else - ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; - - break; - } - - return ret_val; -} - -/** - * ixgbe_set_rxpba_generic - Initialize Rx packet buffer - * @hw: pointer to hardware structure - * @num_pb: number of packet buffers to allocate - * @headroom: reserve n KB of headroom - * @strategy: packet buffer allocation strategy - **/ -void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, - int strategy) -{ - u32 pbsize = hw->mac.rx_pb_size; - int i = 0; - u32 rxpktsize, txpktsize, txpbthresh; - - /* Reserve headroom */ - pbsize -= headroom; - - if (!num_pb) - num_pb = 1; - - /* Divide remaining packet buffer space amongst the number of packet - * buffers 
requested using supplied strategy. - */ - switch (strategy) { - case PBA_STRATEGY_WEIGHTED: - /* ixgbe_dcb_pba_80_48 strategy weight first half of packet - * buffer with 5/8 of the packet buffer space. - */ - rxpktsize = (pbsize * 5) / (num_pb * 4); - pbsize -= rxpktsize * (num_pb / 2); - rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; - for (; i < (num_pb / 2); i++) - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); - /* fall through - configure remaining packet buffers */ - case PBA_STRATEGY_EQUAL: - rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; - for (; i < num_pb; i++) - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); - break; - default: - break; - } - - /* Only support an equally distributed Tx packet buffer strategy. */ - txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; - txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; - for (i = 0; i < num_pb; i++) { - IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); - IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); - } - - /* Clear unused TCs, if any, to zero buffer size*/ - for (; i < IXGBE_MAX_PB; i++) { - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); - } -} - -/** - * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo - * @hw: pointer to the hardware structure - * - * The 82599 and x540 MACs can experience issues if TX work is still pending - * when a reset occurs. This function prevents this by flushing the PCIe - * buffers on the system. - **/ -void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) -{ - u32 gcr_ext, hlreg0, i, poll; - u16 value; - - /* - * If double reset is not requested then all transactions should - * already be clear and as such there is no work to do - */ - if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED)) - return; - - /* - * Set loopback enable to prevent any transmits from being sent - * should the link come up. 
This assumes that the RXCTRL.RXEN bit - * has already been cleared. - */ - hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); - - /* Wait for a last completion before clearing buffers */ - IXGBE_WRITE_FLUSH(hw); - msec_delay(3); - - /* - * Before proceeding, make sure that the PCIe block does not have - * transactions pending. - */ - poll = ixgbe_pcie_timeout_poll(hw); - for (i = 0; i < poll; i++) { - usec_delay(100); - value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); - if (IXGBE_REMOVED(hw->hw_addr)) - goto out; - if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) - goto out; - } - -out: - /* initiate cleaning flow for buffers in the PCIe transaction layer */ - gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); - IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, - gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR); - - /* Flush all writes and allow 20usec for all transactions to clear */ - IXGBE_WRITE_FLUSH(hw); - usec_delay(20); - - /* restore previous register values */ - IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); -} - -STATIC const u8 ixgbe_emc_temp_data[4] = { - IXGBE_EMC_INTERNAL_DATA, - IXGBE_EMC_DIODE1_DATA, - IXGBE_EMC_DIODE2_DATA, - IXGBE_EMC_DIODE3_DATA -}; -STATIC const u8 ixgbe_emc_therm_limit[4] = { - IXGBE_EMC_INTERNAL_THERM_LIMIT, - IXGBE_EMC_DIODE1_THERM_LIMIT, - IXGBE_EMC_DIODE2_THERM_LIMIT, - IXGBE_EMC_DIODE3_THERM_LIMIT -}; - -/** - * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data - * @hw: pointer to hardware structure - * @data: pointer to the thermal sensor data structure - * - * Returns the thermal sensor data structure - **/ -s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_SUCCESS; - u16 ets_offset; - u16 ets_cfg; - u16 ets_sensor; - u8 num_sensors; - u8 sensor_index; - u8 sensor_location; - u8 i; - struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; - - 
DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic"); - - /* Only support thermal sensors attached to 82599 physical port 0 */ - if ((hw->mac.type != ixgbe_mac_82599EB) || - (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) { - status = IXGBE_NOT_IMPLEMENTED; - goto out; - } - - status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset); - if (status) - goto out; - - if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) { - status = IXGBE_NOT_IMPLEMENTED; - goto out; - } - - status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg); - if (status) - goto out; - - if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT) - != IXGBE_ETS_TYPE_EMC) { - status = IXGBE_NOT_IMPLEMENTED; - goto out; - } - - num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); - if (num_sensors > IXGBE_MAX_SENSORS) - num_sensors = IXGBE_MAX_SENSORS; - - for (i = 0; i < num_sensors; i++) { - status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i), - &ets_sensor); - if (status) - goto out; - - sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> - IXGBE_ETS_DATA_INDEX_SHIFT); - sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> - IXGBE_ETS_DATA_LOC_SHIFT); - - if (sensor_location != 0) { - status = hw->phy.ops.read_i2c_byte(hw, - ixgbe_emc_temp_data[sensor_index], - IXGBE_I2C_THERMAL_SENSOR_ADDR, - &data->sensor[i].temp); - if (status) - goto out; - } - } -out: - return status; -} - -/** - * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds - * @hw: pointer to hardware structure - * - * Inits the thermal sensor thresholds according to the NVM map - * and save off the threshold and location values into mac.thermal_sensor_data - **/ -s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_SUCCESS; - u16 offset; - u16 ets_offset; - u16 ets_cfg; - u16 ets_sensor; - u8 low_thresh_delta; - u8 num_sensors; - u8 sensor_index; - u8 sensor_location; - u8 therm_limit; - u8 i; - struct ixgbe_thermal_sensor_data *data = 
&hw->mac.thermal_sensor_data; - - DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic"); - - memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data)); - - /* Only support thermal sensors attached to 82599 physical port 0 */ - if ((hw->mac.type != ixgbe_mac_82599EB) || - (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) - return IXGBE_NOT_IMPLEMENTED; - - offset = IXGBE_ETS_CFG; - if (hw->eeprom.ops.read(hw, offset, &ets_offset)) - goto eeprom_err; - if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) - return IXGBE_NOT_IMPLEMENTED; - - offset = ets_offset; - if (hw->eeprom.ops.read(hw, offset, &ets_cfg)) - goto eeprom_err; - if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT) - != IXGBE_ETS_TYPE_EMC) - return IXGBE_NOT_IMPLEMENTED; - - low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >> - IXGBE_ETS_LTHRES_DELTA_SHIFT); - num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); - - for (i = 0; i < num_sensors; i++) { - offset = ets_offset + 1 + i; - if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) { - ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, - "eeprom read at offset %d failed", - offset); - continue; - } - sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> - IXGBE_ETS_DATA_INDEX_SHIFT); - sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> - IXGBE_ETS_DATA_LOC_SHIFT); - therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK; - - hw->phy.ops.write_i2c_byte(hw, - ixgbe_emc_therm_limit[sensor_index], - IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit); - - if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) { - data->sensor[i].location = sensor_location; - data->sensor[i].caution_thresh = therm_limit; - data->sensor[i].max_op_thresh = therm_limit - - low_thresh_delta; - } - } - return status; - -eeprom_err: - ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, - "eeprom read at offset %d failed", offset); - return IXGBE_NOT_IMPLEMENTED; -} - -/** - * ixgbe_get_orom_version - Return option ROM from EEPROM - * - * @hw: pointer to 
hardware structure - * @nvm_ver: pointer to output structure - * - * if valid option ROM version, nvm_ver->or_valid set to true - * else nvm_ver->or_valid is false. - **/ -void ixgbe_get_orom_version(struct ixgbe_hw *hw, - struct ixgbe_nvm_version *nvm_ver) -{ - u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl; - - nvm_ver->or_valid = false; - /* Option Rom may or may not be present. Start with pointer */ - hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset); - - /* make sure offset is valid */ - if ((offset == 0x0) || (offset == NVM_INVALID_PTR)) - return; - - hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh); - hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl); - - /* option rom exists and is valid */ - if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 || - eeprom_cfg_blkl == NVM_VER_INVALID || - eeprom_cfg_blkh == NVM_VER_INVALID) - return; - - nvm_ver->or_valid = true; - nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT; - nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) | - (eeprom_cfg_blkh >> NVM_OROM_SHIFT); - nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK; -} - -/** - * ixgbe_get_oem_prod_version - Return OEM Product version - * - * @hw: pointer to hardware structure - * @nvm_ver: pointer to output structure - * - * if valid OEM product version, nvm_ver->oem_valid set to true - * else nvm_ver->oem_valid is false. 
- **/ -void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, - struct ixgbe_nvm_version *nvm_ver) -{ - u16 rel_num, prod_ver, mod_len, cap, offset; - - nvm_ver->oem_valid = false; - hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset); - - /* Return is offset to OEM Product Version block is invalid */ - if (offset == 0x0 && offset == NVM_INVALID_PTR) - return; - - /* Read product version block */ - hw->eeprom.ops.read(hw, offset, &mod_len); - hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap); - - /* Return if OEM product version block is invalid */ - if (mod_len != NVM_OEM_PROD_VER_MOD_LEN || - (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0) - return; - - hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver); - hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num); - - /* Return if version is invalid */ - if ((rel_num | prod_ver) == 0x0 || - rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID) - return; - - nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT; - nvm_ver->oem_minor = prod_ver & NVM_VER_MASK; - nvm_ver->oem_release = rel_num; - nvm_ver->oem_valid = true; -} - -/** - * ixgbe_get_etk_id - Return Etrack ID from EEPROM - * - * @hw: pointer to hardware structure - * @nvm_ver: pointer to output structure - * - * word read errors will return 0xFFFF - **/ -void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver) -{ - u16 etk_id_l, etk_id_h; - - if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l)) - etk_id_l = NVM_VER_INVALID; - if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h)) - etk_id_h = NVM_VER_INVALID; - - /* The word order for the version format is determined by high order - * word bit 15. 
- */ - if ((etk_id_h & NVM_ETK_VALID) == 0) { - nvm_ver->etk_id = etk_id_h; - nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT); - } else { - nvm_ver->etk_id = etk_id_l; - nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT); - } -} - -/** - * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg - * @hw: pointer to hardware structure - * @map: pointer to u8 arr for returning map - * - * Read the rtrup2tc HW register and resolve its content into map - **/ -void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map) -{ - u32 reg, i; - - reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); - for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) - map[i] = IXGBE_RTRUP2TC_UP_MASK & - (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); - return; -} - -void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) -{ - u32 pfdtxgswc; - u32 rxctrl; - - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - if (rxctrl & IXGBE_RXCTRL_RXEN) { - if (hw->mac.type != ixgbe_mac_82598EB) { - pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); - if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { - pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); - hw->mac.set_lben = true; - } else { - hw->mac.set_lben = false; - } - } - rxctrl &= ~IXGBE_RXCTRL_RXEN; - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); - } -} - -void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) -{ - u32 pfdtxgswc; - u32 rxctrl; - - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN)); - - if (hw->mac.type != ixgbe_mac_82598EB) { - if (hw->mac.set_lben) { - pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); - pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); - hw->mac.set_lben = false; - } - } -} - -/** - * ixgbe_mng_present - returns true when management capability is present - * @hw: pointer to hardware structure - */ -bool ixgbe_mng_present(struct ixgbe_hw *hw) -{ - u32 fwsm; - - if (hw->mac.type < ixgbe_mac_82599EB) - return false; - - fwsm = 
IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); - fwsm &= IXGBE_FWSM_MODE_MASK; - return fwsm == IXGBE_FWSM_FW_MODE_PT; -} - -/** - * ixgbe_mng_enabled - Is the manageability engine enabled? - * @hw: pointer to hardware structure - * - * Returns true if the manageability engine is enabled. - **/ -bool ixgbe_mng_enabled(struct ixgbe_hw *hw) -{ - u32 fwsm, manc, factps; - - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); - if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) - return false; - - manc = IXGBE_READ_REG(hw, IXGBE_MANC); - if (!(manc & IXGBE_MANC_RCV_TCO_EN)) - return false; - - if (hw->mac.type <= ixgbe_mac_X540) { - factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); - if (factps & IXGBE_FACTPS_MNGCG) - return false; - } - - return true; -} - -/** - * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true when waiting for completion is needed - * - * Set the link speed in the MAC and/or PHY register and restarts link. - **/ -s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) -{ - ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; - ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; - s32 status = IXGBE_SUCCESS; - u32 speedcnt = 0; - u32 i = 0; - bool autoneg, link_up = false; - - DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber"); - - /* Mask off requested but non-supported speeds */ - status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg); - if (status != IXGBE_SUCCESS) - return status; - - speed &= link_speed; - - /* Try each speed one by one, highest priority first. We do this in - * software because 10Gb fiber doesn't support speed autonegotiation. 
- */ - if (speed & IXGBE_LINK_SPEED_10GB_FULL) { - speedcnt++; - highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; - - /* Set the module link speed */ - switch (hw->phy.media_type) { - case ixgbe_media_type_fiber: - ixgbe_set_rate_select_speed(hw, - IXGBE_LINK_SPEED_10GB_FULL); - break; - case ixgbe_media_type_fiber_qsfp: - /* QSFP module automatically detects MAC link speed */ - break; - default: - DEBUGOUT("Unexpected media type.\n"); - break; - } - - /* Allow module to change analog characteristics (1G->10G) */ - msec_delay(40); - - status = ixgbe_setup_mac_link(hw, - IXGBE_LINK_SPEED_10GB_FULL, - autoneg_wait_to_complete); - if (status != IXGBE_SUCCESS) - return status; - - /* Flap the Tx laser if it has not already been done */ - ixgbe_flap_tx_laser(hw); - - /* Wait for the controller to acquire link. Per IEEE 802.3ap, - * Section 73.10.2, we may have to wait up to 500ms if KR is - * attempted. 82599 uses the same timing for 10g SFI. - */ - for (i = 0; i < 5; i++) { - /* Wait for the link partner to also set speed */ - msec_delay(100); - - /* If we have link, just jump out */ - status = ixgbe_check_link(hw, &link_speed, - &link_up, false); - if (status != IXGBE_SUCCESS) - return status; - - if (link_up) - goto out; - } - } - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) { - speedcnt++; - if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) - highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; - - /* Set the module link speed */ - switch (hw->phy.media_type) { - case ixgbe_media_type_fiber: - ixgbe_set_rate_select_speed(hw, - IXGBE_LINK_SPEED_1GB_FULL); - break; - case ixgbe_media_type_fiber_qsfp: - /* QSFP module automatically detects link speed */ - break; - default: - DEBUGOUT("Unexpected media type.\n"); - break; - } - - /* Allow module to change analog characteristics (10G->1G) */ - msec_delay(40); - - status = ixgbe_setup_mac_link(hw, - IXGBE_LINK_SPEED_1GB_FULL, - autoneg_wait_to_complete); - if (status != IXGBE_SUCCESS) - return status; - - /* Flap the Tx laser if 
it has not already been done */ - ixgbe_flap_tx_laser(hw); - - /* Wait for the link partner to also set speed */ - msec_delay(100); - - /* If we have link, just jump out */ - status = ixgbe_check_link(hw, &link_speed, &link_up, false); - if (status != IXGBE_SUCCESS) - return status; - - if (link_up) - goto out; - } - - /* We didn't get link. Configure back to the highest speed we tried, - * (if there was more than one). We call ourselves back with just the - * single highest speed that the user requested. - */ - if (speedcnt > 1) - status = ixgbe_setup_mac_link_multispeed_fiber(hw, - highest_link_speed, - autoneg_wait_to_complete); - -out: - /* Set autoneg_advertised value based on input link speed */ - hw->phy.autoneg_advertised = 0; - - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; - - return status; -} - -/** - * ixgbe_set_soft_rate_select_speed - Set module link speed - * @hw: pointer to hardware structure - * @speed: link speed to set - * - * Set module link speed via the soft rate select. 
- */ -void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, - ixgbe_link_speed speed) -{ - s32 status; - u8 rs, eeprom_data; - - switch (speed) { - case IXGBE_LINK_SPEED_10GB_FULL: - /* one bit mask same as setting on */ - rs = IXGBE_SFF_SOFT_RS_SELECT_10G; - break; - case IXGBE_LINK_SPEED_1GB_FULL: - rs = IXGBE_SFF_SOFT_RS_SELECT_1G; - break; - default: - DEBUGOUT("Invalid fixed module speed\n"); - return; - } - - /* Set RS0 */ - status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, - IXGBE_I2C_EEPROM_DEV_ADDR2, - &eeprom_data); - if (status) { - DEBUGOUT("Failed to read Rx Rate Select RS0\n"); - goto out; - } - - eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; - - status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, - IXGBE_I2C_EEPROM_DEV_ADDR2, - eeprom_data); - if (status) { - DEBUGOUT("Failed to write Rx Rate Select RS0\n"); - goto out; - } - - /* Set RS1 */ - status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, - IXGBE_I2C_EEPROM_DEV_ADDR2, - &eeprom_data); - if (status) { - DEBUGOUT("Failed to read Rx Rate Select RS1\n"); - goto out; - } - - eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; - - status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, - IXGBE_I2C_EEPROM_DEV_ADDR2, - eeprom_data); - if (status) { - DEBUGOUT("Failed to write Rx Rate Select RS1\n"); - goto out; - } -out: - return; -} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.h deleted file mode 100644 index 4b657305974d..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_common.h +++ /dev/null @@ -1,171 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - 
Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_COMMON_H_ -#define _IXGBE_COMMON_H_ - -#include "ixgbe_type.h" - -void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map); - -u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); -s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); -s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); -s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); -s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw); -s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); -s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, - u32 pba_num_size); -s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); -s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); -void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status); -void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); -s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); - -s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw); - -s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); -s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 
data); -s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data); -s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); -s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data); -s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data); -s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data); -s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, - u16 *data); -s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data); -s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); -s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, - u16 *checksum_val); -s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); -s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); - -s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, - u32 enable_addr); -s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); -s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, - ixgbe_mc_addr_itr func, bool clear); -s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, - u32 addr_count, ixgbe_mc_addr_itr func); -s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); -s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); -s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); -s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw); -s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw); - -s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); -bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); -void ixgbe_fc_autoneg(struct ixgbe_hw *hw); -s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw); - -s32 ixgbe_validate_mac_addr(u8 *mac_addr); -s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 
mask); -void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask); -s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); - -s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); -s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); - -s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); - -s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); -s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); - -s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); -s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); -s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); -s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); -s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); -s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, - u32 vind, bool vlan_on, bool vlvf_bypass); -s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on, u32 *vfta_delta, u32 vfta, - bool vlvf_bypass); -s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); -s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass); - -s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up, bool link_up_wait_to_complete); - -s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, - u16 *wwpn_prefix); - -s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs); -void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); -void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); -s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); -void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, - int strategy); -s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 ver, u16 
len, const char *str); -u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, - u32 length, u32 timeout, bool return_data); -s32 ixgbe_hic_unlocked(struct ixgbe_hw *, u32 *buffer, u32 length, u32 timeout); -s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *); -s32 ixgbe_fw_phy_activity(struct ixgbe_hw *, u16 activity, - u32 (*data)[FW_PHY_ACT_DATA_COUNT]); -void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); - -extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); -extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); -bool ixgbe_mng_present(struct ixgbe_hw *hw); -bool ixgbe_mng_enabled(struct ixgbe_hw *hw); - -#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 -#define IXGBE_EMC_INTERNAL_DATA 0x00 -#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 -#define IXGBE_EMC_DIODE1_DATA 0x01 -#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19 -#define IXGBE_EMC_DIODE2_DATA 0x23 -#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A -#define IXGBE_EMC_DIODE3_DATA 0x2A -#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30 - -s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); -s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); - -void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver); -void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, - struct ixgbe_nvm_version *nvm_ver); -void ixgbe_get_orom_version(struct ixgbe_hw *hw, - struct ixgbe_nvm_version *nvm_ver); -void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); -void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); -s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, - ixgbe_link_speed speed); -#endif /* IXGBE_COMMON */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.c 
b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.c deleted file mode 100644 index 3eee95ce93c3..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.c +++ /dev/null @@ -1,718 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - - -#include "ixgbe_type.h" -#include "ixgbe_dcb.h" -#include "ixgbe_dcb_82598.h" -#include "ixgbe_dcb_82599.h" - -/** - * ixgbe_dcb_calculate_tc_credits - This calculates the ieee traffic class - * credits from the configured bandwidth percentages. Credits - * are the smallest unit programmable into the underlying - * hardware. The IEEE 802.1Qaz specification do not use bandwidth - * groups so this is much simplified from the CEE case. 
- */ -s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max, - int max_frame_size) -{ - int min_percent = 100; - int min_credit, multiplier; - int i; - - min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) / - IXGBE_DCB_CREDIT_QUANTUM; - - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - if (bw[i] < min_percent && bw[i]) - min_percent = bw[i]; - } - - multiplier = (min_credit / min_percent) + 1; - - /* Find out the hw credits for each TC */ - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - int val = min(bw[i] * multiplier, IXGBE_DCB_MAX_CREDIT_REFILL); - - if (val < min_credit) - val = min_credit; - refill[i] = (u16)val; - - max[i] = bw[i] ? (bw[i]*IXGBE_DCB_MAX_CREDIT)/100 : min_credit; - } - - return 0; -} - -/** - * ixgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits - * @ixgbe_dcb_config: Struct containing DCB settings. - * @direction: Configuring either Tx or Rx. - * - * This function calculates the credits allocated to each traffic class. - * It should be called only after the rules are checked by - * ixgbe_dcb_check_config_cee(). 
- */ -s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config, - u32 max_frame_size, u8 direction) -{ - struct ixgbe_dcb_tc_path *p; - u32 min_multiplier = 0; - u16 min_percent = 100; - s32 ret_val = IXGBE_SUCCESS; - /* Initialization values default for Tx settings */ - u32 min_credit = 0; - u32 credit_refill = 0; - u32 credit_max = 0; - u16 link_percentage = 0; - u8 bw_percent = 0; - u8 i; - - if (dcb_config == NULL) { - ret_val = IXGBE_ERR_CONFIG; - goto out; - } - - min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) / - IXGBE_DCB_CREDIT_QUANTUM; - - /* Find smallest link percentage */ - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - p = &dcb_config->tc_config[i].path[direction]; - bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; - link_percentage = p->bwg_percent; - - link_percentage = (link_percentage * bw_percent) / 100; - - if (link_percentage && link_percentage < min_percent) - min_percent = link_percentage; - } - - /* - * The ratio between traffic classes will control the bandwidth - * percentages seen on the wire. To calculate this ratio we use - * a multiplier. It is required that the refill credits must be - * larger than the max frame size so here we find the smallest - * multiplier that will allow all bandwidth percentages to be - * greater than the max frame size. 
- */ - min_multiplier = (min_credit / min_percent) + 1; - - /* Find out the link percentage for each TC first */ - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - p = &dcb_config->tc_config[i].path[direction]; - bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; - - link_percentage = p->bwg_percent; - /* Must be careful of integer division for very small nums */ - link_percentage = (link_percentage * bw_percent) / 100; - if (p->bwg_percent > 0 && link_percentage == 0) - link_percentage = 1; - - /* Save link_percentage for reference */ - p->link_percent = (u8)link_percentage; - - /* Calculate credit refill ratio using multiplier */ - credit_refill = min(link_percentage * min_multiplier, - (u32)IXGBE_DCB_MAX_CREDIT_REFILL); - - /* Refill at least minimum credit */ - if (credit_refill < min_credit) - credit_refill = min_credit; - - p->data_credits_refill = (u16)credit_refill; - - /* Calculate maximum credit for the TC */ - credit_max = (link_percentage * IXGBE_DCB_MAX_CREDIT) / 100; - - /* - * Adjustment based on rule checking, if the percentage - * of a TC is too small, the maximum credit may not be - * enough to send out a jumbo frame in data plane arbitration. - */ - if (credit_max < min_credit) - credit_max = min_credit; - - if (direction == IXGBE_DCB_TX_CONFIG) { - /* - * Adjustment based on rule checking, if the - * percentage of a TC is too small, the maximum - * credit may not be enough to send out a TSO - * packet in descriptor plane arbitration. 
- */ - if (credit_max && (credit_max < - IXGBE_DCB_MIN_TSO_CREDIT) - && (hw->mac.type == ixgbe_mac_82598EB)) - credit_max = IXGBE_DCB_MIN_TSO_CREDIT; - - dcb_config->tc_config[i].desc_credits_max = - (u16)credit_max; - } - - p->data_credits_max = (u16)credit_max; - } - -out: - return ret_val; -} - -/** - * ixgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info - * @cfg: dcb configuration to unpack into hardware consumable fields - * @map: user priority to traffic class map - * @pfc_up: u8 to store user priority PFC bitmask - * - * This unpacks the dcb configuration PFC info which is stored per - * traffic class into a 8bit user priority bitmask that can be - * consumed by hardware routines. The priority to tc map must be - * updated before calling this routine to use current up-to maps. - */ -void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *cfg, u8 *map, u8 *pfc_up) -{ - struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; - int up; - - /* - * If the TC for this user priority has PFC enabled then set the - * matching bit in 'pfc_up' to reflect that PFC is enabled. 
- */ - for (*pfc_up = 0, up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) { - if (tc_config[map[up]].pfc != ixgbe_dcb_pfc_disabled) - *pfc_up |= 1 << up; - } -} - -void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *cfg, int direction, - u16 *refill) -{ - struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; - int tc; - - for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) - refill[tc] = tc_config[tc].path[direction].data_credits_refill; -} - -void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *cfg, u16 *max) -{ - struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; - int tc; - - for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) - max[tc] = tc_config[tc].desc_credits_max; -} - -void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *cfg, int direction, - u8 *bwgid) -{ - struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; - int tc; - - for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) - bwgid[tc] = tc_config[tc].path[direction].bwg_id; -} - -void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *cfg, int direction, - u8 *tsa) -{ - struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; - int tc; - - for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) - tsa[tc] = tc_config[tc].path[direction].tsa; -} - -u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) -{ - struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; - u8 prio_mask = 1 << up; - u8 tc = cfg->num_tcs.pg_tcs; - - /* If tc is 0 then DCB is likely not enabled or supported */ - if (!tc) - goto out; - - /* - * Test from maximum TC to 1 and report the first match we find. 
If - * we find no match we can assume that the TC is 0 since the TC must - * be set for all user priorities - */ - for (tc--; tc; tc--) { - if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) - break; - } -out: - return tc; -} - -void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction, - u8 *map) -{ - u8 up; - - for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) - map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up); -} - -/** - * ixgbe_dcb_config - Struct containing DCB settings. - * @dcb_config: Pointer to DCB config structure - * - * This function checks DCB rules for DCB settings. - * The following rules are checked: - * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%. - * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth - * Group must total 100. - * 3. A Traffic Class should not be set to both Link Strict Priority - * and Group Strict Priority. - * 4. Link strict Bandwidth Groups can only have link strict traffic classes - * with zero bandwidth. 
- */ -s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *dcb_config) -{ - struct ixgbe_dcb_tc_path *p; - s32 ret_val = IXGBE_SUCCESS; - u8 i, j, bw = 0, bw_id; - u8 bw_sum[2][IXGBE_DCB_MAX_BW_GROUP]; - bool link_strict[2][IXGBE_DCB_MAX_BW_GROUP]; - - memset(bw_sum, 0, sizeof(bw_sum)); - memset(link_strict, 0, sizeof(link_strict)); - - /* First Tx, then Rx */ - for (i = 0; i < 2; i++) { - /* Check each traffic class for rule violation */ - for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { - p = &dcb_config->tc_config[j].path[i]; - - bw = p->bwg_percent; - bw_id = p->bwg_id; - - if (bw_id >= IXGBE_DCB_MAX_BW_GROUP) { - ret_val = IXGBE_ERR_CONFIG; - goto err_config; - } - if (p->tsa == ixgbe_dcb_tsa_strict) { - link_strict[i][bw_id] = true; - /* Link strict should have zero bandwidth */ - if (bw) { - ret_val = IXGBE_ERR_CONFIG; - goto err_config; - } - } else if (!bw) { - /* - * Traffic classes without link strict - * should have non-zero bandwidth. - */ - ret_val = IXGBE_ERR_CONFIG; - goto err_config; - } - bw_sum[i][bw_id] += bw; - } - - bw = 0; - - /* Check each bandwidth group for rule violation */ - for (j = 0; j < IXGBE_DCB_MAX_BW_GROUP; j++) { - bw += dcb_config->bw_percentage[i][j]; - /* - * Sum of bandwidth percentages of all traffic classes - * within a Bandwidth Group must total 100 except for - * link strict group (zero bandwidth). - */ - if (link_strict[i][j]) { - if (bw_sum[i][j]) { - /* - * Link strict group should have zero - * bandwidth. 
- */ - ret_val = IXGBE_ERR_CONFIG; - goto err_config; - } - } else if (bw_sum[i][j] != IXGBE_DCB_BW_PERCENT && - bw_sum[i][j] != 0) { - ret_val = IXGBE_ERR_CONFIG; - goto err_config; - } - } - - if (bw != IXGBE_DCB_BW_PERCENT) { - ret_val = IXGBE_ERR_CONFIG; - goto err_config; - } - } - -err_config: - - return ret_val; -} - -/** - * ixgbe_dcb_get_tc_stats - Returns status of each traffic class - * @hw: pointer to hardware structure - * @stats: pointer to statistics structure - * @tc_count: Number of elements in bwg_array. - * - * This function returns the status data for each of the Traffic Classes in use. - */ -s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, - u8 tc_count) -{ - s32 ret = IXGBE_NOT_IMPLEMENTED; - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count); - break; - default: - break; - } - return ret; -} - -/** - * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class - * @hw: pointer to hardware structure - * @stats: pointer to statistics structure - * @tc_count: Number of elements in bwg_array. - * - * This function returns the CBFC status data for each of the Traffic Classes. 
- */ -s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, - u8 tc_count) -{ - s32 ret = IXGBE_NOT_IMPLEMENTED; - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count); - break; - default: - break; - } - return ret; -} - -/** - * ixgbe_dcb_config_rx_arbiter_cee - Config Rx arbiter - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure Rx Data Arbiter and credits for each traffic class. - */ -s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) -{ - s32 ret = IXGBE_NOT_IMPLEMENTED; - u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; - u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; - u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; - u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; - u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; - - ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); - ixgbe_dcb_unpack_max_cee(dcb_config, max); - ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); - ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); - ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ret = ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid, - tsa, map); - break; - default: - break; - } - return ret; -} - -/** - * ixgbe_dcb_config_tx_desc_arbiter_cee - Config Tx Desc arbiter - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure Tx 
Descriptor Arbiter and credits for each traffic class. - */ -s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) -{ - s32 ret = IXGBE_NOT_IMPLEMENTED; - u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; - u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; - u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; - u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; - - ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); - ixgbe_dcb_unpack_max_cee(dcb_config, max); - ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); - ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, - bwgid, tsa); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, - bwgid, tsa); - break; - default: - break; - } - return ret; -} - -/** - * ixgbe_dcb_config_tx_data_arbiter_cee - Config Tx data arbiter - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure Tx Data Arbiter and credits for each traffic class. 
- */ -s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) -{ - s32 ret = IXGBE_NOT_IMPLEMENTED; - u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; - u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; - u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; - u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; - u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; - - ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); - ixgbe_dcb_unpack_max_cee(dcb_config, max); - ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); - ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); - ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, - bwgid, tsa); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, - bwgid, tsa, - map); - break; - default: - break; - } - return ret; -} - -/** - * ixgbe_dcb_config_pfc_cee - Config priority flow control - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure Priority Flow Control for each traffic class. 
- */ -s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) -{ - s32 ret = IXGBE_NOT_IMPLEMENTED; - u8 pfc_en; - u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; - - ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); - ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map); - break; - default: - break; - } - return ret; -} - -/** - * ixgbe_dcb_config_tc_stats - Config traffic class statistics - * @hw: pointer to hardware structure - * - * Configure queue statistics registers, all queues belonging to same traffic - * class uses a single set of queue statistics counters. - */ -s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw) -{ - s32 ret = IXGBE_NOT_IMPLEMENTED; - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ret = ixgbe_dcb_config_tc_stats_82598(hw); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL); - break; - default: - break; - } - return ret; -} - -/** - * ixgbe_dcb_hw_config_cee - Config and enable DCB - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure dcb settings and enable dcb mode. 
- */ -s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) -{ - s32 ret = IXGBE_NOT_IMPLEMENTED; - u8 pfc_en; - u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; - u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; - u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; - u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; - u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; - - /* Unpack CEE standard containers */ - ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); - ixgbe_dcb_unpack_max_cee(dcb_config, max); - ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); - ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); - ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->link_speed, - refill, max, bwgid, tsa); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - ixgbe_dcb_config_82599(hw, dcb_config); - ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed, - refill, max, bwgid, - tsa, map); - - ixgbe_dcb_config_tc_stats_82599(hw, dcb_config); - break; - default: - break; - } - - if (!ret && dcb_config->pfc_mode_enable) { - ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); - ret = ixgbe_dcb_config_pfc(hw, pfc_en, map); - } - - return ret; -} - -/* Helper routines to abstract HW specifics from DCB netlink ops */ -s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) -{ - int ret = IXGBE_ERR_PARAM; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map); - break; - default: - break; - } - return ret; -} - -s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, - u8 *bwg_id, u8 *tsa, u8 
*map) -{ - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); - ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, - tsa); - ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, - tsa); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, - tsa, map); - ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, - tsa); - ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, - tsa, map); - break; - default: - break; - } - return 0; -} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.h deleted file mode 100644 index d19c3c277095..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb.h +++ /dev/null @@ -1,165 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_DCB_H_ -#define _IXGBE_DCB_H_ - -#include "ixgbe_type.h" - -/* DCB defines */ -/* DCB credit calculation defines */ -#define IXGBE_DCB_CREDIT_QUANTUM 64 -#define IXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */ -#define IXGBE_DCB_MAX_TSO_SIZE (32 * 1024) /* Max TSO pkt size in DCB*/ -#define IXGBE_DCB_MAX_CREDIT (2 * IXGBE_DCB_MAX_CREDIT_REFILL) - -/* 513 for 32KB TSO packet */ -#define IXGBE_DCB_MIN_TSO_CREDIT \ - ((IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM) + 1) - -/* DCB configuration defines */ -#define IXGBE_DCB_MAX_USER_PRIORITY 8 -#define IXGBE_DCB_MAX_BW_GROUP 8 -#define IXGBE_DCB_BW_PERCENT 100 - -#define IXGBE_DCB_TX_CONFIG 0 -#define IXGBE_DCB_RX_CONFIG 1 - -/* DCB capability defines */ -#define IXGBE_DCB_PG_SUPPORT 0x00000001 -#define IXGBE_DCB_PFC_SUPPORT 0x00000002 -#define IXGBE_DCB_BCN_SUPPORT 0x00000004 -#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008 -#define IXGBE_DCB_GSP_SUPPORT 0x00000010 - -struct ixgbe_dcb_support { - u32 capabilities; /* DCB capabilities */ - - /* Each bit represents a number of TCs configurable in the hw. - * If 8 traffic classes can be configured, the value is 0x80. 
*/ - u8 traffic_classes; - u8 pfc_traffic_classes; -}; - -enum ixgbe_dcb_tsa { - ixgbe_dcb_tsa_ets = 0, - ixgbe_dcb_tsa_group_strict_cee, - ixgbe_dcb_tsa_strict -}; - -/* Traffic class bandwidth allocation per direction */ -struct ixgbe_dcb_tc_path { - u8 bwg_id; /* Bandwidth Group (BWG) ID */ - u8 bwg_percent; /* % of BWG's bandwidth */ - u8 link_percent; /* % of link bandwidth */ - u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ - u16 data_credits_refill; /* Credit refill amount in 64B granularity */ - u16 data_credits_max; /* Max credits for a configured packet buffer - * in 64B granularity.*/ - enum ixgbe_dcb_tsa tsa; /* Link or Group Strict Priority */ -}; - -enum ixgbe_dcb_pfc { - ixgbe_dcb_pfc_disabled = 0, - ixgbe_dcb_pfc_enabled, - ixgbe_dcb_pfc_enabled_txonly, - ixgbe_dcb_pfc_enabled_rxonly -}; - -/* Traffic class configuration */ -struct ixgbe_dcb_tc_config { - struct ixgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */ - enum ixgbe_dcb_pfc pfc; /* Class based flow control setting */ - - u16 desc_credits_max; /* For Tx Descriptor arbitration */ - u8 tc; /* Traffic class (TC) */ -}; - -enum ixgbe_dcb_pba { - /* PBA[0-7] each use 64KB FIFO */ - ixgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL, - /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */ - ixgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED -}; - -struct ixgbe_dcb_num_tcs { - u8 pg_tcs; - u8 pfc_tcs; -}; - -struct ixgbe_dcb_config { - struct ixgbe_dcb_tc_config tc_config[IXGBE_DCB_MAX_TRAFFIC_CLASS]; - struct ixgbe_dcb_support support; - struct ixgbe_dcb_num_tcs num_tcs; - u8 bw_percentage[2][IXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */ - bool pfc_mode_enable; - bool round_robin_enable; - - enum ixgbe_dcb_pba rx_pba_cfg; - - u32 dcb_cfg_version; /* Not used...OS-specific? 
*/ - u32 link_speed; /* For bandwidth allocation validation purpose */ - bool vt_mode; -}; - -/* DCB driver APIs */ - -/* DCB rule checking */ -s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *); - -/* DCB credits calculation */ -s32 ixgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int); -s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *, - struct ixgbe_dcb_config *, u32, u8); - -/* DCB PFC */ -s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, u8, u8 *); -s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *); - -/* DCB stats */ -s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *); -s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8); -s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8); - -/* DCB config arbiters */ -s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *, - struct ixgbe_dcb_config *); -s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *, - struct ixgbe_dcb_config *); -s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *, - struct ixgbe_dcb_config *); - -/* DCB unpack routines */ -void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *, u8 *, u8 *); -void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *, int, u16 *); -void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *); -void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *); -void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *); -void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *); -u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8); - -/* DCB initialization */ -s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *); -s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *); -#endif /* _IXGBE_DCB_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.c 
b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.c deleted file mode 100644 index b1d8df9f10b1..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.c +++ /dev/null @@ -1,350 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - - -#include "ixgbe_type.h" -#include "ixgbe_dcb.h" -#include "ixgbe_dcb_82598.h" - -/** - * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class - * @hw: pointer to hardware structure - * @stats: pointer to statistics structure - * @tc_count: Number of elements in bwg_array. - * - * This function returns the status data for each of the Traffic Classes in use. 
- */ -s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw, - struct ixgbe_hw_stats *stats, - u8 tc_count) -{ - int tc; - - DEBUGFUNC("dcb_get_tc_stats"); - - if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) - return IXGBE_ERR_PARAM; - - /* Statistics pertaining to each traffic class */ - for (tc = 0; tc < tc_count; tc++) { - /* Transmitted Packets */ - stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); - /* Transmitted Bytes */ - stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc)); - /* Received Packets */ - stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); - /* Received Bytes */ - stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc)); - -#if 0 - /* Can we get rid of these?? Consequently, getting rid - * of the tc_stats structure. - */ - tc_stats_array[up]->in_overflow_discards = 0; - tc_stats_array[up]->out_overflow_discards = 0; -#endif - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data - * @hw: pointer to hardware structure - * @stats: pointer to statistics structure - * @tc_count: Number of elements in bwg_array. - * - * This function returns the CBFC status data for each of the Traffic Classes. - */ -s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw, - struct ixgbe_hw_stats *stats, - u8 tc_count) -{ - int tc; - - DEBUGFUNC("dcb_get_pfc_stats"); - - if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) - return IXGBE_ERR_PARAM; - - for (tc = 0; tc < tc_count; tc++) { - /* Priority XOFF Transmitted */ - stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); - /* Priority XOFF Received */ - stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc)); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure Rx Data Arbiter and credits for each traffic class. 
- */ -s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, - u16 *max, u8 *tsa) -{ - u32 reg = 0; - u32 credit_refill = 0; - u32 credit_max = 0; - u8 i = 0; - - reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA; - IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg); - - reg = IXGBE_READ_REG(hw, IXGBE_RMCS); - /* Enable Arbiter */ - reg &= ~IXGBE_RMCS_ARBDIS; - /* Enable Receive Recycle within the BWG */ - reg |= IXGBE_RMCS_RRM; - /* Enable Deficit Fixed Priority arbitration*/ - reg |= IXGBE_RMCS_DFP; - - IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); - - /* Configure traffic class credits and priority */ - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - credit_refill = refill[i]; - credit_max = max[i]; - - reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); - - if (tsa[i] == ixgbe_dcb_tsa_strict) - reg |= IXGBE_RT2CR_LSP; - - IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); - } - - reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); - reg |= IXGBE_RDRXCTL_RDMTS_1_2; - reg |= IXGBE_RDRXCTL_MPBEN; - reg |= IXGBE_RDRXCTL_MCEN; - IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); - - reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - /* Make sure there is enough descriptors before arbitration */ - reg &= ~IXGBE_RXCTRL_DMBYPS; - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure Tx Descriptor Arbiter and credits for each traffic class. 
- */ -s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, u16 *max, u8 *bwg_id, - u8 *tsa) -{ - u32 reg, max_credits; - u8 i; - - reg = IXGBE_READ_REG(hw, IXGBE_DPMCS); - - /* Enable arbiter */ - reg &= ~IXGBE_DPMCS_ARBDIS; - reg |= IXGBE_DPMCS_TSOEF; - - /* Configure Max TSO packet size 34KB including payload and headers */ - reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); - - IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg); - - /* Configure traffic class credits and priority */ - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - max_credits = max[i]; - reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; - reg |= refill[i]; - reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT; - - if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) - reg |= IXGBE_TDTQ2TCCR_GSP; - - if (tsa[i] == ixgbe_dcb_tsa_strict) - reg |= IXGBE_TDTQ2TCCR_LSP; - - IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure Tx Data Arbiter and credits for each traffic class. 
- */ -s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, u16 *max, u8 *bwg_id, - u8 *tsa) -{ - u32 reg; - u8 i; - - reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS); - /* Enable Data Plane Arbiter */ - reg &= ~IXGBE_PDPMCS_ARBDIS; - /* Enable DFP and Transmit Recycle Mode */ - reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM); - - IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg); - - /* Configure traffic class credits and priority */ - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - reg = refill[i]; - reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT; - reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT; - - if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) - reg |= IXGBE_TDPT2TCCR_GSP; - - if (tsa[i] == ixgbe_dcb_tsa_strict) - reg |= IXGBE_TDPT2TCCR_LSP; - - IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); - } - - /* Enable Tx packet buffer division */ - reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL); - reg |= IXGBE_DTXCTL_ENDBUBD; - IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dcb_config_pfc_82598 - Config priority flow control - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure Priority Flow Control for each traffic class. 
- */ -s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) -{ - u32 fcrtl, reg; - u8 i; - - /* Enable Transmit Priority Flow Control */ - reg = IXGBE_READ_REG(hw, IXGBE_RMCS); - reg &= ~IXGBE_RMCS_TFCE_802_3X; - reg |= IXGBE_RMCS_TFCE_PRIORITY; - IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); - - /* Enable Receive Priority Flow Control */ - reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); - reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE); - - if (pfc_en) - reg |= IXGBE_FCTRL_RPFCE; - - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); - - /* Configure PFC Tx thresholds per TC */ - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - if (!(pfc_en & (1 << i))) { - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); - continue; - } - - fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; - reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); - IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); - } - - /* Configure pause time */ - reg = hw->fc.pause_time | (hw->fc.pause_time << 16); - for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); - - /* Configure flow control refresh threshold value */ - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics - * @hw: pointer to hardware structure - * - * Configure queue statistics registers, all queues belonging to same traffic - * class uses a single set of queue statistics counters. 
- */ -s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) -{ - u32 reg = 0; - u8 i = 0; - u8 j = 0; - - /* Receive Queues stats setting - 8 queues per statistics reg */ - for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) { - reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i)); - reg |= ((0x1010101) * j); - IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); - reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1)); - reg |= ((0x1010101) * j); - IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg); - } - /* Transmit Queues stats setting - 4 queues per statistics reg*/ - for (i = 0; i < 8; i++) { - reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i)); - reg |= ((0x1010101) * i); - IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dcb_hw_config_82598 - Config and enable DCB - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure dcb settings and enable dcb mode. - */ -s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, int link_speed, - u16 *refill, u16 *max, u8 *bwg_id, - u8 *tsa) -{ - UNREFERENCED_1PARAMETER(link_speed); - - ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); - ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, - tsa); - ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, - tsa); - ixgbe_dcb_config_tc_stats_82598(hw); - - return IXGBE_SUCCESS; -} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.h deleted file mode 100644 index d340a691d7d2..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82598.h +++ /dev/null @@ -1,90 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. 
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_DCB_82598_H_ -#define _IXGBE_DCB_82598_H_ - -/* DCB register definitions */ - -#define IXGBE_DPMCS_MTSOS_SHIFT 16 -#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, - * 1 DFP - Deficit Fixed Priority */ -#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */ -#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */ -#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */ - -#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */ - -#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ -#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */ - -#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet - * buffers enable */ -#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores - * (RSS) enable */ - -#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12 -#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9 -#define IXGBE_TDTQ2TCCR_GSP 0x40000000 -#define IXGBE_TDTQ2TCCR_LSP 0x80000000 - -#define IXGBE_TDPT2TCCR_MCL_SHIFT 12 -#define IXGBE_TDPT2TCCR_BWG_SHIFT 9 -#define IXGBE_TDPT2TCCR_GSP 0x40000000 -#define IXGBE_TDPT2TCCR_LSP 0x80000000 - -#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, - * 1 DFP - Deficit Fixed 
Priority */ -#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */ -#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */ - -#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */ - -#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ -#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ -#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ -#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ - -/* DCB driver APIs */ - -/* DCB PFC */ -s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8); - -/* DCB stats */ -s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *); -s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *, - struct ixgbe_hw_stats *, u8); -s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *, - struct ixgbe_hw_stats *, u8); - -/* DCB config arbiters */ -s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, - u8 *, u8 *); -s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, - u8 *, u8 *); -s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, u8 *); - -/* DCB initialization */ -s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, u8 *); -#endif /* _IXGBE_DCB_82958_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.c deleted file mode 100644 index b0c5e523093b..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.c +++ /dev/null @@ -1,584 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. 
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - - -#include "ixgbe_type.h" -#include "ixgbe_dcb.h" -#include "ixgbe_dcb_82599.h" - -/** - * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class - * @hw: pointer to hardware structure - * @stats: pointer to statistics structure - * @tc_count: Number of elements in bwg_array. - * - * This function returns the status data for each of the Traffic Classes in use. 
- */ -s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw, - struct ixgbe_hw_stats *stats, - u8 tc_count) -{ - int tc; - - DEBUGFUNC("dcb_get_tc_stats"); - - if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) - return IXGBE_ERR_PARAM; - - /* Statistics pertaining to each traffic class */ - for (tc = 0; tc < tc_count; tc++) { - /* Transmitted Packets */ - stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); - /* Transmitted Bytes (read low first to prevent missed carry) */ - stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc)); - stats->qbtc[tc] += - (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32); - /* Received Packets */ - stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); - /* Received Bytes (read low first to prevent missed carry) */ - stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc)); - stats->qbrc[tc] += - (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32); - - /* Received Dropped Packet */ - stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc)); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data - * @hw: pointer to hardware structure - * @stats: pointer to statistics structure - * @tc_count: Number of elements in bwg_array. - * - * This function returns the CBFC status data for each of the Traffic Classes. 
- */ -s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw, - struct ixgbe_hw_stats *stats, - u8 tc_count) -{ - int tc; - - DEBUGFUNC("dcb_get_pfc_stats"); - - if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) - return IXGBE_ERR_PARAM; - - for (tc = 0; tc < tc_count; tc++) { - /* Priority XOFF Transmitted */ - stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); - /* Priority XOFF Received */ - stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc)); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure Rx Packet Arbiter and credits for each traffic class. - */ -s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, - u16 *max, u8 *bwg_id, u8 *tsa, - u8 *map) -{ - u32 reg = 0; - u32 credit_refill = 0; - u32 credit_max = 0; - u8 i = 0; - - /* - * Disable the arbiter before changing parameters - * (always enable recycle mode; WSP) - */ - reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS; - IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); - - /* - * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding - * bits sets for the UPs that needs to be mappped to that TC. - * e.g if priorities 6 and 7 are to be mapped to a TC then the - * up_to_tc_bitmap value for that TC will be 11000000 in binary. 
- */ - reg = 0; - for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) - reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); - - IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); - - /* Configure traffic class credits and priority */ - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - credit_refill = refill[i]; - credit_max = max[i]; - reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); - - reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT; - - if (tsa[i] == ixgbe_dcb_tsa_strict) - reg |= IXGBE_RTRPT4C_LSP; - - IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); - } - - /* - * Configure Rx packet plane (recycle mode; WSP) and - * enable arbiter - */ - reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; - IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure Tx Descriptor Arbiter and credits for each traffic class. 
- */ -s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, - u16 *max, u8 *bwg_id, u8 *tsa) -{ - u32 reg, max_credits; - u8 i; - - /* Clear the per-Tx queue credits; we use per-TC instead */ - for (i = 0; i < 128; i++) { - IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); - IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0); - } - - /* Configure traffic class credits and priority */ - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - max_credits = max[i]; - reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; - reg |= refill[i]; - reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT; - - if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) - reg |= IXGBE_RTTDT2C_GSP; - - if (tsa[i] == ixgbe_dcb_tsa_strict) - reg |= IXGBE_RTTDT2C_LSP; - - IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); - } - - /* - * Configure Tx descriptor plane (recycle mode; WSP) and - * enable arbiter - */ - reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM; - IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure Tx Packet Arbiter and credits for each traffic class. - */ -s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, - u16 *max, u8 *bwg_id, u8 *tsa, - u8 *map) -{ - u32 reg; - u8 i; - - /* - * Disable the arbiter before changing parameters - * (always enable recycle mode; SP; arb delay) - */ - reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | - (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) | - IXGBE_RTTPCS_ARBDIS; - IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); - - /* - * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding - * bits sets for the UPs that needs to be mappped to that TC. - * e.g if priorities 6 and 7 are to be mapped to a TC then the - * up_to_tc_bitmap value for that TC will be 11000000 in binary. 
- */ - reg = 0; - for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) - reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); - - IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); - - /* Configure traffic class credits and priority */ - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - reg = refill[i]; - reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT; - reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT; - - if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) - reg |= IXGBE_RTTPT2C_GSP; - - if (tsa[i] == ixgbe_dcb_tsa_strict) - reg |= IXGBE_RTTPT2C_LSP; - - IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); - } - - /* - * Configure Tx packet plane (recycle mode; SP; arb delay) and - * enable arbiter - */ - reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | - (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT); - IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dcb_config_pfc_82599 - Configure priority flow control - * @hw: pointer to hardware structure - * @pfc_en: enabled pfc bitmask - * @map: priority to tc assignments indexed by priority - * - * Configure Priority Flow Control (PFC) for each traffic class. - */ -s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) -{ - u32 i, j, fcrtl, reg; - u8 max_tc = 0; - - /* Enable Transmit Priority Flow Control */ - IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY); - - /* Enable Receive Priority Flow Control */ - reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); - reg |= IXGBE_MFLCN_DPF; - - /* - * X540 supports per TC Rx priority flow control. So - * clear all TCs and only enable those that should be - * enabled. 
- */ - reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); - - if (hw->mac.type >= ixgbe_mac_X540) - reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; - - if (pfc_en) - reg |= IXGBE_MFLCN_RPFCE; - - IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); - - for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) { - if (map[i] > max_tc) - max_tc = map[i]; - } - - - /* Configure PFC Tx thresholds per TC */ - for (i = 0; i <= max_tc; i++) { - int enabled = 0; - - for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) { - if ((map[j] == i) && (pfc_en & (1 << j))) { - enabled = 1; - break; - } - } - - if (enabled) { - reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; - fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); - } else { - /* - * In order to prevent Tx hangs when the internal Tx - * switch is enabled we must set the high water mark - * to the Rx packet buffer size - 24KB. This allows - * the Tx switch to function even under heavy Rx - * workloads. - */ - reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); - } - - IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); - } - - for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0); - } - - /* Configure pause time (2 TCs per register) */ - reg = hw->fc.pause_time | (hw->fc.pause_time << 16); - for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); - - /* Configure flow control refresh threshold value */ - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics - * @hw: pointer to hardware structure - * - * Configure queue statistics registers, all queues belonging to same traffic - * class uses a single set of queue statistics counters. 
- */ -s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) -{ - u32 reg = 0; - u8 i = 0; - u8 tc_count = 8; - bool vt_mode = false; - - if (dcb_config != NULL) { - tc_count = dcb_config->num_tcs.pg_tcs; - vt_mode = dcb_config->vt_mode; - } - - if (!((tc_count == 8 && vt_mode == false) || tc_count == 4)) - return IXGBE_ERR_PARAM; - - if (tc_count == 8 && vt_mode == false) { - /* - * Receive Queues stats setting - * 32 RQSMR registers, each configuring 4 queues. - * - * Set all 16 queues of each TC to the same stat - * with TC 'n' going to stat 'n'. - */ - for (i = 0; i < 32; i++) { - reg = 0x01010101 * (i / 4); - IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); - } - /* - * Transmit Queues stats setting - * 32 TQSM registers, each controlling 4 queues. - * - * Set all queues of each TC to the same stat - * with TC 'n' going to stat 'n'. - * Tx queues are allocated non-uniformly to TCs: - * 32, 32, 16, 16, 8, 8, 8, 8. - */ - for (i = 0; i < 32; i++) { - if (i < 8) - reg = 0x00000000; - else if (i < 16) - reg = 0x01010101; - else if (i < 20) - reg = 0x02020202; - else if (i < 24) - reg = 0x03030303; - else if (i < 26) - reg = 0x04040404; - else if (i < 28) - reg = 0x05050505; - else if (i < 30) - reg = 0x06060606; - else - reg = 0x07070707; - IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); - } - } else if (tc_count == 4 && vt_mode == false) { - /* - * Receive Queues stats setting - * 32 RQSMR registers, each configuring 4 queues. - * - * Set all 16 queues of each TC to the same stat - * with TC 'n' going to stat 'n'. - */ - for (i = 0; i < 32; i++) { - if (i % 8 > 3) - /* In 4 TC mode, odd 16-queue ranges are - * not used. - */ - continue; - reg = 0x01010101 * (i / 8); - IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); - } - /* - * Transmit Queues stats setting - * 32 TQSM registers, each controlling 4 queues. - * - * Set all queues of each TC to the same stat - * with TC 'n' going to stat 'n'. 
- * Tx queues are allocated non-uniformly to TCs: - * 64, 32, 16, 16. - */ - for (i = 0; i < 32; i++) { - if (i < 16) - reg = 0x00000000; - else if (i < 24) - reg = 0x01010101; - else if (i < 28) - reg = 0x02020202; - else - reg = 0x03030303; - IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); - } - } else if (tc_count == 4 && vt_mode == true) { - /* - * Receive Queues stats setting - * 32 RQSMR registers, each configuring 4 queues. - * - * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each - * pool. Set all 32 queues of each TC across pools to the same - * stat with TC 'n' going to stat 'n'. - */ - for (i = 0; i < 32; i++) - IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100); - /* - * Transmit Queues stats setting - * 32 TQSM registers, each controlling 4 queues. - * - * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each - * pool. Set all 32 queues of each TC across pools to the same - * stat with TC 'n' going to stat 'n'. - */ - for (i = 0; i < 32; i++) - IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dcb_config_82599 - Configure general DCB parameters - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure general DCB parameters. 
- */ -s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) -{ - u32 reg; - u32 q; - - /* Disable the Tx desc arbiter so that MTQC can be changed */ - reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); - reg |= IXGBE_RTTDCS_ARBDIS; - IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); - - reg = IXGBE_READ_REG(hw, IXGBE_MRQC); - if (dcb_config->num_tcs.pg_tcs == 8) { - /* Enable DCB for Rx with 8 TCs */ - switch (reg & IXGBE_MRQC_MRQE_MASK) { - case 0: - case IXGBE_MRQC_RT4TCEN: - /* RSS disabled cases */ - reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | - IXGBE_MRQC_RT8TCEN; - break; - case IXGBE_MRQC_RSSEN: - case IXGBE_MRQC_RTRSS4TCEN: - /* RSS enabled cases */ - reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | - IXGBE_MRQC_RTRSS8TCEN; - break; - default: - /* - * Unsupported value, assume stale data, - * overwrite no RSS - */ - ASSERT(0); - reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | - IXGBE_MRQC_RT8TCEN; - } - } - if (dcb_config->num_tcs.pg_tcs == 4) { - /* We support both VT-on and VT-off with 4 TCs. */ - if (dcb_config->vt_mode) - reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | - IXGBE_MRQC_VMDQRT4TCEN; - else - reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | - IXGBE_MRQC_RTRSS4TCEN; - } - IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg); - - /* Enable DCB for Tx with 8 TCs */ - if (dcb_config->num_tcs.pg_tcs == 8) - reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; - else { - /* We support both VT-on and VT-off with 4 TCs. 
*/ - reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; - if (dcb_config->vt_mode) - reg |= IXGBE_MTQC_VT_ENA; - } - IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); - - /* Disable drop for all queues */ - for (q = 0; q < 128; q++) - IXGBE_WRITE_REG(hw, IXGBE_QDE, - (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT))); - - /* Enable the Tx desc arbiter */ - reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); - reg &= ~IXGBE_RTTDCS_ARBDIS; - IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); - - /* Enable Security TX Buffer IFG for DCB */ - reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); - reg |= IXGBE_SECTX_DCB; - IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dcb_hw_config_82599 - Configure and enable DCB - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure dcb settings and enable dcb mode. - */ -s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed, - u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa, - u8 *map) -{ - UNREFERENCED_1PARAMETER(link_speed); - - ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa, - map); - ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, - tsa); - ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, - tsa, map); - - return IXGBE_SUCCESS; -} - diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.h deleted file mode 100644 index 24be9065d90d..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_82599.h +++ /dev/null @@ -1,118 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. 
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_DCB_82599_H_ -#define _IXGBE_DCB_82599_H_ - -/* DCB register definitions */ -#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin, - * 1 WSP - Weighted Strict Priority - */ -#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin, - * 1 WRR - Weighted Round Robin - */ -#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */ -#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */ -#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must - * clear! 
- */ -#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */ - -/* Receive UP2TC mapping */ -#define IXGBE_RTRUP2TC_UP_SHIFT 3 -#define IXGBE_RTRUP2TC_UP_MASK 7 -/* Transmit UP2TC mapping */ -#define IXGBE_RTTUP2TC_UP_SHIFT 3 - -#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ -#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */ -#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */ -#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */ - -#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet - * buffers enable - */ -#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores - * (RSS) enable - */ - -/* RTRPCS Bit Masks */ -#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */ -/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ -#define IXGBE_RTRPCS_RAC 0x00000004 -#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */ - -/* RTTDT2C Bit Masks */ -#define IXGBE_RTTDT2C_MCL_SHIFT 12 -#define IXGBE_RTTDT2C_BWG_SHIFT 9 -#define IXGBE_RTTDT2C_GSP 0x40000000 -#define IXGBE_RTTDT2C_LSP 0x80000000 - -#define IXGBE_RTTPT2C_MCL_SHIFT 12 -#define IXGBE_RTTPT2C_BWG_SHIFT 9 -#define IXGBE_RTTPT2C_GSP 0x40000000 -#define IXGBE_RTTPT2C_LSP 0x80000000 - -/* RTTPCS Bit Masks */ -#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin, - * 1 SP - Strict Priority - */ -#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ -#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */ -#define IXGBE_RTTPCS_ARBD_SHIFT 22 -#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */ - -#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */ - -/* SECTXMINIFG DCB */ -#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer SEC IFG */ - -/* DCB driver APIs */ - -/* DCB PFC */ -s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *, u8, u8 *); - -/* DCB stats */ -s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *, - struct ixgbe_dcb_config *); 
-s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *, - struct ixgbe_hw_stats *, u8); -s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *, - struct ixgbe_hw_stats *, u8); - -/* DCB config arbiters */ -s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, - u8 *, u8 *); -s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, - u8 *, u8 *, u8 *); -s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, u8 *, - u8 *, u8 *); - -/* DCB initialization */ -s32 ixgbe_dcb_config_82599(struct ixgbe_hw *, - struct ixgbe_dcb_config *); - -s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, - u8 *, u8 *); -#endif /* _IXGBE_DCB_82959_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_nl.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_nl.c deleted file mode 100644 index 20d9d05c76ea..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_dcb_nl.c +++ /dev/null @@ -1,898 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe.h" - -#if IS_ENABLED(CONFIG_DCB) -#include -#include "ixgbe_dcb_82598.h" -#include "ixgbe_dcb_82599.h" - -/* Callbacks for DCB netlink in the kernel */ -#define BIT_DCB_MODE 0x01 -#define BIT_PFC 0x02 -#define BIT_PG_RX 0x04 -#define BIT_PG_TX 0x08 -#define BIT_APP_UPCHG 0x10 -#define BIT_RESETLINK 0x40 -#define BIT_LINKSPEED 0x80 - -/* Responses for the DCB_C_SET_ALL command */ -#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ -#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ -#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ - -int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) -{ - struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg; - struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg; - struct ixgbe_dcb_tc_config *src = NULL; - struct ixgbe_dcb_tc_config *dst = NULL; - int i, j; - int tx = IXGBE_DCB_TX_CONFIG; - int rx = IXGBE_DCB_RX_CONFIG; - int changes = 0; - -#if IS_ENABLED(CONFIG_FCOE) - if (adapter->fcoe.up_set != adapter->fcoe.up) - changes |= BIT_APP_UPCHG; -#endif /* CONFIG_FCOE */ - - for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { - src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0]; - dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0]; - - if (dst->path[tx].tsa != src->path[tx].tsa) { - dst->path[tx].tsa = src->path[tx].tsa; - changes |= BIT_PG_TX; - } - - if (dst->path[tx].bwg_id != src->path[tx].bwg_id) { - dst->path[tx].bwg_id = src->path[tx].bwg_id; - changes |= BIT_PG_TX; - } - - if (dst->path[tx].bwg_percent != src->path[tx].bwg_percent) { - dst->path[tx].bwg_percent = src->path[tx].bwg_percent; - changes |= BIT_PG_TX; - } - - if (dst->path[tx].up_to_tc_bitmap != - src->path[tx].up_to_tc_bitmap) { - dst->path[tx].up_to_tc_bitmap = - src->path[tx].up_to_tc_bitmap; - changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG); - } - - if 
(dst->path[rx].tsa != src->path[rx].tsa) { - dst->path[rx].tsa = src->path[rx].tsa; - changes |= BIT_PG_RX; - } - - if (dst->path[rx].bwg_id != src->path[rx].bwg_id) { - dst->path[rx].bwg_id = src->path[rx].bwg_id; - changes |= BIT_PG_RX; - } - - if (dst->path[rx].bwg_percent != src->path[rx].bwg_percent) { - dst->path[rx].bwg_percent = src->path[rx].bwg_percent; - changes |= BIT_PG_RX; - } - - if (dst->path[rx].up_to_tc_bitmap != - src->path[rx].up_to_tc_bitmap) { - dst->path[rx].up_to_tc_bitmap = - src->path[rx].up_to_tc_bitmap; - changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG); - } - } - - for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { - j = i - DCB_PG_ATTR_BW_ID_0; - - if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) { - dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j]; - changes |= BIT_PG_TX; - } - if (dcfg->bw_percentage[rx][j] != scfg->bw_percentage[rx][j]) { - dcfg->bw_percentage[rx][j] = scfg->bw_percentage[rx][j]; - changes |= BIT_PG_RX; - } - } - - for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { - j = i - DCB_PFC_UP_ATTR_0; - if (dcfg->tc_config[j].pfc != scfg->tc_config[j].pfc) { - dcfg->tc_config[j].pfc = scfg->tc_config[j].pfc; - changes |= BIT_PFC; - } - } - - if (dcfg->pfc_mode_enable != scfg->pfc_mode_enable) { - dcfg->pfc_mode_enable = scfg->pfc_mode_enable; - changes |= BIT_PFC; - } - - return changes; -} - -static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); -} - -static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int err = 0; - - /* Fail command if not in CEE mode */ - if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) - return 1; - - /* verify there is something to do, if not then exit */ - if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - goto out; - - err = ixgbe_setup_tc(netdev, - state ? 
adapter->dcb_cfg.num_tcs.pg_tcs : 0); -out: - return !!err; -} - -static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, - u8 *perm_addr) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int i, j; - - memset(perm_addr, 0xff, MAX_ADDR_LEN); - - for (i = 0; i < netdev->addr_len; i++) - perm_addr[i] = adapter->hw.mac.perm_addr[i]; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - for (j = 0; j < netdev->addr_len; j++, i++) - perm_addr[i] = adapter->hw.mac.san_addr[j]; - break; - default: - break; - } -} - -static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, - u8 prio, u8 bwg_id, u8 bw_pct, - u8 up_map) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - if (prio != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[0].tsa = prio; - if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; - if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent = - bw_pct; - if (up_map != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = - up_map; -} - -static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, - u8 bw_pct) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; -} - -static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, - u8 prio, u8 bwg_id, u8 bw_pct, - u8 up_map) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - if (prio != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[1].tsa = prio; - if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; - if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent = - bw_pct; - if (up_map != DCB_ATTR_VALUE_UNDEFINED) - 
adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = - up_map; -} - -static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, - u8 bw_pct) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; -} - -static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, - u8 *prio, u8 *bwg_id, u8 *bw_pct, - u8 *up_map) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - *prio = adapter->dcb_cfg.tc_config[tc].path[0].tsa; - *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; - *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; - *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; -} - -static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, - u8 *bw_pct) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id]; -} - -static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, - u8 *prio, u8 *bwg_id, u8 *bw_pct, - u8 *up_map) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - *prio = adapter->dcb_cfg.tc_config[tc].path[1].tsa; - *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; - *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; - *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; -} - -static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, - u8 *bw_pct) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; -} - -static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int up, u8 pfc) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - u8 tc = ixgbe_dcb_get_tc_from_up(&adapter->temp_dcb_cfg, 0, up); - - adapter->temp_dcb_cfg.tc_config[tc].pfc = pfc; - if (adapter->temp_dcb_cfg.tc_config[tc].pfc != - adapter->dcb_cfg.tc_config[tc].pfc) - adapter->temp_dcb_cfg.pfc_mode_enable = true; -} - 
-static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int up, u8 *pfc) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - u8 tc = ixgbe_dcb_get_tc_from_up(&adapter->dcb_cfg, 0, up); - *pfc = adapter->dcb_cfg.tc_config[tc].pfc; -} - -static void ixgbe_dcbnl_devreset(struct net_device *dev) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - - while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) - usleep_range(1000, 2000); - - if (netif_running(dev)) -#ifdef HAVE_NET_DEVICE_OPS - dev->netdev_ops->ndo_stop(dev); -#else - dev->stop(dev); -#endif - - ixgbe_clear_interrupt_scheme(adapter); - ixgbe_init_interrupt_scheme(adapter); - - if (netif_running(dev)) -#ifdef HAVE_NET_DEVICE_OPS - dev->netdev_ops->ndo_open(dev); -#else - dev->open(dev); -#endif - - clear_bit(__IXGBE_RESETTING, &adapter->state); -} - -static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; - struct ixgbe_hw *hw = &adapter->hw; - int ret = DCB_NO_HW_CHG; - u8 prio_tc[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; - - /* Fail command if not in CEE mode */ - if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) - return ret; - - adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter, - IXGBE_DCB_MAX_TRAFFIC_CLASS); - if (!adapter->dcb_set_bitmap) - return ret; - - ixgbe_dcb_unpack_map_cee(dcb_cfg, IXGBE_DCB_TX_CONFIG, prio_tc); - - if (adapter->dcb_set_bitmap & (BIT_PG_TX | BIT_PG_RX)) { - /* Priority to TC mapping in CEE case default to 1:1 */ - int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; -#ifdef HAVE_MQPRIO - int i; -#endif - -#if IS_ENABLED(CONFIG_FCOE) - if (adapter->netdev->features & NETIF_F_FCOE_MTU) - max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); -#endif - - ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_cfg, max_frame, - IXGBE_DCB_TX_CONFIG); - - ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_cfg, max_frame, - IXGBE_DCB_RX_CONFIG); - - 
ixgbe_dcb_hw_config_cee(hw, dcb_cfg); - -#ifdef HAVE_MQPRIO - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) - netdev_set_prio_tc_map(netdev, i, prio_tc[i]); -#endif /* HAVE_MQPRIO */ - ret = DCB_HW_CHG_RST; - } - - if (adapter->dcb_set_bitmap & BIT_PFC) { - if (dcb_cfg->pfc_mode_enable) { - u8 pfc_en; - ixgbe_dcb_unpack_pfc_cee(dcb_cfg, prio_tc, &pfc_en); - ixgbe_dcb_config_pfc(hw, pfc_en, prio_tc); - } else { - hw->mac.ops.fc_enable(hw); - } - /* This is known driver so disable MDD before updating SRRCTL */ - if ((adapter->num_vfs) && (hw->mac.ops.disable_mdd) && - (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) - hw->mac.ops.disable_mdd(hw); - - ixgbe_set_rx_drop_en(adapter); - - if ((adapter->num_vfs) && (hw->mac.ops.enable_mdd) && - (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) - hw->mac.ops.enable_mdd(hw); - - if (ret != DCB_HW_CHG_RST) - ret = DCB_HW_CHG; - } - -#if IS_ENABLED(CONFIG_FCOE) - /* Reprogam FCoE hardware offloads when the traffic class - * FCoE is using changes. This happens if the APP info - * changes or the up2tc mapping is updated. 
- */ - if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { - adapter->fcoe.up_set = adapter->fcoe.up; - ixgbe_dcbnl_devreset(netdev); - ret = DCB_HW_CHG_RST; - } -#endif /* CONFIG_FCOE */ - - adapter->dcb_set_bitmap = 0x00; - return ret; -} - -static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) -{ -#ifdef HAVE_DCBNL_IEEE - struct ixgbe_adapter *adapter = netdev_priv(netdev); -#endif - - switch (capid) { - case DCB_CAP_ATTR_PG: - *cap = true; - break; - case DCB_CAP_ATTR_PFC: - *cap = true; - break; - case DCB_CAP_ATTR_UP2TC: - *cap = false; - break; - case DCB_CAP_ATTR_PG_TCS: - *cap = 0x80; - break; - case DCB_CAP_ATTR_PFC_TCS: - *cap = 0x80; - break; - case DCB_CAP_ATTR_GSP: - *cap = true; - break; - case DCB_CAP_ATTR_BCN: - *cap = false; - break; -#ifdef HAVE_DCBNL_IEEE - case DCB_CAP_ATTR_DCBX: - *cap = adapter->dcbx_cap; - break; -#endif - default: - *cap = false; - break; - } - - return 0; -} - -#ifdef NUMTCS_RETURNS_U8 -static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) -#else -static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) -#endif -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - u8 rval = 0; - - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - switch (tcid) { - case DCB_NUMTCS_ATTR_PG: - *num = adapter->dcb_cfg.num_tcs.pg_tcs; - break; - case DCB_NUMTCS_ATTR_PFC: - *num = adapter->dcb_cfg.num_tcs.pfc_tcs; - break; - default: - rval = -EINVAL; - break; - } - } else { - rval = -EINVAL; - } - - return rval; -} - -#ifdef NUMTCS_RETURNS_U8 -static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) -#else -static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) -#endif -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - u8 rval = 0; - - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - switch (tcid) { - case DCB_NUMTCS_ATTR_PG: - adapter->dcb_cfg.num_tcs.pg_tcs = num; - break; - case DCB_NUMTCS_ATTR_PFC: - 
adapter->dcb_cfg.num_tcs.pfc_tcs = num; - break; - default: - rval = -EINVAL; - break; - } - } else { - rval = -EINVAL; - } - - return rval; -} - -static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - return adapter->dcb_cfg.pfc_mode_enable; -} - -static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - adapter->temp_dcb_cfg.pfc_mode_enable = state; - return; -} - -#ifdef HAVE_DCBNL_OPS_GETAPP -/** - * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority - * @netdev : the corresponding netdev - * @idtype : identifies the id as ether type or TCP/UDP port number - * @id: id is either ether type or TCP/UDP port number - * - * Returns : on success, returns a non-zero 802.1p user priority bitmap - * otherwise returns 0 as the invalid user priority bitmap to indicate an - * error. - */ -#ifdef HAVE_DCBNL_OPS_SETAPP_RETURN_INT -static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) -#else -static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) -#endif -{ - u8 rval = 0; -#ifdef HAVE_DCBNL_IEEE - struct dcb_app app = { - .selector = idtype, - .protocol = id, - }; - - rval = dcb_getapp(netdev, &app); -#endif - - switch (idtype) { - case DCB_APP_IDTYPE_ETHTYPE: -#if IS_ENABLED(CONFIG_FCOE) - if (id == ETH_P_FCOE) - rval = ixgbe_fcoe_getapp(netdev); -#endif - break; - case DCB_APP_IDTYPE_PORTNUM: - break; - default: - break; - } - - return rval; -} - -/** - * ixgbe_dcbnl_setapp - set the DCBX application user priority - * @netdev : the corresponding netdev - * @idtype : identifies the id as ether type or TCP/UDP port number - * @id: id is either ether type or TCP/UDP port number - * @up: the 802.1p user priority bitmap - * - * Returns : 0 on success or 1 on error - */ -#ifdef HAVE_DCBNL_OPS_SETAPP_RETURN_INT -static int ixgbe_dcbnl_setapp(struct net_device *netdev, -#else -static 
u8 ixgbe_dcbnl_setapp(struct net_device *netdev, -#endif - u8 idtype, u16 id, u8 up) -{ - int err = 0; -#ifdef HAVE_DCBNL_IEEE - struct dcb_app app; - - app.selector = idtype; - app.protocol = id; - app.priority = up; - err = dcb_setapp(netdev, &app); -#endif - - switch (idtype) { - case DCB_APP_IDTYPE_ETHTYPE: -#if IS_ENABLED(CONFIG_FCOE) - if (id == ETH_P_FCOE) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - adapter->fcoe.up = up ? ffs(up) - 1 : IXGBE_FCOE_DEFUP; - } -#endif - break; - case DCB_APP_IDTYPE_PORTNUM: - break; - default: - break; - } - - return err; -} -#endif /* HAVE_DCBNL_OPS_GETAPP */ - -#ifdef HAVE_DCBNL_IEEE -static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, - struct ieee_ets *ets) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets; - - /* No IEEE PFC settings available */ - if (!my_ets) - return -EINVAL; - - ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs; - ets->cbs = my_ets->cbs; - memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); - memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); - memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); - memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); - return 0; -} - -static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, - struct ieee_ets *ets) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; - int i, err = 0; - __u8 max_tc = 0; - __u8 map_chg = 0; - - if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) - return -EINVAL; - - if (!adapter->ixgbe_ieee_ets) { - adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets), - GFP_KERNEL); - if (!adapter->ixgbe_ieee_ets) - return -ENOMEM; - /* initialize UP2TC mappings to invalid value */ - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) - adapter->ixgbe_ieee_ets->prio_tc[i] = - IEEE_8021QAZ_MAX_TCS; - /* if possible update UP2TC mappings from HW */ - if (adapter->hw.mac.ops.get_rtrup2tc) - 
adapter->hw.mac.ops.get_rtrup2tc(&adapter->hw, - adapter->ixgbe_ieee_ets->prio_tc); - } - - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - if (ets->prio_tc[i] > max_tc) - max_tc = ets->prio_tc[i]; - if (ets->prio_tc[i] != adapter->ixgbe_ieee_ets->prio_tc[i]) - map_chg = 1; - } - - memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets)); - - if (max_tc) - max_tc++; - - if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs) - return -EINVAL; - - if (max_tc != netdev_get_num_tc(dev)) - err = ixgbe_setup_tc(dev, max_tc); - else if (map_chg) - ixgbe_dcbnl_devreset(dev); - - if (err) - goto err_out; - - err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); -err_out: - return err; -} - -static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, - struct ieee_pfc *pfc) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc; - int i; - - /* No IEEE PFC settings available */ - if (!my_pfc) - return -EINVAL; - - pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs; - pfc->pfc_en = my_pfc->pfc_en; - pfc->mbc = my_pfc->mbc; - pfc->delay = my_pfc->delay; - - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - pfc->requests[i] = adapter->stats.pxoffrxc[i]; - pfc->indications[i] = adapter->stats.pxofftxc[i]; - } - - return 0; -} - -static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, - struct ieee_pfc *pfc) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_hw *hw = &adapter->hw; - u8 *prio_tc; - int err; - - if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) - return -EINVAL; - - if (!adapter->ixgbe_ieee_pfc) { - adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc), - GFP_KERNEL); - if (!adapter->ixgbe_ieee_pfc) - return -ENOMEM; - } - - prio_tc = adapter->ixgbe_ieee_ets->prio_tc; - memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc)); - - - /* Enable link flow control parameters if PFC is disabled */ - if (pfc->pfc_en) - err = ixgbe_dcb_config_pfc(hw, pfc->pfc_en, prio_tc); - 
else - err = hw->mac.ops.fc_enable(hw); - - /* This is known driver so disable MDD before updating SRRCTL */ - if ((adapter->num_vfs) && (hw->mac.ops.disable_mdd) && - (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) - hw->mac.ops.disable_mdd(hw); - - ixgbe_set_rx_drop_en(adapter); - - if ((adapter->num_vfs) && (hw->mac.ops.enable_mdd) && - (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) - hw->mac.ops.enable_mdd(hw); - - return err; -} - -static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, - struct dcb_app *app) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - int err = -EINVAL; - - if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) - return err; - - err = dcb_ieee_setapp(dev, app); - -#if IS_ENABLED(CONFIG_FCOE) - if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && - app->protocol == ETH_P_FCOE) { - u8 app_mask = dcb_ieee_getapp_mask(dev, app); - - if (app_mask & (1 << adapter->fcoe.up)) - return err; - - adapter->fcoe.up = app->priority; - adapter->fcoe.up_set = adapter->fcoe.up; - ixgbe_dcbnl_devreset(dev); - } -#endif - return 0; -} - -#ifdef HAVE_DCBNL_IEEE_DELAPP -static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, - struct dcb_app *app) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - int err; - - if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) - return -EINVAL; - - err = dcb_ieee_delapp(dev, app); - -#if IS_ENABLED(CONFIG_FCOE) - if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && - app->protocol == ETH_P_FCOE) { - u8 app_mask = dcb_ieee_getapp_mask(dev, app); - - if (app_mask & (1 << adapter->fcoe.up)) - return err; - - adapter->fcoe.up = app_mask ? 
- ffs(app_mask) - 1 : IXGBE_FCOE_DEFUP; - ixgbe_dcbnl_devreset(dev); - } -#endif - return err; -} -#endif /* HAVE_DCBNL_IEEE_DELAPP */ - -static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - return adapter->dcbx_cap; -} - -static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ieee_ets ets = { .ets_cap = 0 }; - struct ieee_pfc pfc = { .pfc_en = 0 }; - - /* no support for LLD_MANAGED modes or CEE+IEEE */ - if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || - ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || - !(mode & DCB_CAP_DCBX_HOST)) - return 1; - - if (mode == adapter->dcbx_cap) - return 0; - - adapter->dcbx_cap = mode; - - /* ETS and PFC defaults */ - ets.ets_cap = 8; - pfc.pfc_cap = 8; - - if (mode & DCB_CAP_DCBX_VER_IEEE) { - ixgbe_dcbnl_ieee_setets(dev, &ets); - ixgbe_dcbnl_ieee_setpfc(dev, &pfc); - } else if (mode & DCB_CAP_DCBX_VER_CEE) { - u8 mask = (BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG); - - adapter->dcb_set_bitmap |= mask; - ixgbe_dcbnl_set_all(dev); - } else { - /* Drop into single TC mode strict priority as this - * indicates CEE and IEEE versions are disabled - */ - ixgbe_dcbnl_ieee_setets(dev, &ets); - ixgbe_dcbnl_ieee_setpfc(dev, &pfc); - ixgbe_setup_tc(dev, 0); - } - - return 0; -} - -#endif - -struct dcbnl_rtnl_ops ixgbe_dcbnl_ops = { -#ifdef HAVE_DCBNL_IEEE - .ieee_getets = ixgbe_dcbnl_ieee_getets, - .ieee_setets = ixgbe_dcbnl_ieee_setets, - .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc, - .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc, - .ieee_setapp = ixgbe_dcbnl_ieee_setapp, -#ifdef HAVE_DCBNL_IEEE_DELAPP - .ieee_delapp = ixgbe_dcbnl_ieee_delapp, -#endif -#endif - .getstate = ixgbe_dcbnl_get_state, - .setstate = ixgbe_dcbnl_set_state, - .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, - .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx, - .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx, - .setpgtccfgrx = 
ixgbe_dcbnl_set_pg_tc_cfg_rx, - .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx, - .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx, - .getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx, - .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx, - .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx, - .setpfccfg = ixgbe_dcbnl_set_pfc_cfg, - .getpfccfg = ixgbe_dcbnl_get_pfc_cfg, - .setall = ixgbe_dcbnl_set_all, - .getcap = ixgbe_dcbnl_getcap, - .getnumtcs = ixgbe_dcbnl_getnumtcs, - .setnumtcs = ixgbe_dcbnl_setnumtcs, - .getpfcstate = ixgbe_dcbnl_getpfcstate, - .setpfcstate = ixgbe_dcbnl_setpfcstate, -#ifdef HAVE_DCBNL_OPS_GETAPP - .getapp = ixgbe_dcbnl_getapp, - .setapp = ixgbe_dcbnl_setapp, -#endif -#ifdef HAVE_DCBNL_IEEE - .getdcbx = ixgbe_dcbnl_getdcbx, - .setdcbx = ixgbe_dcbnl_setdcbx, -#endif -}; - -#endif /* CONFIG_DCB */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_debugfs.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_debugfs.c deleted file mode 100644 index 66f52e211040..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_debugfs.c +++ /dev/null @@ -1,281 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". 
- - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe.h" - -#ifdef HAVE_IXGBE_DEBUG_FS -#include -#include - -static struct dentry *ixgbe_dbg_root; - -static char ixgbe_dbg_reg_ops_buf[256] = ""; - -/** - * ixgbe_dbg_reg_ops_read - read for reg_ops datum - * @filp: the opened file - * @buffer: where to write the data for the user to read - * @count: the size of the user's buffer - * @ppos: file position offset - **/ -static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) -{ - struct ixgbe_adapter *adapter = filp->private_data; - char *buf; - int len; - - /* don't allow partial reads */ - if (*ppos != 0) - return 0; - - buf = kasprintf(GFP_KERNEL, "%s: %s\n", - adapter->netdev->name, - ixgbe_dbg_reg_ops_buf); - if (!buf) - return -ENOMEM; - - if (count < strlen(buf)) { - kfree(buf); - return -ENOSPC; - } - - len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); - - kfree(buf); - return len; -} - -/** - * ixgbe_dbg_reg_ops_write - write into reg_ops datum - * @filp: the opened file - * @buffer: where to find the user's data - * @count: the length of the user's data - * @ppos: file position offset - **/ -static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp, - const char __user *buffer, - size_t count, loff_t *ppos) -{ - struct ixgbe_adapter *adapter = filp->private_data; - int len; - - /* don't allow partial writes */ - if (*ppos != 0) - return 0; - if (count >= sizeof(ixgbe_dbg_reg_ops_buf)) - return -ENOSPC; - - len = simple_write_to_buffer(ixgbe_dbg_reg_ops_buf, - sizeof(ixgbe_dbg_reg_ops_buf)-1, - ppos, - buffer, - count); - if (len < 0) - return len; - - ixgbe_dbg_reg_ops_buf[len] = '\0'; - - if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) { - u32 reg, value; - int cnt; - cnt = 
sscanf(&ixgbe_dbg_reg_ops_buf[5], "%x %x", ®, &value); - if (cnt == 2) { - IXGBE_WRITE_REG(&adapter->hw, reg, value); - value = IXGBE_READ_REG(&adapter->hw, reg); - e_dev_info("write: 0x%08x = 0x%08x\n", reg, value); - } else { - e_dev_info("write \n"); - } - } else if (strncmp(ixgbe_dbg_reg_ops_buf, "read", 4) == 0) { - u32 reg, value; - int cnt; - cnt = sscanf(&ixgbe_dbg_reg_ops_buf[4], "%x", ®); - if (cnt == 1) { - value = IXGBE_READ_REG(&adapter->hw, reg); - e_dev_info("read 0x%08x = 0x%08x\n", reg, value); - } else { - e_dev_info("read \n"); - } - } else { - e_dev_info("Unknown command %s\n", ixgbe_dbg_reg_ops_buf); - e_dev_info("Available commands:\n"); - e_dev_info(" read \n"); - e_dev_info(" write \n"); - } - return count; -} - -static const struct file_operations ixgbe_dbg_reg_ops_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = ixgbe_dbg_reg_ops_read, - .write = ixgbe_dbg_reg_ops_write, -}; - -static char ixgbe_dbg_netdev_ops_buf[256] = ""; - -/** - * ixgbe_dbg_netdev_ops_read - read for netdev_ops datum - * @filp: the opened file - * @buffer: where to write the data for the user to read - * @count: the size of the user's buffer - * @ppos: file position offset - **/ -static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, - char __user *buffer, - size_t count, loff_t *ppos) -{ - struct ixgbe_adapter *adapter = filp->private_data; - char *buf; - int len; - - /* don't allow partial reads */ - if (*ppos != 0) - return 0; - - buf = kasprintf(GFP_KERNEL, "%s: %s\n", - adapter->netdev->name, - ixgbe_dbg_netdev_ops_buf); - if (!buf) - return -ENOMEM; - - if (count < strlen(buf)) { - kfree(buf); - return -ENOSPC; - } - - len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); - - kfree(buf); - return len; -} - -/** - * ixgbe_dbg_netdev_ops_write - write into netdev_ops datum - * @filp: the opened file - * @buffer: where to find the user's data - * @count: the length of the user's data - * @ppos: file position offset - **/ 
-static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, - const char __user *buffer, - size_t count, loff_t *ppos) -{ - struct ixgbe_adapter *adapter = filp->private_data; - int len; - - /* don't allow partial writes */ - if (*ppos != 0) - return 0; - if (count >= sizeof(ixgbe_dbg_netdev_ops_buf)) - return -ENOSPC; - - len = simple_write_to_buffer(ixgbe_dbg_netdev_ops_buf, - sizeof(ixgbe_dbg_netdev_ops_buf)-1, - ppos, - buffer, - count); - if (len < 0) - return len; - - ixgbe_dbg_netdev_ops_buf[len] = '\0'; - - if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { -#ifdef HAVE_NET_DEVICE_OPS - adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); -#else - adapter->netdev->tx_timeout(adapter->netdev); -#endif /* HAVE_NET_DEVICE_OPS */ - e_dev_info("tx_timeout called\n"); - } else { - e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf); - e_dev_info("Available commands:\n"); - e_dev_info(" tx_timeout\n"); - } - return count; -} - -static struct file_operations ixgbe_dbg_netdev_ops_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = ixgbe_dbg_netdev_ops_read, - .write = ixgbe_dbg_netdev_ops_write, -}; - -/** - * ixgbe_dbg_adapter_init - setup the debugfs directory for the adapter - * @adapter: the adapter that is starting up - **/ -void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) -{ - const char *name = pci_name(adapter->pdev); - struct dentry *pfile; - adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root); - if (adapter->ixgbe_dbg_adapter) { - pfile = debugfs_create_file("reg_ops", 0600, - adapter->ixgbe_dbg_adapter, adapter, - &ixgbe_dbg_reg_ops_fops); - if (!pfile) - e_dev_err("debugfs reg_ops for %s failed\n", name); - pfile = debugfs_create_file("netdev_ops", 0600, - adapter->ixgbe_dbg_adapter, adapter, - &ixgbe_dbg_netdev_ops_fops); - if (!pfile) - e_dev_err("debugfs netdev_ops for %s failed\n", name); - } else { - e_dev_err("debugfs entry for %s failed\n", name); - } -} - -/** - * 
ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries - * @pf: the pf that is stopping - **/ -void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) -{ - if (adapter->ixgbe_dbg_adapter) - debugfs_remove_recursive(adapter->ixgbe_dbg_adapter); - adapter->ixgbe_dbg_adapter = NULL; -} - -/** - * ixgbe_dbg_init - start up debugfs for the driver - **/ -void ixgbe_dbg_init(void) -{ - ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL); - if (ixgbe_dbg_root == NULL) - pr_err("init of debugfs failed\n"); -} - -/** - * ixgbe_dbg_exit - clean out the driver's debugfs entries - **/ -void ixgbe_dbg_exit(void) -{ - debugfs_remove_recursive(ixgbe_dbg_root); -} - -#endif /* HAVE_IXGBE_DEBUG_FS */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ethtool.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ethtool.c deleted file mode 100644 index a446f227820c..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ethtool.c +++ /dev/null @@ -1,4429 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* ethtool support for ixgbe */ - -#include -#include -#include -#include -#include -#include -#include - -#ifdef SIOCETHTOOL -#include - -#include "ixgbe.h" -#ifdef ETHTOOL_GMODULEINFO -#include "ixgbe_phy.h" -#endif -#ifdef HAVE_ETHTOOL_GET_TS_INFO -#include -#endif - -#ifndef ETH_GSTRING_LEN -#define ETH_GSTRING_LEN 32 -#endif - -#define IXGBE_ALL_RAR_ENTRIES 16 - -#ifdef ETHTOOL_OPS_COMPAT -#include "kcompat_ethtool.c" -#endif -#ifdef ETHTOOL_GSTATS -struct ixgbe_stats { - char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; - int stat_offset; -}; - -#define IXGBE_NETDEV_STAT(_net_stat) { \ - .stat_string = #_net_stat, \ - .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ - .stat_offset = offsetof(struct net_device_stats, _net_stat) \ -} -static const struct ixgbe_stats ixgbe_gstrings_net_stats[] = { - IXGBE_NETDEV_STAT(rx_packets), - IXGBE_NETDEV_STAT(tx_packets), - IXGBE_NETDEV_STAT(rx_bytes), - IXGBE_NETDEV_STAT(tx_bytes), - IXGBE_NETDEV_STAT(rx_errors), - IXGBE_NETDEV_STAT(tx_errors), - IXGBE_NETDEV_STAT(rx_dropped), - IXGBE_NETDEV_STAT(tx_dropped), - IXGBE_NETDEV_STAT(multicast), - IXGBE_NETDEV_STAT(collisions), - IXGBE_NETDEV_STAT(rx_over_errors), - IXGBE_NETDEV_STAT(rx_crc_errors), - IXGBE_NETDEV_STAT(rx_frame_errors), - IXGBE_NETDEV_STAT(rx_fifo_errors), - IXGBE_NETDEV_STAT(rx_missed_errors), - IXGBE_NETDEV_STAT(tx_aborted_errors), - IXGBE_NETDEV_STAT(tx_carrier_errors), - IXGBE_NETDEV_STAT(tx_fifo_errors), - IXGBE_NETDEV_STAT(tx_heartbeat_errors), -}; - -#define IXGBE_STAT(_name, _stat) { \ - .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(struct ixgbe_adapter, _stat), \ - .stat_offset = offsetof(struct ixgbe_adapter, _stat) \ -} -static struct ixgbe_stats ixgbe_gstrings_stats[] = { - IXGBE_STAT("rx_pkts_nic", stats.gprc), - IXGBE_STAT("tx_pkts_nic", stats.gptc), - IXGBE_STAT("rx_bytes_nic", 
stats.gorc), - IXGBE_STAT("tx_bytes_nic", stats.gotc), - IXGBE_STAT("lsc_int", lsc_int), - IXGBE_STAT("tx_busy", tx_busy), - IXGBE_STAT("non_eop_descs", non_eop_descs), - IXGBE_STAT("broadcast", stats.bprc), - IXGBE_STAT("rx_no_buffer_count", stats.rnbc[0]) , - IXGBE_STAT("tx_timeout_count", tx_timeout_count), - IXGBE_STAT("tx_restart_queue", restart_queue), - IXGBE_STAT("rx_long_length_errors", stats.roc), - IXGBE_STAT("rx_short_length_errors", stats.ruc), - IXGBE_STAT("tx_flow_control_xon", stats.lxontxc), - IXGBE_STAT("rx_flow_control_xon", stats.lxonrxc), - IXGBE_STAT("tx_flow_control_xoff", stats.lxofftxc), - IXGBE_STAT("rx_flow_control_xoff", stats.lxoffrxc), - IXGBE_STAT("rx_csum_offload_errors", hw_csum_rx_error), - IXGBE_STAT("alloc_rx_page_failed", alloc_rx_page_failed), - IXGBE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), - IXGBE_STAT("rx_no_dma_resources", hw_rx_no_dma_resources), - IXGBE_STAT("hw_rsc_aggregated", rsc_total_count), - IXGBE_STAT("hw_rsc_flushed", rsc_total_flush), -#ifdef HAVE_TX_MQ - IXGBE_STAT("fdir_match", stats.fdirmatch), - IXGBE_STAT("fdir_miss", stats.fdirmiss), - IXGBE_STAT("fdir_overflow", fdir_overflow), -#endif /* HAVE_TX_MQ */ -#if IS_ENABLED(CONFIG_FCOE) - IXGBE_STAT("fcoe_bad_fccrc", stats.fccrc), - IXGBE_STAT("fcoe_last_errors", stats.fclast), - IXGBE_STAT("rx_fcoe_dropped", stats.fcoerpdc), - IXGBE_STAT("rx_fcoe_packets", stats.fcoeprc), - IXGBE_STAT("rx_fcoe_dwords", stats.fcoedwrc), - IXGBE_STAT("fcoe_noddp", stats.fcoe_noddp), - IXGBE_STAT("fcoe_noddp_ext_buff", stats.fcoe_noddp_ext_buff), - IXGBE_STAT("tx_fcoe_packets", stats.fcoeptc), - IXGBE_STAT("tx_fcoe_dwords", stats.fcoedwtc), -#endif /* CONFIG_FCOE */ - IXGBE_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), - IXGBE_STAT("os2bmc_tx_by_bmc", stats.b2ospc), - IXGBE_STAT("os2bmc_tx_by_host", stats.o2bspc), - IXGBE_STAT("os2bmc_rx_by_host", stats.b2ogprc), -#ifdef HAVE_PTP_1588_CLOCK - IXGBE_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), - 
IXGBE_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), - IXGBE_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), -#endif /* HAVE_PTP_1588_CLOCK */ -}; - -/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so - * we set the num_rx_queues to evaluate to num_tx_queues. This is - * used because we do not have a good way to get the max number of - * rx queues with CONFIG_RPS disabled. - */ -#ifdef HAVE_TX_MQ -#ifdef HAVE_NETDEV_SELECT_QUEUE -#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues -#define IXGBE_NUM_TX_QUEUES netdev->num_tx_queues -#else -#define IXGBE_NUM_RX_QUEUES adapter->indices -#define IXGBE_NUM_TX_QUEUES adapter->indices -#endif /* HAVE_NETDEV_SELECT_QUEUE */ -#else /* HAVE_TX_MQ */ -#define IXGBE_NUM_TX_QUEUES 1 -#define IXGBE_NUM_RX_QUEUES ( \ - ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) -#endif /* HAVE_TX_MQ */ - -#define IXGBE_QUEUE_STATS_LEN ( \ - (IXGBE_NUM_TX_QUEUES + IXGBE_NUM_RX_QUEUES) * \ - (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) -#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) -#define IXGBE_NETDEV_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_net_stats) -#define IXGBE_PB_STATS_LEN ( \ - (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ - sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ - sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ - sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ - / sizeof(u64)) -#define IXGBE_VF_STATS_LEN \ - ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_vfs) * \ - (sizeof(struct vf_stats) / sizeof(u64))) -#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ - IXGBE_NETDEV_STATS_LEN + \ - IXGBE_PB_STATS_LEN + \ - IXGBE_QUEUE_STATS_LEN + \ - IXGBE_VF_STATS_LEN) - -#endif /* ETHTOOL_GSTATS */ -#ifdef ETHTOOL_TEST -static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { - "Register test (offline)", "Eeprom test (offline)", - "Interrupt test (offline)", "Loopback test (offline)", - "Link test (on/offline)" -}; -#define IXGBE_TEST_LEN 
(sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) -#endif /* ETHTOOL_TEST */ - -#ifdef HAVE_ETHTOOL_GET_SSET_COUNT -static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { -#define IXGBE_PRIV_FLAGS_FD_ATR BIT(0) - "flow-director-atr", -#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC -#define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(1) - "legacy-rx", -#endif -}; - -#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings) - -#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ -/* currently supported speeds for 10G */ -#define ADVERTISED_MASK_10G (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full | SUPPORTED_10000baseKR_Full) - -#define ixgbe_isbackplane(type) ((type == ixgbe_media_type_backplane)? true : false) - -static __u32 ixgbe_backplane_type(struct ixgbe_hw *hw) -{ - __u32 mode = 0x00; - switch(hw->device_id) - { - case IXGBE_DEV_ID_82598: - case IXGBE_DEV_ID_82599_KX4: - case IXGBE_DEV_ID_82599_KX4_MEZZ: - case IXGBE_DEV_ID_X550EM_X_KX4: - mode = SUPPORTED_10000baseKX4_Full; - break; - case IXGBE_DEV_ID_82598_BX: - case IXGBE_DEV_ID_82599_KR: - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_X_XFI: - mode = SUPPORTED_10000baseKR_Full; - break; - default: - mode = (SUPPORTED_10000baseKX4_Full | SUPPORTED_10000baseKR_Full); - break; - } - return mode; -} - -#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE -static int ixgbe_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - ixgbe_link_speed supported_link; - bool autoneg = false; - u32 supported, advertising; - - ethtool_convert_link_mode_to_legacy_u32(&supported, - cmd->link_modes.supported); - - hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); - - /* set the supported link speeds */ - if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? 
- ixgbe_backplane_type(hw) : - SUPPORTED_10000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) - supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? - SUPPORTED_1000baseKX_Full : - SUPPORTED_1000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_100_FULL) - supported |= SUPPORTED_100baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_10_FULL) - supported |= SUPPORTED_10baseT_Full; - - /* default advertised speed if phy.autoneg_advertised isn't set */ - advertising = supported; - - /* set the advertised speeds */ - if (hw->phy.autoneg_advertised) { - advertising = 0; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) - advertising |= ADVERTISED_10baseT_Full; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) - advertising |= ADVERTISED_100baseT_Full; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - advertising |= supported & ADVERTISED_MASK_10G; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { - if (supported & SUPPORTED_1000baseKX_Full) - advertising |= ADVERTISED_1000baseKX_Full; - else - advertising |= ADVERTISED_1000baseT_Full; - } - } else { - if (hw->phy.multispeed_fiber && !autoneg) { - if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - advertising = ADVERTISED_10000baseT_Full; - } - } - - if (autoneg) { - supported |= SUPPORTED_Autoneg; - advertising |= ADVERTISED_Autoneg; - cmd->base.autoneg = AUTONEG_ENABLE; - } else { - cmd->base.autoneg = AUTONEG_DISABLE; - } - - /* Determine the remaining settings based on the PHY type. 
*/ - switch (adapter->hw.phy.type) { - case ixgbe_phy_tn: - case ixgbe_phy_aq: - case ixgbe_phy_x550em_ext_t: - case ixgbe_phy_fw: - case ixgbe_phy_cu_unknown: - supported |= SUPPORTED_TP; - advertising |= ADVERTISED_TP; - cmd->base.port = PORT_TP; - break; - case ixgbe_phy_qt: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; - cmd->base.port = PORT_FIBRE; - break; - case ixgbe_phy_nl: - case ixgbe_phy_sfp_passive_tyco: - case ixgbe_phy_sfp_passive_unknown: - case ixgbe_phy_sfp_ftl: - case ixgbe_phy_sfp_avago: - case ixgbe_phy_sfp_intel: - case ixgbe_phy_sfp_unknown: - case ixgbe_phy_qsfp_passive_unknown: - case ixgbe_phy_qsfp_active_unknown: - case ixgbe_phy_qsfp_intel: - case ixgbe_phy_qsfp_unknown: - switch (adapter->hw.phy.sfp_type) { - /* SFP+ devices, further checking needed */ - case ixgbe_sfp_type_da_cu: - case ixgbe_sfp_type_da_cu_core0: - case ixgbe_sfp_type_da_cu_core1: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; - cmd->base.port = PORT_DA; - break; - case ixgbe_sfp_type_sr: - case ixgbe_sfp_type_lr: - case ixgbe_sfp_type_srlr_core0: - case ixgbe_sfp_type_srlr_core1: - case ixgbe_sfp_type_1g_sx_core0: - case ixgbe_sfp_type_1g_sx_core1: - case ixgbe_sfp_type_1g_lx_core0: - case ixgbe_sfp_type_1g_lx_core1: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; - cmd->base.port = PORT_FIBRE; - break; - case ixgbe_sfp_type_not_present: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; - cmd->base.port = PORT_NONE; - break; - case ixgbe_sfp_type_1g_cu_core0: - case ixgbe_sfp_type_1g_cu_core1: - supported |= SUPPORTED_TP; - advertising |= ADVERTISED_TP; - cmd->base.port = PORT_TP; - break; - case ixgbe_sfp_type_unknown: - default: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; - cmd->base.port = PORT_OTHER; - break; - } - break; - case ixgbe_phy_xaui: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; - cmd->base.port = PORT_NONE; - break; - case 
ixgbe_phy_unknown: - case ixgbe_phy_generic: - case ixgbe_phy_sfp_unsupported: - default: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; - cmd->base.port = PORT_OTHER; - break; - } - - /* Indicate pause support */ - supported |= SUPPORTED_Pause; - - switch (hw->fc.requested_mode) { - case ixgbe_fc_full: - advertising |= ADVERTISED_Pause; - break; - case ixgbe_fc_rx_pause: - advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; - break; - case ixgbe_fc_tx_pause: - advertising |= ADVERTISED_Asym_Pause; - break; - default: - advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - } - - if (netif_carrier_ok(netdev)) { - switch (adapter->link_speed) { - case IXGBE_LINK_SPEED_10GB_FULL: - cmd->base.speed = SPEED_10000; - break; - case IXGBE_LINK_SPEED_5GB_FULL: - cmd->base.speed = SPEED_5000; - break; -#ifdef SUPPORTED_2500baseX_Full - case IXGBE_LINK_SPEED_2_5GB_FULL: - cmd->base.speed = SPEED_2500; - break; -#endif /* SUPPORTED_2500baseX_Full */ - case IXGBE_LINK_SPEED_1GB_FULL: - cmd->base.speed = SPEED_1000; - break; - case IXGBE_LINK_SPEED_100_FULL: - cmd->base.speed = SPEED_100; - break; - case IXGBE_LINK_SPEED_10_FULL: - cmd->base.speed = SPEED_10; - break; - default: - break; - } - cmd->base.duplex = DUPLEX_FULL; - } else { - cmd->base.speed = SPEED_UNKNOWN; - cmd->base.duplex = DUPLEX_UNKNOWN; - } - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - supported); - - return 0; -} -#else /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ -static int ixgbe_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - ixgbe_link_speed supported_link; - bool autoneg = false; - - hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); - - /* set the supported link speeds */ - if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - 
ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? - ixgbe_backplane_type(hw) : - SUPPORTED_10000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) - ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? - SUPPORTED_1000baseKX_Full : - SUPPORTED_1000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_100_FULL) - ecmd->supported |= SUPPORTED_100baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_10_FULL) - ecmd->supported |= SUPPORTED_10baseT_Full; - - /* default advertised speed if phy.autoneg_advertised isn't set */ - ecmd->advertising = ecmd->supported; - - /* set the advertised speeds */ - if (hw->phy.autoneg_advertised) { - ecmd->advertising = 0; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) - ecmd->advertising |= ADVERTISED_10baseT_Full; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) - ecmd->advertising |= ADVERTISED_100baseT_Full; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising |= (ecmd->supported & ADVERTISED_MASK_10G); - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { - if (ecmd->supported & SUPPORTED_1000baseKX_Full) - ecmd->advertising |= ADVERTISED_1000baseKX_Full; - else - ecmd->advertising |= ADVERTISED_1000baseT_Full; - } - } else { - if (hw->phy.multispeed_fiber && !autoneg) { - if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising = ADVERTISED_10000baseT_Full; - } - } - - if (autoneg) { - ecmd->supported |= SUPPORTED_Autoneg; - ecmd->advertising |= ADVERTISED_Autoneg; - ecmd->autoneg = AUTONEG_ENABLE; - } else { - ecmd->autoneg = AUTONEG_DISABLE; - } - - ecmd->transceiver = XCVR_EXTERNAL; - - /* Determine the remaining settings based on the PHY type. 
*/ - switch (adapter->hw.phy.type) { - case ixgbe_phy_tn: - case ixgbe_phy_aq: - case ixgbe_phy_x550em_ext_t: - case ixgbe_phy_fw: - case ixgbe_phy_cu_unknown: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; - break; - case ixgbe_phy_qt: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - break; - case ixgbe_phy_nl: - case ixgbe_phy_sfp_passive_tyco: - case ixgbe_phy_sfp_passive_unknown: - case ixgbe_phy_sfp_ftl: - case ixgbe_phy_sfp_avago: - case ixgbe_phy_sfp_intel: - case ixgbe_phy_sfp_unknown: - case ixgbe_phy_qsfp_passive_unknown: - case ixgbe_phy_qsfp_active_unknown: - case ixgbe_phy_qsfp_intel: - case ixgbe_phy_qsfp_unknown: - switch (adapter->hw.phy.sfp_type) { - /* SFP+ devices, further checking needed */ - case ixgbe_sfp_type_da_cu: - case ixgbe_sfp_type_da_cu_core0: - case ixgbe_sfp_type_da_cu_core1: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_DA; - break; - case ixgbe_sfp_type_sr: - case ixgbe_sfp_type_lr: - case ixgbe_sfp_type_srlr_core0: - case ixgbe_sfp_type_srlr_core1: - case ixgbe_sfp_type_1g_sx_core0: - case ixgbe_sfp_type_1g_sx_core1: - case ixgbe_sfp_type_1g_lx_core0: - case ixgbe_sfp_type_1g_lx_core1: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - break; - case ixgbe_sfp_type_not_present: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_NONE; - break; - case ixgbe_sfp_type_1g_cu_core0: - case ixgbe_sfp_type_1g_cu_core1: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; - break; - case ixgbe_sfp_type_unknown: - default: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_OTHER; - break; - } - break; - case ixgbe_phy_xaui: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= 
ADVERTISED_FIBRE; - ecmd->port = PORT_NONE; - break; - case ixgbe_phy_unknown: - case ixgbe_phy_generic: - case ixgbe_phy_sfp_unsupported: - default: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_OTHER; - break; - } - - /* Indicate pause support */ - ecmd->supported |= SUPPORTED_Pause; - - switch (hw->fc.requested_mode) { - case ixgbe_fc_full: - ecmd->advertising |= ADVERTISED_Pause; - break; - case ixgbe_fc_rx_pause: - ecmd->advertising |= ADVERTISED_Pause | - ADVERTISED_Asym_Pause; - break; - case ixgbe_fc_tx_pause: - ecmd->advertising |= ADVERTISED_Asym_Pause; - break; - default: - ecmd->advertising &= ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); - } - - if (netif_carrier_ok(netdev)) { - switch (adapter->link_speed) { - case IXGBE_LINK_SPEED_10GB_FULL: - ethtool_cmd_speed_set(ecmd, SPEED_10000); - break; - case IXGBE_LINK_SPEED_5GB_FULL: - ethtool_cmd_speed_set(ecmd, SPEED_5000); - break; -#ifdef SUPPORTED_2500baseX_Full - case IXGBE_LINK_SPEED_2_5GB_FULL: - ethtool_cmd_speed_set(ecmd, SPEED_2500); - break; -#endif /* SUPPORTED_2500baseX_Full */ - case IXGBE_LINK_SPEED_1GB_FULL: - ethtool_cmd_speed_set(ecmd, SPEED_1000); - break; - case IXGBE_LINK_SPEED_100_FULL: - ethtool_cmd_speed_set(ecmd, SPEED_100); - break; - case IXGBE_LINK_SPEED_10_FULL: - ethtool_cmd_speed_set(ecmd, SPEED_10); - break; - default: - break; - } - ecmd->duplex = DUPLEX_FULL; - } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; - } - - return 0; -} -#endif /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ - -#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE -static int ixgbe_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *cmd) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u32 advertised, old; - s32 err = 0; - u32 supported, advertising; - - ethtool_convert_link_mode_to_legacy_u32(&supported, - cmd->link_modes.supported); 
- ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); - - if ((hw->phy.media_type == ixgbe_media_type_copper) || - (hw->phy.multispeed_fiber)) { - /* - * this function does not support duplex forcing, but can - * limit the advertising of the adapter to the specified speed - */ - if (advertising & ~supported) - return -EINVAL; - - /* only allow one speed at a time if no autoneg */ - if (!cmd->base.autoneg && hw->phy.multispeed_fiber) { - if (advertising == - (ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full)) - return -EINVAL; - } - - old = hw->phy.autoneg_advertised; - advertised = 0; - if (advertising & ADVERTISED_10000baseT_Full) - advertised |= IXGBE_LINK_SPEED_10GB_FULL; - - if (advertising & ADVERTISED_1000baseT_Full) - advertised |= IXGBE_LINK_SPEED_1GB_FULL; - - if (advertising & ADVERTISED_100baseT_Full) - advertised |= IXGBE_LINK_SPEED_100_FULL; - - if (advertising & ADVERTISED_10baseT_Full) - advertised |= IXGBE_LINK_SPEED_10_FULL; - - if (old == advertised) - return err; - /* this sets the link speed and restarts auto-neg */ - while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) - usleep_range(1000, 2000); - - hw->mac.autotry_restart = true; - err = hw->mac.ops.setup_link(hw, advertised, true); - if (err) { - e_info(probe, "setup link failed with code %d\n", err); - hw->mac.ops.setup_link(hw, old, true); - } - clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); - } else { - /* in this case we currently only support 10Gb/FULL */ - u32 speed = cmd->base.speed; - - if ((cmd->base.autoneg == AUTONEG_ENABLE) || - (advertising != ADVERTISED_10000baseT_Full) || - (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL)) - return -EINVAL; - } - - return err; -} -#else /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ -static int ixgbe_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u32 advertised, old; 
- s32 err = 0; - - if ((hw->phy.media_type == ixgbe_media_type_copper) || - (hw->phy.multispeed_fiber)) { - /* - * this function does not support duplex forcing, but can - * limit the advertising of the adapter to the specified speed - */ - if (ecmd->advertising & ~ecmd->supported) - return -EINVAL; - - /* only allow one speed at a time if no autoneg */ - if (!ecmd->autoneg && hw->phy.multispeed_fiber) { - if (ecmd->advertising == - (ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full)) - return -EINVAL; - } - - old = hw->phy.autoneg_advertised; - advertised = 0; - if (ecmd->advertising & ADVERTISED_10000baseT_Full) - advertised |= IXGBE_LINK_SPEED_10GB_FULL; - - if (ecmd->advertising & ADVERTISED_1000baseT_Full) - advertised |= IXGBE_LINK_SPEED_1GB_FULL; - - if (ecmd->advertising & ADVERTISED_100baseT_Full) - advertised |= IXGBE_LINK_SPEED_100_FULL; - - if (ecmd->advertising & ADVERTISED_10baseT_Full) - advertised |= IXGBE_LINK_SPEED_10_FULL; - - if (old == advertised) - return err; - /* this sets the link speed and restarts auto-neg */ - while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) - usleep_range(1000, 2000); - - hw->mac.autotry_restart = true; - err = hw->mac.ops.setup_link(hw, advertised, true); - if (err) { - e_info(probe, "setup link failed with code %d\n", err); - hw->mac.ops.setup_link(hw, old, true); - } - clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); - } - else { - /* in this case we currently only support 10Gb/FULL */ - u32 speed = ethtool_cmd_speed(ecmd); - - if ((ecmd->autoneg == AUTONEG_ENABLE) || - (ecmd->advertising != ADVERTISED_10000baseT_Full) || - (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) - return -EINVAL; - } - - return err; -} -#endif /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ - -static void ixgbe_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - - if 
(ixgbe_device_supports_autoneg_fc(hw) && - !hw->fc.disable_fc_autoneg) - pause->autoneg = 1; - else - pause->autoneg = 0; - - if (hw->fc.current_mode == ixgbe_fc_rx_pause) { - pause->rx_pause = 1; - } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { - pause->tx_pause = 1; - } else if (hw->fc.current_mode == ixgbe_fc_full) { - pause->rx_pause = 1; - pause->tx_pause = 1; - } -} - -static int ixgbe_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_fc_info fc = hw->fc; - - /* 82598 does no support link flow control with DCB enabled */ - if ((hw->mac.type == ixgbe_mac_82598EB) && - (adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - return -EINVAL; - - - /* some devices do not support autoneg of flow control */ - if ((pause->autoneg == AUTONEG_ENABLE) && - !ixgbe_device_supports_autoneg_fc(hw)) - return -EINVAL; - - fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); - - if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) - fc.requested_mode = ixgbe_fc_full; - else if (pause->rx_pause) - fc.requested_mode = ixgbe_fc_rx_pause; - else if (pause->tx_pause) - fc.requested_mode = ixgbe_fc_tx_pause; - else - fc.requested_mode = ixgbe_fc_none; - - /* if the thing changed then we'll update and use new autoneg */ - if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) { - hw->fc = fc; - if (netif_running(netdev)) - ixgbe_reinit_locked(adapter); - else - ixgbe_reset(adapter); - } - - return 0; -} - -static u32 ixgbe_get_msglevel(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - return adapter->msg_enable; -} - -static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u16 regVal; - s32 rc; - - adapter->msg_enable = data; - - regVal = 0x03; - rc = hw->phy.ops.write_reg(hw, 0x16, 
MDIO_MMD_PMAPMD, regVal); - if (rc) - hw_err(hw, "page register write failed, rc:%x\n", rc); - - /* For M88E1512, read from (page 3, register 16)[LED Function Control Register] */ - regVal = 0x00; - rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); - /*hw_err(hw, "[Pega Debug] : current register value = 0x%x\n", regVal);*/ - if (rc) - hw_err(hw, "led function control register read failed, rc:%x\n", rc); - - if (data == 0) /* Turn off OOB LED. */ - { - /* For M88E1512, write to (page 3, register 16) with force led off */ - regVal = (regVal & 0xFF00) | 0x0088; - rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); - if (rc) - hw_err(hw, "led function control register write failed, rc:%x\n", rc); - } - else if (data == 1) /* Turn on OOB LED. */ - { - /* For M88E1512, write to (page 3, register 16) with force led on */ - regVal = (regVal & 0xFF00) | 0x0099; - rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); - if (rc) - hw_err(hw, "led function control register write failed, rc:%x\n", rc); - } - else /* Switch OOB LED back to normal. */ - { - /* For M88E1512, set led back to nornmal in (page 3, register 16). 
*/ - regVal = (regVal & 0xFF00) | 0x0017; - rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); - if (rc) - hw_err(hw, "led function control register write failed, rc:%x\n", rc); - } - - /* For M88E1512, write 0 in (page 0, register 22) to back to page 0 */ - regVal = 0x00; - rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); - if (rc) - hw_err(hw, "page register write failed, rc:%x\n", rc); - -} - -static int ixgbe_get_regs_len(struct net_device __always_unused *netdev) -{ -#define IXGBE_REGS_LEN 1129 - return IXGBE_REGS_LEN * sizeof(u32); -} - -#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_) - -static void ixgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs, - void *p) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u32 *regs_buff = p; - u8 i; - - memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); - - regs->version = hw->mac.type << 24 | hw->revision_id << 16 | - hw->device_id; - - /* General Registers */ - regs_buff[0] = IXGBE_R32_Q(hw, IXGBE_CTRL); - regs_buff[1] = IXGBE_R32_Q(hw, IXGBE_STATUS); - regs_buff[2] = IXGBE_R32_Q(hw, IXGBE_CTRL_EXT); - regs_buff[3] = IXGBE_R32_Q(hw, IXGBE_ESDP); - regs_buff[4] = IXGBE_R32_Q(hw, IXGBE_EODSDP); - regs_buff[5] = IXGBE_R32_Q(hw, IXGBE_LEDCTL); - regs_buff[6] = IXGBE_R32_Q(hw, IXGBE_FRTIMER); - regs_buff[7] = IXGBE_R32_Q(hw, IXGBE_TCPTIMER); - - /* NVM Register */ - regs_buff[8] = IXGBE_R32_Q(hw, IXGBE_EEC); - regs_buff[9] = IXGBE_R32_Q(hw, IXGBE_EERD); - regs_buff[10] = IXGBE_R32_Q(hw, IXGBE_FLA); - regs_buff[11] = IXGBE_R32_Q(hw, IXGBE_EEMNGCTL); - regs_buff[12] = IXGBE_R32_Q(hw, IXGBE_EEMNGDATA); - regs_buff[13] = IXGBE_R32_Q(hw, IXGBE_FLMNGCTL); - regs_buff[14] = IXGBE_R32_Q(hw, IXGBE_FLMNGDATA); - regs_buff[15] = IXGBE_R32_Q(hw, IXGBE_FLMNGCNT); - regs_buff[16] = IXGBE_R32_Q(hw, IXGBE_FLOP); - regs_buff[17] = IXGBE_R32_Q(hw, IXGBE_GRC); - - /* Interrupt */ - /* don't read EICR because it can clear interrupt causes, instead - * read 
EICS which is a shadow but doesn't clear EICR */ - regs_buff[18] = IXGBE_R32_Q(hw, IXGBE_EICS); - regs_buff[19] = IXGBE_R32_Q(hw, IXGBE_EICS); - regs_buff[20] = IXGBE_R32_Q(hw, IXGBE_EIMS); - regs_buff[21] = IXGBE_R32_Q(hw, IXGBE_EIMC); - regs_buff[22] = IXGBE_R32_Q(hw, IXGBE_EIAC); - regs_buff[23] = IXGBE_R32_Q(hw, IXGBE_EIAM); - regs_buff[24] = IXGBE_R32_Q(hw, IXGBE_EITR(0)); - regs_buff[25] = IXGBE_R32_Q(hw, IXGBE_IVAR(0)); - regs_buff[26] = IXGBE_R32_Q(hw, IXGBE_MSIXT); - regs_buff[27] = IXGBE_R32_Q(hw, IXGBE_MSIXPBA); - regs_buff[28] = IXGBE_R32_Q(hw, IXGBE_PBACL(0)); - regs_buff[29] = IXGBE_R32_Q(hw, IXGBE_GPIE); - - /* Flow Control */ - regs_buff[30] = IXGBE_R32_Q(hw, IXGBE_PFCTOP); - regs_buff[31] = IXGBE_R32_Q(hw, IXGBE_FCTTV(0)); - regs_buff[32] = IXGBE_R32_Q(hw, IXGBE_FCTTV(1)); - regs_buff[33] = IXGBE_R32_Q(hw, IXGBE_FCTTV(2)); - regs_buff[34] = IXGBE_R32_Q(hw, IXGBE_FCTTV(3)); - for (i = 0; i < 8; i++) { - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - regs_buff[35 + i] = IXGBE_R32_Q(hw, IXGBE_FCRTL(i)); - regs_buff[43 + i] = IXGBE_R32_Q(hw, IXGBE_FCRTH(i)); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - regs_buff[35 + i] = IXGBE_R32_Q(hw, - IXGBE_FCRTL_82599(i)); - regs_buff[43 + i] = IXGBE_R32_Q(hw, - IXGBE_FCRTH_82599(i)); - break; - default: - break; - } - } - regs_buff[51] = IXGBE_R32_Q(hw, IXGBE_FCRTV); - regs_buff[52] = IXGBE_R32_Q(hw, IXGBE_TFCS); - - /* Receive DMA */ - for (i = 0; i < 64; i++) - regs_buff[53 + i] = IXGBE_R32_Q(hw, IXGBE_RDBAL(i)); - for (i = 0; i < 64; i++) - regs_buff[117 + i] = IXGBE_R32_Q(hw, IXGBE_RDBAH(i)); - for (i = 0; i < 64; i++) - regs_buff[181 + i] = IXGBE_R32_Q(hw, IXGBE_RDLEN(i)); - for (i = 0; i < 64; i++) - regs_buff[245 + i] = IXGBE_R32_Q(hw, IXGBE_RDH(i)); - for (i = 0; i < 64; i++) - regs_buff[309 + i] = IXGBE_R32_Q(hw, IXGBE_RDT(i)); - for (i = 0; i < 64; i++) - regs_buff[373 + i] = IXGBE_R32_Q(hw, IXGBE_RXDCTL(i)); - for (i = 0; i < 16; 
i++) - regs_buff[437 + i] = IXGBE_R32_Q(hw, IXGBE_SRRCTL(i)); - for (i = 0; i < 16; i++) - regs_buff[453 + i] = IXGBE_R32_Q(hw, IXGBE_DCA_RXCTRL(i)); - regs_buff[469] = IXGBE_R32_Q(hw, IXGBE_RDRXCTL); - for (i = 0; i < 8; i++) - regs_buff[470 + i] = IXGBE_R32_Q(hw, IXGBE_RXPBSIZE(i)); - regs_buff[478] = IXGBE_R32_Q(hw, IXGBE_RXCTRL); - regs_buff[479] = IXGBE_R32_Q(hw, IXGBE_DROPEN); - - /* Receive */ - regs_buff[480] = IXGBE_R32_Q(hw, IXGBE_RXCSUM); - regs_buff[481] = IXGBE_R32_Q(hw, IXGBE_RFCTL); - for (i = 0; i < 16; i++) - regs_buff[482 + i] = IXGBE_R32_Q(hw, IXGBE_RAL(i)); - for (i = 0; i < 16; i++) - regs_buff[498 + i] = IXGBE_R32_Q(hw, IXGBE_RAH(i)); - regs_buff[514] = IXGBE_R32_Q(hw, IXGBE_PSRTYPE(0)); - regs_buff[515] = IXGBE_R32_Q(hw, IXGBE_FCTRL); - regs_buff[516] = IXGBE_R32_Q(hw, IXGBE_VLNCTRL); - regs_buff[517] = IXGBE_R32_Q(hw, IXGBE_MCSTCTRL); - regs_buff[518] = IXGBE_R32_Q(hw, IXGBE_MRQC); - regs_buff[519] = IXGBE_R32_Q(hw, IXGBE_VMD_CTL); - for (i = 0; i < 8; i++) - regs_buff[520 + i] = IXGBE_R32_Q(hw, IXGBE_IMIR(i)); - for (i = 0; i < 8; i++) - regs_buff[528 + i] = IXGBE_R32_Q(hw, IXGBE_IMIREXT(i)); - regs_buff[536] = IXGBE_R32_Q(hw, IXGBE_IMIRVP); - - /* Transmit */ - for (i = 0; i < 32; i++) - regs_buff[537 + i] = IXGBE_R32_Q(hw, IXGBE_TDBAL(i)); - for (i = 0; i < 32; i++) - regs_buff[569 + i] = IXGBE_R32_Q(hw, IXGBE_TDBAH(i)); - for (i = 0; i < 32; i++) - regs_buff[601 + i] = IXGBE_R32_Q(hw, IXGBE_TDLEN(i)); - for (i = 0; i < 32; i++) - regs_buff[633 + i] = IXGBE_R32_Q(hw, IXGBE_TDH(i)); - for (i = 0; i < 32; i++) - regs_buff[665 + i] = IXGBE_R32_Q(hw, IXGBE_TDT(i)); - for (i = 0; i < 32; i++) - regs_buff[697 + i] = IXGBE_R32_Q(hw, IXGBE_TXDCTL(i)); - for (i = 0; i < 32; i++) - regs_buff[729 + i] = IXGBE_R32_Q(hw, IXGBE_TDWBAL(i)); - for (i = 0; i < 32; i++) - regs_buff[761 + i] = IXGBE_R32_Q(hw, IXGBE_TDWBAH(i)); - regs_buff[793] = IXGBE_R32_Q(hw, IXGBE_DTXCTL); - for (i = 0; i < 16; i++) - regs_buff[794 + i] = IXGBE_R32_Q(hw, 
IXGBE_DCA_TXCTRL(i)); - regs_buff[810] = IXGBE_R32_Q(hw, IXGBE_TIPG); - for (i = 0; i < 8; i++) - regs_buff[811 + i] = IXGBE_R32_Q(hw, IXGBE_TXPBSIZE(i)); - regs_buff[819] = IXGBE_R32_Q(hw, IXGBE_MNGTXMAP); - - /* Wake Up */ - regs_buff[820] = IXGBE_R32_Q(hw, IXGBE_WUC); - regs_buff[821] = IXGBE_R32_Q(hw, IXGBE_WUFC); - regs_buff[822] = IXGBE_R32_Q(hw, IXGBE_WUS); - regs_buff[823] = IXGBE_R32_Q(hw, IXGBE_IPAV); - regs_buff[824] = IXGBE_R32_Q(hw, IXGBE_IP4AT); - regs_buff[825] = IXGBE_R32_Q(hw, IXGBE_IP6AT); - regs_buff[826] = IXGBE_R32_Q(hw, IXGBE_WUPL); - regs_buff[827] = IXGBE_R32_Q(hw, IXGBE_WUPM); - regs_buff[828] = IXGBE_R32_Q(hw, IXGBE_FHFT(0)); - - /* DCB */ - regs_buff[829] = IXGBE_R32_Q(hw, IXGBE_RMCS); - regs_buff[830] = IXGBE_R32_Q(hw, IXGBE_DPMCS); - regs_buff[831] = IXGBE_R32_Q(hw, IXGBE_PDPMCS); - regs_buff[832] = IXGBE_R32_Q(hw, IXGBE_RUPPBMR); - for (i = 0; i < 8; i++) - regs_buff[833 + i] = IXGBE_R32_Q(hw, IXGBE_RT2CR(i)); - for (i = 0; i < 8; i++) - regs_buff[841 + i] = IXGBE_R32_Q(hw, IXGBE_RT2SR(i)); - for (i = 0; i < 8; i++) - regs_buff[849 + i] = IXGBE_R32_Q(hw, IXGBE_TDTQ2TCCR(i)); - for (i = 0; i < 8; i++) - regs_buff[857 + i] = IXGBE_R32_Q(hw, IXGBE_TDTQ2TCSR(i)); - for (i = 0; i < 8; i++) - regs_buff[865 + i] = IXGBE_R32_Q(hw, IXGBE_TDPT2TCCR(i)); - for (i = 0; i < 8; i++) - regs_buff[873 + i] = IXGBE_R32_Q(hw, IXGBE_TDPT2TCSR(i)); - - /* Statistics */ - regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); - regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc); - regs_buff[883] = IXGBE_GET_STAT(adapter, errbc); - regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc); - for (i = 0; i < 8; i++) - regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]); - regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc); - regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc); - regs_buff[895] = IXGBE_GET_STAT(adapter, rlec); - regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc); - regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc); - regs_buff[898] = IXGBE_GET_STAT(adapter, 
lxofftxc); - regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc); - for (i = 0; i < 8; i++) - regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]); - for (i = 0; i < 8; i++) - regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]); - for (i = 0; i < 8; i++) - regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]); - for (i = 0; i < 8; i++) - regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]); - regs_buff[932] = IXGBE_GET_STAT(adapter, prc64); - regs_buff[933] = IXGBE_GET_STAT(adapter, prc127); - regs_buff[934] = IXGBE_GET_STAT(adapter, prc255); - regs_buff[935] = IXGBE_GET_STAT(adapter, prc511); - regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023); - regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522); - regs_buff[938] = IXGBE_GET_STAT(adapter, gprc); - regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); - regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); - regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); - regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); - regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); - for (i = 0; i < 8; i++) - regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); - regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); - regs_buff[955] = IXGBE_GET_STAT(adapter, rfc); - regs_buff[956] = IXGBE_GET_STAT(adapter, roc); - regs_buff[957] = IXGBE_GET_STAT(adapter, rjc); - regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); - regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); - regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); - regs_buff[961] = IXGBE_GET_STAT(adapter, tor); - regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); - regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); - regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); - regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127); - regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255); - regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511); - regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023); - regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522); - regs_buff[971] = IXGBE_GET_STAT(adapter, mptc); - regs_buff[972] = 
IXGBE_GET_STAT(adapter, bptc); - regs_buff[973] = IXGBE_GET_STAT(adapter, xec); - for (i = 0; i < 16; i++) - regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]); - for (i = 0; i < 16; i++) - regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]); - for (i = 0; i < 16; i++) - regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]); - for (i = 0; i < 16; i++) - regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); - - /* MAC */ - regs_buff[1038] = IXGBE_R32_Q(hw, IXGBE_PCS1GCFIG); - regs_buff[1039] = IXGBE_R32_Q(hw, IXGBE_PCS1GLCTL); - regs_buff[1040] = IXGBE_R32_Q(hw, IXGBE_PCS1GLSTA); - regs_buff[1041] = IXGBE_R32_Q(hw, IXGBE_PCS1GDBG0); - regs_buff[1042] = IXGBE_R32_Q(hw, IXGBE_PCS1GDBG1); - regs_buff[1043] = IXGBE_R32_Q(hw, IXGBE_PCS1GANA); - regs_buff[1044] = IXGBE_R32_Q(hw, IXGBE_PCS1GANLP); - regs_buff[1045] = IXGBE_R32_Q(hw, IXGBE_PCS1GANNP); - regs_buff[1046] = IXGBE_R32_Q(hw, IXGBE_PCS1GANLPNP); - regs_buff[1047] = IXGBE_R32_Q(hw, IXGBE_HLREG0); - regs_buff[1048] = IXGBE_R32_Q(hw, IXGBE_HLREG1); - regs_buff[1049] = IXGBE_R32_Q(hw, IXGBE_PAP); - regs_buff[1050] = IXGBE_R32_Q(hw, IXGBE_MACA); - regs_buff[1051] = IXGBE_R32_Q(hw, IXGBE_APAE); - regs_buff[1052] = IXGBE_R32_Q(hw, IXGBE_ARD); - regs_buff[1053] = IXGBE_R32_Q(hw, IXGBE_AIS); - regs_buff[1054] = IXGBE_R32_Q(hw, IXGBE_MSCA); - regs_buff[1055] = IXGBE_R32_Q(hw, IXGBE_MSRWD); - regs_buff[1056] = IXGBE_R32_Q(hw, IXGBE_MLADD); - regs_buff[1057] = IXGBE_R32_Q(hw, IXGBE_MHADD); - regs_buff[1058] = IXGBE_R32_Q(hw, IXGBE_TREG); - regs_buff[1059] = IXGBE_R32_Q(hw, IXGBE_PCSS1); - regs_buff[1060] = IXGBE_R32_Q(hw, IXGBE_PCSS2); - regs_buff[1061] = IXGBE_R32_Q(hw, IXGBE_XPCSS); - regs_buff[1062] = IXGBE_R32_Q(hw, IXGBE_SERDESC); - regs_buff[1063] = IXGBE_R32_Q(hw, IXGBE_MACS); - regs_buff[1064] = IXGBE_R32_Q(hw, IXGBE_AUTOC); - regs_buff[1065] = IXGBE_R32_Q(hw, IXGBE_LINKS); - regs_buff[1066] = IXGBE_R32_Q(hw, IXGBE_AUTOC2); - regs_buff[1067] = IXGBE_R32_Q(hw, IXGBE_AUTOC3); - regs_buff[1068] = IXGBE_R32_Q(hw, 
IXGBE_ANLP1); - regs_buff[1069] = IXGBE_R32_Q(hw, IXGBE_ANLP2); - regs_buff[1070] = IXGBE_R32_Q(hw, IXGBE_ATLASCTL); - - /* Diagnostic */ - regs_buff[1071] = IXGBE_R32_Q(hw, IXGBE_RDSTATCTL); - for (i = 0; i < 8; i++) - regs_buff[1072 + i] = IXGBE_R32_Q(hw, IXGBE_RDSTAT(i)); - regs_buff[1080] = IXGBE_R32_Q(hw, IXGBE_RDHMPN); - for (i = 0; i < 4; i++) - regs_buff[1081 + i] = IXGBE_R32_Q(hw, IXGBE_RIC_DW(i)); - regs_buff[1085] = IXGBE_R32_Q(hw, IXGBE_RDPROBE); - regs_buff[1095] = IXGBE_R32_Q(hw, IXGBE_TDHMPN); - for (i = 0; i < 4; i++) - regs_buff[1096 + i] = IXGBE_R32_Q(hw, IXGBE_TIC_DW(i)); - regs_buff[1100] = IXGBE_R32_Q(hw, IXGBE_TDPROBE); - regs_buff[1101] = IXGBE_R32_Q(hw, IXGBE_TXBUFCTRL); - regs_buff[1102] = IXGBE_R32_Q(hw, IXGBE_TXBUFDATA0); - regs_buff[1103] = IXGBE_R32_Q(hw, IXGBE_TXBUFDATA1); - regs_buff[1104] = IXGBE_R32_Q(hw, IXGBE_TXBUFDATA2); - regs_buff[1105] = IXGBE_R32_Q(hw, IXGBE_TXBUFDATA3); - regs_buff[1106] = IXGBE_R32_Q(hw, IXGBE_RXBUFCTRL); - regs_buff[1107] = IXGBE_R32_Q(hw, IXGBE_RXBUFDATA0); - regs_buff[1108] = IXGBE_R32_Q(hw, IXGBE_RXBUFDATA1); - regs_buff[1109] = IXGBE_R32_Q(hw, IXGBE_RXBUFDATA2); - regs_buff[1110] = IXGBE_R32_Q(hw, IXGBE_RXBUFDATA3); - for (i = 0; i < 8; i++) - regs_buff[1111 + i] = IXGBE_R32_Q(hw, IXGBE_PCIE_DIAG(i)); - regs_buff[1119] = IXGBE_R32_Q(hw, IXGBE_RFVAL); - regs_buff[1120] = IXGBE_R32_Q(hw, IXGBE_MDFTC1); - regs_buff[1121] = IXGBE_R32_Q(hw, IXGBE_MDFTC2); - regs_buff[1122] = IXGBE_R32_Q(hw, IXGBE_MDFTFIFO1); - regs_buff[1123] = IXGBE_R32_Q(hw, IXGBE_MDFTFIFO2); - regs_buff[1124] = IXGBE_R32_Q(hw, IXGBE_MDFTS); - regs_buff[1125] = IXGBE_R32_Q(hw, IXGBE_PCIEECCCTL); - regs_buff[1126] = IXGBE_R32_Q(hw, IXGBE_PBTXECC); - regs_buff[1127] = IXGBE_R32_Q(hw, IXGBE_PBRXECC); - - /* 82599 X540 specific registers */ - regs_buff[1128] = IXGBE_R32_Q(hw, IXGBE_MFLCN); - -} - -static int ixgbe_get_eeprom_len(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - return 
adapter->hw.eeprom.word_size * 2; -} - -static int ixgbe_get_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u16 *eeprom_buff; - int first_word, last_word, eeprom_len; - int ret_val = 0; - u16 i; - - if (eeprom->len == 0) - return -EINVAL; - - eeprom->magic = hw->vendor_id | (hw->device_id << 16); - - first_word = eeprom->offset >> 1; - last_word = (eeprom->offset + eeprom->len - 1) >> 1; - eeprom_len = last_word - first_word + 1; - - eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); - if (!eeprom_buff) - return -ENOMEM; - - ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, - eeprom_buff); - - /* Device's eeprom is always little-endian, word addressable */ - for (i = 0; i < eeprom_len; i++) - le16_to_cpus(&eeprom_buff[i]); - - memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); - kfree(eeprom_buff); - - return ret_val; -} - -static int ixgbe_set_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int max_len, first_word, last_word, ret_val = 0; - u16 *eeprom_buff, i; - void *ptr; - - if (eeprom->len == 0) - return -EINVAL; - - if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) - return -EINVAL; - - max_len = hw->eeprom.word_size * 2; - - first_word = eeprom->offset >> 1; - last_word = (eeprom->offset + eeprom->len - 1) >> 1; - eeprom_buff = kmalloc(max_len, GFP_KERNEL); - if (!eeprom_buff) - return -ENOMEM; - - ptr = eeprom_buff; - - if (eeprom->offset & 1) { - /* - * need read/modify/write of first changed EEPROM word - * only the second byte of the word is being modified - */ - ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]); - if (ret_val) - goto err; - - ptr++; - } - if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { - /* - * need 
read/modify/write of last changed EEPROM word - * only the first byte of the word is being modified - */ - ret_val = hw->eeprom.ops.read(hw, last_word, - &eeprom_buff[last_word - first_word]); - if (ret_val) - goto err; - } - - /* Device's eeprom is always little-endian, word addressable */ - for (i = 0; i < last_word - first_word + 1; i++) - le16_to_cpus(&eeprom_buff[i]); - - memcpy(ptr, bytes, eeprom->len); - - for (i = 0; i < last_word - first_word + 1; i++) - cpu_to_le16s(&eeprom_buff[i]); - - ret_val = hw->eeprom.ops.write_buffer(hw, first_word, - last_word - first_word + 1, - eeprom_buff); - - /* Update the checksum */ - if (ret_val == 0) - hw->eeprom.ops.update_checksum(hw); - -err: - kfree(eeprom_buff); - return ret_val; -} - -static void ixgbe_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - strncpy(drvinfo->driver, ixgbe_driver_name, - sizeof(drvinfo->driver) - 1); - strncpy(drvinfo->version, ixgbe_driver_version, - sizeof(drvinfo->version) - 1); - - strncpy(drvinfo->fw_version, adapter->eeprom_id, - sizeof(drvinfo->fw_version) - 1); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info) - 1); -#ifdef HAVE_ETHTOOL_GET_SSET_COUNT - - drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN; -#endif -} - -static void ixgbe_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - ring->rx_max_pending = IXGBE_MAX_RXD; - ring->tx_max_pending = IXGBE_MAX_TXD; - ring->rx_mini_max_pending = 0; - ring->rx_jumbo_max_pending = 0; - ring->rx_pending = adapter->rx_ring_count; - ring->tx_pending = adapter->tx_ring_count; - ring->rx_mini_pending = 0; - ring->rx_jumbo_pending = 0; -} - -static int ixgbe_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_ring *temp_ring; - int i, err = 0; - u32 
new_rx_count, new_tx_count; - - if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) - return -EINVAL; - - new_tx_count = clamp_t(u32, ring->tx_pending, - IXGBE_MIN_TXD, IXGBE_MAX_TXD); - new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); - - new_rx_count = clamp_t(u32, ring->rx_pending, - IXGBE_MIN_RXD, IXGBE_MAX_RXD); - new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); - - if ((new_tx_count == adapter->tx_ring_count) && - (new_rx_count == adapter->rx_ring_count)) { - /* nothing to do */ - return 0; - } - - while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) - usleep_range(1000, 2000); - - if (!netif_running(adapter->netdev)) { - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->count = new_tx_count; - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->count = new_rx_count; - adapter->tx_ring_count = new_tx_count; - adapter->rx_ring_count = new_rx_count; - goto clear_reset; - } - - /* allocate temporary buffer to store rings in */ - i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); - temp_ring = vmalloc(i * sizeof(struct ixgbe_ring)); - - if (!temp_ring) { - err = -ENOMEM; - goto clear_reset; - } - - ixgbe_down(adapter); - - /* - * Setup new Tx resources and free the old Tx resources in that order. - * We can then assign the new resources to the rings via a memcpy. - * The advantage to this approach is that we are guaranteed to still - * have resources even in the case of an allocation failure. 
- */ - if (new_tx_count != adapter->tx_ring_count) { - for (i = 0; i < adapter->num_tx_queues; i++) { - memcpy(&temp_ring[i], adapter->tx_ring[i], - sizeof(struct ixgbe_ring)); - - temp_ring[i].count = new_tx_count; - err = ixgbe_setup_tx_resources(&temp_ring[i]); - if (err) { - while (i) { - i--; - ixgbe_free_tx_resources(&temp_ring[i]); - } - goto err_setup; - } - } - - for (i = 0; i < adapter->num_tx_queues; i++) { - ixgbe_free_tx_resources(adapter->tx_ring[i]); - - memcpy(adapter->tx_ring[i], &temp_ring[i], - sizeof(struct ixgbe_ring)); - } - - adapter->tx_ring_count = new_tx_count; - } - - /* Repeat the process for the Rx rings if needed */ - if (new_rx_count != adapter->rx_ring_count) { - for (i = 0; i < adapter->num_rx_queues; i++) { - memcpy(&temp_ring[i], adapter->rx_ring[i], - sizeof(struct ixgbe_ring)); - - temp_ring[i].count = new_rx_count; - err = ixgbe_setup_rx_resources(&temp_ring[i]); - if (err) { - while (i) { - i--; - ixgbe_free_rx_resources(&temp_ring[i]); - } - goto err_setup; - } - } - - - for (i = 0; i < adapter->num_rx_queues; i++) { - ixgbe_free_rx_resources(adapter->rx_ring[i]); - - memcpy(adapter->rx_ring[i], &temp_ring[i], - sizeof(struct ixgbe_ring)); - } - - adapter->rx_ring_count = new_rx_count; - } - -err_setup: - ixgbe_up(adapter); - vfree(temp_ring); -clear_reset: - clear_bit(__IXGBE_RESETTING, &adapter->state); - return err; -} - -#ifndef HAVE_ETHTOOL_GET_SSET_COUNT -static int ixgbe_get_stats_count(struct net_device *netdev) -{ - return IXGBE_STATS_LEN; -} - -#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ -static int ixgbe_get_sset_count(struct net_device *netdev, int sset) -{ -#ifdef HAVE_TX_MQ -#ifndef HAVE_NETDEV_SELECT_QUEUE - struct ixgbe_adapter *adapter = netdev_priv(netdev); -#endif -#endif - - switch (sset) { - case ETH_SS_TEST: - return IXGBE_TEST_LEN; - case ETH_SS_STATS: - return IXGBE_STATS_LEN; - case ETH_SS_PRIV_FLAGS: - return IXGBE_PRIV_FLAGS_STR_LEN; - default: - return -EOPNOTSUPP; - } -} - -#endif /* 
HAVE_ETHTOOL_GET_SSET_COUNT */ -static void ixgbe_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats __always_unused *stats, u64 *data) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); -#ifdef HAVE_NETDEV_STATS_IN_NETDEV - struct net_device_stats *net_stats = &netdev->stats; -#else - struct net_device_stats *net_stats = &adapter->net_stats; -#endif - u64 *queue_stat; - int stat_count, k; -#ifdef HAVE_NDO_GET_STATS64 - unsigned int start; -#endif - struct ixgbe_ring *ring; - int i, j; - char *p; - - ixgbe_update_stats(adapter); - - for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) { - p = (char *)net_stats + ixgbe_gstrings_net_stats[i].stat_offset; - data[i] = (ixgbe_gstrings_net_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } - for (j = 0; j < IXGBE_GLOBAL_STATS_LEN; j++, i++) { - p = (char *)adapter + ixgbe_gstrings_stats[j].stat_offset; - data[i] = (ixgbe_gstrings_stats[j].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } - for (j = 0; j < IXGBE_NUM_TX_QUEUES; j++) { - ring = adapter->tx_ring[j]; - if (!ring) { - data[i++] = 0; - data[i++] = 0; -#ifdef BP_EXTENDED_STATS - data[i++] = 0; - data[i++] = 0; - data[i++] = 0; -#endif - continue; - } - -#ifdef HAVE_NDO_GET_STATS64 - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); -#endif - data[i] = ring->stats.packets; - data[i+1] = ring->stats.bytes; -#ifdef HAVE_NDO_GET_STATS64 - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); -#endif - i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = ring->stats.yields; - data[i+1] = ring->stats.misses; - data[i+2] = ring->stats.cleaned; - i += 3; -#endif - } - for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { - ring = adapter->rx_ring[j]; - if (!ring) { - data[i++] = 0; - data[i++] = 0; -#ifdef BP_EXTENDED_STATS - data[i++] = 0; - data[i++] = 0; - data[i++] = 0; -#endif - continue; - } - -#ifdef HAVE_NDO_GET_STATS64 - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); -#endif - data[i] = ring->stats.packets; - 
data[i+1] = ring->stats.bytes; -#ifdef HAVE_NDO_GET_STATS64 - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); -#endif - i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = ring->stats.yields; - data[i+1] = ring->stats.misses; - data[i+2] = ring->stats.cleaned; - i += 3; -#endif - } - for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { - data[i++] = adapter->stats.pxontxc[j]; - data[i++] = adapter->stats.pxofftxc[j]; - } - for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { - data[i++] = adapter->stats.pxonrxc[j]; - data[i++] = adapter->stats.pxoffrxc[j]; - } - stat_count = sizeof(struct vf_stats) / sizeof(u64); - for (j = 0; j < adapter->num_vfs; j++) { - queue_stat = (u64 *)&adapter->vfinfo[j].vfstats; - for (k = 0; k < stat_count; k++) - data[i + k] = queue_stat[k]; - queue_stat = (u64 *)&adapter->vfinfo[j].saved_rst_vfstats; - for (k = 0; k < stat_count; k++) - data[i + k] += queue_stat[k]; - i += k; - } -} - -static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - char *p = (char *)data; - unsigned int i; - - switch (stringset) { - case ETH_SS_TEST: - memcpy(data, *ixgbe_gstrings_test, - IXGBE_TEST_LEN * ETH_GSTRING_LEN); - break; - case ETH_SS_STATS: - for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) { - memcpy(p, ixgbe_gstrings_net_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { - memcpy(p, ixgbe_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < IXGBE_NUM_TX_QUEUES; i++) { - sprintf(p, "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; -#ifdef BP_EXTENDED_STATS - sprintf(p, "tx_queue_%u_bp_napi_yield", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bp_misses", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bp_cleaned", i); - p += ETH_GSTRING_LEN; -#endif /* BP_EXTENDED_STATS */ - } - for 
(i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { - sprintf(p, "rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; -#ifdef BP_EXTENDED_STATS - sprintf(p, "rx_queue_%u_bp_poll_yield", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bp_misses", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bp_cleaned", i); - p += ETH_GSTRING_LEN; -#endif /* BP_EXTENDED_STATS */ - } - for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { - sprintf(p, "tx_pb_%u_pxon", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_pb_%u_pxoff", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { - sprintf(p, "rx_pb_%u_pxon", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_pb_%u_pxoff", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < adapter->num_vfs; i++) { - sprintf(p, "VF %u Rx Packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "VF %u Rx Bytes", i); - p += ETH_GSTRING_LEN; - sprintf(p, "VF %u Tx Packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "VF %u Tx Bytes", i); - p += ETH_GSTRING_LEN; - sprintf(p, "VF %u MC Packets", i); - p += ETH_GSTRING_LEN; - } - /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ - break; -#ifdef HAVE_ETHTOOL_GET_SSET_COUNT - case ETH_SS_PRIV_FLAGS: - memcpy(data, ixgbe_priv_flags_strings, - IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); - break; -#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ - } -} - -static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) -{ - struct ixgbe_hw *hw = &adapter->hw; - bool link_up; - u32 link_speed = 0; - - if (IXGBE_REMOVED(hw->hw_addr)) { - *data = 1; - return 1; - } - *data = 0; - hw->mac.ops.check_link(hw, &link_speed, &link_up, true); - if (link_up) - return *data; - else - *data = 1; - return *data; -} - -/* ethtool register test data */ -struct ixgbe_reg_test { - u16 reg; - u8 array_len; - u8 test_type; - u32 mask; - u32 write; -}; - -/* In the hardware, registers are laid out either singly, in arrays - * spaced 0x40 bytes apart, or 
in contiguous tables. We assume - * most tests take place on arrays or single registers (handled - * as a single-element array) and special-case the tables. - * Table tests are always pattern tests. - * - * We also make provision for some required setup steps by specifying - * registers to be written without any read-back testing. - */ - -#define PATTERN_TEST 1 -#define SET_READ_TEST 2 -#define WRITE_NO_TEST 3 -#define TABLE32_TEST 4 -#define TABLE64_TEST_LO 5 -#define TABLE64_TEST_HI 6 - -/* default 82599 register test */ -static struct ixgbe_reg_test reg_test_82599[] = { - { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, - { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, - { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, - { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, - { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, - { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, - { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, - { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, - { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, - { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, - { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { .reg = 0 } -}; - -/* default 82598 register test */ -static struct ixgbe_reg_test reg_test_82598[] = { - { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, - { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 
0x8007FFF0 }, - { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, - { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - /* Enable all four RX queues before testing. */ - { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, - /* RDH is read-only for 82598, only test RDT. */ - { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, - { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, - { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF }, - { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, - { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 }, - { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, - { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { .reg = 0 } -}; - - -static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, - u32 mask, u32 write) -{ - u32 pat, val, before; - static const u32 test_pattern[] = { - 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF - }; - - if (IXGBE_REMOVED(adapter->hw.hw_addr)) { - *data = 1; - return true; - } - for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { - before = IXGBE_READ_REG(&adapter->hw, reg); - IXGBE_WRITE_REG(&adapter->hw, reg, test_pattern[pat] & write); - val = IXGBE_READ_REG(&adapter->hw, reg); - if (val != (test_pattern[pat] & write & mask)) { - e_err(drv, - "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", - reg, val, test_pattern[pat] & write & 
mask); - *data = reg; - IXGBE_WRITE_REG(&adapter->hw, reg, before); - return true; - } - IXGBE_WRITE_REG(&adapter->hw, reg, before); - } - return false; -} - -static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, - u32 mask, u32 write) -{ - u32 val, before; - - if (IXGBE_REMOVED(adapter->hw.hw_addr)) { - *data = 1; - return true; - } - before = IXGBE_READ_REG(&adapter->hw, reg); - IXGBE_WRITE_REG(&adapter->hw, reg, write & mask); - val = IXGBE_READ_REG(&adapter->hw, reg); - if ((write & mask) != (val & mask)) { - e_err(drv, - "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", - reg, (val & mask), (write & mask)); - *data = reg; - IXGBE_WRITE_REG(&adapter->hw, reg, before); - return true; - } - IXGBE_WRITE_REG(&adapter->hw, reg, before); - return false; -} - -static bool ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) -{ - struct ixgbe_reg_test *test; - struct ixgbe_hw *hw = &adapter->hw; - u32 value, before, after; - u32 i, toggle; - - if (IXGBE_REMOVED(hw->hw_addr)) { - e_err(drv, "Adapter removed - register test blocked\n"); - *data = 1; - return true; - } - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - toggle = 0x7FFFF3FF; - test = reg_test_82598; - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - toggle = 0x7FFFF30F; - test = reg_test_82599; - break; - default: - *data = 1; - return true; - } - - /* - * Because the status register is such a special case, - * we handle it separately from the rest of the register - * tests. Some bits are read-only, some toggle, and some - * are writeable on newer MACs. 
- */ - before = IXGBE_READ_REG(hw, IXGBE_STATUS); - value = IXGBE_READ_REG(hw, IXGBE_STATUS) & toggle; - IXGBE_WRITE_REG(hw, IXGBE_STATUS, toggle); - after = IXGBE_READ_REG(hw, IXGBE_STATUS) & toggle; - if (value != after) { - e_err(drv, - "failed STATUS register test got: 0x%08X expected: 0x%08X\n", - after, value); - *data = 1; - return true; - } - /* restore previous status */ - IXGBE_WRITE_REG(hw, IXGBE_STATUS, before); - - /* - * Perform the remainder of the register test, looping through - * the test table until we either fail or reach the null entry. - */ - while (test->reg) { - for (i = 0; i < test->array_len; i++) { - bool b = false; - - switch (test->test_type) { - case PATTERN_TEST: - b = reg_pattern_test(adapter, data, - test->reg + (i * 0x40), - test->mask, - test->write); - break; - case SET_READ_TEST: - b = reg_set_and_check(adapter, data, - test->reg + (i * 0x40), - test->mask, - test->write); - break; - case WRITE_NO_TEST: - IXGBE_WRITE_REG(hw, test->reg + (i * 0x40), - test->write); - break; - case TABLE32_TEST: - b = reg_pattern_test(adapter, data, - test->reg + (i * 4), - test->mask, - test->write); - break; - case TABLE64_TEST_LO: - b = reg_pattern_test(adapter, data, - test->reg + (i * 8), - test->mask, - test->write); - break; - case TABLE64_TEST_HI: - b = reg_pattern_test(adapter, data, - (test->reg + 4) + (i * 8), - test->mask, - test->write); - break; - } - if (b) - return true; - } - test++; - } - - *data = 0; - return false; -} - -static bool ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) -{ - struct ixgbe_hw *hw = &adapter->hw; - - if (hw->eeprom.ops.validate_checksum(hw, NULL)) { - *data = 1; - return true; - } else { - *data = 0; - return false; - } -} - -static irqreturn_t ixgbe_test_intr(int __always_unused irq, void *data) -{ - struct net_device *netdev = (struct net_device *) data; - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); - - return 
IRQ_HANDLED; -} - -static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) -{ - struct net_device *netdev = adapter->netdev; - u32 mask, i = 0, shared_int = true; - u32 irq = adapter->pdev->irq; - - if (IXGBE_REMOVED(adapter->hw.hw_addr)) { - *data = 1; - return -1; - } - *data = 0; - - /* Hook up test interrupt handler just for this test */ - if (adapter->msix_entries) { - /* NOTE: we don't test MSI-X interrupts here, yet */ - return 0; - } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { - shared_int = false; - if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name, - netdev)) { - *data = 1; - return -1; - } - } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED, - netdev->name, netdev)) { - shared_int = false; - } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED, - netdev->name, netdev)) { - *data = 1; - return -1; - } - e_info(hw, "testing %s interrupt\n", - (shared_int ? "shared" : "unshared")); - - /* Disable all the interrupts */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); - IXGBE_WRITE_FLUSH(&adapter->hw); - usleep_range(10000, 20000); - - /* Test each interrupt */ - for (; i < 10; i++) { - /* Interrupt to test */ - mask = 1 << i; - - if (!shared_int) { - /* - * Disable the interrupts to be reported in - * the cause register and then force the same - * interrupt and see if one gets posted. If - * an interrupt was posted to the bus, the - * test failed. - */ - adapter->test_icr = 0; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, - ~mask & 0x00007FFF); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, - ~mask & 0x00007FFF); - IXGBE_WRITE_FLUSH(&adapter->hw); - usleep_range(10000, 20000); - - if (adapter->test_icr & mask) { - *data = 3; - break; - } - } - - /* - * Enable the interrupt to be reported in the cause - * register and then force the same interrupt and see - * if one gets posted. If an interrupt was not posted - * to the bus, the test failed. 
- */ - adapter->test_icr = 0; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); - IXGBE_WRITE_FLUSH(&adapter->hw); - usleep_range(10000, 20000); - - if (!(adapter->test_icr & mask)) { - *data = 4; - break; - } - - if (!shared_int) { - /* - * Disable the other interrupts to be reported in - * the cause register and then force the other - * interrupts and see if any get posted. If - * an interrupt was posted to the bus, the - * test failed. - */ - adapter->test_icr = 0; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, - ~mask & 0x00007FFF); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, - ~mask & 0x00007FFF); - IXGBE_WRITE_FLUSH(&adapter->hw); - usleep_range(10000, 20000); - - if (adapter->test_icr) { - *data = 5; - break; - } - } - } - - /* Disable all the interrupts */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); - IXGBE_WRITE_FLUSH(&adapter->hw); - usleep_range(10000, 20000); - - /* Unhook test interrupt handler */ - free_irq(irq, netdev); - - return *data; -} - -static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; - struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; - struct ixgbe_hw *hw = &adapter->hw; - u32 reg_ctl; - - /* shut down the DMA engines now so they can be reinitialized later */ - - /* first Rx */ - ixgbe_disable_rx(hw); - ixgbe_disable_rx_queue(adapter, rx_ring); - - /* now Tx */ - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), 0); - - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); - reg_ctl &= ~IXGBE_DMATXCTL_TE; - IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); - break; - default: - break; - } - - ixgbe_reset(adapter); - - ixgbe_free_tx_resources(&adapter->test_tx_ring); - ixgbe_free_rx_resources(&adapter->test_rx_ring); -} - -static int ixgbe_setup_desc_rings(struct 
ixgbe_adapter *adapter) -{ - struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; - struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; - u32 rctl, reg_data; - int ret_val; - int err; - - /* Setup Tx descriptor ring and Tx buffers */ - tx_ring->count = IXGBE_DEFAULT_TXD; - tx_ring->queue_index = 0; - tx_ring->dev = pci_dev_to_dev(adapter->pdev); - tx_ring->netdev = adapter->netdev; - tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; - - err = ixgbe_setup_tx_resources(tx_ring); - if (err) - return 1; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); - reg_data |= IXGBE_DMATXCTL_TE; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); - break; - default: - break; - } - - ixgbe_configure_tx_ring(adapter, tx_ring); - - /* Setup Rx Descriptor ring and Rx buffers */ - rx_ring->count = IXGBE_DEFAULT_RXD; - rx_ring->queue_index = 0; - rx_ring->dev = pci_dev_to_dev(adapter->pdev); - rx_ring->netdev = adapter->netdev; - rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; -#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K; -#endif - - err = ixgbe_setup_rx_resources(rx_ring); - if (err) { - ret_val = 4; - goto err_nomem; - } - - ixgbe_disable_rx(&adapter->hw); - - ixgbe_configure_rx_ring(adapter, rx_ring); - - rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); - rctl |= IXGBE_RXCTRL_DMBYPS; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); - ixgbe_enable_rx(&adapter->hw); - - return 0; - -err_nomem: - ixgbe_free_desc_rings(adapter); - return ret_val; -} - -static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 reg_data; - - - /* Setup MAC loopback */ - reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0); - reg_data |= IXGBE_HLREG0_LPBK; - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data); - - reg_data = IXGBE_READ_REG(hw, 
IXGBE_FCTRL); - reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); - - /* X540 needs to set the MACC.FLU bit to force link up */ - switch (adapter->hw.mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - case ixgbe_mac_X540: - reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); - reg_data |= IXGBE_MACC_FLU; - IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); - break; - default: - if (hw->mac.orig_autoc) { - reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data); - } else { - return 10; - } - } - IXGBE_WRITE_FLUSH(hw); - usleep_range(10000, 20000); - - /* Disable Atlas Tx lanes; re-enabled in reset path */ - if (hw->mac.type == ixgbe_mac_82598EB) { - u8 atlas; - - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas); - atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; - hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas); - - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas); - atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; - hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas); - - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas); - atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; - hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas); - - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas); - atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; - hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas); - } - - return 0; -} - -static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) -{ - u32 reg_data; - - reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); - reg_data &= ~IXGBE_HLREG0_LPBK; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); -} - -static void ixgbe_create_lbtest_frame(struct sk_buff *skb, - unsigned int frame_size) -{ - memset(skb->data, 0xFF, frame_size); - frame_size >>= 1; - memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); - memset(&skb->data[frame_size + 10], 0xBE, 1); - 
memset(&skb->data[frame_size + 12], 0xAF, 1); -} - -static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer, - unsigned int frame_size) -{ - unsigned char *data; - bool match = true; - - frame_size >>= 1; - -#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - data = rx_buffer->skb->data; -#else - data = kmap(rx_buffer->page) + rx_buffer->page_offset; -#endif - - if (data[3] != 0xFF || - data[frame_size + 10] != 0xBE || - data[frame_size + 12] != 0xAF) - match = false; - -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - kunmap(rx_buffer->page); - -#endif - return match; -} - -static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, - struct ixgbe_ring *tx_ring, - unsigned int size) -{ - union ixgbe_adv_rx_desc *rx_desc; -#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - const int bufsz = rx_ring->rx_buf_len; -#else - const int bufsz = ixgbe_rx_bufsz(rx_ring); -#endif - u16 rx_ntc, tx_ntc, count = 0; - - /* initialize next to clean and descriptor values */ - rx_ntc = rx_ring->next_to_clean; - tx_ntc = tx_ring->next_to_clean; - rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); - - while (rx_desc->wb.upper.length) { - struct ixgbe_rx_buffer *rx_buffer; - struct ixgbe_tx_buffer *tx_buffer; - - /* unmap buffer on Tx side */ - tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); - - /* check Rx buffer */ - rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; - - /* sync Rx buffer for CPU read */ - dma_sync_single_for_cpu(rx_ring->dev, - rx_buffer->dma, - bufsz, - DMA_FROM_DEVICE); - - /* verify contents of skb */ - if (ixgbe_check_lbtest_frame(rx_buffer, size)) - count++; - - /* sync Rx buffer for device write */ - dma_sync_single_for_device(rx_ring->dev, - rx_buffer->dma, - bufsz, - DMA_FROM_DEVICE); - - /* increment Rx/Tx next to clean counters */ - rx_ntc++; - if (rx_ntc == rx_ring->count) - rx_ntc = 0; - tx_ntc++; - if (tx_ntc == tx_ring->count) - tx_ntc = 0; - - /* fetch next descriptor */ - rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); - 
} - - /* re-map buffers to ring, store next to clean values */ - ixgbe_alloc_rx_buffers(rx_ring, count); - rx_ring->next_to_clean = rx_ntc; - tx_ring->next_to_clean = tx_ntc; - - return count; -} - -static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; - struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; - int i, j, lc, ret_val = 0; - unsigned int size = 1024; - netdev_tx_t tx_ret_val; - struct sk_buff *skb; - u32 flags_orig = adapter->flags; - - /* DCB can modify the frames on Tx */ - adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; - - /* allocate test skb */ - skb = alloc_skb(size, GFP_KERNEL); - if (!skb) - return 11; - - /* place data into test skb */ - ixgbe_create_lbtest_frame(skb, size); - skb_put(skb, size); - - /* - * Calculate the loop count based on the largest descriptor ring - * The idea is to wrap the largest ring a number of times using 64 - * send/receive pairs during each loop - */ - - if (rx_ring->count <= tx_ring->count) - lc = ((tx_ring->count / 64) * 2) + 1; - else - lc = ((rx_ring->count / 64) * 2) + 1; - - for (j = 0; j <= lc; j++) { - unsigned int good_cnt; - - /* reset count of good packets */ - good_cnt = 0; - - /* place 64 packets on the transmit queue*/ - for (i = 0; i < 64; i++) { - skb_get(skb); - tx_ret_val = ixgbe_xmit_frame_ring(skb, - adapter, - tx_ring); - if (tx_ret_val == NETDEV_TX_OK) - good_cnt++; - } - - if (good_cnt != 64) { - ret_val = 12; - break; - } - - /* allow 200 milliseconds for packets to go from Tx to Rx */ - msleep(200); - - good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size); - if (good_cnt != 64) { - ret_val = 13; - break; - } - } - - /* free the original skb */ - kfree_skb(skb); - adapter->flags = flags_orig; - - return ret_val; -} - -static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) -{ - *data = ixgbe_setup_desc_rings(adapter); - if (*data) - goto out; - *data = ixgbe_setup_loopback_test(adapter); - if (*data) - 
goto err_loopback; - *data = ixgbe_run_loopback_test(adapter); - ixgbe_loopback_cleanup(adapter); - -err_loopback: - ixgbe_free_desc_rings(adapter); -out: - return *data; -} - -#ifndef HAVE_ETHTOOL_GET_SSET_COUNT -static int ixgbe_diag_test_count(struct net_device __always_unused *netdev) -{ - return IXGBE_TEST_LEN; -} - -#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ -static void ixgbe_diag_test(struct net_device *netdev, - struct ethtool_test *eth_test, u64 *data) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - bool if_running = netif_running(netdev); - struct ixgbe_hw *hw = &adapter->hw; - - if (IXGBE_REMOVED(hw->hw_addr)) { - e_err(hw, "Adapter removed - test blocked\n"); - data[0] = 1; - data[1] = 1; - data[2] = 1; - data[3] = 1; - data[4] = 1; - eth_test->flags |= ETH_TEST_FL_FAILED; - return; - } - set_bit(__IXGBE_TESTING, &adapter->state); - if (eth_test->flags == ETH_TEST_FL_OFFLINE) { - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - int i; - for (i = 0; i < adapter->num_vfs; i++) { - if (adapter->vfinfo[i].clear_to_send) { - e_warn(drv, "Please take active VFS " - "offline and restart the " - "adapter before running NIC " - "diagnostics\n"); - data[0] = 1; - data[1] = 1; - data[2] = 1; - data[3] = 1; - data[4] = 1; - eth_test->flags |= ETH_TEST_FL_FAILED; - clear_bit(__IXGBE_TESTING, - &adapter->state); - goto skip_ol_tests; - } - } - } - - /* Offline tests */ - e_info(hw, "offline testing starting\n"); - - /* Link test performed before hardware reset so autoneg doesn't - * interfere with test result */ - if (ixgbe_link_test(adapter, &data[4])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - if (if_running) - /* indicate we're in test mode */ - dev_close(netdev); - else - ixgbe_reset(adapter); - - e_info(hw, "register testing starting\n"); - if (ixgbe_reg_test(adapter, &data[0])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - ixgbe_reset(adapter); - e_info(hw, "eeprom testing starting\n"); - if (ixgbe_eeprom_test(adapter, &data[1])) - eth_test->flags 
|= ETH_TEST_FL_FAILED; - - ixgbe_reset(adapter); - e_info(hw, "interrupt testing starting\n"); - if (ixgbe_intr_test(adapter, &data[2])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - /* If SRIOV or VMDq is enabled then skip MAC - * loopback diagnostic. */ - if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | - IXGBE_FLAG_VMDQ_ENABLED)) { - e_info(hw, "skip MAC loopback diagnostic in VT mode\n"); - data[3] = 0; - goto skip_loopback; - } - - ixgbe_reset(adapter); - e_info(hw, "loopback testing starting\n"); - if (ixgbe_loopback_test(adapter, &data[3])) - eth_test->flags |= ETH_TEST_FL_FAILED; - -skip_loopback: - ixgbe_reset(adapter); - - /* clear testing bit and return adapter to previous state */ - clear_bit(__IXGBE_TESTING, &adapter->state); - if (if_running) - dev_open(netdev); - else if (hw->mac.ops.disable_tx_laser) - hw->mac.ops.disable_tx_laser(hw); - } else { - e_info(hw, "online testing starting\n"); - - /* Online tests */ - if (ixgbe_link_test(adapter, &data[4])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - /* Offline tests aren't run; pass by default */ - data[0] = 0; - data[1] = 0; - data[2] = 0; - data[3] = 0; - - clear_bit(__IXGBE_TESTING, &adapter->state); - } - -skip_ol_tests: - msleep_interruptible(4 * 1000); -} - -static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, - struct ethtool_wolinfo *wol) -{ - struct ixgbe_hw *hw = &adapter->hw; - int retval = 0; - - /* WOL not supported for all devices */ - if (!ixgbe_wol_supported(adapter, hw->device_id, - hw->subsystem_device_id)) { - retval = 1; - wol->supported = 0; - } - - return retval; -} - -static void ixgbe_get_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - wol->supported = WAKE_UCAST | WAKE_MCAST | - WAKE_BCAST | WAKE_MAGIC; - wol->wolopts = 0; - - if (ixgbe_wol_exclusion(adapter, wol) || - !device_can_wakeup(pci_dev_to_dev(adapter->pdev))) - return; - - if (adapter->wol & IXGBE_WUFC_EX) - wol->wolopts |= WAKE_UCAST; 
- if (adapter->wol & IXGBE_WUFC_MC) - wol->wolopts |= WAKE_MCAST; - if (adapter->wol & IXGBE_WUFC_BC) - wol->wolopts |= WAKE_BCAST; - if (adapter->wol & IXGBE_WUFC_MAG) - wol->wolopts |= WAKE_MAGIC; -} - -static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - - if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) - return -EOPNOTSUPP; - - if (ixgbe_wol_exclusion(adapter, wol)) - return wol->wolopts ? -EOPNOTSUPP : 0; - - adapter->wol = 0; - - if (wol->wolopts & WAKE_UCAST) - adapter->wol |= IXGBE_WUFC_EX; - if (wol->wolopts & WAKE_MCAST) - adapter->wol |= IXGBE_WUFC_MC; - if (wol->wolopts & WAKE_BCAST) - adapter->wol |= IXGBE_WUFC_BC; - if (wol->wolopts & WAKE_MAGIC) - adapter->wol |= IXGBE_WUFC_MAG; - - hw->wol_enabled = !!(adapter->wol); - - device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); - - return 0; -} - -static int ixgbe_nway_reset(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - if (netif_running(netdev)) - ixgbe_reinit_locked(adapter); - - return 0; -} - -#ifdef HAVE_ETHTOOL_SET_PHYS_ID -static int ixgbe_set_phys_id(struct net_device *netdev, - enum ethtool_phys_id_state state) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - s32 rc; - u16 regVal; - - if (!hw->mac.ops.led_on || !hw->mac.ops.led_off) - return -EOPNOTSUPP; - - switch (state) { - case ETHTOOL_ID_ACTIVE: - adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - return 2; - - case ETHTOOL_ID_ON: - if (hw->mac.ops.led_on(hw, hw->mac.led_link_act)) - return -EINVAL; - break; - - case ETHTOOL_ID_OFF: - if (hw->mac.ops.led_off(hw, hw->mac.led_link_act)) - return -EINVAL; - break; - - case ETHTOOL_ID_INACTIVE: - /* Restore LED settings */ - if (hw->mac.type == ixgbe_mac_X550EM_a) { - /* For M88E1512, to select page 3 in register 22 */ - regVal = 0x03; - rc = 
hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); - if (rc) { - hw_err(hw, "page register write failed, rc:%x\n", rc); - } - - /* For M88E1512, read from page 3, register 16 */ - regVal = 0x00; - rc = hw->phy.ops.read_reg(hw, 0x10, MDIO_MMD_PMAPMD, ®Val); - if (rc) { - hw_err(hw, "led function control register read failed, rc:%x\n", rc); - } - - /* For M88E1512, write to page 3 register 16 with force led on */ - regVal = (regVal & 0xFF00) | 0x0017; - rc = hw->phy.ops.write_reg(hw, 0x10, MDIO_MMD_PMAPMD, regVal); - if (rc) { - hw_err(hw, "led function control register write failed, rc:%x\n", rc); - } - - /* For M88E1512, write page 22 back to default 0 */ - regVal = 0x00; - rc = hw->phy.ops.write_reg(hw, 0x16, MDIO_MMD_PMAPMD, regVal); - if (rc) { - hw_err(hw, "page register write failed, rc:%x\n", rc); - } - } - else - IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); - break; - } - - return 0; -} -#else -static int ixgbe_phys_id(struct net_device *netdev, u32 data) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - u32 i; - - if (!hw->mac.ops.led_on || !hw->mac.ops.led_off) - return -EOPNOTSUPP; - - if (!data || data > 300) - data = 300; - - for (i = 0; i < (data * 1000); i += 400) { - if (hw->mac.ops.led_on(hw, hw->mac.led_link_act)) - return -EINVAL; - msleep_interruptible(200); - if (hw->mac.ops.led_off(hw, hw->mac.led_link_act)) - return -EINVAL; - msleep_interruptible(200); - } - - /* Restore LED settings */ - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - - return IXGBE_SUCCESS; -} -#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ - -static int ixgbe_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; - /* only valid if in constant ITR mode */ - if (adapter->rx_itr_setting <= 1) - ec->rx_coalesce_usecs = 
adapter->rx_itr_setting; - else - ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; - - /* if in mixed tx/rx queues per vector mode, report only rx settings */ - if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) - return 0; - - /* only valid if in constant ITR mode */ - if (adapter->tx_itr_setting <= 1) - ec->tx_coalesce_usecs = adapter->tx_itr_setting; - else - ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; - - return 0; -} - -/* - * this function must be called before setting the new value of - * rx_itr_setting - */ -static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - - /* nothing to do if LRO or RSC are not enabled */ - if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) || - !(netdev->features & NETIF_F_LRO)) - return false; - - /* check the feature flag value and enable RSC if necessary */ - if (adapter->rx_itr_setting == 1 || - adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { - if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { - adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; - e_info(probe, "rx-usecs value high enough " - "to re-enable RSC\n"); - return true; - } - /* if interrupt rate is too high then disable RSC */ - } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { - adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; - e_info(probe, "rx-usecs set too low, disabling RSC\n"); - return true; - } - return false; -} - -static int ixgbe_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int i; - u16 tx_itr_param, rx_itr_param; - u16 tx_itr_prev; - bool need_reset = false; - - if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { - /* reject Tx specific changes in case of mixed RxTx vectors */ - if (ec->tx_coalesce_usecs) - return -EINVAL; - tx_itr_prev = adapter->rx_itr_setting; - } else { - tx_itr_prev = adapter->tx_itr_setting; - } - - if (ec->tx_max_coalesced_frames_irq) - 
adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; - - if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || - (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) - return -EINVAL; - - if (ec->rx_coalesce_usecs > 1) - adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; - else - adapter->rx_itr_setting = ec->rx_coalesce_usecs; - - if (adapter->rx_itr_setting == 1) - rx_itr_param = IXGBE_20K_ITR; - else - rx_itr_param = adapter->rx_itr_setting; - - if (ec->tx_coalesce_usecs > 1) - adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; - else - adapter->tx_itr_setting = ec->tx_coalesce_usecs; - - if (adapter->tx_itr_setting == 1) - tx_itr_param = IXGBE_12K_ITR; - else - tx_itr_param = adapter->tx_itr_setting; - - /* mixed Rx/Tx */ - if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) - adapter->tx_itr_setting = adapter->rx_itr_setting; - - /* detect ITR changes that require update of TXDCTL.WTHRESH */ - if ((adapter->tx_itr_setting != 1) && - (adapter->tx_itr_setting < IXGBE_100K_ITR)) { - if ((tx_itr_prev == 1) || - (tx_itr_prev >= IXGBE_100K_ITR)) - need_reset = true; - } else { - if ((tx_itr_prev != 1) && - (tx_itr_prev < IXGBE_100K_ITR)) - need_reset = true; - } - - /* check the old value and enable RSC if necessary */ - need_reset |= ixgbe_update_rsc(adapter); - - if (adapter->hw.mac.dmac_config.watchdog_timer && - (!adapter->rx_itr_setting && !adapter->tx_itr_setting)) { - e_info(probe, - "Disabling DMA coalescing because interrupt throttling is disabled\n"); - adapter->hw.mac.dmac_config.watchdog_timer = 0; - ixgbe_dmac_config(&adapter->hw); - } - - for (i = 0; i < adapter->num_q_vectors; i++) { - struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; - - q_vector->tx.work_limit = adapter->tx_work_limit; - q_vector->rx.work_limit = adapter->rx_work_limit; - if (q_vector->tx.count && !q_vector->rx.count) - /* tx only */ - q_vector->itr = tx_itr_param; - else - /* rx only or mixed */ - q_vector->itr = rx_itr_param; - 
ixgbe_write_eitr(q_vector); - } - - /* - * do reset here at the end to make sure EITR==0 case is handled - * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings - * also locks in RSC enable/disable which requires reset - */ - if (need_reset) - ixgbe_do_reset(netdev); - - return 0; -} - -#ifndef HAVE_NDO_SET_FEATURES -static u32 ixgbe_get_rx_csum(struct net_device *netdev) -{ - return !!(netdev->features & NETIF_F_RXCSUM); -} - -static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - bool need_reset = false; - - if (data) - netdev->features |= NETIF_F_RXCSUM; - else - netdev->features &= ~NETIF_F_RXCSUM; - - /* LRO and RSC both depend on RX checksum to function */ - if (!data && (netdev->features & NETIF_F_LRO)) { - netdev->features &= ~NETIF_F_LRO; - - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { - adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; - need_reset = true; - } - } - -#ifdef HAVE_VXLAN_RX_OFFLOAD - if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && data) { - netdev->hw_enc_features |= NETIF_F_RXCSUM | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM; - if (!need_reset) - adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; - } else { - netdev->hw_enc_features &= ~(NETIF_F_RXCSUM | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM); - ixgbe_clear_udp_tunnel_port(adapter, - IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); - } -#endif /* HAVE_VXLAN_RX_OFFLOAD */ - - if (need_reset) - ixgbe_do_reset(netdev); - - return 0; -} - -static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); -#ifdef NETIF_F_IPV6_CSUM - u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; -#else - u32 feature_list = NETIF_F_IP_CSUM; -#endif - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: -#ifdef HAVE_ENCAP_TSO_OFFLOAD - if (data) - 
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; - else - netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL; - feature_list |= NETIF_F_GSO_UDP_TUNNEL; -#endif /* HAVE_ENCAP_TSO_OFFLOAD */ - feature_list |= NETIF_F_SCTP_CSUM; - break; - default: - break; - } - - if (data) - netdev->features |= feature_list; - else - netdev->features &= ~feature_list; - - return 0; -} - -#ifdef NETIF_F_TSO -static int ixgbe_set_tso(struct net_device *netdev, u32 data) -{ -#ifdef NETIF_F_TSO6 - u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6; -#else - u32 feature_list = NETIF_F_TSO; -#endif - - if (data) - netdev->features |= feature_list; - else - netdev->features &= ~feature_list; - -#ifndef HAVE_NETDEV_VLAN_FEATURES - if (!data) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct net_device *v_netdev; - int i; - - /* disable TSO on all VLANs if they're present */ - if (!adapter->vlgrp) - goto tso_out; - - for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { - v_netdev = vlan_group_get_device(adapter->vlgrp, i); - if (!v_netdev) - continue; - - v_netdev->features &= ~feature_list; - vlan_group_set_device(adapter->vlgrp, i, v_netdev); - } - } - -tso_out: - -#endif /* HAVE_NETDEV_VLAN_FEATURES */ - return 0; -} - -#endif /* NETIF_F_TSO */ -#ifdef ETHTOOL_GFLAGS -static int ixgbe_set_flags(struct net_device *netdev, u32 data) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN; - u32 changed = netdev->features ^ data; - bool need_reset = false; - int rc; - -#ifndef HAVE_VLAN_RX_REGISTER - if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && - !(data & ETH_FLAG_RXVLAN)) - return -EINVAL; - -#endif - if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) - supported_flags |= ETH_FLAG_LRO; - -#ifdef ETHTOOL_GRXRINGS - switch (adapter->hw.mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - case ixgbe_mac_X540: - case ixgbe_mac_82599EB: - supported_flags |= ETH_FLAG_NTUPLE; - default: - break; 
- } - -#endif -#ifdef NETIF_F_RXHASH - supported_flags |= ETH_FLAG_RXHASH; - -#endif - rc = ethtool_op_set_flags(netdev, data, supported_flags); - if (rc) - return rc; - -#ifndef HAVE_VLAN_RX_REGISTER - if (changed & ETH_FLAG_RXVLAN) - ixgbe_vlan_mode(netdev, netdev->features); -#endif - -#ifdef HAVE_VXLAN_RX_OFFLOAD - if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && - netdev->features & NETIF_F_RXCSUM) { - vxlan_get_rx_port(netdev); - else - ixgbe_clear_udp_tunnel_port(adapter, - IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); - } -#endif /* HAVE_VXLAN_RX_OFFLOAD */ - - /* if state changes we need to update adapter->flags and reset */ - if (!(netdev->features & NETIF_F_LRO)) { - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) - need_reset = true; - adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; - } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && - !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { - if (adapter->rx_itr_setting == 1 || - adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { - adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; - need_reset = true; - } else if (changed & ETH_FLAG_LRO) { - e_info(probe, "rx-usecs set too low, " - "disabling RSC\n"); - } - } - -#ifdef ETHTOOL_GRXRINGS - /* - * Check if Flow Director n-tuple support was enabled or disabled. If - * the state changed, we need to reset. 
- */ - switch (netdev->features & NETIF_F_NTUPLE) { - case NETIF_F_NTUPLE: - /* turn off ATR, enable perfect filters and reset */ - if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) - need_reset = true; - - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - break; - default: - /* turn off perfect filters, enable ATR and reset */ - if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) - need_reset = true; - - adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - - /* We cannot enable ATR if VMDq is enabled */ - if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) - break; - - /* We cannot enable ATR if we have 2 or more traffic classes */ - if (netdev_get_num_tc(netdev) > 1) - break; - - /* We cannot enable ATR if RSS is disabled */ - if (adapter->ring_feature[RING_F_RSS].limit <= 1) - break; - - /* A sample rate of 0 indicates ATR disabled */ - if (!adapter->atr_sample_rate) - break; - - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - break; - } - -#endif /* ETHTOOL_GRXRINGS */ - if (need_reset) - ixgbe_do_reset(netdev); - - return 0; -} - -#endif /* ETHTOOL_GFLAGS */ -#endif /* HAVE_NDO_SET_FEATURES */ -#ifdef ETHTOOL_GRXRINGS -static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, - struct ethtool_rxnfc *cmd) -{ - union ixgbe_atr_input *mask = &adapter->fdir_mask; - struct ethtool_rx_flow_spec *fsp = - (struct ethtool_rx_flow_spec *)&cmd->fs; - struct hlist_node *node2; - struct ixgbe_fdir_filter *rule = NULL; - - /* report total rule count */ - cmd->data = (1024 << adapter->fdir_pballoc) - 2; - - hlist_for_each_entry_safe(rule, node2, - &adapter->fdir_filter_list, fdir_node) { - if (fsp->location <= rule->sw_idx) - break; - } - - if (!rule || fsp->location != rule->sw_idx) - return -EINVAL; - - /* fill out the flow spec entry */ - - /* set flow type field */ - switch (rule->filter.formatted.flow_type) { - case IXGBE_ATR_FLOW_TYPE_TCPV4: - fsp->flow_type = TCP_V4_FLOW; - break; - case 
IXGBE_ATR_FLOW_TYPE_UDPV4: - fsp->flow_type = UDP_V4_FLOW; - break; - case IXGBE_ATR_FLOW_TYPE_SCTPV4: - fsp->flow_type = SCTP_V4_FLOW; - break; - case IXGBE_ATR_FLOW_TYPE_IPV4: - fsp->flow_type = IP_USER_FLOW; - fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; - fsp->h_u.usr_ip4_spec.proto = 0; - fsp->m_u.usr_ip4_spec.proto = 0; - break; - default: - return -EINVAL; - } - - fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; - fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; - fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; - fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; - fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; - fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; - fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; - fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; - fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id; - fsp->m_ext.vlan_tci = mask->formatted.vlan_id; - fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; - fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; - fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); - fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); - fsp->flow_type |= FLOW_EXT; - - /* record action */ - if (rule->action == IXGBE_FDIR_DROP_QUEUE) - fsp->ring_cookie = RX_CLS_FLOW_DISC; - else - fsp->ring_cookie = rule->action; - - return 0; -} - -static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, - struct ethtool_rxnfc *cmd, - u32 *rule_locs) -{ - struct hlist_node *node2; - struct ixgbe_fdir_filter *rule; - int cnt = 0; - - /* report total rule count */ - cmd->data = (1024 << adapter->fdir_pballoc) - 2; - - hlist_for_each_entry_safe(rule, node2, - &adapter->fdir_filter_list, fdir_node) { - if (cnt == cmd->rule_cnt) - return -EMSGSIZE; - rule_locs[cnt] = rule->sw_idx; - cnt++; - } - - cmd->rule_cnt = cnt; - - return 0; -} - -static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, - struct 
ethtool_rxnfc *cmd) -{ - cmd->data = 0; - - /* Report default options for RSS on ixgbe */ - switch (cmd->flow_type) { - case TCP_V4_FLOW: - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through */ - case UDP_V4_FLOW: - if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through */ - case SCTP_V4_FLOW: - case AH_ESP_V4_FLOW: - case AH_V4_FLOW: - case ESP_V4_FLOW: - case IPV4_FLOW: - cmd->data |= RXH_IP_SRC | RXH_IP_DST; - break; - case TCP_V6_FLOW: - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through */ - case UDP_V6_FLOW: - if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through */ - case SCTP_V6_FLOW: - case AH_ESP_V6_FLOW: - case AH_V6_FLOW: - case ESP_V6_FLOW: - case IPV6_FLOW: - cmd->data |= RXH_IP_SRC | RXH_IP_DST; - break; - default: - return -EINVAL; - } - - return 0; -} - -static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, -#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS - void *rule_locs) -#else - u32 *rule_locs) -#endif -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - int ret = -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = adapter->num_rx_queues; - ret = 0; - break; - case ETHTOOL_GRXCLSRLCNT: - cmd->rule_cnt = adapter->fdir_filter_count; - ret = 0; - break; - case ETHTOOL_GRXCLSRULE: - ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); - break; - case ETHTOOL_GRXCLSRLALL: - ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, - (u32 *)rule_locs); - break; - case ETHTOOL_GRXFH: - ret = ixgbe_get_rss_hash_opts(adapter, cmd); - break; - default: - break; - } - - return ret; -} - -static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, - struct ixgbe_fdir_filter *input, - u16 sw_idx) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct hlist_node *node2; - struct ixgbe_fdir_filter *rule, *parent; - bool deleted = false; - s32 err; - - parent = NULL; - rule = 
NULL; - - hlist_for_each_entry_safe(rule, node2, - &adapter->fdir_filter_list, fdir_node) { - /* hash found, or no matching entry */ - if (rule->sw_idx >= sw_idx) - break; - parent = rule; - } - - /* if there is an old rule occupying our place remove it */ - if (rule && (rule->sw_idx == sw_idx)) { - /* hardware filters are only configured when interface is up, - * and we should not issue filter commands while the interface - * is down - */ - if (netif_running(adapter->netdev) && - (!input || (rule->filter.formatted.bkt_hash != - input->filter.formatted.bkt_hash))) { - err = ixgbe_fdir_erase_perfect_filter_82599(hw, - &rule->filter, - sw_idx); - if (err) - return -EINVAL; - } - - hlist_del(&rule->fdir_node); - kfree(rule); - adapter->fdir_filter_count--; - deleted = true; - } - - /* If we weren't given an input, then this was a request to delete a - * filter. We should return -EINVAL if the filter wasn't found, but - * return 0 if the rule was successfully deleted. - */ - if (!input) - return deleted ? 
0 : -EINVAL; - - /* initialize node and set software index */ - INIT_HLIST_NODE(&input->fdir_node); - - /* add filter to the list */ - if (parent) - hlist_add_behind(&input->fdir_node, &parent->fdir_node); - else - hlist_add_head(&input->fdir_node, - &adapter->fdir_filter_list); - - /* update counts */ - adapter->fdir_filter_count++; - - return 0; -} - -static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, - u8 *flow_type) -{ - switch (fsp->flow_type & ~FLOW_EXT) { - case TCP_V4_FLOW: - *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; - break; - case UDP_V4_FLOW: - *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; - break; - case SCTP_V4_FLOW: - *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; - break; - case IP_USER_FLOW: - switch (fsp->h_u.usr_ip4_spec.proto) { - case IPPROTO_TCP: - *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; - break; - case IPPROTO_UDP: - *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; - break; - case IPPROTO_SCTP: - *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; - break; - case 0: - if (!fsp->m_u.usr_ip4_spec.proto) { - *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; - break; - } - /* fall through */ - default: - return 0; - } - break; - default: - return 0; - } - - return 1; -} - -static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, - struct ethtool_rxnfc *cmd) -{ - struct ethtool_rx_flow_spec *fsp = - (struct ethtool_rx_flow_spec *)&cmd->fs; - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_fdir_filter *input; - union ixgbe_atr_input mask; - int err; - - if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) - return -EOPNOTSUPP; - - /* - * Don't allow programming if the action is a queue greater than - * the number of online Rx queues. 
- */ - if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && - (fsp->ring_cookie >= adapter->num_rx_queues)) - return -EINVAL; - - /* Don't allow indexes to exist outside of available space */ - if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { - e_err(drv, "Location out of range\n"); - return -EINVAL; - } - - input = kzalloc(sizeof(*input), GFP_ATOMIC); - if (!input) - return -ENOMEM; - - memset(&mask, 0, sizeof(union ixgbe_atr_input)); - - /* set SW index */ - input->sw_idx = fsp->location; - - /* record flow type */ - if (!ixgbe_flowspec_to_flow_type(fsp, - &input->filter.formatted.flow_type)) { - e_err(drv, "Unrecognized flow type\n"); - goto err_out; - } - - mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | - IXGBE_ATR_L4TYPE_MASK; - - if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) - mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; - - /* Copy input into formatted structures */ - input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; - mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; - input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; - mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; - input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; - mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; - input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; - mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; - - if (fsp->flow_type & FLOW_EXT) { - input->filter.formatted.vm_pool = - (unsigned char)ntohl(fsp->h_ext.data[1]); - mask.formatted.vm_pool = - (unsigned char)ntohl(fsp->m_ext.data[1]); - input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci; - mask.formatted.vlan_id = fsp->m_ext.vlan_tci; - input->filter.formatted.flex_bytes = - fsp->h_ext.vlan_etype; - mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; - } - - /* determine if we need to drop or route the packet */ - if (fsp->ring_cookie == RX_CLS_FLOW_DISC) - input->action = IXGBE_FDIR_DROP_QUEUE; - else - 
input->action = fsp->ring_cookie; - - spin_lock(&adapter->fdir_perfect_lock); - - if (hlist_empty(&adapter->fdir_filter_list)) { - /* save mask and program input mask into HW */ - memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); - err = ixgbe_fdir_set_input_mask_82599(hw, &mask, adapter->cloud_mode); - if (err) { - e_err(drv, "Error writing mask\n"); - goto err_out_w_lock; - } - } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { - e_err(drv, "Hardware only supports one mask per port. To change the mask you must first delete all the rules.\n"); - goto err_out_w_lock; - } - - /* apply mask and compute/store hash */ - ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); - - /* only program filters to hardware if the net device is running, as - * we store the filters in the Rx buffer which is not allocated when - * the device is down - */ - if (netif_running(adapter->netdev)) { - err = ixgbe_fdir_write_perfect_filter_82599(hw, - &input->filter, input->sw_idx, - (input->action == IXGBE_FDIR_DROP_QUEUE) ? 
- IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[input->action]->reg_idx, - adapter->cloud_mode); - if (err) - goto err_out_w_lock; - } - - ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); - - spin_unlock(&adapter->fdir_perfect_lock); - - return err; -err_out_w_lock: - spin_unlock(&adapter->fdir_perfect_lock); -err_out: - kfree(input); - return -EINVAL; -} - -static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, - struct ethtool_rxnfc *cmd) -{ - struct ethtool_rx_flow_spec *fsp = - (struct ethtool_rx_flow_spec *)&cmd->fs; - int err; - - spin_lock(&adapter->fdir_perfect_lock); - err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); - spin_unlock(&adapter->fdir_perfect_lock); - - return err; -} - -#ifdef ETHTOOL_SRXNTUPLE -/* - * We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid - * a null pointer dereference as it was assumend if the NETIF_F_NTUPLE flag - * was defined that this function was present. - */ -static int ixgbe_set_rx_ntuple(struct net_device __always_unused *dev, - struct ethtool_rx_ntuple __always_unused *cmd) -{ - return -EOPNOTSUPP; -} - -#endif -#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ - IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) -static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, - struct ethtool_rxnfc *nfc) -{ - u32 flags2 = adapter->flags2; - - /* - * RSS does not support anything other than hashing - * to queues on src and dst IPs and ports - */ - if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3)) - return -EINVAL; - - switch (nfc->flow_type) { - case TCP_V4_FLOW: - case TCP_V6_FLOW: - if (!(nfc->data & RXH_IP_SRC) || - !(nfc->data & RXH_IP_DST) || - !(nfc->data & RXH_L4_B_0_1) || - !(nfc->data & RXH_L4_B_2_3)) - return -EINVAL; - break; - case UDP_V4_FLOW: - if (!(nfc->data & RXH_IP_SRC) || - !(nfc->data & RXH_IP_DST)) - return -EINVAL; - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - flags2 &= 
~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; - break; - default: - return -EINVAL; - } - break; - case UDP_V6_FLOW: - if (!(nfc->data & RXH_IP_SRC) || - !(nfc->data & RXH_IP_DST)) - return -EINVAL; - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; - break; - default: - return -EINVAL; - } - break; - case AH_ESP_V4_FLOW: - case AH_V4_FLOW: - case ESP_V4_FLOW: - case SCTP_V4_FLOW: - case AH_ESP_V6_FLOW: - case AH_V6_FLOW: - case ESP_V6_FLOW: - case SCTP_V6_FLOW: - if (!(nfc->data & RXH_IP_SRC) || - !(nfc->data & RXH_IP_DST) || - (nfc->data & RXH_L4_B_0_1) || - (nfc->data & RXH_L4_B_2_3)) - return -EINVAL; - break; - default: - return -EINVAL; - } - - /* if we changed something we need to update flags */ - if (flags2 != adapter->flags2) { - struct ixgbe_hw *hw = &adapter->hw; - u32 mrqc; - unsigned int pf_pool = adapter->num_vfs; - - if ((hw->mac.type >= ixgbe_mac_X550) && - (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool)); - else - mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); - - if ((flags2 & UDP_RSS_FLAGS) && - !(adapter->flags2 & UDP_RSS_FLAGS)) - e_warn(drv, "enabling UDP RSS: fragmented packets" - " may arrive out of order to the stack above\n"); - - adapter->flags2 = flags2; - - /* Perform hash on these packet types */ - mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 - | IXGBE_MRQC_RSS_FIELD_IPV4_TCP - | IXGBE_MRQC_RSS_FIELD_IPV6 - | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; - - mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP | - IXGBE_MRQC_RSS_FIELD_IPV6_UDP); - - if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) - mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; - - if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) - mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; - - if ((hw->mac.type >= ixgbe_mac_X550) && - (adapter->flags & 
IXGBE_FLAG_SRIOV_ENABLED)) - IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc); - else - IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); - } - - return 0; -} - -static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - int ret = -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_SRXCLSRLINS: - ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd); - break; - case ETHTOOL_SRXCLSRLDEL: - ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd); - break; - case ETHTOOL_SRXFH: - ret = ixgbe_set_rss_hash_opt(adapter, cmd); - break; - default: - break; - } - - return ret; -} - -#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) -static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter) -{ - if (adapter->hw.mac.type < ixgbe_mac_X550) - return 16; - else - return 64; -} - -static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev) -{ - return IXGBE_RSS_KEY_SIZE; -} - -static u32 ixgbe_rss_indir_size(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - return ixgbe_rss_indir_tbl_entries(adapter); -} - -static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir) -{ - int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter); - u16 rss_m = adapter->ring_feature[RING_F_RSS].mask; - - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - rss_m = adapter->ring_feature[RING_F_RSS].indices - 1; - - for (i = 0; i < reta_size; i++) - indir[i] = adapter->rss_indir_tbl[i] & rss_m; -} - -#ifdef HAVE_RXFH_HASHFUNC -static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) -#else -static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) -#endif -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - -#ifdef HAVE_RXFH_HASHFUNC - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; -#endif - - if (indir) - ixgbe_get_reta(adapter, indir); - - if (key) - memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev)); - - return 0; -} - -#ifdef 
HAVE_RXFH_HASHFUNC -static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) -#else -#ifdef HAVE_RXFH_NONCONST -static int ixgbe_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key) -#else -static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key) -#endif /* HAVE_RXFH_NONCONST */ -#endif /* HAVE_RXFH_HASHFUNC */ -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int i; - u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); - -#ifdef HAVE_RXFH_HASHFUNC - if (hfunc) - return -EINVAL; -#endif - - /* Fill out the redirection table */ - if (indir) { - int max_queues = min_t(int, adapter->num_rx_queues, - ixgbe_rss_indir_tbl_max(adapter)); - - /*Allow at least 2 queues w/ SR-IOV.*/ - if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && - (max_queues < 2)) - max_queues = 2; - - /* Verify user input. */ - for (i = 0; i < reta_entries; i++) - if (indir[i] >= max_queues) - return -EINVAL; - - for (i = 0; i < reta_entries; i++) - adapter->rss_indir_tbl[i] = indir[i]; - } - - /* Fill out the rss hash key */ - if (key) { - memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev)); - ixgbe_store_key(adapter); - } - - ixgbe_store_reta(adapter); - - return 0; -} -#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ - -#ifdef HAVE_ETHTOOL_GET_TS_INFO -static int ixgbe_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - - /* we always support timestamping disabled */ - info->rx_filters = 1 << HWTSTAMP_FILTER_NONE; - - switch (adapter->hw.mac.type) { -#ifdef HAVE_PTP_1588_CLOCK - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL; - /* fallthrough */ - case ixgbe_mac_X540: - case ixgbe_mac_82599EB: - info->so_timestamping = - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | - SOF_TIMESTAMPING_TX_HARDWARE | - 
SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; - - if (adapter->ptp_clock) - info->phc_index = ptp_clock_index(adapter->ptp_clock); - else - info->phc_index = -1; - - info->tx_types = - (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON); - - info->rx_filters |= - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); - break; -#endif /* HAVE_PTP_1588_CLOCK */ - default: - return ethtool_op_get_ts_info(dev, info); - break; - } - return 0; -} -#endif /* HAVE_ETHTOOL_GET_TS_INFO */ - -#endif /* ETHTOOL_GRXRINGS */ -#ifdef ETHTOOL_SCHANNELS -static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) -{ - unsigned int max_combined; - u8 tcs = netdev_get_num_tc(adapter->netdev); - - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { - /* We only support one q_vector without MSI-X */ - max_combined = 1; - } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - /* Limit value based on the queue mask */ - max_combined = adapter->ring_feature[RING_F_RSS].mask + 1; - } else if (tcs > 1) { - /* For DCB report channels per traffic class */ - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { - /* 8 TC w/ 4 queues per TC */ - max_combined = 4; - } else if (tcs > 4) { - /* 8 TC w/ 8 queues per TC */ - max_combined = 8; - } else { - /* 4 TC w/ 16 queues per TC */ - max_combined = 16; - } - } else if (adapter->atr_sample_rate) { - /* support up to 64 queues with ATR */ - max_combined = IXGBE_MAX_FDIR_INDICES; - } else { - /* support up to max allowed queues with RSS */ - max_combined = ixgbe_max_rss_indices(adapter); - } - - return max_combined; 
-} - -static void ixgbe_get_channels(struct net_device *dev, - struct ethtool_channels *ch) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - - /* report maximum channels */ - ch->max_combined = ixgbe_max_channels(adapter); - - /* report info for other vector */ - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - ch->max_other = NON_Q_VECTORS; - ch->other_count = NON_Q_VECTORS; - } - - /* record RSS queues */ - ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; - - /* nothing else to report if RSS is disabled */ - if (ch->combined_count == 1) - return; - - /* we do not support ATR queueing if SR-IOV is enabled */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - return; - - /* same thing goes for being DCB enabled */ - if (netdev_get_num_tc(dev) > 1) - return; - - /* if ATR is disabled we can exit */ - if (!adapter->atr_sample_rate) - return; - - /* report flow director queues as maximum channels */ - ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices; -} - -static int ixgbe_set_channels(struct net_device *dev, - struct ethtool_channels *ch) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - unsigned int count = ch->combined_count; - u8 max_rss_indices = ixgbe_max_rss_indices(adapter); - - /* verify they are not requesting separate vectors */ - if (!count || ch->rx_count || ch->tx_count) - return -EINVAL; - - /* verify other_count has not changed */ - if (ch->other_count != NON_Q_VECTORS) - return -EINVAL; - - /* verify the number of channels does not exceed hardware limits */ - if (count > ixgbe_max_channels(adapter)) - return -EINVAL; - - /* update feature limits from largest to smallest supported values */ - adapter->ring_feature[RING_F_FDIR].limit = count; - - /* cap RSS limit */ - if (count > max_rss_indices) - count = max_rss_indices; - adapter->ring_feature[RING_F_RSS].limit = count; - -#if IS_ENABLED(CONFIG_FCOE) - /* cap FCoE limit at 8 */ - if (count > IXGBE_FCRETA_SIZE) - count = IXGBE_FCRETA_SIZE; - 
adapter->ring_feature[RING_F_FCOE].limit = count; -#endif /* CONFIG_FCOE */ - - /* use setup TC to update any traffic class queue mapping */ - return ixgbe_setup_tc(dev, netdev_get_num_tc(dev)); -} -#endif /* ETHTOOL_SCHANNELS */ - -#ifdef ETHTOOL_GMODULEINFO -static int ixgbe_get_module_info(struct net_device *dev, - struct ethtool_modinfo *modinfo) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_hw *hw = &adapter->hw; - u32 status; - u8 sff8472_rev, addr_mode; - bool page_swap = false; - - /* Check whether we support SFF-8472 or not */ - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_SFF_8472_COMP, - &sff8472_rev); - if (status != 0) - return -EIO; - - /* addressing mode is not supported */ - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_SFF_8472_SWAP, - &addr_mode); - if (status != 0) - return -EIO; - - if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { - e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); - page_swap = true; - } - - if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { - /* We have a SFP, but it does not support SFF-8472 */ - modinfo->type = ETH_MODULE_SFF_8079; - modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; - } else { - /* We have a SFP which supports a revision of SFF-8472. 
*/ - modinfo->type = ETH_MODULE_SFF_8472; - modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; - } - - return 0; -} - -static int ixgbe_get_module_eeprom(struct net_device *dev, - struct ethtool_eeprom *ee, - u8 *data) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_hw *hw = &adapter->hw; - u32 status = IXGBE_ERR_PHY_ADDR_INVALID; - u8 databyte = 0xFF; - int i = 0; - - if (ee->len == 0) - return -EINVAL; - - for (i = ee->offset; i < ee->offset + ee->len; i++) { - /* I2C reads can take long time */ - if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) - return -EBUSY; - - if (i < ETH_MODULE_SFF_8079_LEN) - status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); - else - status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); - - if (status != 0) - return -EIO; - - data[i - ee->offset] = databyte; - } - - return 0; -} -#endif /* ETHTOOL_GMODULEINFO */ - -#ifdef ETHTOOL_GEEE - -static const struct { - ixgbe_link_speed mac_speed; - u32 supported; -} ixgbe_ls_map[] = { - { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full }, - { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full }, - { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full }, - { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full }, - { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full }, -}; - -static const struct { - u32 lp_advertised; - u32 mac_speed; -} ixgbe_lp_map[] = { - { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full }, - { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full }, - { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full }, - { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full }, - { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full }, - { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full}, -}; - -static int -ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata) -{ - u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; - struct ixgbe_hw *hw = &adapter->hw; - s32 rc; - u16 i; - - rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, 
&info); - if (rc) - return rc; - - edata->lp_advertised = 0; - for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) { - if (info[0] & ixgbe_lp_map[i].lp_advertised) - edata->lp_advertised |= ixgbe_lp_map[i].mac_speed; - } - - edata->supported = 0; - for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { - if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed) - edata->supported |= ixgbe_ls_map[i].supported; - } - - edata->advertised = 0; - for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { - if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed) - edata->advertised |= ixgbe_ls_map[i].supported; - } - - edata->eee_enabled = !!edata->advertised; - edata->tx_lpi_enabled = edata->eee_enabled; - if (edata->advertised & edata->lp_advertised) - edata->eee_active = true; - - return 0; -} - -static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - - if (!hw->mac.ops.setup_eee) - return -EOPNOTSUPP; - - if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) - return -EOPNOTSUPP; - - if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw) - return ixgbe_get_eee_fw(adapter, edata); - - return -EOPNOTSUPP; -} -#endif /* ETHTOOL_GEEE */ - -#ifdef ETHTOOL_SEEE -static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - struct ethtool_eee eee_data; - s32 ret_val; - - if (!(hw->mac.ops.setup_eee && - (adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))) - return -EOPNOTSUPP; - - memset(&eee_data, 0, sizeof(struct ethtool_eee)); - - ret_val = ixgbe_get_eee(netdev, &eee_data); - if (ret_val) - return ret_val; - - if (eee_data.eee_enabled && !edata->eee_enabled) { - if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { - e_dev_err("Setting EEE tx-lpi is not supported\n"); - return -EINVAL; - } - - if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) { - 
e_dev_err("Setting EEE Tx LPI timer is not supported\n"); - return -EINVAL; - } - - if (eee_data.advertised != edata->advertised) { - e_dev_err("Setting EEE advertised speeds is not supported\n"); - return -EINVAL; - } - - } - - if (eee_data.eee_enabled != edata->eee_enabled) { - - if (edata->eee_enabled) { - adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; - hw->phy.eee_speeds_advertised = - hw->phy.eee_speeds_supported; - } else { - adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; - hw->phy.eee_speeds_advertised = 0; - } - - /* reset link */ - if (netif_running(netdev)) - ixgbe_reinit_locked(adapter); - else - ixgbe_reset(adapter); - } - - return 0; -} -#endif /* ETHTOOL_SEEE */ - -#ifdef HAVE_ETHTOOL_GET_SSET_COUNT -/** - * ixgbe_get_priv_flags - report device private flags - * @netdev: network interface device structure - * - * The get string set count and the string set should be matched for each - * flag returned. Add new strings for each flag to the ixgbe_priv_flags_strings - * array. - * - * Returns a u32 bitmap of flags. 
- **/ -static u32 ixgbe_get_priv_flags(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - u32 priv_flags = 0; - - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) - priv_flags |= IXGBE_PRIV_FLAGS_FD_ATR; -#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC - - if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) - priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX; -#endif - - return priv_flags; -} - -/** - * ixgbe_set_priv_flags - set private flags - * @netdev: network interface device structure - * @flags: bit flags to be set - **/ -static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); -#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC - unsigned int flags2 = adapter->flags2; -#endif - unsigned int flags = adapter->flags; - - /* allow the user to control the state of the Flow - * Director ATR (Application Targeted Routing) feature - * of the driver - */ - flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - if (priv_flags & IXGBE_PRIV_FLAGS_FD_ATR) { - /* We cannot enable ATR if VMDq is enabled */ - if (flags & IXGBE_FLAG_VMDQ_ENABLED) - return -EINVAL; - /* We cannot enable ATR if we have 2 or more traffic classes */ - if (netdev_get_num_tc(netdev) > 1) - return -EINVAL; - /* We cannot enable ATR if RSS is disabled */ - if (adapter->ring_feature[RING_F_RSS].limit <= 1) - return -EINVAL; - /* A sample rate of 0 indicates ATR disabled */ - if (!adapter->atr_sample_rate) - return -EINVAL; - flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - } -#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC - - flags2 &= ~IXGBE_FLAG2_RX_LEGACY; - if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX) - flags2 |= IXGBE_FLAG2_RX_LEGACY; -#endif - - if (flags != adapter->flags) { - adapter->flags = flags; - - /* ATR state change requires a reset */ - ixgbe_do_reset(netdev); -#ifndef HAVE_SWIOTLB_SKIP_CPU_SYNC - } -#else - } else if (flags2 != adapter->flags2) { - adapter->flags2 = flags2; - - /* reset interface to repopulate queues */ - if (netif_running(netdev)) - 
ixgbe_reinit_locked(adapter); - } -#endif - - return 0; -} - -#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ -static struct ethtool_ops ixgbe_ethtool_ops = { -#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE - .get_link_ksettings = ixgbe_get_link_ksettings, - .set_link_ksettings = ixgbe_set_link_ksettings, -#else - .get_settings = ixgbe_get_settings, - .set_settings = ixgbe_set_settings, -#endif - .get_drvinfo = ixgbe_get_drvinfo, - .get_regs_len = ixgbe_get_regs_len, - .get_regs = ixgbe_get_regs, - .get_wol = ixgbe_get_wol, - .set_wol = ixgbe_set_wol, - .nway_reset = ixgbe_nway_reset, - .get_link = ethtool_op_get_link, - .get_eeprom_len = ixgbe_get_eeprom_len, - .get_eeprom = ixgbe_get_eeprom, - .set_eeprom = ixgbe_set_eeprom, - .get_ringparam = ixgbe_get_ringparam, - .set_ringparam = ixgbe_set_ringparam, - .get_pauseparam = ixgbe_get_pauseparam, - .set_pauseparam = ixgbe_set_pauseparam, - .get_msglevel = ixgbe_get_msglevel, - .set_msglevel = ixgbe_set_msglevel, -#ifndef HAVE_ETHTOOL_GET_SSET_COUNT - .self_test_count = ixgbe_diag_test_count, -#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ - .self_test = ixgbe_diag_test, - .get_strings = ixgbe_get_strings, -#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT -#ifdef HAVE_ETHTOOL_SET_PHYS_ID - .set_phys_id = ixgbe_set_phys_id, -#else - .phys_id = ixgbe_phys_id, -#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ -#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ -#ifndef HAVE_ETHTOOL_GET_SSET_COUNT - .get_stats_count = ixgbe_get_stats_count, -#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ - .get_sset_count = ixgbe_get_sset_count, - .get_priv_flags = ixgbe_get_priv_flags, - .set_priv_flags = ixgbe_set_priv_flags, -#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ - .get_ethtool_stats = ixgbe_get_ethtool_stats, -#ifdef HAVE_ETHTOOL_GET_PERM_ADDR - .get_perm_addr = ethtool_op_get_perm_addr, -#endif - .get_coalesce = ixgbe_get_coalesce, - .set_coalesce = ixgbe_set_coalesce, -#ifndef HAVE_NDO_SET_FEATURES - .get_rx_csum = ixgbe_get_rx_csum, - .set_rx_csum = ixgbe_set_rx_csum, 
- .get_tx_csum = ethtool_op_get_tx_csum, - .set_tx_csum = ixgbe_set_tx_csum, - .get_sg = ethtool_op_get_sg, - .set_sg = ethtool_op_set_sg, -#ifdef NETIF_F_TSO - .get_tso = ethtool_op_get_tso, - .set_tso = ixgbe_set_tso, -#endif -#ifdef ETHTOOL_GFLAGS - .get_flags = ethtool_op_get_flags, - .set_flags = ixgbe_set_flags, -#endif -#endif /* HAVE_NDO_SET_FEATURES */ -#ifdef ETHTOOL_GRXRINGS - .get_rxnfc = ixgbe_get_rxnfc, - .set_rxnfc = ixgbe_set_rxnfc, -#ifdef ETHTOOL_SRXNTUPLE - .set_rx_ntuple = ixgbe_set_rx_ntuple, -#endif -#endif /* ETHTOOL_GRXRINGS */ -#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT -#ifdef ETHTOOL_GEEE - .get_eee = ixgbe_get_eee, -#endif /* ETHTOOL_GEEE */ -#ifdef ETHTOOL_SEEE - .set_eee = ixgbe_set_eee, -#endif /* ETHTOOL_SEEE */ -#ifdef ETHTOOL_SCHANNELS - .get_channels = ixgbe_get_channels, - .set_channels = ixgbe_set_channels, -#endif -#ifdef ETHTOOL_GMODULEINFO - .get_module_info = ixgbe_get_module_info, - .get_module_eeprom = ixgbe_get_module_eeprom, -#endif -#ifdef HAVE_ETHTOOL_GET_TS_INFO - .get_ts_info = ixgbe_get_ts_info, -#endif -#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) - .get_rxfh_indir_size = ixgbe_rss_indir_size, - .get_rxfh_key_size = ixgbe_get_rxfh_key_size, - .get_rxfh = ixgbe_get_rxfh, - .set_rxfh = ixgbe_set_rxfh, -#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ -#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ -}; - -#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT -static const struct ethtool_ops_ext ixgbe_ethtool_ops_ext = { - .size = sizeof(struct ethtool_ops_ext), - .get_ts_info = ixgbe_get_ts_info, - .set_phys_id = ixgbe_set_phys_id, - .get_channels = ixgbe_get_channels, - .set_channels = ixgbe_set_channels, -#ifdef ETHTOOL_GMODULEINFO - .get_module_info = ixgbe_get_module_info, - .get_module_eeprom = ixgbe_get_module_eeprom, -#endif -#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) - .get_rxfh_indir_size = ixgbe_rss_indir_size, - .get_rxfh_key_size = ixgbe_get_rxfh_key_size, - .get_rxfh = ixgbe_get_rxfh, - .set_rxfh = 
ixgbe_set_rxfh, -#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ -#ifdef ETHTOOL_GEEE - .get_eee = ixgbe_get_eee, -#endif /* ETHTOOL_GEEE */ -#ifdef ETHTOOL_SEEE - .set_eee = ixgbe_set_eee, -#endif /* ETHTOOL_SEEE */ -}; - -#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ -void ixgbe_set_ethtool_ops(struct net_device *netdev) -{ -#ifndef ETHTOOL_OPS_COMPAT - netdev->ethtool_ops = &ixgbe_ethtool_ops; -#else - SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops); -#endif - -#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT - set_ethtool_ops_ext(netdev, &ixgbe_ethtool_ops_ext); -#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ -} -#endif /* SIOCETHTOOL */ - diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.c deleted file mode 100644 index abd12a9a7f75..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.c +++ /dev/null @@ -1,1043 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe.h" - -#if IS_ENABLED(CONFIG_FCOE) -#if IS_ENABLED(CONFIG_DCB) -#include "ixgbe_dcb_82599.h" -#endif /* CONFIG_DCB */ -#include -#include -#include -#include -#include -#include -#include - -/** - * ixgbe_fcoe_clear_ddp - clear the given ddp context - * @ddp - ptr to the ixgbe_fcoe_ddp - * - * Returns : none - * - */ -static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) -{ - ddp->len = 0; - ddp->err = 1; - ddp->udl = NULL; - ddp->udp = 0UL; - ddp->sgl = NULL; - ddp->sgc = 0; -} - -/** - * ixgbe_fcoe_ddp_put - free the ddp context for a given xid - * @netdev: the corresponding net_device - * @xid: the xid that corresponding ddp will be freed - * - * This is the implementation of net_device_ops.ndo_fcoe_ddp_done - * and it is expected to be called by ULD, i.e., FCP layer of libfc - * to release the corresponding ddp context when the I/O is done. 
- * - * Returns : data length already ddp-ed in bytes - */ -int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) -{ - int len = 0; - struct ixgbe_fcoe *fcoe; - struct ixgbe_adapter *adapter; - struct ixgbe_hw *hw; - struct ixgbe_fcoe_ddp *ddp; - u32 fcbuff; - - if (!netdev) - goto out_ddp_put; - - if (xid > netdev->fcoe_ddp_xid) - goto out_ddp_put; - - adapter = netdev_priv(netdev); - hw = &adapter->hw; - fcoe = &adapter->fcoe; - ddp = &fcoe->ddp[xid]; - if (!ddp->udl) - goto out_ddp_put; - - len = ddp->len; - /* if there an error, force to invalidate ddp context */ - if (ddp->err) { - switch (hw->mac.type) { - case ixgbe_mac_X550: - /* X550 does not require DDP FCoE lock */ - - IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0); - IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), - (xid | IXGBE_FCFLTRW_WE)); - - /* program FCBUFF */ - IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0); - - /* program FCDMARW */ - IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), - (xid | IXGBE_FCDMARW_WE)); - - /* read FCBUFF to check context invalidated */ - IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), - (xid | IXGBE_FCDMARW_RE)); - fcbuff = IXGBE_READ_REG(hw, - IXGBE_FCDDC(2, xid)); - break; - default: - /* other hardware requires DDP FCoE lock */ - spin_lock_bh(&fcoe->lock); - - IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0); - IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, - (xid | IXGBE_FCFLTRW_WE)); - IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0); - IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, - (xid | IXGBE_FCDMARW_WE)); - - /* read FCBUFF to check context invalidated */ - IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, - (xid | IXGBE_FCDMARW_RE)); - fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF); - - spin_unlock_bh(&fcoe->lock); - break; - } - - /* guaranteed to be invalidated after 100us */ - if (fcbuff & IXGBE_FCBUFF_VALID) - udelay(100); - } - if (ddp->sgl) - dma_unmap_sg(pci_dev_to_dev(adapter->pdev), ddp->sgl, ddp->sgc, - DMA_FROM_DEVICE); - if (ddp->pool) { - dma_pool_free(ddp->pool, ddp->udl, ddp->udp); - ddp->pool = NULL; - } - - 
ixgbe_fcoe_clear_ddp(ddp); - -out_ddp_put: - return len; -} - -/** - * ixgbe_fcoe_ddp_setup - called to set up ddp context - * @netdev: the corresponding net_device - * @xid: the exchange id requesting ddp - * @sgl: the scatter-gather list for this request - * @sgc: the number of scatter-gather items - * - * Returns : 1 for success and 0 for no ddp - */ -static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc, - int target_mode) -{ - struct ixgbe_adapter *adapter; - struct ixgbe_hw *hw; - struct ixgbe_fcoe *fcoe; - struct ixgbe_fcoe_ddp *ddp; - struct ixgbe_fcoe_ddp_pool *ddp_pool; - struct scatterlist *sg; - unsigned int i, j, dmacount; - unsigned int len; - static const unsigned int bufflen = IXGBE_FCBUFF_MIN; - unsigned int firstoff = 0; - unsigned int lastsize; - unsigned int thisoff = 0; - unsigned int thislen = 0; - u32 fcbuff, fcdmarw, fcfltrw, fcrxctl; - dma_addr_t addr = 0; - - if (!netdev || !sgl || !sgc) - return 0; - - adapter = netdev_priv(netdev); - if (xid > netdev->fcoe_ddp_xid) { - e_warn(drv, "xid=0x%x out-of-range\n", xid); - return 0; - } - - /* no DDP if we are already down or resetting */ - if (test_bit(__IXGBE_DOWN, &adapter->state) || - test_bit(__IXGBE_RESETTING, &adapter->state)) - return 0; - - fcoe = &adapter->fcoe; - ddp = &fcoe->ddp[xid]; - if (ddp->sgl) { - e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", - xid, ddp->sgl, ddp->sgc); - return 0; - } - ixgbe_fcoe_clear_ddp(ddp); - - - if (!fcoe->ddp_pool) { - e_warn(drv, "No ddp_pool resources allocated\n"); - return 0; - } - - ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); - if (!ddp_pool->pool) { - e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); - goto out_noddp; - } - - /* setup dma from scsi command sgl */ - dmacount = dma_map_sg(pci_dev_to_dev(adapter->pdev), sgl, sgc, DMA_FROM_DEVICE); - if (dmacount == 0) { - e_err(drv, "xid 0x%x DMA map error\n", xid); - goto out_noddp; - } - - /* alloc the udl from per cpu ddp 
pool */ - ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); - if (!ddp->udl) { - e_err(drv, "failed allocated ddp context\n"); - goto out_noddp_unmap; - } - ddp->pool = ddp_pool->pool; - ddp->sgl = sgl; - ddp->sgc = sgc; - - j = 0; - for_each_sg(sgl, sg, dmacount, i) { - addr = sg_dma_address(sg); - len = sg_dma_len(sg); - while (len) { - /* max number of buffers allowed in one DDP context */ - if (j >= IXGBE_BUFFCNT_MAX) { - ddp_pool->noddp++; - goto out_noddp_free; - } - - /* get the offset of length of current buffer */ - thisoff = addr & ((dma_addr_t)bufflen - 1); - thislen = min((bufflen - thisoff), len); - /* - * all but the 1st buffer (j == 0) - * must be aligned on bufflen - */ - if ((j != 0) && (thisoff)) - goto out_noddp_free; - /* - * all but the last buffer - * ((i == (dmacount - 1)) && (thislen == len)) - * must end at bufflen - */ - if (((i != (dmacount - 1)) || (thislen != len)) - && ((thislen + thisoff) != bufflen)) - goto out_noddp_free; - - ddp->udl[j] = (u64)(addr - thisoff); - /* only the first buffer may have none-zero offset */ - if (j == 0) - firstoff = thisoff; - len -= thislen; - addr += thislen; - j++; - } - } - /* only the last buffer may have non-full bufflen */ - lastsize = thisoff + thislen; - - /* - * lastsize can not be bufflen. - * If it is then adding another buffer with lastsize = 1. - * Since lastsize is 1 there will be no HW access to this buffer. 
- */ - if (lastsize == bufflen) { - if (j >= IXGBE_BUFFCNT_MAX) { - ddp_pool->noddp_ext_buff++; - goto out_noddp_free; - } - - ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); - j++; - lastsize = 1; - } - put_cpu(); - - fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); - fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); - fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); - /* Set WRCONTX bit to allow DDP for target */ - if (target_mode) - fcbuff |= (IXGBE_FCBUFF_WRCONTX); - fcbuff |= (IXGBE_FCBUFF_VALID); - - fcdmarw = xid; - fcdmarw |= IXGBE_FCDMARW_WE; - fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT); - - fcfltrw = xid; - fcfltrw |= IXGBE_FCFLTRW_WE; - - /* program DMA context */ - hw = &adapter->hw; - - /* turn on last frame indication for target mode as FCP_RSPtarget is - * supposed to send FCP_RSP when it is done. */ - if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) { - set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode); - fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL); - fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH; - IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl); - } - - switch (hw->mac.type) { - case ixgbe_mac_X550: - /* X550 does not require DDP lock */ - - IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid), - ddp->udp & DMA_BIT_MASK(32)); - IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32); - IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff); - IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw); - /* program filter context */ - IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID); - IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0); - IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw); - /* - * TBD: SMAC and FCID info not available with current - * netdev APIs, add code to pull that from skb later - * and then program that here before enabling DDP context. 
- */ - break; - default: - /* other devices require DDP lock with direct DDP context access */ - spin_lock_bh(&fcoe->lock); - - IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); - IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); - IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); - IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); - /* program filter context */ - IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); - IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); - IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); - - spin_unlock_bh(&fcoe->lock); - break; - } - - return 1; - -out_noddp_free: - dma_pool_free(ddp->pool, ddp->udl, ddp->udp); - ixgbe_fcoe_clear_ddp(ddp); - -out_noddp_unmap: - dma_unmap_sg(pci_dev_to_dev(adapter->pdev), sgl, sgc, DMA_FROM_DEVICE); -out_noddp: - put_cpu(); - return 0; -} - -/** - * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode - * @netdev: the corresponding net_device - * @xid: the exchange id requesting ddp - * @sgl: the scatter-gather list for this request - * @sgc: the number of scatter-gather items - * - * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup - * and is expected to be called from ULD, e.g., FCP layer of libfc - * to set up ddp for the corresponding xid of the given sglist for - * the corresponding I/O. 
- * - * Returns : 1 for success and 0 for no ddp - */ -int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc) -{ - return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); -} - -#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET -/** - * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode - * @netdev: the corresponding net_device - * @xid: the exchange id requesting ddp - * @sgl: the scatter-gather list for this request - * @sgc: the number of scatter-gather items - * - * This is the implementation of net_device_ops.ndo_fcoe_ddp_target - * and is expected to be called from ULD, e.g., FCP layer of libfc - * to set up ddp for the corresponding xid of the given sglist for - * the corresponding I/O. The DDP in target mode is a write I/O request - * from the initiator. - * - * Returns : 1 for success and 0 for no ddp - */ -int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc) -{ - return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); -} - -#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */ -/** - * ixgbe_fcoe_ddp - check ddp status and mark it done - * @adapter: ixgbe adapter - * @rx_desc: advanced rx descriptor - * @skb: the skb holding the received data - * - * This checks ddp status. - * - * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates - * not passing the skb to ULD, > 0 indicates is the length of data - * being ddped. 
- */ -int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - struct ixgbe_fcoe_ddp *ddp; - struct fc_frame_header *fh; - int rc = -EINVAL, ddp_max; - __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR); - __le32 ddp_err; - u32 fctl; - u16 xid; - - if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC)) - skb->ip_summed = CHECKSUM_NONE; - else - skb->ip_summed = CHECKSUM_UNNECESSARY; - - /* verify header contains at least the FCOE header */ - BUG_ON(skb_headlen(skb) < FCOE_HEADER_LEN); - - fh = (struct fc_frame_header *)(skb->data + sizeof(struct fcoe_hdr)); - - if (skb->protocol == htons(ETH_P_8021Q)) - fh = (struct fc_frame_header *)((char *)fh + VLAN_HLEN); - - fctl = ntoh24(fh->fh_f_ctl); - if (fctl & FC_FC_EX_CTX) - xid = ntohs(fh->fh_ox_id); - else - xid = ntohs(fh->fh_rx_id); - - ddp_max = IXGBE_FCOE_DDP_MAX; - /* X550 has different DDP Max limit */ - if (adapter->hw.mac.type == ixgbe_mac_X550) - ddp_max = IXGBE_FCOE_DDP_MAX_X550; - - if (xid >= ddp_max) - goto ddp_out; - - ddp = &fcoe->ddp[xid]; - if (!ddp->udl) - goto ddp_out; - - ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE | - IXGBE_RXDADV_ERR_FCERR); - if (ddp_err) - goto ddp_out; - - switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) { - /* return 0 to bypass going to ULD for DDPed data */ - case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP): - /* update length of DDPed data */ - ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); - rc = 0; - break; - /* unmap the sg list when FCPRSP is received */ - case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): - dma_unmap_sg(pci_dev_to_dev(adapter->pdev), ddp->sgl, - ddp->sgc, DMA_FROM_DEVICE); - ddp->err = ddp_err; - ddp->sgl = NULL; - ddp->sgc = 0; - /* fall through */ - /* if DDP length is present pass it through to ULD */ - case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP): - /* update length of DDPed data */ - ddp->len = 
le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); - if (ddp->len) - rc = ddp->len; - break; - /* no match will return as an error */ - case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH): - default: - break; - } - - /* In target mode, check the last data frame of the sequence. - * For DDP in target mode, data is already DDPed but the header - * indication of the last data frame ould allow is to tell if we - * got all the data and the ULP can send FCP_RSP back, as this is - * not a full fcoe frame, we fill the trailer here so it won't be - * dropped by the ULP stack. - */ - if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && - (fctl & FC_FC_END_SEQ)) { - struct fcoe_crc_eof *crc; - skb_linearize(skb); - crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); - crc->fcoe_eof = FC_EOF_T; - } -ddp_out: - return rc; -} - -/** - * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO) - * @tx_ring: tx desc ring - * @first: first tx_buffer structure containing skb, tx_flags, and protocol - * @hdr_len: hdr_len to be returned - * - * This sets up large send offload for FCoE - * - * Returns : 0 indicates success, < 0 for error - */ -int ixgbe_fso(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *first, - u8 *hdr_len) -{ - struct sk_buff *skb = first->skb; - struct fc_frame_header *fh; - u32 vlan_macip_lens; - u32 fcoe_sof_eof = 0; - u32 mss_l4len_idx; - u8 sof, eof; - u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE; - -#ifdef NETIF_F_FSO - if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) { - dev_err(tx_ring->dev, "Wrong gso type %d:expecting " - "SKB_GSO_FCOE\n", skb_shinfo(skb)->gso_type); - return -EINVAL; - } - -#endif - /* resets the header to point fcoe/fc */ - skb_set_network_header(skb, skb->mac_len); - skb_set_transport_header(skb, skb->mac_len + - sizeof(struct fcoe_hdr)); - - /* sets up SOF and ORIS */ - sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; - switch (sof) { - case FC_SOF_I2: - fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS; - break; - case FC_SOF_I3: - 
fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF | - IXGBE_ADVTXD_FCOEF_ORIS; - break; - case FC_SOF_N2: - break; - case FC_SOF_N3: - fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF; - break; - default: - dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof); - return -EINVAL; - } - - /* the first byte of the last dword is EOF */ - skb_copy_bits(skb, skb->len - 4, &eof, 1); - /* sets up EOF and ORIE */ - switch (eof) { - case FC_EOF_N: - fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N; - break; - case FC_EOF_T: - /* lso needs ORIE */ - if (skb_is_gso(skb)) - fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N | - IXGBE_ADVTXD_FCOEF_ORIE; - else - fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T; - break; - case FC_EOF_NI: - fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI; - break; - case FC_EOF_A: - fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; - break; - default: - dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof); - return -EINVAL; - } - - /* sets up PARINC indicating data offset */ - fh = (struct fc_frame_header *)skb_transport_header(skb); - if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) - fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC; - - /* include trailer in headlen as it is replicated per frame */ - *hdr_len = sizeof(struct fcoe_crc_eof); - - /* hdr_len includes fc_hdr if FCoE LSO is enabled */ - if (skb_is_gso(skb)) { - *hdr_len += skb_transport_offset(skb) + - sizeof(struct fc_frame_header); - /* update gso_segs and bytecount */ - first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, - skb_shinfo(skb)->gso_size); - first->bytecount += (first->gso_segs - 1) * *hdr_len; - first->tx_flags |= IXGBE_TX_FLAGS_TSO; - /* Hardware expects L4T to be RSV for FCoE TSO */ - type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV; - } - - /* set flag indicating FCOE to ixgbe_tx_map call */ - first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC; - - /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */ - mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; - - /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ - vlan_macip_lens = 
skb_transport_offset(skb) + - sizeof(struct fc_frame_header); - vlan_macip_lens |= (skb_transport_offset(skb) - 4) - << IXGBE_ADVTXD_MACLEN_SHIFT; - vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - - /* write context desc */ - ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, - type_tucmd, mss_l4len_idx); - - return 0; -} - -static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu) -{ - struct ixgbe_fcoe_ddp_pool *ddp_pool; - - ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); - dma_pool_destroy(ddp_pool->pool); - ddp_pool->pool = NULL; -} - -static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, - struct device *dev, - unsigned int cpu) -{ - struct ixgbe_fcoe_ddp_pool *ddp_pool; - struct dma_pool *pool; - char pool_name[32]; - - snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu); - - pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX, - IXGBE_FCPTR_ALIGN, PAGE_SIZE); - if (!pool) - return -ENOMEM; - - ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); - ddp_pool->pool = pool; - ddp_pool->noddp = 0; - ddp_pool->noddp_ext_buff = 0; - - return 0; -} - -/** - * ixgbe_configure_fcoe - configures registers for fcoe at start - * @adapter: ptr to ixgbe adapter - * - * This sets up FCoE related registers - * - * Returns : none - */ -void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; - struct ixgbe_hw *hw = &adapter->hw; - int i, fcoe_i; - u32 fcoe_q, fcoe_q_h = 0; - u32 etqf; - int fcreta_size; - - /* Minimal funcionality for FCoE requires at least CRC offloads */ - if (!(adapter->netdev->features & NETIF_F_FCOE_CRC)) - return; - - /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */ - etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN; - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - etqf |= IXGBE_ETQF_POOL_ENABLE; - etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; - } - IXGBE_WRITE_REG(hw, 
IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf); - IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); - - /* leave remaining registers unconfigued if FCoE is disabled */ - if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) - return; - - /* Use one or more Rx queues for FCoE by redirection table */ - fcreta_size = IXGBE_FCRETA_SIZE; - if (adapter->hw.mac.type == ixgbe_mac_X550) - fcreta_size = IXGBE_FCRETA_SIZE_X550; - - for (i = 0; i < fcreta_size; i++) { - if (adapter->hw.mac.type == ixgbe_mac_X550) { - int fcoe_i_h = fcoe->offset + ((i + fcreta_size) % - fcoe->indices); - fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx; - fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) & - IXGBE_FCRETA_ENTRY_HIGH_MASK; - } - fcoe_i = fcoe->offset + (i % fcoe->indices); - fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; - fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; - fcoe_q |= fcoe_q_h; - IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); - } - IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); - - /* Enable L2 EtherType filter for FIP */ - etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN; - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - etqf |= IXGBE_ETQF_POOL_ENABLE; - etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; - } - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf); - - /* Send FIP frames to the first FCoE queue */ - fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx; - IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), - IXGBE_ETQS_QUEUE_EN | - (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); - - /* Configure FCoE Rx control */ - IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, - IXGBE_FCRXCTRL_FCCRCBO | - (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); -} - -/** - * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources - * @adapter : ixgbe adapter - * - * Cleans up outstanding ddp context resources - * - * Returns : none - */ -void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) -{ - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - int cpu, i, ddp_max; - - /* do nothing 
if no DDP pools were allocated */ - if (!fcoe->ddp_pool) - return; - - ddp_max = IXGBE_FCOE_DDP_MAX; - /* X550 has different DDP Max limit */ - if (adapter->hw.mac.type == ixgbe_mac_X550) - ddp_max = IXGBE_FCOE_DDP_MAX_X550; - - for (i = 0; i < ddp_max; i++) - ixgbe_fcoe_ddp_put(adapter->netdev, i); - - for_each_possible_cpu(cpu) - ixgbe_fcoe_dma_pool_free(fcoe, cpu); - - dma_unmap_single(pci_dev_to_dev(adapter->pdev), - fcoe->extra_ddp_buffer_dma, - IXGBE_FCBUFF_MIN, - DMA_FROM_DEVICE); - kfree(fcoe->extra_ddp_buffer); - - fcoe->extra_ddp_buffer = NULL; - fcoe->extra_ddp_buffer_dma = 0; -} - -/** - * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources - * @adapter: ixgbe adapter - * - * Sets up ddp context resouces - * - * Returns : 0 indicates success or -EINVAL on failure - */ -int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) -{ - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - struct device *dev = pci_dev_to_dev(adapter->pdev); - void *buffer; - dma_addr_t dma; - unsigned int cpu; - - /* do nothing if no DDP pools were allocated */ - if (!fcoe->ddp_pool) - return 0; - - /* Extra buffer to be shared by all DDPs for HW work around */ - buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); - if (!buffer) { - e_err(drv, "failed to allocate extra DDP buffer\n"); - return -ENOMEM; - } - - dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, dma)) { - e_err(drv, "failed to map extra DDP buffer\n"); - kfree(buffer); - return -ENOMEM; - } - - fcoe->extra_ddp_buffer = buffer; - fcoe->extra_ddp_buffer_dma = dma; - - /* allocate pci pool for each cpu */ - for_each_possible_cpu(cpu) { - int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu); - if (!err) - continue; - - e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); - ixgbe_free_fcoe_ddp_resources(adapter); - return -ENOMEM; - } - - return 0; -} - -#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE -int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) -#else 
-static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) -#endif -{ - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - - if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) - return -EINVAL; - - fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool); - - if (!fcoe->ddp_pool) { - e_err(drv, "failed to allocate percpu DDP resources\n"); - return -ENOMEM; - } - - adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; - - /* X550 has different DDP Max limit */ - if (adapter->hw.mac.type == ixgbe_mac_X550) - adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1; - - return 0; -} - -#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE -void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) -#else -static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) -#endif -{ - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - - adapter->netdev->fcoe_ddp_xid = 0; - - if (!fcoe->ddp_pool) - return; - - free_percpu(fcoe->ddp_pool); - fcoe->ddp_pool = NULL; -} - -#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE -/** - * ixgbe_fcoe_enable - turn on FCoE offload feature - * @netdev: the corresponding netdev - * - * Turns on FCoE offload feature in 82599. 
- * - * Returns : 0 indicates success or -EINVAL on failure - */ -int ixgbe_fcoe_enable(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - - atomic_inc(&fcoe->refcnt); - - if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) - return -EINVAL; - - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) - return -EINVAL; - - e_info(drv, "Enabling FCoE offload features.\n"); - - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n"); - - if (netif_running(netdev)) - netdev->netdev_ops->ndo_stop(netdev); - - /* Allocate per CPU memory to track DDP pools */ - ixgbe_fcoe_ddp_enable(adapter); - - /* enable FCoE and notify stack */ - adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; - netdev->features |= NETIF_F_FCOE_MTU; - netdev_features_change(netdev); - - /* release existing queues and reallocate them */ - ixgbe_clear_interrupt_scheme(adapter); - ixgbe_init_interrupt_scheme(adapter); - - if (netif_running(netdev)) - netdev->netdev_ops->ndo_open(netdev); - - return 0; -} - -/** - * ixgbe_fcoe_disable - turn off FCoE offload feature - * @netdev: the corresponding netdev - * - * Turns off FCoE offload feature in 82599. 
- * - * Returns : 0 indicates success or -EINVAL on failure - */ -int ixgbe_fcoe_disable(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - if (!atomic_dec_and_test(&adapter->fcoe.refcnt)) - return -EINVAL; - - if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) - return -EINVAL; - - e_info(drv, "Disabling FCoE offload features.\n"); - if (netif_running(netdev)) - netdev->netdev_ops->ndo_stop(netdev); - - /* Free per CPU memory to track DDP pools */ - ixgbe_fcoe_ddp_disable(adapter); - - /* disable FCoE and notify stack */ - adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; - netdev->features &= ~NETIF_F_FCOE_MTU; - - netdev_features_change(netdev); - - /* release existing queues and reallocate them */ - ixgbe_clear_interrupt_scheme(adapter); - ixgbe_init_interrupt_scheme(adapter); - - if (netif_running(netdev)) - netdev->netdev_ops->ndo_open(netdev); - - return 0; -} -#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ - -#if IS_ENABLED(CONFIG_DCB) -#ifdef HAVE_DCBNL_OPS_GETAPP -/** - * ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE - * @netdev: the corresponding net_device - * - * Finds out the corresponding user priority bitmap from the current - * traffic class that FCoE belongs to. Returns 0 as the invalid user - * priority bitmap to indicate an error. - * - * Returns : 802.1p user priority bitmap for FCoE - */ -u8 ixgbe_fcoe_getapp(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - return 1 << adapter->fcoe.up; -} -#endif /* HAVE_DCBNL_OPS_GETAPP */ -#endif /* CONFIG_DCB */ -#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN -/** - * ixgbe_fcoe_get_wwn - get world wide name for the node or the port - * @netdev : ixgbe adapter - * @wwn : the world wide name - * @type: the type of world wide name - * - * Returns the node or port world wide name if both the prefix and the san - * mac address are valid, then the wwn is formed based on the NAA-2 for - * IEEE Extended name identifier (ref. 
to T10 FC-LS Spec., Sec. 15.3). - * - * Returns : 0 on success - */ -int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) -{ - int rc = -EINVAL; - u16 prefix = 0xffff; - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_mac_info *mac = &adapter->hw.mac; - - switch (type) { - case NETDEV_FCOE_WWNN: - prefix = mac->wwnn_prefix; - break; - case NETDEV_FCOE_WWPN: - prefix = mac->wwpn_prefix; - break; - default: - break; - } - - if ((prefix != 0xffff) && - is_valid_ether_addr(mac->san_addr)) { - *wwn = ((u64) prefix << 48) | - ((u64) mac->san_addr[0] << 40) | - ((u64) mac->san_addr[1] << 32) | - ((u64) mac->san_addr[2] << 24) | - ((u64) mac->san_addr[3] << 16) | - ((u64) mac->san_addr[4] << 8) | - ((u64) mac->san_addr[5]); - rc = 0; - } - return rc; -} - -#endif /* HAVE_NETDEV_OPS_FCOE_GETWWN */ -/** - * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to - * @adapter - pointer to the device adapter structure - * - * Return : TC that FCoE is mapped to - */ -u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter) -{ - return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); -} -#endif /* CONFIG_FCOE */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.h deleted file mode 100644 index 08de8d3dfc95..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_fcoe.h +++ /dev/null @@ -1,93 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. 
- - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_FCOE_H_ -#define _IXGBE_FCOE_H_ - -#if IS_ENABLED(CONFIG_FCOE) - -#include -#include - -/* shift bits within STAT fo FCSTAT */ -#define IXGBE_RXDADV_FCSTAT_SHIFT 4 - -/* ddp user buffer */ -#define IXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */ -#define IXGBE_FCPTR_ALIGN 16 -#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t)) -#define IXGBE_FCBUFF_4KB 0x0 -#define IXGBE_FCBUFF_8KB 0x1 -#define IXGBE_FCBUFF_16KB 0x2 -#define IXGBE_FCBUFF_64KB 0x3 -#define IXGBE_FCBUFF_MAX 65536 /* 64KB max */ -#define IXGBE_FCBUFF_MIN 4096 /* 4KB min */ -#define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */ -#define IXGBE_FCOE_DDP_MAX_X550 2048 /* 11 bits xid */ - -/* Default user priority to use for FCoE */ -#define IXGBE_FCOE_DEFUP 3 - -/* fcerr */ -#define IXGBE_FCERR_BADCRC 0x00100000 -#define IXGBE_FCERR_EOFSOF 0x00200000 -#define IXGBE_FCERR_NOFIRST 0x00300000 -#define IXGBE_FCERR_OOOSEQ 0x00400000 -#define IXGBE_FCERR_NODMA 0x00500000 -#define IXGBE_FCERR_PKTLOST 0x00600000 - -/* FCoE DDP for target mode */ -#define __IXGBE_FCOE_TARGET 1 - -struct ixgbe_fcoe_ddp { - int len; - u32 err; - unsigned int sgc; - struct scatterlist *sgl; - dma_addr_t udp; - u64 *udl; - struct dma_pool *pool; -}; - -/* per cpu variables */ -struct ixgbe_fcoe_ddp_pool { - struct dma_pool *pool; - u64 noddp; - u64 noddp_ext_buff; -}; - -struct ixgbe_fcoe { - struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool; - atomic_t refcnt; - 
spinlock_t lock; - struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX_X550]; - void *extra_ddp_buffer; - dma_addr_t extra_ddp_buffer_dma; - unsigned long mode; - u8 up; - u8 up_set; -}; -#endif /* CONFIG_FCOE */ - -#endif /* _IXGBE_FCOE_H */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.c deleted file mode 100644 index 278646355210..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.c +++ /dev/null @@ -1,210 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe_hv_vf.h" - -/** - * Hyper-V variant - just a stub. 
- */ -s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, ixgbe_mc_addr_itr next, - bool clear) -{ - UNREFERENCED_5PARAMETER(hw, mc_addr_list, mc_addr_count, next, clear); - - return IXGBE_ERR_FEATURE_NOT_SUPPORTED; -} - -/** - * Hyper-V variant - just a stub. - */ -s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) -{ - UNREFERENCED_2PARAMETER(hw, xcast_mode); - - return IXGBE_ERR_FEATURE_NOT_SUPPORTED; -} - -/** - * Hyper-V variant - just a stub. - */ -s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on, bool vlvf_bypass) -{ - UNREFERENCED_5PARAMETER(hw, vlan, vind, vlan_on, vlvf_bypass); - - return IXGBE_ERR_FEATURE_NOT_SUPPORTED; -} - -s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) -{ - UNREFERENCED_3PARAMETER(hw, index, addr); - - return IXGBE_ERR_FEATURE_NOT_SUPPORTED; -} - -/** - * Hyper-V variant; there is no mailbox communication. - */ -s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up, - bool autoneg_wait_to_complete) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - struct ixgbe_mac_info *mac = &hw->mac; - u32 links_reg; - UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); - - /* If we were hit with a reset drop the link */ - if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) - mac->get_link_status = true; - - if (!mac->get_link_status) - goto out; - - /* if link status is down no point in checking to see if pf is up */ - links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); - if (!(links_reg & IXGBE_LINKS_UP)) - goto out; - - /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs - * before the link status is correct - */ - if (mac->type == ixgbe_mac_82599_vf) { - int i; - - for (i = 0; i < 5; i++) { - udelay(100); - links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); - - if (!(links_reg & IXGBE_LINKS_UP)) - goto out; - } - } - - switch (links_reg & IXGBE_LINKS_SPEED_82599) { - case 
IXGBE_LINKS_SPEED_10G_82599: - *speed = IXGBE_LINK_SPEED_10GB_FULL; - if (hw->mac.type >= ixgbe_mac_X550) { - if (links_reg & IXGBE_LINKS_SPEED_NON_STD) - *speed = IXGBE_LINK_SPEED_2_5GB_FULL; - } - break; - case IXGBE_LINKS_SPEED_1G_82599: - *speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - case IXGBE_LINKS_SPEED_100_82599: - *speed = IXGBE_LINK_SPEED_100_FULL; - if (hw->mac.type == ixgbe_mac_X550) { - if (links_reg & IXGBE_LINKS_SPEED_NON_STD) - *speed = IXGBE_LINK_SPEED_5GB_FULL; - } - break; - case IXGBE_LINKS_SPEED_10_X550EM_A: - *speed = IXGBE_LINK_SPEED_UNKNOWN; - /* Reserved for pre-x550 devices */ - if (hw->mac.type >= ixgbe_mac_X550) - *speed = IXGBE_LINK_SPEED_10_FULL; - break; - default: - *speed = IXGBE_LINK_SPEED_UNKNOWN; - } - - /* if we passed all the tests above then the link is up and we no - * longer need to check for link - */ - mac->get_link_status = false; - -out: - *link_up = !mac->get_link_status; - return IXGBE_SUCCESS; -} - -/** - * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length - * @hw: pointer to the HW structure - * @max_size: value to assign to max frame size - * Hyper-V variant. - **/ -s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) -{ - u32 reg; - - /* If we are on Hyper-V, we implement this functionality - * differently. - */ - reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0)); - /* CRC == 4 */ - reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN); - IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg); - - return IXGBE_SUCCESS; -} - -/** - * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version - * @hw: pointer to the HW structure - * @api: integer containing requested API version - * Hyper-V version - only ixgbe_mbox_api_10 supported. 
- **/ -int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) -{ - UNREFERENCED_1PARAMETER(hw); - - /* Hyper-V only supports api version ixgbe_mbox_api_10 */ - if (api != ixgbe_mbox_api_10) - return IXGBE_ERR_INVALID_ARGUMENT; - - return IXGBE_SUCCESS; -} - -/** - * ixgbevf_hv_init_ops_vf - Initialize the pointers for vf - * @hw: pointer to hardware structure - * - * This will assign function pointers, adapter-specific functions can - * override the assignment of generic function pointers by assigning - * their own adapter-specific function pointers. - * Does not touch the hardware. - **/ -s32 ixgbevf_hv_init_ops_vf(struct ixgbe_hw *hw) -{ - /* Set defaults for VF then override applicable Hyper-V - * specific functions - */ - ixgbe_init_ops_vf(hw); - - hw->mac.ops.reset_hw = ixgbevf_hv_reset_hw_vf; - hw->mac.ops.check_link = ixgbevf_hv_check_mac_link_vf; - hw->mac.ops.negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf; - hw->mac.ops.set_rar = ixgbevf_hv_set_rar_vf; - hw->mac.ops.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf; - hw->mac.ops.update_xcast_mode = ixgbevf_hv_update_xcast_mode; - hw->mac.ops.set_uc_addr = ixgbevf_hv_set_uc_addr_vf; - hw->mac.ops.set_vfta = ixgbevf_hv_set_vfta_vf; - hw->mac.ops.set_rlpml = ixgbevf_hv_set_rlpml_vf; - - return IXGBE_SUCCESS; -} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.h deleted file mode 100644 index 387ed1f36601..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_hv_vf.h +++ /dev/null @@ -1,51 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. 
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_HV_VF_H_ -#define _IXGBE_HV_VF_H_ - -/* On Hyper-V, to reset, we need to read from this offset - * from the PCI config space. This is the mechanism used on - * Hyper-V to support PF/VF communication. - */ -#define IXGBE_HV_RESET_OFFSET 0x201 - -#include "ixgbe_vf.h" - -s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up, bool autoneg_wait_to_complete); -s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr); -s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, ixgbe_mc_addr_itr, - bool clear); -s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode); -s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on, bool vlvf_bypass); -s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size); -int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api); - -extern s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw); -extern s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, - u32 vmdq, u32 enable_addr); -#endif /* _IXGBE_HV_VF_H_ */ diff --git 
a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_lib.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_lib.c deleted file mode 100644 index 2a0d91dd454c..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_lib.c +++ /dev/null @@ -1,1311 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe.h" -#include "ixgbe_sriov.h" - -#ifdef HAVE_TX_MQ -/** - * ixgbe_cache_ring_dcb_vmdq - Descriptor ring to register mapping for VMDq - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for VMDq to the assigned rings. It - * will also try to cache the proper offsets if RSS/FCoE are enabled along - * with VMDq. 
- * - **/ -static bool ixgbe_cache_ring_dcb_vmdq(struct ixgbe_adapter *adapter) -{ -#if IS_ENABLED(CONFIG_FCOE) - struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; -#endif /* CONFIG_FCOE */ - struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; - int i; - u16 reg_idx; - u8 tcs = netdev_get_num_tc(adapter->netdev); - - /* verify we have DCB enabled before proceeding */ - if (tcs <= 1) - return false; - - /* verify we have VMDq enabled before proceeding */ - if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) - return false; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - /* - * The bits on the 82598 are reversed compared to the other - * adapters. The DCB bits are the higher order bits and the - * lower bits belong to the VMDq pool. In order to sort - * this out we have to swap the bits to get the correct layout - */ - for (i = 0; i < adapter->num_rx_queues; i++) { - reg_idx = ((i >> 3) | (i << 3)) & 0x3F; - adapter->rx_ring[i]->reg_idx = reg_idx; - } - for (i = 0; i < adapter->num_tx_queues; i++) { - reg_idx = ((i >> 4) | (i << 2)) & 0x1F; - adapter->tx_ring[i]->reg_idx = reg_idx; - } - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - /* start at VMDq register offset for SR-IOV enabled setups */ - reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); - for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { - /* If we are greater than indices move to next pool */ - if ((reg_idx & ~vmdq->mask) >= tcs) - reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); - adapter->rx_ring[i]->reg_idx = reg_idx; - } - - reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); - for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { - /* If we are greater than indices move to next pool */ - if ((reg_idx & ~vmdq->mask) >= tcs) - reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); - adapter->tx_ring[i]->reg_idx = reg_idx; - } - - break; - default: - break; - 
} - -#if IS_ENABLED(CONFIG_FCOE) - /* nothing to do if FCoE is disabled */ - if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) - return true; - - /* The work is already done if the FCoE ring is shared */ - if (fcoe->offset < tcs) - return true; - - /* The FCoE rings exist separately, we need to move their reg_idx */ - if (fcoe->indices) { - u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); - u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter); - - reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; - for (i = fcoe->offset; i < adapter->num_rx_queues; i++) { - reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; - adapter->rx_ring[i]->reg_idx = reg_idx; - reg_idx++; - } - - reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; - for (i = fcoe->offset; i < adapter->num_tx_queues; i++) { - reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; - adapter->tx_ring[i]->reg_idx = reg_idx; - reg_idx++; - } - } -#endif /* CONFIG_FCOE */ - - return true; -} - -/* ixgbe_get_first_reg_idx - Return first register index associated with ring */ -static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, - unsigned int *tx, unsigned int *rx) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct net_device *dev = adapter->netdev; - u8 num_tcs = netdev_get_num_tc(dev); - - *tx = 0; - *rx = 0; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - /* TxQs/TC: 4 RxQs/TC: 8 */ - *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */ - *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */ - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: -case ixgbe_mac_X550EM_a: - if (num_tcs > 4) { - /* - * TCs : TC0/1 TC2/3 TC4-7 - * TxQs/TC: 32 16 8 - * RxQs/TC: 16 16 16 - */ - *rx = tc << 4; - if (tc < 3) - *tx = tc << 5; /* 0, 32, 64 */ - else if (tc < 5) - *tx = (tc + 2) << 4; /* 80, 96 */ - else - *tx = (tc + 8) << 3; /* 104, 112, 120 */ - } else { - /* - * TCs : TC0 TC1 TC2/3 - * TxQs/TC: 64 32 16 - * RxQs/TC: 32 32 32 - */ - 
*rx = tc << 5; - if (tc < 2) - *tx = tc << 6; /* 0, 64 */ - else - *tx = (tc + 4) << 4; /* 96, 112 */ - } - default: - break; - } -} - -/** - * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for DCB to the assigned rings. - * - **/ -static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) -{ - int tc, offset, rss_i, i; - unsigned int tx_idx, rx_idx; - struct net_device *dev = adapter->netdev; - u8 num_tcs = netdev_get_num_tc(dev); - - if (num_tcs <= 1) - return false; - - rss_i = adapter->ring_feature[RING_F_RSS].indices; - - for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { - ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx); - for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { - adapter->tx_ring[offset + i]->reg_idx = tx_idx; - adapter->rx_ring[offset + i]->reg_idx = rx_idx; - adapter->tx_ring[offset + i]->dcb_tc = tc; - adapter->rx_ring[offset + i]->dcb_tc = tc; - } - } - - return true; -} - -#endif /* HAVE_TX_MQ */ -/** - * ixgbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for VMDq to the assigned rings. It - * will also try to cache the proper offsets if RSS/FCoE/SRIOV are enabled along - * with VMDq. 
- * - **/ -static bool ixgbe_cache_ring_vmdq(struct ixgbe_adapter *adapter) -{ -#if IS_ENABLED(CONFIG_FCOE) - struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; -#endif /* CONFIG_FCOE */ - struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; - struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; - int i; - u16 reg_idx; - - /* only proceed if VMDq is enabled */ - if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) - return false; - - /* start at VMDq register offset for SR-IOV enabled setups */ - reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); - for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { -#if IS_ENABLED(CONFIG_FCOE) - /* Allow first FCoE queue to be mapped as RSS */ - if (fcoe->offset && (i > fcoe->offset)) - break; -#endif /* CONFIG_FCOE */ - /* If we are greater than indices move to next pool */ - if ((reg_idx & ~vmdq->mask) >= rss->indices) - reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); - adapter->rx_ring[i]->reg_idx = reg_idx; - } - -#if IS_ENABLED(CONFIG_FCOE) - /* FCoE uses a linear block of queues so just assigning 1:1 */ - for (; i < adapter->num_rx_queues; i++, reg_idx++) - adapter->rx_ring[i]->reg_idx = reg_idx; -#endif /* CONFIG_FCOE */ - - reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); - for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { -#if IS_ENABLED(CONFIG_FCOE) - /* Allow first FCoE queue to be mapped as RSS */ - if (fcoe->offset && (i > fcoe->offset)) - break; -#endif - /* If we are greater than indices move to next pool */ - if ((reg_idx & rss->mask) >= rss->indices) - reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); - adapter->tx_ring[i]->reg_idx = reg_idx; - } - -#if IS_ENABLED(CONFIG_FCOE) - /* FCoE uses a linear block of queues so just assigning 1:1 */ - for (; i < adapter->num_tx_queues; i++, reg_idx++) - adapter->tx_ring[i]->reg_idx = reg_idx; -#endif /* CONFIG_FCOE */ - - return true; -} - -/** - * ixgbe_cache_ring_rss - Descriptor ring to register mapping 
for RSS - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. - * - **/ -static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->reg_idx = i; - - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->reg_idx = i; - - return true; -} - -/** - * ixgbe_cache_ring_register - Descriptor ring to register mapping - * @adapter: board private structure to initialize - * - * Once we know the feature-set enabled for the device, we'll cache - * the register offset the descriptor ring is assigned to. - * - * Note, the order the various feature calls is important. It must start with - * the "most" features enabled at the same time, then trickle down to the - * least amount of features turned on at once. - **/ -static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) -{ -#ifdef HAVE_TX_MQ - if (ixgbe_cache_ring_dcb_vmdq(adapter)) - return; - - if (ixgbe_cache_ring_dcb(adapter)) - return; - -#endif - if (ixgbe_cache_ring_vmdq(adapter)) - return; - - ixgbe_cache_ring_rss(adapter); -} - -#define IXGBE_RSS_64Q_MASK 0x3F -#define IXGBE_RSS_16Q_MASK 0xF -#define IXGBE_RSS_8Q_MASK 0x7 -#define IXGBE_RSS_4Q_MASK 0x3 -#define IXGBE_RSS_2Q_MASK 0x1 -#define IXGBE_RSS_DISABLED_MASK 0x0 - -#ifdef HAVE_TX_MQ -/** - * ixgbe_set_dcb_vmdq_queues: Allocate queues for VMDq devices w/ DCB - * @adapter: board private structure to initialize - * - * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues - * and VM pools where appropriate. Also assign queues based on DCB - * priorities and map accordingly.. 
- * - **/ -static bool ixgbe_set_dcb_vmdq_queues(struct ixgbe_adapter *adapter) -{ - int i; - u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; - u16 vmdq_m = 0; -#if IS_ENABLED(CONFIG_FCOE) - u16 fcoe_i = 0; -#endif - u8 tcs = netdev_get_num_tc(adapter->netdev); - - /* verify we have DCB enabled before proceeding */ - if (tcs <= 1) - return false; - - /* verify we have VMDq enabled before proceeding */ - if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) - return false; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - /* 4 pools w/ 8TC per pool */ - vmdq_i = min_t(u16, vmdq_i, 4); - vmdq_m = 0x7; - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - /* Add starting offset to total pool count */ - vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; - - /* 16 pools w/ 8 TC per pool */ - if (tcs > 4) { - vmdq_i = min_t(u16, vmdq_i, 16); - vmdq_m = IXGBE_82599_VMDQ_8Q_MASK; - /* 32 pools w/ 4 TC per pool */ - } else { - vmdq_i = min_t(u16, vmdq_i, 32); - vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; - } - -#if IS_ENABLED(CONFIG_FCOE) - /* queues in the remaining pools are available for FCoE */ - fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; -#endif /* CONFIG_FCOE */ - - /* remove the starting offset from the pool count */ - vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; - - break; - default: - /* unknown hardware, only support one pool w/ one queue */ - vmdq_i = 1; - tcs = 1; - break; - } - - /* save features for later use */ - adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; - adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; - - /* - * We do not support DCB, VMDq, and RSS all simultaneously - * so we will disable RSS since it is the lowest priority - */ - adapter->ring_feature[RING_F_RSS].indices = 1; - adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK; - - adapter->num_rx_pools = vmdq_i; - adapter->num_rx_queues_per_pool = tcs; - - 
adapter->num_tx_queues = vmdq_i * tcs; - adapter->num_rx_queues = vmdq_i * tcs; - - /* disable ATR as it is not supported when VMDq is enabled */ - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - -#if IS_ENABLED(CONFIG_FCOE) - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - struct ixgbe_ring_feature *fcoe; - - fcoe = &adapter->ring_feature[RING_F_FCOE]; - - /* limit ourselves based on feature limits */ - fcoe_i = min_t(u16, fcoe_i, fcoe->limit); - - if (fcoe_i) { - /* alloc queues for FCoE separately */ - fcoe->indices = fcoe_i; - fcoe->offset = vmdq_i * tcs; - - /* add queues to adapter */ - adapter->num_tx_queues += fcoe_i; - adapter->num_rx_queues += fcoe_i; - } else if (tcs > 1) { - /* use queue belonging to FcoE TC */ - fcoe->indices = 1; - fcoe->offset = ixgbe_fcoe_get_tc(adapter); - } else { - adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; - - fcoe->indices = 0; - fcoe->offset = 0; - } - } -#endif /* CONFIG_FCOE */ - - /* configure TC to queue mapping */ - for (i = 0; i < tcs; i++) - netdev_set_tc_queue(adapter->netdev, i, 1, i); - - return true; -} - -/** - * ixgbe_set_dcb_queues: Allocate queues for a DCB-enabled device - * @adapter: board private structure to initialize - * - * When DCB (Data Center Bridging) is enabled, allocate queues for - * each traffic class. If multiqueue isn't available,then abort DCB - * initialization. - * - * This function handles all combinations of DCB, RSS, and FCoE. 
- * - **/ -static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) -{ - struct net_device *dev = adapter->netdev; - struct ixgbe_ring_feature *f; - int rss_i, rss_m, i; - int tcs; - - /* Map queue offset and counts onto allocated tx queues */ - tcs = netdev_get_num_tc(dev); - - if (tcs <= 1) - return false; - - /* determine the upper limit for our current DCB mode */ -#ifndef HAVE_NETDEV_SELECT_QUEUE - rss_i = adapter->indices; -#else - rss_i = dev->num_tx_queues / tcs; -#endif - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { - /* 8 TC w/ 4 queues per TC */ - rss_i = min_t(u16, rss_i, 4); - rss_m = IXGBE_RSS_4Q_MASK; - } else if (tcs > 4) { - /* 8 TC w/ 8 queues per TC */ - rss_i = min_t(u16, rss_i, 8); - rss_m = IXGBE_RSS_8Q_MASK; - } else { - /* 4 TC w/ 16 queues per TC */ - rss_i = min_t(u16, rss_i, 16); - rss_m = IXGBE_RSS_16Q_MASK; - } - - /* set RSS mask and indices */ - f = &adapter->ring_feature[RING_F_RSS]; - rss_i = min_t(u16, rss_i, f->limit); - f->indices = rss_i; - f->mask = rss_m; - - /* disable ATR as it is not supported when DCB is enabled */ - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - -#if IS_ENABLED(CONFIG_FCOE) - /* - * FCoE enabled queues require special configuration indexed - * by feature specific indices and mask. Here we map FCoE - * indices onto the DCB queue pairs allowing FCoE to own - * configuration later. 
- */ - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - u8 tc = ixgbe_fcoe_get_tc(adapter); - - f = &adapter->ring_feature[RING_F_FCOE]; - f->indices = min_t(u16, rss_i, f->limit); - f->offset = rss_i * tc; - } -#endif /* CONFIG_FCOE */ - - for (i = 0; i < tcs; i++) - netdev_set_tc_queue(dev, i, rss_i, rss_i * i); - - adapter->num_tx_queues = rss_i * tcs; - adapter->num_rx_queues = rss_i * tcs; - - return true; -} - -#endif -/** - * ixgbe_set_vmdq_queues: Allocate queues for VMDq devices - * @adapter: board private structure to initialize - * - * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues - * and VM pools where appropriate. If RSS is available, then also try and - * enable RSS and map accordingly. - * - **/ -static bool ixgbe_set_vmdq_queues(struct ixgbe_adapter *adapter) -{ - u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; - u16 vmdq_m = 0; - u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; - u16 rss_m = IXGBE_RSS_DISABLED_MASK; -#if IS_ENABLED(CONFIG_FCOE) - u16 fcoe_i = 0; -#endif - - /* only proceed if VMDq is enabled */ - if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) - return false; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - vmdq_i = min_t(u16, vmdq_i, 16); - /* 16 pool mode with 1 queue per pool */ - if ((vmdq_i > 4) || (rss_i == 1)) { - vmdq_m = 0x0F; - rss_i = 1; - /* 4 pool mode with 8 queue per pool */ - } else { - vmdq_m = 0x18; - rss_m = IXGBE_RSS_8Q_MASK; - rss_i = min_t(u16, rss_i, 8); - } - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - /* Add starting offset to total pool count */ - vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; - - /* double check we are limited to maximum pools */ - vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); - - /* 64 pool mode with 2 queues per pool */ - if (vmdq_i > 32) { - vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; - rss_m = IXGBE_RSS_2Q_MASK; - rss_i = min_t(u16, rss_i, 
2); - /* 32 pool mode with up to 4 queues per pool */ - } else { - vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; - rss_m = IXGBE_RSS_4Q_MASK; - /* We can support 4, 2, or 1 queues */ - rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1; - } - -#if IS_ENABLED(CONFIG_FCOE) - /* queues in the remaining pools are available for FCoE */ - fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); -#endif - - /* remove the starting offset from the pool count */ - vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; - - break; - default: - /* unknown hardware, support one pool w/ one queue */ - vmdq_i = 1; - rss_i = 1; - break; - } - - /* save features for later use */ - adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; - adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; - - /* limit RSS based on user input and save for later use */ - adapter->ring_feature[RING_F_RSS].indices = rss_i; - adapter->ring_feature[RING_F_RSS].mask = rss_m; - - adapter->num_rx_pools = vmdq_i; - adapter->num_rx_queues_per_pool = rss_i; - - adapter->num_rx_queues = vmdq_i * rss_i; -#ifdef HAVE_TX_MQ - adapter->num_tx_queues = vmdq_i * rss_i; -#else - adapter->num_tx_queues = vmdq_i; -#endif /* HAVE_TX_MQ */ - - /* disable ATR as it is not supported when VMDq is enabled */ - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - -#if IS_ENABLED(CONFIG_FCOE) - /* - * FCoE can use rings from adjacent buffers to allow RSS - * like behavior. To account for this we need to add the - * FCoE indices to the total ring count. 
- */ - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - struct ixgbe_ring_feature *fcoe; - - fcoe = &adapter->ring_feature[RING_F_FCOE]; - - /* limit ourselves based on feature limits */ - fcoe_i = min_t(u16, fcoe_i, fcoe->limit); - - if (vmdq_i > 1 && fcoe_i) { - /* alloc queues for FCoE separately */ - fcoe->indices = fcoe_i; - fcoe->offset = vmdq_i * rss_i; - } else { - /* merge FCoE queues with RSS queues */ - fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus()); - - /* limit indices to rss_i if MSI-X is disabled */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) - fcoe_i = rss_i; - - /* attempt to reserve some queues for just FCoE */ - fcoe->indices = min_t(u16, fcoe_i, fcoe->limit); - fcoe->offset = fcoe_i - fcoe->indices; - fcoe_i -= rss_i; - } - - /* add queues to adapter */ - adapter->num_tx_queues += fcoe_i; - adapter->num_rx_queues += fcoe_i; - } -#endif /* CONFIG_FCOE */ - - return true; -} - -/** - * ixgbe_set_rss_queues: Allocate queues for RSS - * @adapter: board private structure to initialize - * - * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try - * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. - * - **/ -static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_ring_feature *f; - u16 rss_i; - - /* set mask for 16 queue limit of RSS */ - f = &adapter->ring_feature[RING_F_RSS]; - rss_i = f->limit; - - f->indices = rss_i; - if (hw->mac.type < ixgbe_mac_X550) - f->mask = IXGBE_RSS_16Q_MASK; - else - f->mask = IXGBE_RSS_64Q_MASK; - - /* disable ATR by default, it will be configured below */ - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - - /* - * Use Flow Director in addition to RSS to ensure the best - * distribution of flows across cores, even when an FDIR flow - * isn't matched. 
- */ - if (rss_i > 1 && adapter->atr_sample_rate) { - f = &adapter->ring_feature[RING_F_FDIR]; - - rss_i = f->indices = f->limit; - - if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - } - -#if IS_ENABLED(CONFIG_FCOE) - /* - * FCoE can exist on the same rings as standard network traffic - * however it is preferred to avoid that if possible. In order - * to get the best performance we allocate as many FCoE queues - * as we can and we place them at the end of the ring array to - * avoid sharing queues with standard RSS on systems with 24 or - * more CPUs. - */ - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - struct net_device *dev = adapter->netdev; - u16 fcoe_i; - - f = &adapter->ring_feature[RING_F_FCOE]; - - /* merge FCoE queues with RSS queues */ - fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus()); - fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues); - - /* limit indices to rss_i if MSI-X is disabled */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) - fcoe_i = rss_i; - - /* attempt to reserve some queues for just FCoE */ - f->indices = min_t(u16, fcoe_i, f->limit); - f->offset = fcoe_i - f->indices; - rss_i = max_t(u16, fcoe_i, rss_i); - } -#endif /* CONFIG_FCOE */ - - adapter->num_rx_queues = rss_i; -#ifdef HAVE_TX_MQ - adapter->num_tx_queues = rss_i; -#endif - - return true; -} - -/* - * ixgbe_set_num_queues: Allocate queues for device, feature dependent - * @adapter: board private structure to initialize - * - * This is the top level queue allocation routine. The order here is very - * important, starting with the "most" number of features turned on at once, - * and ending with the smallest set of features. This way large combinations - * can be allocated if they're turned on, and smaller combinations are the - * fallthrough conditions. 
- * - **/ -static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) -{ - /* Start with base case */ - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; - adapter->num_rx_pools = adapter->num_rx_queues; - adapter->num_rx_queues_per_pool = 1; - -#ifdef HAVE_TX_MQ - if (ixgbe_set_dcb_vmdq_queues(adapter)) - return; - - if (ixgbe_set_dcb_queues(adapter)) - return; - -#endif - if (ixgbe_set_vmdq_queues(adapter)) - return; - - ixgbe_set_rss_queues(adapter); -} - -/** - * ixgbe_acquire_msix_vectors - acquire MSI-X vectors - * @adapter: board private structure - * - * Attempts to acquire a suitable range of MSI-X vector interrupts. Will - * return a negative error code if unable to acquire MSI-X vectors for any - * reason. - */ -static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int i, vectors, vector_threshold; - - if (!(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE)) - return -EOPNOTSUPP; - - /* We start by asking for one vector per queue pair */ - vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); - - /* It is easy to be greedy for MSI-X vectors. However, it really - * doesn't do much good if we have a lot more vectors than CPUs. We'll - * be somewhat conservative and only ask for (roughly) the same number - * of vectors as there are CPUs. - */ - vectors = min_t(int, vectors, num_online_cpus()); - - /* Some vectors are necessary for non-queue interrupts */ - vectors += NON_Q_VECTORS; - - /* Hardware can only support a maximum of hw.mac->max_msix_vectors. - * With features such as RSS and VMDq, we can easily surpass the - * number of Rx and Tx descriptor queues supported by our device. - * Thus, we cap the maximum in the rare cases where the CPU count also - * exceeds our vector limit - */ - vectors = min_t(int, vectors, hw->mac.max_msix_vectors); - - /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0] - * handler, and (2) an Other (Link Status Change, etc.) handler. 
- */ - vector_threshold = MIN_MSIX_COUNT; - - adapter->msix_entries = kcalloc(vectors, - sizeof(struct msix_entry), - GFP_KERNEL); - if (!adapter->msix_entries) - return -ENOMEM; - - for (i = 0; i < vectors; i++) - adapter->msix_entries[i].entry = i; - - vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, - vector_threshold, vectors); - - if (vectors < 0) { - /* A negative count of allocated vectors indicates an error in - * acquiring within the specified range of MSI-X vectors */ - e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", - vectors); - - adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; - - return vectors; - } - - /* we successfully allocated some number of vectors within our - * requested range. - */ - adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; - - /* Adjust for only the vectors we'll use, which is minimum - * of max_q_vectors, or the number of vectors we were allocated. - */ - vectors -= NON_Q_VECTORS; - adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors); - - return 0; -} - - -static void ixgbe_add_ring(struct ixgbe_ring *ring, - struct ixgbe_ring_container *head) -{ - ring->next = head->ring; - head->ring = ring; - head->count++; -} - -/** - * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector - * @adapter: board private structure to initialize - * @v_count: q_vectors allocated on adapter, used for ring interleaving - * @v_idx: index of vector in adapter struct - * @txr_count: total number of Tx rings to allocate - * @txr_idx: index of first Tx ring to allocate - * @rxr_count: total number of Rx rings to allocate - * @rxr_idx: index of first Rx ring to allocate - * - * We allocate one q_vector. If allocation fails we return -ENOMEM. 
- **/ -static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, - unsigned int v_count, unsigned int v_idx, - unsigned int txr_count, unsigned int txr_idx, - unsigned int rxr_count, unsigned int rxr_idx) -{ - struct ixgbe_q_vector *q_vector; - struct ixgbe_ring *ring; - int node = -1; -#ifdef HAVE_IRQ_AFFINITY_HINT - int cpu = -1; - u8 tcs = netdev_get_num_tc(adapter->netdev); -#endif - int ring_count, size; - - /* note this will allocate space for the ring structure as well! */ - ring_count = txr_count + rxr_count; - size = sizeof(struct ixgbe_q_vector) + - (sizeof(struct ixgbe_ring) * ring_count); - -#ifdef HAVE_IRQ_AFFINITY_HINT - /* customize cpu for Flow Director mapping */ - if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) { - u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; - if (rss_i > 1 && adapter->atr_sample_rate) { - if (cpu_online(v_idx)) { - cpu = v_idx; - node = cpu_to_node(cpu); - } - } - } - -#endif - /* allocate q_vector and rings */ - q_vector = kzalloc_node(size, GFP_KERNEL, node); - if (!q_vector) - q_vector = kzalloc(size, GFP_KERNEL); - if (!q_vector) - return -ENOMEM; - - /* setup affinity mask and node */ -#ifdef HAVE_IRQ_AFFINITY_HINT - if (cpu != -1) - cpumask_set_cpu(cpu, &q_vector->affinity_mask); -#endif - q_vector->numa_node = node; - - /* initialize CPU for DCA */ - q_vector->cpu = -1; - - /* initialize NAPI */ - netif_napi_add(adapter->netdev, &q_vector->napi, - ixgbe_poll, 64); -#ifndef HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD -#ifdef HAVE_NDO_BUSY_POLL - napi_hash_add(&q_vector->napi); -#endif -#endif - -#ifdef HAVE_NDO_BUSY_POLL - /* initialize busy poll */ - atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE); - -#endif - /* tie q_vector and adapter together */ - adapter->q_vector[v_idx] = q_vector; - q_vector->adapter = adapter; - q_vector->v_idx = v_idx; - - /* initialize work limits */ - q_vector->tx.work_limit = adapter->tx_work_limit; - q_vector->rx.work_limit = adapter->rx_work_limit; - - /* 
initialize pointer to rings */ - ring = q_vector->ring; - - /* intialize ITR */ - if (txr_count && !rxr_count) { - /* tx only vector */ - if (adapter->tx_itr_setting == 1) - q_vector->itr = IXGBE_12K_ITR; - else - q_vector->itr = adapter->tx_itr_setting; - } else { - /* rx or rx/tx vector */ - if (adapter->rx_itr_setting == 1) - q_vector->itr = IXGBE_20K_ITR; - else - q_vector->itr = adapter->rx_itr_setting; - } - - while (txr_count) { - /* assign generic ring traits */ - ring->dev = pci_dev_to_dev(adapter->pdev); - ring->netdev = adapter->netdev; - - /* configure backlink on ring */ - ring->q_vector = q_vector; - - /* update q_vector Tx values */ - ixgbe_add_ring(ring, &q_vector->tx); - - /* apply Tx specific ring traits */ - ring->count = adapter->tx_ring_count; - ring->queue_index = txr_idx; - - /* assign ring to adapter */ - adapter->tx_ring[txr_idx] = ring; - - /* update count and index */ - txr_count--; - txr_idx += v_count; - - /* push pointer to next ring */ - ring++; - } - - while (rxr_count) { - /* assign generic ring traits */ - ring->dev = pci_dev_to_dev(adapter->pdev); - ring->netdev = adapter->netdev; - - /* configure backlink on ring */ - ring->q_vector = q_vector; - - /* update q_vector Rx values */ - ixgbe_add_ring(ring, &q_vector->rx); - - /* - * 82599 errata, UDP frames with a 0 checksum - * can be marked as checksum errors. 
- */ - if (adapter->hw.mac.type == ixgbe_mac_82599EB) - set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); - -#if IS_ENABLED(CONFIG_FCOE) - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - struct ixgbe_ring_feature *f; - f = &adapter->ring_feature[RING_F_FCOE]; - - if ((rxr_idx >= f->offset) && - (rxr_idx < f->offset + f->indices)) { - set_bit(__IXGBE_RX_FCOE, &ring->state); - } - } -#endif /* CONFIG_FCOE */ - - /* apply Rx specific ring traits */ - ring->count = adapter->rx_ring_count; - ring->queue_index = rxr_idx; - - /* assign ring to adapter */ - adapter->rx_ring[rxr_idx] = ring; - - /* update count and index */ - rxr_count--; - rxr_idx += v_count; - - /* push pointer to next ring */ - ring++; - } - - return 0; -} - -/** - * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector - * @adapter: board private structure to initialize - * @v_idx: Index of vector to be freed - * - * This function frees the memory allocated to the q_vector. In addition if - * NAPI is enabled it will delete any references to the NAPI struct prior - * to freeing the q_vector. - **/ -static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) -{ - struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; - struct ixgbe_ring *ring; - - ixgbe_for_each_ring(ring, q_vector->tx) - adapter->tx_ring[ring->queue_index] = NULL; - - ixgbe_for_each_ring(ring, q_vector->rx) - adapter->rx_ring[ring->queue_index] = NULL; - - adapter->q_vector[v_idx] = NULL; -#ifdef HAVE_NDO_BUSY_POLL - napi_hash_del(&q_vector->napi); -#endif - netif_napi_del(&q_vector->napi); - kfree_rcu(q_vector, rcu); -} - -/** - * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors - * @adapter: board private structure to initialize - * - * We allocate one q_vector per queue interrupt. If allocation fails we - * return -ENOMEM. 
- **/ -static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) -{ - unsigned int q_vectors = adapter->num_q_vectors; - unsigned int rxr_remaining = adapter->num_rx_queues; - unsigned int txr_remaining = adapter->num_tx_queues; - unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; - int err; - - if (q_vectors >= (rxr_remaining + txr_remaining)) { - for (; rxr_remaining; v_idx++) { - err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, - 0, 0, 1, rxr_idx); - if (err) - goto err_out; - - /* update counts and index */ - rxr_remaining--; - rxr_idx++; - } - } - - for (; v_idx < q_vectors; v_idx++) { - int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); - int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); - err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, - tqpv, txr_idx, - rqpv, rxr_idx); - - if (err) - goto err_out; - - /* update counts and index */ - rxr_remaining -= rqpv; - txr_remaining -= tqpv; - rxr_idx++; - txr_idx++; - } - - return IXGBE_SUCCESS; - -err_out: - adapter->num_tx_queues = 0; - adapter->num_rx_queues = 0; - adapter->num_q_vectors = 0; - - while (v_idx--) - ixgbe_free_q_vector(adapter, v_idx); - - return -ENOMEM; -} - -/** - * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors - * @adapter: board private structure to initialize - * - * This function frees the memory allocated to the q_vectors. In addition if - * NAPI is enabled it will delete any references to the NAPI struct prior - * to freeing the q_vector. 
- **/ -static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) -{ - int v_idx = adapter->num_q_vectors; - - adapter->num_tx_queues = 0; - adapter->num_rx_queues = 0; - adapter->num_q_vectors = 0; - - while (v_idx--) - ixgbe_free_q_vector(adapter, v_idx); -} - -void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) -{ - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; - pci_disable_msix(adapter->pdev); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; - } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { - adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; - pci_disable_msi(adapter->pdev); - } -} - -/** - * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported - * @adapter: board private structure to initialize - * - * Attempt to configure the interrupts using the best available - * capabilities of the hardware and the kernel. - **/ -void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) -{ - int err; - - /* We will try to get MSI-X interrupts first */ - if (!ixgbe_acquire_msix_vectors(adapter)) - return; - - /* At this point, we do not have MSI-X capabilities. We need to - * reconfigure or disable various features which require MSI-X - * capability. - */ - - /* Disable DCB unless we only have a single traffic class */ - if (netdev_get_num_tc(adapter->netdev) > 1) { - e_dev_warn("Number of DCB TCs exceeds number of available queues. 
Disabling DCB support.\n"); - netdev_reset_tc(adapter->netdev); - - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - adapter->hw.fc.requested_mode = adapter->last_lfc_mode; - - adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; - adapter->temp_dcb_cfg.pfc_mode_enable = false; - adapter->dcb_cfg.pfc_mode_enable = false; - } - - adapter->dcb_cfg.num_tcs.pg_tcs = 1; - adapter->dcb_cfg.num_tcs.pfc_tcs = 1; - - /* Disable VMDq support */ - e_dev_warn("Disabling VMQd support\n"); - adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; - -#ifdef CONFIG_PCI_IOV - /* Disable SR-IOV support */ - e_dev_warn("Disabling SR-IOV support\n"); - ixgbe_disable_sriov(adapter); -#endif /* CONFIG_PCI_IOV */ - - /* Disable RSS */ - e_dev_warn("Disabling RSS support\n"); - adapter->ring_feature[RING_F_RSS].limit = 1; - - /* recalculate number of queues now that many features have been - * changed or disabled. - */ - ixgbe_set_num_queues(adapter); - adapter->num_q_vectors = 1; - - if (!(adapter->flags & IXGBE_FLAG_MSI_CAPABLE)) - return; - - err = pci_enable_msi(adapter->pdev); - if (err) - e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n", - err); - else - adapter->flags |= IXGBE_FLAG_MSI_ENABLED; -} - -/** - * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme - * @adapter: board private structure to initialize - * - * We determine which interrupt scheme to use based on... - * - Kernel support (MSI, MSI-X) - * - which can be user-defined (via MODULE_PARAM) - * - Hardware queue count (num_*_queues) - * - defined by miscellaneous hardware support/features (RSS, etc.) 
- **/ -int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) -{ - int err; - - /* Number of supported queues */ - ixgbe_set_num_queues(adapter); - - /* Set interrupt mode */ - ixgbe_set_interrupt_capability(adapter); - - /* Allocate memory for queues */ - err = ixgbe_alloc_q_vectors(adapter); - if (err) { - e_err(probe, "Unable to allocate memory for queue vectors\n"); - ixgbe_reset_interrupt_capability(adapter); - return err; - } - - ixgbe_cache_ring_register(adapter); - - set_bit(__IXGBE_DOWN, &adapter->state); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings - * @adapter: board private structure to clear interrupt scheme on - * - * We go through and clear interrupt specific resources and reset the structure - * to pre-load conditions - **/ -void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) -{ - ixgbe_free_q_vectors(adapter); - ixgbe_reset_interrupt_capability(adapter); -} - -void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, - u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) -{ - struct ixgbe_adv_tx_context_desc *context_desc; - u16 i = tx_ring->next_to_use; - - context_desc = IXGBE_TX_CTXTDESC(tx_ring, i); - - i++; - tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; - - /* set bits to identify this as an advanced context descriptor */ - type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; - - context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); - context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); - context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); -} - diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_main.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_main.c deleted file mode 100644 index 613c69250198..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_main.c +++ /dev/null @@ -1,11946 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/****************************************************************************** - Copyright (c)2006 - 2007 Myricom, Inc. 
for some LRO specific code -******************************************************************************/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef NETIF_F_TSO -#include -#ifdef NETIF_F_TSO6 -#include -#include -#endif /* NETIF_F_TSO6 */ -#endif /* NETIF_F_TSO */ -#ifdef SIOCETHTOOL -#include -#endif - -#include -#include "ixgbe.h" -#ifdef HAVE_UDP_ENC_RX_OFFLOAD -#include -#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ -#ifdef HAVE_VXLAN_RX_OFFLOAD -#include -#endif /* HAVE_VXLAN_RX_OFFLOAD */ - -#include "ixgbe_dcb_82599.h" -#include "ixgbe_sriov.h" - -#define DRV_HW_PERF - -#define FPGA - -#define DRIVERIOV - -#define BYPASS_TAG - -#define RELEASE_TAG - -#define DRV_VERSION "5.2.4" \ - DRIVERIOV DRV_HW_PERF FPGA \ - BYPASS_TAG RELEASE_TAG -#define DRV_SUMMARY "Intel(R) 10GbE PCI Express Linux Network Driver" -const char ixgbe_driver_version[] = DRV_VERSION; -#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME -char ixgbe_driver_name[] = "ixgbe"; -#else -const char ixgbe_driver_name[] = "ixgbe"; -#endif -static const char ixgbe_driver_string[] = DRV_SUMMARY; -static const char ixgbe_copyright[] = "Copyright(c) 1999 - 2017 Intel Corporation."; -static const char ixgbe_overheat_msg[] = - "Network adapter has been stopped because it has over heated. " - "Restart the computer. 
If the problem persists, " - "power off the system and replace the adapter"; - -/* ixgbe_pci_tbl - PCI Device ID Table - * - * Wildcard entries (PCI_ANY_ID) should come last - * Last entry must be all 0s - * - * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, - * Class, Class Mask, private data (not used) } - */ -static const struct pci_device_id ixgbe_pci_tbl[] = { - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), 0}, - {PCI_VDEVICE(INTEL, 
IXGBE_DEV_ID_X540T1), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_QSFP), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_QSFP_N), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), 0}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), 0}, - /* required last entry */ - { .device = 0 } -}; -MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); - -#if IS_ENABLED(CONFIG_DCA) -static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, - void *p); -static struct notifier_block dca_notifier = { - .notifier_call = ixgbe_notify_dca, - .next = NULL, - .priority = 0 -}; -#endif /* CONFIG_DCA */ -static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); - -MODULE_AUTHOR("Intel Corporation, "); -MODULE_DESCRIPTION(DRV_SUMMARY); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -#define DEFAULT_DEBUG_LEVEL_SHIFT 3 - -static struct workqueue_struct *ixgbe_wq; - -static bool ixgbe_is_sfp(struct ixgbe_hw *hw); -static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); - -static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_hw *hw, - u32 reg, u16 *value) -{ - struct ixgbe_adapter *adapter = hw->back; - struct pci_dev *parent_dev; - struct 
pci_bus *parent_bus; - int pos; - - parent_bus = adapter->pdev->bus->parent; - if (!parent_bus) - return IXGBE_ERR_FEATURE_NOT_SUPPORTED; - - parent_dev = parent_bus->self; - if (!parent_dev) - return IXGBE_ERR_FEATURE_NOT_SUPPORTED; - - pos = pci_find_capability(parent_dev, PCI_CAP_ID_EXP); - if (!pos) - return IXGBE_ERR_FEATURE_NOT_SUPPORTED; - - pci_read_config_word(parent_dev, pos + reg, value); - if (*value == IXGBE_FAILED_READ_CFG_WORD && - ixgbe_check_cfg_remove(hw, parent_dev)) - return IXGBE_ERR_FEATURE_NOT_SUPPORTED; - return IXGBE_SUCCESS; -} - -/** - * ixgbe_get_parent_bus_info - Set PCI bus info beyond switch - * @hw: pointer to hardware structure - * - * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure - * when the device is behind a switch. - **/ -static s32 ixgbe_get_parent_bus_info(struct ixgbe_hw *hw) -{ - u16 link_status = 0; - int err; - - hw->bus.type = ixgbe_bus_type_pci_express; - - /* Get the negotiated link width and speed from PCI config space of the - * parent, as this device is behind a switch - */ - err = ixgbe_read_pci_cfg_word_parent(hw, 18, &link_status); - - /* If the read fails, fallback to default */ - if (err) - link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); - - ixgbe_set_pci_config_data_generic(hw, link_status); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_check_from_parent - determine whether to use parent for PCIe info - * @hw: hw specific details - * - * This function is used by probe to determine whether a device's PCIe info - * (speed, width, etc) should be obtained from the parent bus or directly. This - * is useful for specialized device configurations containing PCIe bridges. 
- */ -static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) -{ - switch (hw->device_id) { - case IXGBE_DEV_ID_82599_QSFP_SF_QP: - case IXGBE_DEV_ID_82599_SFP_SF_QP: - return true; - default: - return false; - } -} - -static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, - int expected_gts) -{ - struct ixgbe_hw *hw = &adapter->hw; - int max_gts = 0; - enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; - enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; - struct pci_dev *pdev; - - /* Some devices are not connected over PCIe and thus do not negotiate - * speed. These devices do not have valid bus info, and thus any report - * we generate may not be correct. - */ - if (hw->bus.type == ixgbe_bus_type_internal) - return; - - /* determine whether to use the parent device */ - if (ixgbe_pcie_from_parent(&adapter->hw)) - pdev = adapter->pdev->bus->parent->self; - else - pdev = adapter->pdev; - - if (pcie_get_minimum_link(pdev, &speed, &width) || - speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) { - e_dev_warn("Unable to determine PCI Express bandwidth.\n"); - return; - } - - switch (speed) { - case PCIE_SPEED_2_5GT: - /* 8b/10b encoding reduces max throughput by 20% */ - max_gts = 2 * width; - break; - case PCIE_SPEED_5_0GT: - /* 8b/10b encoding reduces max throughput by 20% */ - max_gts = 4 * width; - break; - case PCIE_SPEED_8_0GT: - /* 128b/130b encoding has less than 2% impact on throughput */ - max_gts = 8 * width; - break; - default: - e_dev_warn("Unable to determine PCI Express bandwidth.\n"); - return; - } - - e_dev_info("PCI Express bandwidth of %dGT/s available\n", - max_gts); - e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n", - (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : - speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : - speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : - "Unknown"), - hw->bus.width, - (speed == PCIE_SPEED_2_5GT ? "20%" : - speed == PCIE_SPEED_5_0GT ? "20%" : - speed == PCIE_SPEED_8_0GT ? 
"<2%" : - "Unknown")); - - if (max_gts < expected_gts) { - e_dev_warn("This is not sufficient for optimal performance of this card.\n"); - e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n", - expected_gts); - e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n"); - } -} - -/** - * ixgbe_enumerate_functions - Get the number of ports this device has - * @adapter: adapter structure - * - * This function enumerates the phsyical functions co-located on a single slot, - * in order to determine how many ports a device has. This is most useful in - * determining the required GT/s of PCIe bandwidth necessary for optimal - * performance. - **/ -static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) -{ - struct pci_dev *entry, *pdev = adapter->pdev; - int physfns = 0; - - /* Some cards can not use the generic count PCIe functions method, - * because they are behind a parent switch, so we hardcode these to - * correct number of ports. - */ - if (ixgbe_pcie_from_parent(&adapter->hw)) { - physfns = 4; - } else { - list_for_each_entry(entry, &pdev->bus->devices, bus_list) { -#ifdef CONFIG_PCI_IOV - /* don't count virtual functions */ - if (entry->is_virtfn) - continue; -#endif - - /* When the devices on the bus don't all match our device ID, - * we can't reliably determine the correct number of - * functions. This can occur if a function has been direct - * attached to a virtual machine using VT-d, for example. In - * this case, simply return -1 to indicate this. 
- */ - if ((entry->vendor != pdev->vendor) || - (entry->device != pdev->device)) - return -1; - - physfns++; - } - } - - return physfns; -} - -static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) -{ - if (!test_bit(__IXGBE_DOWN, &adapter->state) && - !test_bit(__IXGBE_REMOVE, &adapter->state) && - !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) - queue_work(ixgbe_wq, &adapter->service_task); -} - -static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) -{ - BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); - - /* flush memory to make sure state is correct before next watchog */ - smp_mb__before_atomic(); - clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); -} - -static void ixgbe_remove_adapter(struct ixgbe_hw *hw) -{ - struct ixgbe_adapter *adapter = hw->back; - - if (!hw->hw_addr) - return; - hw->hw_addr = NULL; - e_dev_err("Adapter removed\n"); - if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) - ixgbe_service_event_schedule(adapter); -} - -static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) -{ - u32 value; - - /* The following check not only optimizes a bit by not - * performing a read on the status register when the - * register just read was a status register read that - * returned IXGBE_FAILED_READ_REG. It also blocks any - * potential recursion. 
- */ - if (reg == IXGBE_STATUS) { - ixgbe_remove_adapter(hw); - return; - } - value = IXGBE_READ_REG(hw, IXGBE_STATUS); - if (value == IXGBE_FAILED_READ_REG) - ixgbe_remove_adapter(hw); -} - -static u32 -ixgbe_validate_register_read(struct ixgbe_hw *_hw, u32 reg, bool quiet) -{ - int i; - u32 value; - u8 __iomem *reg_addr; - struct ixgbe_adapter *adapter = _hw->back; - - reg_addr = ACCESS_ONCE(_hw->hw_addr); - if (IXGBE_REMOVED(reg_addr)) - return IXGBE_FAILED_READ_REG; - for (i = 0; i < IXGBE_DEAD_READ_RETRIES; ++i) { - value = readl(reg_addr + reg); - if (value != IXGBE_DEAD_READ_REG) - break; - } - if (quiet) - return value; - if (value == IXGBE_DEAD_READ_REG) - e_err(drv, "%s: register %x read unchanged\n", __func__, reg); - else - e_warn(hw, "%s: register %x read recovered after %d retries\n", - __func__, reg, i + 1); - return value; -} - -u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg, bool quiet) -{ - u32 value; - u8 __iomem *reg_addr; - - reg_addr = ACCESS_ONCE(hw->hw_addr); - if (IXGBE_REMOVED(reg_addr)) - return IXGBE_FAILED_READ_REG; - if (unlikely(hw->phy.nw_mng_if_sel & - IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) { - struct ixgbe_adapter *adapter; - int i; - - for (i = 0; i < 200; ++i) { - value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY); - if (likely(!value)) - goto writes_completed; - if (value == IXGBE_FAILED_READ_REG) { - ixgbe_remove_adapter(hw); - return IXGBE_FAILED_READ_REG; - } - udelay(5); - } - - adapter = hw->back; - e_warn(hw, "register writes incomplete %08x\n", value); - } - -writes_completed: - value = readl(reg_addr + reg); - if (unlikely(value == IXGBE_FAILED_READ_REG)) - ixgbe_check_remove(hw, reg); - if (unlikely(value == IXGBE_DEAD_READ_REG)) - value = ixgbe_validate_register_read(hw, reg, quiet); - return value; -} - -static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) -{ - u32 ctrl_ext; - - /* Let firmware take over control of h/w */ - ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); - 
IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, - ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); -} - -static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) -{ - u32 ctrl_ext; - - /* Let firmware know the driver has taken over */ - ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, - ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); -} - -/* - * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors - * @adapter: pointer to adapter struct - * @direction: 0 for Rx, 1 for Tx, -1 for other causes - * @queue: queue to map the corresponding interrupt to - * @msix_vector: the vector to map to the corresponding queue - * - */ -static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, - u8 queue, u8 msix_vector) -{ - u32 ivar, index; - struct ixgbe_hw *hw = &adapter->hw; - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - msix_vector |= IXGBE_IVAR_ALLOC_VAL; - if (direction == -1) - direction = 0; - index = (((direction * 64) + queue) >> 2) & 0x1F; - ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); - ivar &= ~(0xFF << (8 * (queue & 0x3))); - ivar |= (msix_vector << (8 * (queue & 0x3))); - IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - if (direction == -1) { - /* other causes */ - msix_vector |= IXGBE_IVAR_ALLOC_VAL; - index = ((queue & 1) * 8); - ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC); - ivar &= ~(0xFF << index); - ivar |= (msix_vector << index); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar); - break; - } else { - /* tx or rx causes */ - msix_vector |= IXGBE_IVAR_ALLOC_VAL; - index = ((16 * (queue & 1)) + (8 * direction)); - ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); - ivar &= ~(0xFF << index); - ivar |= (msix_vector << index); - IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar); - break; - } - default: - break; - } -} - -static inline 
void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, - u64 qmask) -{ - u32 mask; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - mask = (IXGBE_EIMS_RTX_QUEUE & qmask); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - mask = (qmask & 0xFFFFFFFF); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); - mask = (qmask >> 32); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); - break; - default: - break; - } -} - -void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring, - struct ixgbe_tx_buffer *tx_buffer) -{ - if (tx_buffer->skb) { - dev_kfree_skb_any(tx_buffer->skb); - if (dma_unmap_len(tx_buffer, len)) - dma_unmap_single(ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - } else if (dma_unmap_len(tx_buffer, len)) { - dma_unmap_page(ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - } - tx_buffer->next_to_watch = NULL; - tx_buffer->skb = NULL; - dma_unmap_len_set(tx_buffer, len, 0); - /* tx_buffer_info must be completely set up in the transmit path */ -} - -static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_hw_stats *hwstats = &adapter->stats; - int i; - u32 data; - - if ((hw->fc.current_mode != ixgbe_fc_full) && - (hw->fc.current_mode != ixgbe_fc_rx_pause)) - return; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); - break; - default: - data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); - } - hwstats->lxoffrxc += data; - - /* refill credits (no tx hang) if we received xoff */ - if (!data) - return; - - for (i = 0; i < adapter->num_tx_queues; i++) - clear_bit(__IXGBE_HANG_CHECK_ARMED, - &adapter->tx_ring[i]->state); -} - -static void ixgbe_update_xoff_received(struct ixgbe_adapter 
*adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_hw_stats *hwstats = &adapter->stats; - u32 xoff[8] = {0}; - int i; - bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; - -#ifdef HAVE_DCBNL_IEEE - if (adapter->ixgbe_ieee_pfc) - pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); - -#endif - if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) { - ixgbe_update_xoff_rx_lfc(adapter); - return; - } - - /* update stats for each tc, only valid with PFC enabled */ - for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); - break; - default: - xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); - } - hwstats->pxoffrxc[i] += xoff[i]; - } - - /* disarm tx queues that have received xoff frames */ - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; - u8 tc = tx_ring->dcb_tc; - - if ((tc <= 7) && (xoff[tc])) - clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); - } -} - -static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) -{ - return ring->stats.packets; -} - -static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) -{ - struct ixgbe_adapter *adapter = ring->q_vector->adapter; - struct ixgbe_hw *hw = &adapter->hw; - - u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); - u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); - - return ((head <= tail) ? tail : tail + ring->count) - head; -} - -static bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) -{ - u32 tx_done = ixgbe_get_tx_completed(tx_ring); - u32 tx_done_old = tx_ring->tx_stats.tx_done_old; - u32 tx_pending = ixgbe_get_tx_pending(tx_ring); - bool ret = false; - - clear_check_for_tx_hang(tx_ring); - - /* - * Check for a hung queue, but be thorough. This verifies - * that a transmit has been completed since the previous - * check AND there is at least one packet pending. The - * ARMED bit is set to indicate a potential hang. 
The - * bit is cleared if a pause frame is received to remove - * false hang detection due to PFC or 802.3x frames. By - * requiring this to fail twice we avoid races with - * PFC clearing the ARMED bit and conditions where we - * run the check_tx_hang logic with a transmit completion - * pending but without time to complete it yet. - */ - if ((tx_done_old == tx_done) && tx_pending) { - /* make sure it is true for two checks in a row */ - ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, - &tx_ring->state); - } else { - /* update completed stats and continue */ - tx_ring->tx_stats.tx_done_old = tx_done; - /* reset the countdown */ - clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); - } - - return ret; -} - -/** - * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout - * @adapter: driver private struct - **/ -static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) -{ - - /* Do the reset outside of interrupt context */ - if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); - ixgbe_service_event_schedule(adapter); - } -} - -/** - * ixgbe_tx_timeout - Respond to a Tx Hang - * @netdev: network interface device structure - **/ -static void ixgbe_tx_timeout(struct net_device *netdev) -{ -struct ixgbe_adapter *adapter = netdev_priv(netdev); - bool real_tx_hang = false; - int i; - -#define TX_TIMEO_LIMIT 16000 - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; - if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) - real_tx_hang = true; - } - - if (real_tx_hang) { - ixgbe_tx_timeout_reset(adapter); - } else { - e_info(drv, "Fake Tx hang detected with timeout of %d " - "seconds\n", netdev->watchdog_timeo/HZ); - - /* fake Tx hang - increase the kernel timeout */ - if (netdev->watchdog_timeo < TX_TIMEO_LIMIT) - netdev->watchdog_timeo *= 2; - } -} - -/** - * ixgbe_clean_tx_irq - Reclaim resources after transmit completes - * @q_vector: structure 
containing interrupt and ring information - * @tx_ring: tx ring to clean - **/ -static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring *tx_ring) -{ - struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_tx_buffer *tx_buffer; - union ixgbe_adv_tx_desc *tx_desc; - unsigned int total_bytes = 0, total_packets = 0; - unsigned int budget = q_vector->tx.work_limit; - unsigned int i = tx_ring->next_to_clean; - - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return true; - - tx_buffer = &tx_ring->tx_buffer_info[i]; - tx_desc = IXGBE_TX_DESC(tx_ring, i); - i -= tx_ring->count; - - do { - union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; - - /* if next_to_watch is not set then there is no work pending */ - if (!eop_desc) - break; - - /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); - - /* if DD is not set pending work has not been completed */ - if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) - break; - - /* clear next_to_watch to prevent false hangs */ - tx_buffer->next_to_watch = NULL; - - /* update the statistics for this packet */ - total_bytes += tx_buffer->bytecount; - total_packets += tx_buffer->gso_segs; - - /* free the skb */ - dev_kfree_skb_any(tx_buffer->skb); - - /* unmap skb header data */ - dma_unmap_single(tx_ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - - /* clear tx_buffer data */ - tx_buffer->skb = NULL; - dma_unmap_len_set(tx_buffer, len, 0); - - /* unmap remaining buffers */ - while (tx_desc != eop_desc) { - tx_buffer++; - tx_desc++; - i++; - if (unlikely(!i)) { - i -= tx_ring->count; - tx_buffer = tx_ring->tx_buffer_info; - tx_desc = IXGBE_TX_DESC(tx_ring, 0); - } - - /* unmap any remaining paged data */ - if (dma_unmap_len(tx_buffer, len)) { - dma_unmap_page(tx_ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - dma_unmap_len_set(tx_buffer, len, 0); - } - } - - /* 
move us one more past the eop_desc for start of next pkt */ - tx_buffer++; - tx_desc++; - i++; - if (unlikely(!i)) { - i -= tx_ring->count; - tx_buffer = tx_ring->tx_buffer_info; - tx_desc = IXGBE_TX_DESC(tx_ring, 0); - } - - /* issue prefetch for next Tx descriptor */ - prefetch(tx_desc); - - /* update budget accounting */ - budget--; - } while (likely(budget)); - - i += tx_ring->count; - tx_ring->next_to_clean = i; - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->stats.bytes += total_bytes; - tx_ring->stats.packets += total_packets; - u64_stats_update_end(&tx_ring->syncp); - q_vector->tx.total_bytes += total_bytes; - q_vector->tx.total_packets += total_packets; - - if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { - /* schedule immediate reset if we believe we hung */ - struct ixgbe_hw *hw = &adapter->hw; - e_err(drv, "Detected Tx Unit Hang\n" - " Tx Queue <%d>\n" - " TDH, TDT <%x>, <%x>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n", - tx_ring->queue_index, - IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), - IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), - tx_ring->next_to_use, i); - e_err(drv, "tx_buffer_info[next_to_clean]\n" - " time_stamp <%lx>\n" - " jiffies <%lx>\n", - tx_ring->tx_buffer_info[i].time_stamp, jiffies); - - netif_stop_subqueue(netdev_ring(tx_ring), - ring_queue_index(tx_ring)); - - e_info(probe, - "tx hang %d detected on queue %d, resetting adapter\n", - adapter->tx_timeout_count + 1, tx_ring->queue_index); - - ixgbe_tx_timeout_reset(adapter); - - /* the adapter is about to reset, no point in enabling stuff */ - return true; - } - - netdev_tx_completed_queue(txring_txq(tx_ring), - total_packets, total_bytes); - -#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) - if (unlikely(total_packets && netif_carrier_ok(netdev_ring(tx_ring)) && - (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { - /* Make sure that anybody stopping the queue after this - * sees the new next_to_clean. 
- */ - smp_mb(); -#ifdef HAVE_TX_MQ - if (__netif_subqueue_stopped(netdev_ring(tx_ring), - ring_queue_index(tx_ring)) - && !test_bit(__IXGBE_DOWN, &q_vector->adapter->state)) { - netif_wake_subqueue(netdev_ring(tx_ring), - ring_queue_index(tx_ring)); - ++tx_ring->tx_stats.restart_queue; - } -#else - if (netif_queue_stopped(netdev_ring(tx_ring)) && - !test_bit(__IXGBE_DOWN, &q_vector->adapter->state)) { - netif_wake_queue(netdev_ring(tx_ring)); - ++tx_ring->tx_stats.restart_queue; - } -#endif - } - - return !!budget; -} - -#if IS_ENABLED(CONFIG_DCA) -static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring, - int cpu) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 txctrl = 0; - u16 reg_offset; - - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - txctrl = dca3_get_tag(tx_ring->dev, cpu); - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx); - txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599; - break; - default: - /* for unknown hardware do not write register */ - return; - } - - /* - * We can enable relaxed ordering for reads, but not writes when - * DCA is enabled. This is due to a known issue in some chipsets - * which will cause the DCA tag to be cleared. 
- */ - txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN | - IXGBE_DCA_TXCTRL_DATA_RRO_EN | - IXGBE_DCA_TXCTRL_DESC_DCA_EN; - - IXGBE_WRITE_REG(hw, reg_offset, txctrl); -} - -static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring, - int cpu) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 rxctrl = 0; - u8 reg_idx = rx_ring->reg_idx; - - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - rxctrl = dca3_get_tag(rx_ring->dev, cpu); - - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599; - break; - default: - break; - } - - /* - * We can enable relaxed ordering for reads, but not writes when - * DCA is enabled. This is due to a known issue in some chipsets - * which will cause the DCA tag to be cleared. - */ - rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN | - IXGBE_DCA_RXCTRL_DATA_DCA_EN | - IXGBE_DCA_RXCTRL_DESC_DCA_EN; - - IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); -} - -static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) -{ - struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *ring; - int cpu = get_cpu(); - - if (q_vector->cpu == cpu) - goto out_no_update; - - ixgbe_for_each_ring(ring, q_vector->tx) - ixgbe_update_tx_dca(adapter, ring, cpu); - - ixgbe_for_each_ring(ring, q_vector->rx) - ixgbe_update_rx_dca(adapter, ring, cpu); - - q_vector->cpu = cpu; -out_no_update: - put_cpu(); -} - -static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) -{ - int v_idx; - - /* always use CB2 mode, difference is masked in the CB driver */ - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, - IXGBE_DCA_CTRL_DCA_MODE_CB2); - else - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, - IXGBE_DCA_CTRL_DCA_DISABLE); - - for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { - adapter->q_vector[v_idx]->cpu = -1; - ixgbe_update_dca(adapter->q_vector[v_idx]); - } -} - -static int __ixgbe_notify_dca(struct device *dev, 
void *data) -{ - struct ixgbe_adapter *adapter = dev_get_drvdata(dev); - unsigned long event = *(unsigned long *)data; - - if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) - return 0; - - switch (event) { - case DCA_PROVIDER_ADD: - /* if we're already enabled, don't do it again */ - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - break; - if (dca_add_requester(dev) == IXGBE_SUCCESS) { - adapter->flags |= IXGBE_FLAG_DCA_ENABLED; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); - break; - } - /* fall through - DCA is disabled */ - case DCA_PROVIDER_REMOVE: - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { - dca_remove_requester(dev); - adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); - } - break; - } - - return IXGBE_SUCCESS; -} -#endif /* CONFIG_DCA */ - -#ifdef NETIF_F_RXHASH -#define IXGBE_RSS_L4_TYPES_MASK \ - ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ - (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ - (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ - (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP) | \ - (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX) | \ - (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX)) - -static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - u16 rss_type; - - if (!(netdev_ring(ring)->features & NETIF_F_RXHASH)) - return; - - rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & - IXGBE_RXDADV_RSSTYPE_MASK; - - if (!rss_type) - return; - - skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), - (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
- PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); -} -#endif /* NETIF_F_RXHASH */ - -#if IS_ENABLED(CONFIG_FCOE) -/** - * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type - * @ring: structure containing ring specific data - * @rx_desc: advanced rx descriptor - * - * Returns : true if it is FCoE pkt - */ -static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring, - union ixgbe_adv_rx_desc *rx_desc) -{ - __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; - - return test_bit(__IXGBE_RX_FCOE, &ring->state) && - ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) == - (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE << - IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); -} -#endif /* CONFIG_FCOE */ - -/** - * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum - * @ring: structure containing ring specific data - * @rx_desc: current Rx descriptor being processed - * @skb: skb currently being received and modified - **/ -static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; - bool encap_pkt = false; - - skb_checksum_none_assert(skb); - - /* Rx csum disabled */ - if (!(netdev_ring(ring)->features & NETIF_F_RXCSUM)) - return; - - /* check for VXLAN or Geneve packet type */ - if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) { - encap_pkt = true; -#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) - skb->encapsulation = 1; -#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ - skb->ip_summed = CHECKSUM_NONE; - } - - /* if IP and error */ - if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && - ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { - ring->rx_stats.csum_err++; - return; - } - - if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS)) - return; - - if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { - - /* - * 82599 errata, UDP frames with a 0 checksum can be marked as - * checksum 
errors. - */ - if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) && - test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state)) - return; - - ring->rx_stats.csum_err++; - return; - } - - /* It must be a TCP or UDP packet with a valid checksum */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - if (encap_pkt) { - if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS)) - return; - - if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) { - skb->ip_summed = CHECKSUM_NONE; - return; - } -#ifdef HAVE_SKBUFF_CSUM_LEVEL - /* If we checked the outer header let the stack know */ - skb->csum_level = 1; -#endif /* HAVE_SKBUFF_CSUM_LEVEL */ - } -} - -static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) -{ - rx_ring->next_to_use = val; -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - - /* update next to alloc since we have filled the ring */ - rx_ring->next_to_alloc = val; -#endif - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
- */ - wmb(); - writel(val, rx_ring->tail); -} - -#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT -static bool ixgbe_alloc_mapped_skb(struct ixgbe_ring *rx_ring, - struct ixgbe_rx_buffer *bi) -{ - struct sk_buff *skb = bi->skb; - dma_addr_t dma = bi->dma; - - if (unlikely(dma)) - return true; - - if (likely(!skb)) { - skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring), - rx_ring->rx_buf_len); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_rx_buff_failed++; - return false; - } - - bi->skb = skb; - - } - - dma = dma_map_single(rx_ring->dev, skb->data, - rx_ring->rx_buf_len, DMA_FROM_DEVICE); - - /* - * if mapping failed free memory back to system since - * there isn't much point in holding memory we can't use - */ - if (dma_mapping_error(rx_ring->dev, dma)) { - dev_kfree_skb_any(skb); - bi->skb = NULL; - - rx_ring->rx_stats.alloc_rx_buff_failed++; - return false; - } - - bi->dma = dma; - return true; -} - -#else /* !CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ -static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring) -{ - return ring_uses_build_skb(rx_ring) ? 
IXGBE_SKB_PAD : 0; -} - -static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, - struct ixgbe_rx_buffer *bi) -{ - struct page *page = bi->page; - dma_addr_t dma; - - /* since we are recycling buffers we should seldom need to alloc */ - if (likely(page)) - return true; - - /* alloc new page for storage */ - page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP, - ixgbe_rx_pg_order(rx_ring)); - if (unlikely(!page)) { - rx_ring->rx_stats.alloc_rx_page_failed++; - return false; - } - - /* map page for use */ - dma = dma_map_page_attrs(rx_ring->dev, page, 0, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IXGBE_RX_DMA_ATTR); - - /* - * if mapping failed free memory back to system since - * there isn't much point in holding memory we can't use - */ - if (dma_mapping_error(rx_ring->dev, dma)) { - __free_pages(page, ixgbe_rx_pg_order(rx_ring)); - - rx_ring->rx_stats.alloc_rx_page_failed++; - return false; - } - - bi->dma = dma; - bi->page = page; - bi->page_offset = ixgbe_rx_offset(rx_ring); - bi->pagecnt_bias = 1; - - return true; -} - -#endif /* !CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ -/** - * ixgbe_alloc_rx_buffers - Replace used receive buffers - * @rx_ring: ring to place buffers on - * @cleaned_count: number of buffers to replace - **/ -void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) -{ - union ixgbe_adv_rx_desc *rx_desc; - struct ixgbe_rx_buffer *bi; - u16 i = rx_ring->next_to_use; -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - u16 bufsz; -#endif - - /* nothing to do */ - if (!cleaned_count) - return; - - rx_desc = IXGBE_RX_DESC(rx_ring, i); - bi = &rx_ring->rx_buffer_info[i]; - i -= rx_ring->count; -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - - bufsz = ixgbe_rx_bufsz(rx_ring); -#endif - - do { -#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - if (!ixgbe_alloc_mapped_skb(rx_ring, bi)) - break; -#else - if (!ixgbe_alloc_mapped_page(rx_ring, bi)) - break; - - /* sync the buffer for use by the device */ - 
dma_sync_single_range_for_device(rx_ring->dev, bi->dma, - bi->page_offset, bufsz, - DMA_FROM_DEVICE); -#endif - - /* - * Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. - */ -#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); -#else - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); -#endif - - rx_desc++; - bi++; - i++; - if (unlikely(!i)) { - rx_desc = IXGBE_RX_DESC(rx_ring, 0); - bi = rx_ring->rx_buffer_info; - i -= rx_ring->count; - } - - /* clear the length for the next_to_use descriptor */ - rx_desc->wb.upper.length = 0; - - cleaned_count--; - } while (cleaned_count); - - i += rx_ring->count; - - if (rx_ring->next_to_use != i) - ixgbe_release_rx_desc(rx_ring, i); -} - -#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT -/** - * ixgbe_merge_active_tail - merge active tail into lro skb - * @tail: pointer to active tail in frag_list - * - * This function merges the length and data of an active tail into the - * skb containing the frag_list. It resets the tail's pointer to the head, - * but it leaves the heads pointer to tail intact. - **/ -static inline struct sk_buff *ixgbe_merge_active_tail(struct sk_buff *tail) -{ - struct sk_buff *head = IXGBE_CB(tail)->head; - - if (!head) - return tail; - - head->len += tail->len; - head->data_len += tail->len; - head->truesize += tail->truesize; - - IXGBE_CB(tail)->head = NULL; - - return head; -} - -/** - * ixgbe_add_active_tail - adds an active tail into the skb frag_list - * @head: pointer to the start of the skb - * @tail: pointer to active tail to add to frag_list - * - * This function adds an active tail to the end of the frag list. This tail - * will still be receiving data so we cannot yet ad it's stats to the main - * skb. That is done via ixgbe_merge_active_tail. 
- **/ -static inline void ixgbe_add_active_tail(struct sk_buff *head, - struct sk_buff *tail) -{ - struct sk_buff *old_tail = IXGBE_CB(head)->tail; - - if (old_tail) { - ixgbe_merge_active_tail(old_tail); - old_tail->next = tail; - } else { - skb_shinfo(head)->frag_list = tail; - } - - IXGBE_CB(tail)->head = head; - IXGBE_CB(head)->tail = tail; -} - -/** - * ixgbe_close_active_frag_list - cleanup pointers on a frag_list skb - * @head: pointer to head of an active frag list - * - * This function will clear the frag_tail_tracker pointer on an active - * frag_list and returns true if the pointer was actually set - **/ -static inline bool ixgbe_close_active_frag_list(struct sk_buff *head) -{ - struct sk_buff *tail = IXGBE_CB(head)->tail; - - if (!tail) - return false; - - ixgbe_merge_active_tail(tail); - - IXGBE_CB(head)->tail = NULL; - - return true; -} - -#endif -#ifdef HAVE_VLAN_RX_REGISTER -/** - * ixgbe_receive_skb - Send a completed packet up the stack - * @q_vector: structure containing interrupt and ring information - * @skb: packet to send up - **/ -static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb) -{ - u16 vlan_tag = IXGBE_CB(skb)->vid; - -#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) - if (vlan_tag & VLAN_VID_MASK) { - /* by placing vlgrp at start of structure we can alias it */ - struct vlan_group **vlgrp = netdev_priv(skb->dev); - if (!*vlgrp) - dev_kfree_skb_any(skb); - else if (q_vector->netpoll_rx) - vlan_hwaccel_rx(skb, *vlgrp, vlan_tag); - else - vlan_gro_receive(&q_vector->napi, - *vlgrp, vlan_tag, skb); - } else { -#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ - if (q_vector->netpoll_rx) - netif_rx(skb); - else - napi_gro_receive(&q_vector->napi, skb); -#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) - } -#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ -} - -#endif /* HAVE_VLAN_RX_REGISTER */ -#ifdef NETIF_F_GSO -static void 
ixgbe_set_rsc_gso_size(struct ixgbe_ring __maybe_unused *ring, - struct sk_buff *skb) -{ - u16 hdr_len = eth_get_headlen(skb->data, skb_headlen(skb)); - - /* set gso_size to avoid messing up TCP MSS */ - skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), - IXGBE_CB(skb)->append_cnt); - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; -} - -#endif /* NETIF_F_GSO */ -static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, - struct sk_buff *skb) -{ - /* if append_cnt is 0 then frame is not RSC */ - if (!IXGBE_CB(skb)->append_cnt) - return; - - rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; - rx_ring->rx_stats.rsc_flush++; - -#ifdef NETIF_F_GSO - ixgbe_set_rsc_gso_size(rx_ring, skb); - -#endif - /* gso_size is computed using append_cnt so always clear it last */ - IXGBE_CB(skb)->append_cnt = 0; -} - -static void ixgbe_rx_vlan(struct ixgbe_ring *ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ -#ifdef NETIF_F_HW_VLAN_CTAG_RX - if ((netdev_ring(ring)->features & NETIF_F_HW_VLAN_CTAG_RX) && -#else - if ((netdev_ring(ring)->features & NETIF_F_HW_VLAN_RX) && -#endif - ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) -#ifndef HAVE_VLAN_RX_REGISTER - __vlan_hwaccel_put_tag(skb, - htons(ETH_P_8021Q), - le16_to_cpu(rx_desc->wb.upper.vlan)); -#else - IXGBE_CB(skb)->vid = le16_to_cpu(rx_desc->wb.upper.vlan); - else - IXGBE_CB(skb)->vid = 0; -#endif -} - -/** - * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor - * @rx_ring: rx descriptor ring packet is being transacted on - * @rx_desc: pointer to the EOP Rx descriptor - * @skb: pointer to current skb being populated - * - * This function checks the ring, descriptor, and packet information in - * order to populate the hash, checksum, VLAN, timestamp, protocol, and - * other fields within the skb. 
- **/ -static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ -#ifdef HAVE_PTP_1588_CLOCK - u32 flags = rx_ring->q_vector->adapter->flags; - -#endif - ixgbe_update_rsc_stats(rx_ring, skb); - -#ifdef NETIF_F_RXHASH - ixgbe_rx_hash(rx_ring, rx_desc, skb); - -#endif /* NETIF_F_RXHASH */ - ixgbe_rx_checksum(rx_ring, rx_desc, skb); -#ifdef HAVE_PTP_1588_CLOCK - if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED)) - ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); - -#endif - ixgbe_rx_vlan(rx_ring, rx_desc, skb); - - skb_record_rx_queue(skb, ring_queue_index(rx_ring)); - - skb->protocol = eth_type_trans(skb, netdev_ring(rx_ring)); -} - -static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ -#ifdef HAVE_NDO_BUSY_POLL - skb_mark_napi_id(skb, &q_vector->napi); - - if (ixgbe_qv_busy_polling(q_vector) || q_vector->netpoll_rx) { - netif_receive_skb(skb); - /* exit early if we busy polled */ - return; - } -#endif - -#ifdef HAVE_VLAN_RX_REGISTER - ixgbe_receive_skb(q_vector, skb); -#else - napi_gro_receive(&q_vector->napi, skb); -#endif -#ifndef NETIF_F_GRO - - netdev_ring(rx_ring)->last_rx = jiffies; -#endif -} - -/** - * ixgbe_is_non_eop - process handling of non-EOP buffers - * @rx_ring: Rx ring being processed - * @rx_desc: Rx descriptor for current buffer - * @skb: Current socket buffer containing buffer in progress - * - * This function updates next to clean. If the buffer is an EOP buffer - * this function exits returning false, otherwise it will place the - * sk_buff in the next buffer to be chained and return true indicating - * that this is in fact a non-EOP buffer. 
- **/ -static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ -#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - struct sk_buff *next_skb; -#endif - u32 ntc = rx_ring->next_to_clean + 1; - - /* fetch, update, and store next to clean */ - ntc = (ntc < rx_ring->count) ? ntc : 0; - rx_ring->next_to_clean = ntc; - - prefetch(IXGBE_RX_DESC(rx_ring, ntc)); - - /* update RSC append count if present */ - if (ring_is_rsc_enabled(rx_ring)) { - __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & - cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK); - - if (unlikely(rsc_enabled)) { - u32 rsc_cnt = le32_to_cpu(rsc_enabled); - - rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT; - IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; - - /* update ntc based on RSC value */ - ntc = le32_to_cpu(rx_desc->wb.upper.status_error); - ntc &= IXGBE_RXDADV_NEXTP_MASK; - ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; - } - } - - /* if we are the last buffer then there is nothing else to do */ - if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) - return false; - - /* place skb in next buffer to be received */ -#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - next_skb = rx_ring->rx_buffer_info[ntc].skb; - - ixgbe_add_active_tail(skb, next_skb); - IXGBE_CB(next_skb)->head = skb; -#else - rx_ring->rx_buffer_info[ntc].skb = skb; -#endif - rx_ring->rx_stats.non_eop_descs++; - - return true; -} - -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT -/** - * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail - * @skb: pointer to current skb being adjusted - * - * This function is an ixgbe specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. 
- */ -static void ixgbe_pull_tail(struct sk_buff *skb) -{ - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; - unsigned int pull_len; - - /* - * it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* - * we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; -} - -/** - * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB - * @rx_ring: rx descriptor ring packet is being transacted on - * @skb: pointer to current skb being updated - * - * This function provides a basic DMA sync up for the first fragment of an - * skb. The reason for doing this is that the first fragment cannot be - * unmapped until we have reached the end of packet descriptor for a buffer - * chain. 
- */ -static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, - struct sk_buff *skb) -{ - /* if the page was released unmap it, else just sync our portion */ - if (unlikely(IXGBE_CB(skb)->page_released)) { - dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IXGBE_RX_DMA_ATTR); - } else { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - - dma_sync_single_range_for_cpu(rx_ring->dev, - IXGBE_CB(skb)->dma, - frag->page_offset, - skb_frag_size(frag), - DMA_FROM_DEVICE); - } -} - -/** - * ixgbe_cleanup_headers - Correct corrupted or empty headers - * @rx_ring: rx descriptor ring packet is being transacted on - * @rx_desc: pointer to the EOP Rx descriptor - * @skb: pointer to current skb being fixed - * - * Check for corrupted packet headers caused by senders on the local L2 - * embedded NIC switch not setting up their Tx Descriptors right. These - * should be very rare. - * - * Also address the case where we are pulling data in on pages only - * and as such no data is present in the skb header. - * - * In addition if skb is not at least 60 bytes we need to pad it so that - * it is large enough to qualify as a valid Ethernet frame. - * - * Returns true if an error was encountered and skb was freed. 
- **/ -static bool ixgbe_cleanup_headers(struct ixgbe_ring __maybe_unused *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - /* verify that the packet does not have any known errors */ - if (unlikely(ixgbe_test_staterr(rx_desc, - IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { - dev_kfree_skb_any(skb); - return true; - } - - /* place header in linear portion of buffer */ - if (!skb_headlen(skb)) - ixgbe_pull_tail(skb); - -#if IS_ENABLED(CONFIG_FCOE) - /* do not attempt to pad FCoE Frames as this will disrupt DDP */ - if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) - return false; -#endif - - /* if eth_skb_pad returns an error the skb was freed */ - if (eth_skb_pad(skb)) - return true; - - return false; -} - -/** - * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring - * @rx_ring: rx descriptor ring to store buffers on - * @old_buff: donor buffer to have page reused - * - * Synchronizes page for reuse by the adapter - **/ -static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, - struct ixgbe_rx_buffer *old_buff) -{ - struct ixgbe_rx_buffer *new_buff; - u16 nta = rx_ring->next_to_alloc; - - new_buff = &rx_ring->rx_buffer_info[nta]; - - /* update, and store next to alloc */ - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - /* Transfer page from old buffer to new buffer. - * Move each member individually to avoid possible store - * forwarding stalls and unnecessary copy of skb. 
- */ - new_buff->dma = old_buff->dma; - new_buff->page = old_buff->page; - new_buff->page_offset = old_buff->page_offset; - new_buff->pagecnt_bias = old_buff->pagecnt_bias; -} - -static inline bool ixgbe_page_is_reserved(struct page *page) -{ - return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); -} - -static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) -{ - unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; - struct page *page = rx_buffer->page; - - /* avoid re-using remote pages */ - if (unlikely(ixgbe_page_is_reserved(page))) - return false; - -#if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ -#ifdef HAVE_PAGE_COUNT_BULK_UPDATE - if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) -#else - if (unlikely((page_count(page) - pagecnt_bias) > 1)) -#endif - return false; -#else - /* The last offset is a bit aggressive in that we assume the - * worst case of FCoE being enabled and using a 3K buffer. - * However this should have minimal impact as the 1K extra is - * still less than one buffer in size. - */ -#define IXGBE_LAST_OFFSET \ - (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K) - if (rx_buffer->page_offset > IXGBE_LAST_OFFSET) - return false; -#endif - -#ifdef HAVE_PAGE_COUNT_BULK_UPDATE - /* If we have drained the page fragment pool we need to update - * the pagecnt_bias and page count so that we fully restock the - * number of references the driver holds. - */ - if (unlikely(!pagecnt_bias)) { - page_ref_add(page, USHRT_MAX); - rx_buffer->pagecnt_bias = USHRT_MAX; - } -#else - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. 
- */ - if (likely(!pagecnt_bias)) { - page_ref_inc(page); - rx_buffer->pagecnt_bias = 1; - } -#endif - - return true; -} - -/** - * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff - * @rx_ring: rx descriptor ring to transact packets on - * @rx_buffer: buffer containing page to add - * @rx_desc: descriptor containing length of buffer written by hardware - * @skb: sk_buff to place the data into - * - * This function will add the data contained in rx_buffer->page to the skb. - * This is done either through a direct copy if the data in the buffer is - * less than the skb header size, otherwise it will just attach the page as - * a frag to the skb. - * - * The function will then update the page offset if necessary and return - * true if the buffer can be reused by the adapter. - **/ -static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, - struct ixgbe_rx_buffer *rx_buffer, - struct sk_buff *skb, - unsigned int size) -{ -#if (PAGE_SIZE < 8192) - unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; -#else - unsigned int truesize = ring_uses_build_skb(rx_ring) ? - SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) : - SKB_DATA_ALIGN(size); -#endif - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, - rx_buffer->page_offset, size, truesize); - -#if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; -#else - rx_buffer->page_offset += truesize; -#endif -} - -static struct ixgbe_rx_buffer * -ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, struct sk_buff **skb, - const unsigned int size) -{ - struct ixgbe_rx_buffer *rx_buffer; - - rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; - prefetchw(rx_buffer->page); - *skb = rx_buffer->skb; - - /* Delay unmapping of the first packet. It carries the header - * information, HW may still access the header after the writeback. 
- * Only unmap it when EOP is reached - */ - if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) { - if (!*skb) - goto skip_sync; - } else { - if (*skb) - ixgbe_dma_sync_frag(rx_ring, *skb); - } - - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - size, - DMA_FROM_DEVICE); -skip_sync: - rx_buffer->pagecnt_bias--; - - return rx_buffer; -} - -static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, - struct ixgbe_rx_buffer *rx_buffer, - struct sk_buff *skb) -{ - if (ixgbe_can_reuse_rx_page(rx_buffer)) { - /* hand second half of page back to the ring */ - ixgbe_reuse_rx_page(rx_ring, rx_buffer); - } else { - if (IXGBE_CB(skb)->dma == rx_buffer->dma) { - /* the page has been released from the ring */ - IXGBE_CB(skb)->page_released = true; - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IXGBE_RX_DMA_ATTR); - } - __page_frag_cache_drain(rx_buffer->page, - rx_buffer->pagecnt_bias); - } - - /* clear contents of rx_buffer */ - rx_buffer->page = NULL; - rx_buffer->skb = NULL; -} - -static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, - struct ixgbe_rx_buffer *rx_buffer, - union ixgbe_adv_rx_desc *rx_desc, - unsigned int size) -{ - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; -#if (PAGE_SIZE < 8192) - unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; -#else - unsigned int truesize = SKB_DATA_ALIGN(size); -#endif - struct sk_buff *skb; - - /* prefetch first cache line of first page */ - prefetch(va); -#if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); -#endif - - /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE); - if (unlikely(!skb)) - return NULL; - - if (size > IXGBE_RX_HDR_SIZE) { - if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) - 
IXGBE_CB(skb)->dma = rx_buffer->dma; - - skb_add_rx_frag(skb, 0, rx_buffer->page, - rx_buffer->page_offset, - size, truesize); -#if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; -#else - rx_buffer->page_offset += truesize; -#endif - } else { - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - rx_buffer->pagecnt_bias++; - } - - return skb; -} - -#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC -static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, - struct ixgbe_rx_buffer *rx_buffer, - union ixgbe_adv_rx_desc *rx_desc, - unsigned int size) -{ - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; -#if (PAGE_SIZE < 8192) - unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; -#else - unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(IXGBE_SKB_PAD + size); -#endif - struct sk_buff *skb; - - /* prefetch first cache line of first page */ - prefetch(va); -#if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); -#endif - - /* build an skb around the page buffer */ - skb = build_skb(va - IXGBE_SKB_PAD, truesize); - if (unlikely(!skb)) - return NULL; - - /* update pointers within the skb to store the data */ - skb_reserve(skb, IXGBE_SKB_PAD); - __skb_put(skb, size); - - /* record DMA address if this is the start of a chain of buffers */ - if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) - IXGBE_CB(skb)->dma = rx_buffer->dma; - - /* update buffer offset */ -#if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; -#else - rx_buffer->page_offset += truesize; -#endif - - return skb; -} - -#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ -/** - * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf - * @q_vector: structure containing interrupt and ring information - * @rx_ring: rx descriptor ring to transact packets on - * @budget: Total limit on number of packets to process - * - * This function provides a "bounce buffer" approach to Rx interrupt - * processing. 
The advantage to this is that on systems that have - * expensive overhead for IOMMU access this provides a means of avoiding - * it by maintaining the mapping of the page to the syste. - * - * Returns amount of work completed. - **/ -static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring *rx_ring, - int budget) -{ - unsigned int total_rx_bytes = 0, total_rx_packets = 0; -#if IS_ENABLED(CONFIG_FCOE) - int ddp_bytes; - unsigned int mss = 0; -#endif /* CONFIG_FCOE */ - u16 cleaned_count = ixgbe_desc_unused(rx_ring); - - while (likely(total_rx_packets < budget)) { - union ixgbe_adv_rx_desc *rx_desc; - struct ixgbe_rx_buffer *rx_buffer; - struct sk_buff *skb; - unsigned int size; - - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { - ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); - cleaned_count = 0; - } - - rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); - size = le16_to_cpu(rx_desc->wb.upper.length); - if (!size) - break; - - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * descriptor has been written back - */ - dma_rmb(); - - rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size); - - /* retrieve a buffer from the ring */ - if (skb) - ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); -#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC - else if (ring_uses_build_skb(rx_ring)) - skb = ixgbe_build_skb(rx_ring, rx_buffer, - rx_desc, size); -#endif - else - skb = ixgbe_construct_skb(rx_ring, rx_buffer, rx_desc, - size); - - /* exit if we failed to retrieve a buffer */ - if (!skb) { - rx_ring->rx_stats.alloc_rx_buff_failed++; - rx_buffer->pagecnt_bias++; - break; - } - - ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb); - cleaned_count++; - - /* place incomplete frames back on ring for completion */ - if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) - continue; - - /* verify the packet layout is correct */ - if 
(ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) - continue; - - /* probably a little skewed due to removing CRC */ - total_rx_bytes += skb->len; - - /* populate checksum, timestamp, VLAN, and protocol */ - ixgbe_process_skb_fields(rx_ring, rx_desc, skb); - -#if IS_ENABLED(CONFIG_FCOE) - /* if ddp, not passing to ULD unless for FCP_RSP or error */ - if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { - ddp_bytes = ixgbe_fcoe_ddp(q_vector->adapter, - rx_desc, skb); - /* include DDPed FCoE data */ - if (ddp_bytes > 0) { - if (!mss) { - mss = netdev_ring(rx_ring)->mtu - - sizeof(struct fcoe_hdr) - - sizeof(struct fc_frame_header) - - sizeof(struct fcoe_crc_eof); - if (mss > 512) - mss &= ~511; - } - total_rx_bytes += ddp_bytes; - total_rx_packets += DIV_ROUND_UP(ddp_bytes, - mss); - } - if (!ddp_bytes) { - dev_kfree_skb_any(skb); -#ifndef NETIF_F_GRO - netdev_ring(rx_ring)->last_rx = jiffies; -#endif - continue; - } - } -#endif /* CONFIG_FCOE */ - - ixgbe_rx_skb(q_vector, rx_ring, rx_desc, skb); - - /* update budget accounting */ - total_rx_packets++; - } - - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->stats.packets += total_rx_packets; - rx_ring->stats.bytes += total_rx_bytes; - u64_stats_update_end(&rx_ring->syncp); - q_vector->rx.total_packets += total_rx_packets; - q_vector->rx.total_bytes += total_rx_bytes; - - return total_rx_packets; -} - -#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ -/** - * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - legacy - * @q_vector: structure containing interrupt and ring information - * @rx_ring: rx descriptor ring to transact packets on - * @budget: Total limit on number of packets to process - * - * This function provides a legacy approach to Rx interrupt - * handling. This version will perform better on systems with a low cost - * dma mapping API. - * - * Returns amount of work completed. 
- **/ -static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring *rx_ring, - int budget) -{ - unsigned int total_rx_bytes = 0, total_rx_packets = 0; -#if IS_ENABLED(CONFIG_FCOE) - int ddp_bytes; - unsigned int mss = 0; -#endif /* CONFIG_FCOE */ - u16 len = 0; - u16 cleaned_count = ixgbe_desc_unused(rx_ring); - - while (likely(total_rx_packets < budget)) { - struct ixgbe_rx_buffer *rx_buffer; - union ixgbe_adv_rx_desc *rx_desc; - struct sk_buff *skb; - u16 ntc; - - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { - ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); - cleaned_count = 0; - } - - ntc = rx_ring->next_to_clean; - rx_desc = IXGBE_RX_DESC(rx_ring, ntc); - rx_buffer = &rx_ring->rx_buffer_info[ntc]; - - if (!rx_desc->wb.upper.length) - break; - - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * descriptor has been written back - */ - dma_rmb(); - - skb = rx_buffer->skb; - - prefetch(skb->data); - - len = le16_to_cpu(rx_desc->wb.upper.length); - /* pull the header of the skb in */ - __skb_put(skb, len); - - /* - * Delay unmapping of the first packet. It carries the - * header information, HW may still access the header after - * the writeback. 
Only unmap it when EOP is reached - */ - if (!IXGBE_CB(skb)->head) { - IXGBE_CB(skb)->dma = rx_buffer->dma; - } else { - skb = ixgbe_merge_active_tail(skb); - dma_unmap_single(rx_ring->dev, - rx_buffer->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - } - - /* clear skb reference in buffer info structure */ - rx_buffer->skb = NULL; - rx_buffer->dma = 0; - - cleaned_count++; - - if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) - continue; - - dma_unmap_single(rx_ring->dev, - IXGBE_CB(skb)->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - - IXGBE_CB(skb)->dma = 0; - - if (ixgbe_close_active_frag_list(skb) && - !IXGBE_CB(skb)->append_cnt) { - /* if we got here without RSC the packet is invalid */ - dev_kfree_skb_any(skb); - continue; - } - - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(ixgbe_test_staterr(rx_desc, - IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { - dev_kfree_skb_any(skb); - continue; - } - - /* probably a little skewed due to removing CRC */ - total_rx_bytes += skb->len; - - /* populate checksum, timestamp, VLAN, and protocol */ - ixgbe_process_skb_fields(rx_ring, rx_desc, skb); - -#if IS_ENABLED(CONFIG_FCOE) - /* if ddp, not passing to ULD unless for FCP_RSP or error */ - if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { - ddp_bytes = ixgbe_fcoe_ddp(q_vector->adapter, - rx_desc, skb); - /* include DDPed FCoE data */ - if (ddp_bytes > 0) { - if (!mss) { - mss = netdev_ring(rx_ring)->mtu - - sizeof(struct fcoe_hdr) - - sizeof(struct fc_frame_header) - - sizeof(struct fcoe_crc_eof); - if (mss > 512) - mss &= ~511; - } - total_rx_bytes += ddp_bytes; - total_rx_packets += DIV_ROUND_UP(ddp_bytes, - mss); - } - if (!ddp_bytes) { - dev_kfree_skb_any(skb); -#ifndef NETIF_F_GRO - netdev_ring(rx_ring)->last_rx = jiffies; -#endif - continue; - } - } - -#endif /* CONFIG_FCOE */ - ixgbe_rx_skb(q_vector, rx_ring, rx_desc, skb); - - /* update budget accounting */ - total_rx_packets++; - } - - rx_ring->stats.packets += total_rx_packets; - rx_ring->stats.bytes += 
total_rx_bytes; - q_vector->rx.total_packets += total_rx_packets; - q_vector->rx.total_bytes += total_rx_bytes; - - if (cleaned_count) - ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); - - return total_rx_packets; -} - -#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ -#ifdef HAVE_NDO_BUSY_POLL -/* must be called with local_bh_disable()d */ -static int ixgbe_busy_poll_recv(struct napi_struct *napi) -{ - struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); - struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *ring; - int found = 0; - - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return LL_FLUSH_FAILED; - - if (!ixgbe_qv_lock_poll(q_vector)) - return LL_FLUSH_BUSY; - - ixgbe_for_each_ring(ring, q_vector->rx) { - found = ixgbe_clean_rx_irq(q_vector, ring, 4); -#ifdef BP_EXTENDED_STATS - if (found) - ring->stats.cleaned += found; - else - ring->stats.misses++; -#endif - if (found) - break; - } - - ixgbe_qv_unlock_poll(q_vector); - - return found; -} - -#endif /* HAVE_NDO_BUSY_POLL */ -/** - * ixgbe_configure_msix - Configure MSI-X hardware - * @adapter: board private structure - * - * ixgbe_configure_msix sets up the hardware to properly generate MSI-X - * interrupts. - **/ -static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) -{ - int v_idx; - u32 mask; - - /* Populate MSIX to EITR Select */ - if (adapter->num_vfs >= 32) { - u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); - } - - /* - * Populate the IVAR table and set the ITR values to the - * corresponding register. 
- */ - for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { - struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; - struct ixgbe_ring *ring; - - ixgbe_for_each_ring(ring, q_vector->rx) - ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); - - ixgbe_for_each_ring(ring, q_vector->tx) - ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); - - ixgbe_write_eitr(q_vector); - } - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, - v_idx); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - ixgbe_set_ivar(adapter, -1, 1, v_idx); - break; - default: - break; - } - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); - - /* set up to autoclear timer, and the vectors */ - mask = IXGBE_EIMS_ENABLE_MASK; - mask &= ~(IXGBE_EIMS_OTHER | - IXGBE_EIMS_MAILBOX | - IXGBE_EIMS_LSC); - - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); -} - -enum latency_range { - lowest_latency = 0, - low_latency = 1, - bulk_latency = 2, - latency_invalid = 255 -}; - -/** - * ixgbe_update_itr - update the dynamic ITR value based on statistics - * @q_vector: structure containing interrupt and ring information - * @ring_container: structure containing ring performance data - * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. 
- * this functionality is controlled by the InterruptThrottleRate module - * parameter (see ixgbe_param.c) - **/ -static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring_container *ring_container) -{ - int bytes = ring_container->total_bytes; - int packets = ring_container->total_packets; - u32 timepassed_us; - u64 bytes_perint; - u8 itr_setting = ring_container->itr; - - if (packets == 0) - return; - - /* simple throttlerate management - * 0-10MB/s lowest (100000 ints/s) - * 10-20MB/s low (20000 ints/s) - * 20-1249MB/s bulk (12000 ints/s) - */ - /* what was last interrupt timeslice? */ - timepassed_us = q_vector->itr >> 2; - if (timepassed_us == 0) - return; - bytes_perint = bytes / timepassed_us; /* bytes/usec */ - - switch (itr_setting) { - case lowest_latency: - if (bytes_perint > 10) { - itr_setting = low_latency; - } - break; - case low_latency: - if (bytes_perint > 20) { - itr_setting = bulk_latency; - } else if (bytes_perint <= 10) { - itr_setting = lowest_latency; - } - break; - case bulk_latency: - if (bytes_perint <= 20) { - itr_setting = low_latency; - } - break; - } - - /* clear work counters since we have the values we need */ - ring_container->total_bytes = 0; - ring_container->total_packets = 0; - - /* write updated itr to ring container */ - ring_container->itr = itr_setting; -} - -/** - * ixgbe_write_eitr - write EITR register in hardware specific way - * @q_vector: structure containing interrupt and ring information - * - * This function is made to be called by ethtool and by the driver - * when it needs to update EITR registers at runtime. Hardware - * specific quirks/differences are taken care of here. 
- */ -void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) -{ - struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_hw *hw = &adapter->hw; - int v_idx = q_vector->v_idx; - u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - /* must write high and low 16 bits to reset counter */ - itr_reg |= (itr_reg << 16); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - /* - * set the WDIS bit to not clear the timer bits and cause an - * immediate assertion of the interrupt - */ - itr_reg |= IXGBE_EITR_CNT_WDIS; - break; - default: - break; - } - IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); -} - -static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) -{ - u32 new_itr = q_vector->itr; - u8 current_itr; - - ixgbe_update_itr(q_vector, &q_vector->tx); - ixgbe_update_itr(q_vector, &q_vector->rx); - - current_itr = max(q_vector->rx.itr, q_vector->tx.itr); - - switch (current_itr) { - /* counts and packets in update_itr are dependent on these numbers */ - case lowest_latency: - new_itr = IXGBE_100K_ITR; - break; - case low_latency: - new_itr = IXGBE_20K_ITR; - break; - case bulk_latency: - new_itr = IXGBE_12K_ITR; - break; - default: - break; - } - - if (new_itr != q_vector->itr) { - /* do an exponential smoothing */ - new_itr = (10 * new_itr * q_vector->itr) / - ((9 * new_itr) + q_vector->itr); - - /* save the algorithm value here */ - q_vector->itr = new_itr; - - ixgbe_write_eitr(q_vector); - } -} - -/** - * ixgbe_check_overtemp_subtask - check for over temperature - * @adapter: pointer to adapter - **/ -static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 eicr = adapter->interrupt_event; - s32 rc; - - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return; - - if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) - return; - - adapter->flags2 &= 
~IXGBE_FLAG2_TEMP_SENSOR_EVENT; - - switch (hw->device_id) { - case IXGBE_DEV_ID_82599_T3_LOM: - /* - * Since the warning interrupt is for both ports - * we don't have to check if: - * - This interrupt wasn't for our port. - * - We may have missed the interrupt so always have to - * check if we got a LSC - */ - if (!(eicr & IXGBE_EICR_GPI_SDP0) && - !(eicr & IXGBE_EICR_LSC)) - return; - - if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { - u32 speed; - bool link_up = false; - - hw->mac.ops.check_link(hw, &speed, &link_up, false); - - if (link_up) - return; - } - - /* Check if this is not due to overtemp */ - if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) - return; - - break; - case IXGBE_DEV_ID_X550EM_A_1G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T_L: - rc = hw->phy.ops.check_overtemp(hw); - if (rc != IXGBE_ERR_OVERTEMP) - return; - break; - default: - if (adapter->hw.mac.type >= ixgbe_mac_X540) - return; - if (!(eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw))) - return; - break; - } - e_crit(drv, "%s\n", ixgbe_overheat_msg); - - adapter->interrupt_event = 0; -} - -static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) -{ - struct ixgbe_hw *hw = &adapter->hw; - - if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && - (eicr & IXGBE_EICR_GPI_SDP1)) { - e_crit(probe, "Fan has stopped, replace the adapter\n"); - /* write to clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); - } -} - -static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) -{ - if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) - return; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - /* - * Need to check link state so complete overtemp check - * on service task - */ - if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) && - (!test_bit(__IXGBE_DOWN, &adapter->state))) { - adapter->interrupt_event = eicr; - adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; - 
ixgbe_service_event_schedule(adapter); - return; - } - return; - case ixgbe_mac_X550EM_a: - if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) { - adapter->interrupt_event = eicr; - adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; - ixgbe_service_event_schedule(adapter); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, - IXGBE_EICR_GPI_SDP0_X550EM_a); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR, - IXGBE_EICR_GPI_SDP0_X550EM_a); - } - return; - case ixgbe_mac_X550: - case ixgbe_mac_X540: - if (!(eicr & IXGBE_EICR_TS)) - return; - break; - default: - return; - } - - e_crit(drv, "%s\n", ixgbe_overheat_msg); -} - -static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); - - if (!ixgbe_is_sfp(hw)) - return; - if (hw->mac.type >= ixgbe_mac_X540) - eicr_mask = IXGBE_EICR_GPI_SDP0_X540; - - if (eicr & eicr_mask) { - /* Clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); - if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; - adapter->sfp_poll_time = 0; - ixgbe_service_event_schedule(adapter); - } - } - - if (adapter->hw.mac.type == ixgbe_mac_82599EB && - (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { - /* Clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); - if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; - ixgbe_service_event_schedule(adapter); - } - } -} - -static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - - adapter->lsc_int++; - adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; - adapter->link_check_timeout = jiffies; - if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); - IXGBE_WRITE_FLUSH(hw); - ixgbe_service_event_schedule(adapter); - } -} - -static void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, u64 qmask) -{ - u32 mask; - 
struct ixgbe_hw *hw = &adapter->hw; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - mask = (IXGBE_EIMS_RTX_QUEUE & qmask); - IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - mask = (qmask & 0xFFFFFFFF); - if (mask) - IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); - mask = (qmask >> 32); - if (mask) - IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); - break; - default: - break; - } - /* skip the flush */ -} - -/** - * ixgbe_irq_enable - Enable default interrupt generation settings - * @adapter: board private structure - **/ -static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, - bool flush) -{ - u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); - - /* don't reenable LSC while waiting for link */ - if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) - mask &= ~IXGBE_EIMS_LSC; - - if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X550EM_a: - mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(&adapter->hw); - break; - case ixgbe_mac_X540: - case ixgbe_mac_X550: - mask |= IXGBE_EIMS_TS; - break; - default: - break; - } - if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) - mask |= IXGBE_EIMS_GPI_SDP1; - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - mask |= IXGBE_EIMS_GPI_SDP1; - mask |= IXGBE_EIMS_GPI_SDP2; - /* fall through */ - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || - adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP || - adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) - mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(&adapter->hw); - if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) - mask |= IXGBE_EICR_GPI_SDP0_X540; - mask |= IXGBE_EIMS_ECC; - mask |= IXGBE_EIMS_MAILBOX; -#ifdef HAVE_PTP_1588_CLOCK - mask |= 
IXGBE_EIMS_TIMESYNC; -#endif - - break; - default: - break; - } - - if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && - !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) - mask |= IXGBE_EIMS_FLOW_DIR; - - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); - if (queues) - ixgbe_irq_enable_queues(adapter, ~0); - if (flush) - IXGBE_WRITE_FLUSH(&adapter->hw); -} - -static irqreturn_t ixgbe_msix_other(int __always_unused irq, void *data) -{ - struct ixgbe_adapter *adapter = data; - struct ixgbe_hw *hw = &adapter->hw; - u32 eicr; - - /* - * Workaround for Silicon errata #26 on 82598. Use clear-by-write - * instead of clear-by-read. Reading with EICS will return the - * interrupt causes without clearing, which later be done - * with the write to EICR. - */ - eicr = IXGBE_READ_REG(hw, IXGBE_EICS); - - /* The lower 16bits of the EICR register are for the queue interrupts - * which should be masked here in order to not accidently clear them if - * the bits are high when ixgbe_msix_other is called. 
There is a race - * condition otherwise which results in possible performance loss - * especially if the ixgbe_msix_other interrupt is triggering - * consistently (as it would when PPS is turned on for the X540 device) - */ - eicr &= 0xFFFF0000; - - IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); - - if (eicr & IXGBE_EICR_LSC) - ixgbe_check_lsc(adapter); - - if (eicr & IXGBE_EICR_MAILBOX) - ixgbe_msg_task(adapter); - - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - if (hw->phy.type == ixgbe_phy_x550em_ext_t && - (eicr & IXGBE_EICR_GPI_SDP0_X540)) { - adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; - ixgbe_service_event_schedule(adapter); - IXGBE_WRITE_REG(hw, IXGBE_EICR, - IXGBE_EICR_GPI_SDP0_X540); - } - if (eicr & IXGBE_EICR_ECC) { - e_info(link, "Received unrecoverable ECC Err," - "initiating reset.\n"); - set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); - ixgbe_service_event_schedule(adapter); - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); - } -#ifdef HAVE_TX_MQ - /* Handle Flow Director Full threshold interrupt */ - if (eicr & IXGBE_EICR_FLOW_DIR) { - int reinit_count = 0; - int i; - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *ring = adapter->tx_ring[i]; - if (test_and_clear_bit( - __IXGBE_TX_FDIR_INIT_DONE, - &ring->state)) - reinit_count++; - } - if (reinit_count) { - /* no more flow director interrupts until - * after init - */ - IXGBE_WRITE_REG(hw, IXGBE_EIMC, - IXGBE_EIMC_FLOW_DIR); - adapter->flags2 |= - IXGBE_FLAG2_FDIR_REQUIRES_REINIT; - ixgbe_service_event_schedule(adapter); - } - } -#endif - ixgbe_check_sfp_event(adapter, eicr); - ixgbe_check_overtemp_event(adapter, eicr); - break; - default: - break; - } - - ixgbe_check_fan_failure(adapter, eicr); - -#ifdef HAVE_PTP_1588_CLOCK - if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) - ixgbe_ptp_check_pps_event(adapter); -#endif - - /* re-enable the original interrupt state, no lsc, no 
queues */ - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable(adapter, false, false); - - return IRQ_HANDLED; -} - -static irqreturn_t ixgbe_msix_clean_rings(int __always_unused irq, void *data) -{ - struct ixgbe_q_vector *q_vector = data; - - /* EIAM disabled interrupts (on this vector) for us */ - - if (q_vector->rx.ring || q_vector->tx.ring) - napi_schedule_irqoff(&q_vector->napi); - - return IRQ_HANDLED; -} - -/** - * ixgbe_poll - NAPI polling RX/TX cleanup routine - * @napi: napi struct with our devices info in it - * @budget: amount of work driver is allowed to do this pass, in packets - * - * This function will clean all queues associated with a q_vector. - **/ -int ixgbe_poll(struct napi_struct *napi, int budget) -{ - struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); - struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *ring; - int per_ring_budget, work_done = 0; - bool clean_complete = true; - -#if IS_ENABLED(CONFIG_DCA) - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - ixgbe_update_dca(q_vector); -#endif /* CONFIG_DCA */ - - ixgbe_for_each_ring(ring, q_vector->tx) - clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); - -#ifdef HAVE_NDO_BUSY_POLL - if (test_bit(NAPI_STATE_NPSVC, &napi->state)) - return budget; - - /* Exit if we are called by netpoll or busy polling is active */ - if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) - return budget; -#else - /* Exit if we are called by netpoll */ - if (budget <= 0) - return budget; -#endif - - /* attempt to distribute budget to each queue fairly, but don't allow - * the budget to go below 1 because we'll exit polling */ - if (q_vector->rx.count > 1) - per_ring_budget = max(budget/q_vector->rx.count, 1); - else - per_ring_budget = budget; - - ixgbe_for_each_ring(ring, q_vector->rx) { - int cleaned = ixgbe_clean_rx_irq(q_vector, ring, - per_ring_budget); - work_done += cleaned; - clean_complete &= (cleaned < per_ring_budget); - } - -#ifdef 
HAVE_NDO_BUSY_POLL - ixgbe_qv_unlock_napi(q_vector); -#endif - -#ifndef HAVE_NETDEV_NAPI_LIST - if (!netif_running(adapter->netdev)) - clean_complete = true; - -#endif - /* If all work not completed, return budget and keep polling */ - if (!clean_complete) - return budget; - - /* all work done, exit the polling mode */ - napi_complete_done(napi, work_done); - if (adapter->rx_itr_setting == 1) - ixgbe_set_itr(q_vector); - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); - - return min(work_done, budget - 1); -} - -/** - * ixgbe_request_msix_irqs - Initialize MSI-X interrupts - * @adapter: board private structure - * - * ixgbe_request_msix_irqs allocates MSI-X vectors and requests - * interrupts from the kernel. - **/ -static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - unsigned int ri = 0, ti = 0; - int vector, err; - - for (vector = 0; vector < adapter->num_q_vectors; vector++) { - struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; - struct msix_entry *entry = &adapter->msix_entries[vector]; - - if (q_vector->tx.ring && q_vector->rx.ring) { - snprintf(q_vector->name, sizeof(q_vector->name), - "%s-TxRx-%u", netdev->name, ri++); - ti++; - } else if (q_vector->rx.ring) { - snprintf(q_vector->name, sizeof(q_vector->name), - "%s-rx-%u", netdev->name, ri++); - } else if (q_vector->tx.ring) { - snprintf(q_vector->name, sizeof(q_vector->name), - "%s-tx-%u", netdev->name, ti++); - } else { - /* skip this unused q_vector */ - continue; - } - err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0, - q_vector->name, q_vector); - if (err) { - e_err(probe, "request_irq failed for MSIX interrupt '%s' " - "Error: %d\n", q_vector->name, err); - goto free_queue_irqs; - } -#ifdef HAVE_IRQ_AFFINITY_HINT - /* If Flow Director is enabled, set interrupt affinity */ - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - /* assign the mask for 
this irq */ - irq_set_affinity_hint(entry->vector, - &q_vector->affinity_mask); - } -#endif /* HAVE_IRQ_AFFINITY_HINT */ - } - - err = request_irq(adapter->msix_entries[vector].vector, - ixgbe_msix_other, 0, netdev->name, adapter); - if (err) { - e_err(probe, "request_irq for msix_other failed: %d\n", err); - goto free_queue_irqs; - } - - return IXGBE_SUCCESS; - -free_queue_irqs: - while (vector) { - vector--; -#ifdef HAVE_IRQ_AFFINITY_HINT - irq_set_affinity_hint(adapter->msix_entries[vector].vector, - NULL); -#endif - free_irq(adapter->msix_entries[vector].vector, - adapter->q_vector[vector]); - } - adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; - pci_disable_msix(adapter->pdev); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; - return err; -} - -/** - * ixgbe_intr - legacy mode Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure - **/ -static irqreturn_t ixgbe_intr(int __always_unused irq, void *data) -{ - struct ixgbe_adapter *adapter = data; - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; - u32 eicr; - - /* - * Workaround for silicon errata #26 on 82598. Mask the interrupt - * before the read of EICR. - */ - IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); - - /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read - * therefore no explicit interrupt disable is necessary */ - eicr = IXGBE_READ_REG(hw, IXGBE_EICR); - if (!eicr) { - /* - * shared interrupt alert! - * make sure interrupts are enabled because the read will - * have disabled interrupts due to EIAM - * finish the workaround of silicon errata on 82598. Unmask - * the interrupt that we masked before the EICR read. 
- */ - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable(adapter, true, true); - return IRQ_NONE; /* Not our interrupt */ - } - - if (eicr & IXGBE_EICR_LSC) - ixgbe_check_lsc(adapter); - - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - - if (eicr & IXGBE_EICR_ECC) { - e_info(link, "Received unrecoverable ECC Err," - "initiating reset.\n"); - set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); - ixgbe_service_event_schedule(adapter); - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); - } - ixgbe_check_sfp_event(adapter, eicr); - ixgbe_check_overtemp_event(adapter, eicr); - break; - default: - break; - } - - ixgbe_check_fan_failure(adapter, eicr); -#ifdef HAVE_PTP_1588_CLOCK - if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) - ixgbe_ptp_check_pps_event(adapter); -#endif - - /* would disable interrupts here but EIAM disabled it */ - napi_schedule_irqoff(&q_vector->napi); - - /* - * re-enable link(maybe) and non-queue interrupts, no flush. - * ixgbe_poll will re-enable the queue interrupts - */ - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable(adapter, false, false); - - return IRQ_HANDLED; -} - -/** - * ixgbe_request_irq - initialize interrupts - * @adapter: board private structure - * - * Attempts to configure interrupts using the best available - * capabilities of the hardware and kernel. 
- **/ -static int ixgbe_request_irq(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int err; - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - err = ixgbe_request_msix_irqs(adapter); - else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) - err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0, - netdev->name, adapter); - else - err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED, - netdev->name, adapter); - - if (err) - e_err(probe, "request_irq failed, Error %d\n", err); - - return err; -} - -static void ixgbe_free_irq(struct ixgbe_adapter *adapter) -{ - int vector; - - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { - free_irq(adapter->pdev->irq, adapter); - return; - } - - if (!adapter->msix_entries) - return; - - for (vector = 0; vector < adapter->num_q_vectors; vector++) { - struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; - struct msix_entry *entry = &adapter->msix_entries[vector]; - - /* free only the irqs that were actually requested */ - if (!q_vector->rx.ring && !q_vector->tx.ring) - continue; - -#ifdef HAVE_IRQ_AFFINITY_HINT - /* clear the affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(entry->vector, NULL); - -#endif - free_irq(entry->vector, q_vector); - } - - free_irq(adapter->msix_entries[vector].vector, adapter); -} - -/** - * ixgbe_irq_disable - Mask off interrupt generation on the NIC - * @adapter: board private structure - **/ -static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) -{ - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); - break; - default: - break; - } - IXGBE_WRITE_FLUSH(&adapter->hw); - if 
(adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - int vector; - - for (vector = 0; vector < adapter->num_q_vectors; vector++) - synchronize_irq(adapter->msix_entries[vector].vector); - - synchronize_irq(adapter->msix_entries[vector++].vector); - } else { - synchronize_irq(adapter->pdev->irq); - } -} - -/** - * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts - * - **/ -static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) -{ - struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; - - ixgbe_write_eitr(q_vector); - - ixgbe_set_ivar(adapter, 0, 0, 0); - ixgbe_set_ivar(adapter, 1, 0, 0); - - e_info(hw, "Legacy interrupt IVAR setup done\n"); -} - -/** - * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset - * @adapter: board private structure - * @ring: structure containing ring specific data - * - * Configure the Tx descriptor ring after a reset. - **/ -void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) -{ - struct ixgbe_hw *hw = &adapter->hw; - u64 tdba = ring->dma; - int wait_loop = 10; - u32 txdctl = IXGBE_TXDCTL_ENABLE; - u8 reg_idx = ring->reg_idx; - - /* disable queue to avoid issues while updating state */ - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); - IXGBE_WRITE_FLUSH(hw); - - IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32)); - IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), tdba >> 32); - IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), - ring->count * sizeof(union ixgbe_adv_tx_desc)); - - /* disable head writeback */ - IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(reg_idx), 0); - IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(reg_idx), 0); - - /* reset head and tail pointers */ - IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); - IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); - ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx); - - /* reset ntu and ntc to place SW in sync with hardwdare */ - ring->next_to_clean = 0; - ring->next_to_use = 0; - - /* - * set WTHRESH to 
encourage burst writeback, it should not be set - * higher than 1 when: - * - ITR is 0 as it could cause false TX hangs - * - ITR is set to > 100k int/sec and BQL is enabled - * - * In order to avoid issues WTHRESH + PTHRESH should always be equal - * to or less than the number of on chip descriptors, which is - * currently 40. - */ - if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) - txdctl |= (1 << 16); /* WTHRESH = 1 */ - else - txdctl |= (8 << 16); /* WTHRESH = 8 */ - - /* - * Setting PTHRESH to 32 both improves performance - * and avoids a TX hang with DFP enabled - */ - txdctl |= (1 << 8) | /* HTHRESH = 1 */ - 32; /* PTHRESH = 32 */ - - /* reinitialize flowdirector state */ - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - ring->atr_sample_rate = adapter->atr_sample_rate; - ring->atr_count = 0; - set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); - } else { - ring->atr_sample_rate = 0; - } - - /* initialize XPS */ - if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) { - struct ixgbe_q_vector *q_vector = ring->q_vector; - - if (q_vector) - netif_set_xps_queue(adapter->netdev, - &q_vector->affinity_mask, - ring->queue_index); - } - - clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); - - /* enable queue */ - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); - - /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ - if (hw->mac.type == ixgbe_mac_82598EB && - !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) - return; - - /* poll to verify queue is enabled */ - do { - msleep(1); - txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); - } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); - if (!wait_loop) - hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx); -} - -static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 rttdcs, mtqc; - u8 tcs = netdev_get_num_tc(adapter->netdev); - - if (hw->mac.type == ixgbe_mac_82598EB) - return; - - /* disable the arbiter 
while setting MTQC */ - rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); - rttdcs |= IXGBE_RTTDCS_ARBDIS; - IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); - - /* set transmit pool layout */ - if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { - mtqc = IXGBE_MTQC_VT_ENA; - if (tcs > 4) - mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; - else if (tcs > 1) - mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; - else if (adapter->ring_feature[RING_F_VMDQ].mask == - IXGBE_82599_VMDQ_4Q_MASK) - mtqc |= IXGBE_MTQC_32VF; - else - mtqc |= IXGBE_MTQC_64VF; - } else { - if (tcs > 4) - mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; - else if (tcs > 1) - mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; - else - mtqc = IXGBE_MTQC_64Q_1PB; - } - - IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); - - /* Enable Security TX Buffer IFG for multiple pb */ - if (tcs) { - u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); - sectx |= IXGBE_SECTX_DCB; - IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx); - } - - /* re-enable the arbiter */ - rttdcs &= ~IXGBE_RTTDCS_ARBDIS; - IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); -} - -/** - * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset - * @adapter: board private structure - * - * Configure the Tx unit of the MAC after a reset. 
- **/ -static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 dmatxctl; - u32 i; - -#ifdef CONFIG_NETDEVICES_MULTIQUEUE - if (adapter->num_tx_queues > 1) - adapter->netdev->features |= NETIF_F_MULTI_QUEUE; - else - adapter->netdev->features &= ~NETIF_F_MULTI_QUEUE; - -#endif - ixgbe_setup_mtqc(adapter); - - if (hw->mac.type != ixgbe_mac_82598EB) { - /* DMATXCTL.EN must be before Tx queues are enabled */ - dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); - dmatxctl |= IXGBE_DMATXCTL_TE; - IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); - } - - /* Setup the HW Tx Head and Tail descriptor pointers */ - for (i = 0; i < adapter->num_tx_queues; i++) - ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); -} - -static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) -{ - struct ixgbe_hw *hw = &adapter->hw; - u8 reg_idx = ring->reg_idx; - u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); - - srrctl |= IXGBE_SRRCTL_DROP_EN; - - IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); -} - -static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) -{ - struct ixgbe_hw *hw = &adapter->hw; - u8 reg_idx = ring->reg_idx; - u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); - - srrctl &= ~IXGBE_SRRCTL_DROP_EN; - - IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); -} - -void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) -{ - int i; - bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; - -#ifdef HAVE_DCBNL_IEEE - if (adapter->ixgbe_ieee_pfc) - pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); - -#endif - /* - * We should set the drop enable bit if: - * SR-IOV is enabled - * or - * Number of Rx queues > 1 and flow control is disabled - * - * This allows us to avoid head of line blocking for security - * and performance reasons. 
- */ - if (adapter->num_vfs || (adapter->num_rx_queues > 1 && - !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) { - for (i = 0; i < adapter->num_rx_queues; i++) - ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); - } else { - for (i = 0; i < adapter->num_rx_queues; i++) - ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); - } -} - -static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 srrctl; - u8 reg_idx = rx_ring->reg_idx; - - if (hw->mac.type == ixgbe_mac_82598EB) { - u16 mask = adapter->ring_feature[RING_F_RSS].mask; - - /* program one srrctl register per VMDq index */ - if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) - mask = adapter->ring_feature[RING_F_VMDQ].mask; - - /* - * if VMDq is not active we must program one srrctl register - * per RSS queue since we have enabled RDRXCTL.MVMEN - */ - reg_idx &= mask; - - /* divide by the first bit of the mask to get the indices */ - if (reg_idx) - reg_idx /= ((~mask) + 1) & mask; - } - - /* configure header buffer length, needed for RSC */ - srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; - - /* configure the packet buffer length */ -#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; -#else - if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) - srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; - else - srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; -#endif - - /* configure descriptor type */ - srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; - - IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); -} - -/** - * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries - * @adapter: device handle - * - * - 82598/82599/X540: 128 - * - X550(non-SRIOV mode): 512 - * - X550(SRIOV mode): 64 - */ -u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) -{ - if (adapter->hw.mac.type < ixgbe_mac_X550) - 
return 128; - else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - return 64; - else - return 512; -} - -/** - * ixgbe_store_key - Write the RSS key to HW - * @adapter: device handle - * - * Write the RSS key stored in adapter.rss_key to HW. - */ -void ixgbe_store_key(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int i; - - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); -} - -/** - * ixgbe_init_rss_key - Initialize adapter RSS key - * @adapter: device handle - * - * Allocates and initializes the RSS key if it is not allocated. - **/ -static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter) -{ - u32 *rss_key; - - if (!adapter->rss_key) { - rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL); - if (unlikely(!rss_key)) - return -ENOMEM; - - netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE); - adapter->rss_key = rss_key; - } - - return 0; -} - -/** - * ixgbe_store_reta - Write the RETA table to HW - * @adapter: device handle - * - * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. - */ -void ixgbe_store_reta(struct ixgbe_adapter *adapter) -{ - u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); - struct ixgbe_hw *hw = &adapter->hw; - u32 reta = 0; - u32 indices_multi; - u8 *indir_tbl = adapter->rss_indir_tbl; - - /* Fill out the redirection table as follows: - * - 82598: 8 bit wide entries containing pair of 4 bit RSS - * indices. 
- * - 82599/X540: 8 bit wide entries containing 4 bit RSS index - * - X550: 8 bit wide entries containing 6 bit RSS index - */ - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - indices_multi = 0x11; - else - indices_multi = 0x1; - - /* Write redirection table to HW */ - for (i = 0; i < reta_entries; i++) { - reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8; - if ((i & 3) == 3) { - if (i < 128) - IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); - else - IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), - reta); - reta = 0; - } - } -} - -/** - * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode) - * @adapter: device handle - * - * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. - */ -static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) -{ - u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); - struct ixgbe_hw *hw = &adapter->hw; - u32 vfreta = 0; - unsigned int pf_pool = adapter->num_vfs; - - /* Write redirection table to HW */ - for (i = 0; i < reta_entries; i++) { - vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; - if ((i & 3) == 3) { - IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), - vfreta); - vfreta = 0; - } - } -} - -static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) -{ - u32 i, j; - u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); - u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; - - /* Program table for at least 4 queues w/ SR-IOV so that VFs can - * make full use of any rings they may have. We will use the - * PSRTYPE register to control how many rings we use within the PF. 
- */ - if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4)) - rss_i = 4; - - /* Fill out hash function seeds */ - ixgbe_store_key(adapter); - - /* Fill out redirection table */ - memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); - - for (i = 0, j = 0; i < reta_entries; i++, j++) { - if (j == rss_i) - j = 0; - - adapter->rss_indir_tbl[i] = j; - } - - ixgbe_store_reta(adapter); -} - -static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; - unsigned int pf_pool = adapter->num_vfs; - int i, j; - - /* Fill out hash function seeds */ - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), - *(adapter->rss_key + i)); - - /* Fill out the redirection table */ - for (i = 0, j = 0; i < 64; i++, j++) { - if (j == rss_i) - j = 0; - - adapter->rss_indir_tbl[i] = j; - } - - ixgbe_store_vfreta(adapter); -} - - -static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 rxcsum; - u32 mrqc = 0, rss_field = 0; - u32 vfmrqc = 0; - - /* Disable indicating checksum in descriptor, enables RSS hash */ - rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); - rxcsum |= IXGBE_RXCSUM_PCSD; - IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); - - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { - if (adapter->ring_feature[RING_F_RSS].mask) - mrqc = IXGBE_MRQC_RSSEN; - } else { - u8 tcs = netdev_get_num_tc(adapter->netdev); - - if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { - if (tcs > 4) - mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */ - else if (tcs > 1) - mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */ - else if (adapter->ring_feature[RING_F_VMDQ].mask == - IXGBE_82599_VMDQ_4Q_MASK) - mrqc = IXGBE_MRQC_VMDQRSS32EN; - else - mrqc = IXGBE_MRQC_VMDQRSS64EN; - } else { - if (tcs > 4) - mrqc = IXGBE_MRQC_RTRSS8TCEN; - else if (tcs > 1) - mrqc = IXGBE_MRQC_RTRSS4TCEN; - else - mrqc = IXGBE_MRQC_RSSEN; - } - - /* Enable 
L3/L4 for Tx Switched packets */ - mrqc |= IXGBE_MRQC_L3L4TXSWEN; - } - - /* Perform hash on these packet types */ - rss_field = IXGBE_MRQC_RSS_FIELD_IPV4 | - IXGBE_MRQC_RSS_FIELD_IPV4_TCP | - IXGBE_MRQC_RSS_FIELD_IPV6 | - IXGBE_MRQC_RSS_FIELD_IPV6_TCP; - - if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) - rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; - if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) - rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; - - if ((hw->mac.type >= ixgbe_mac_X550) && - (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { - unsigned int pf_pool = adapter->num_vfs; - - /* Enable VF RSS mode */ - mrqc |= IXGBE_MRQC_MULTIPLE_RSS; - IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); - - /* Setup RSS through the VF registers */ - ixgbe_setup_vfreta(adapter); - vfmrqc = IXGBE_MRQC_RSSEN; - vfmrqc |= rss_field; - IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc); - } else { - ixgbe_setup_reta(adapter); - mrqc |= rss_field; - IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); - } -} - -/** - * ixgbe_clear_rscctl - disable RSC for the indicated ring - * @adapter: address of board private structure - * @ring: structure containing ring specific data - **/ -void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 rscctrl; - u8 reg_idx = ring->reg_idx; - - rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); - rscctrl &= ~IXGBE_RSCCTL_RSCEN; - IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); - - clear_ring_rsc_enabled(ring); -} - -/** - * ixgbe_configure_rscctl - enable RSC for the indicated ring - * @adapter: address of board private structure - * @ring: structure containing ring specific data - **/ -void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 rscctrl; - u8 reg_idx = ring->reg_idx; - - if (!ring_is_rsc_enabled(ring)) - return; - - rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); - rscctrl |= 
IXGBE_RSCCTL_RSCEN; - /* - * we must limit the number of descriptors so that the - * total size of max desc * buf_len is not greater - * than 65536 - */ -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT -#if (MAX_SKB_FRAGS >= 16) - rscctrl |= IXGBE_RSCCTL_MAXDESC_16; -#elif (MAX_SKB_FRAGS >= 8) - rscctrl |= IXGBE_RSCCTL_MAXDESC_8; -#elif (MAX_SKB_FRAGS >= 4) - rscctrl |= IXGBE_RSCCTL_MAXDESC_4; -#else - rscctrl |= IXGBE_RSCCTL_MAXDESC_1; -#endif -#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ - if (ring->rx_buf_len <= IXGBE_RXBUFFER_4K) - rscctrl |= IXGBE_RSCCTL_MAXDESC_16; - else if (ring->rx_buf_len <= IXGBE_RXBUFFER_8K) - rscctrl |= IXGBE_RSCCTL_MAXDESC_8; - else - rscctrl |= IXGBE_RSCCTL_MAXDESC_4; -#endif /* !CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ - IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); -} - -static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) -{ - struct ixgbe_hw *hw = &adapter->hw; - int wait_loop = IXGBE_MAX_RX_DESC_POLL; - u32 rxdctl; - u8 reg_idx = ring->reg_idx; - - if (IXGBE_REMOVED(hw->hw_addr)) - return; - /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ - if (hw->mac.type == ixgbe_mac_82598EB && - !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) - return; - - do { - msleep(1); - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); - - if (!wait_loop) { - e_err(drv, "RXDCTL.ENABLE on Rx queue %d " - "not set within the polling period\n", reg_idx); - } -} - -void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) -{ - struct ixgbe_hw *hw = &adapter->hw; - int wait_loop = IXGBE_MAX_RX_DESC_POLL; - u32 rxdctl; - u8 reg_idx = ring->reg_idx; - - if (IXGBE_REMOVED(hw->hw_addr)) - return; - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - rxdctl &= ~IXGBE_RXDCTL_ENABLE; - - /* write value back with RXDCTL.ENABLE bit cleared */ - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); - - if 
(hw->mac.type == ixgbe_mac_82598EB && - !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) - return; - - /* the hardware may take up to 100us to really disable the rx queue */ - do { - udelay(10); - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); - - if (!wait_loop) { - e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " - "the polling period\n", reg_idx); - } -} - -void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) -{ - struct ixgbe_hw *hw = &adapter->hw; - union ixgbe_adv_rx_desc *rx_desc; - u64 rdba = ring->dma; - u32 rxdctl; - u8 reg_idx = ring->reg_idx; - - /* disable queue to avoid issues while updating state */ - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - ixgbe_disable_rx_queue(adapter, ring); - - IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32)); - IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), rdba >> 32); - IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), - ring->count * sizeof(union ixgbe_adv_rx_desc)); - /* Force flushing of IXGBE_RDLEN to prevent MDD */ - IXGBE_WRITE_FLUSH(hw); - - /* reset head and tail pointers */ - IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); - IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); - ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); - - /* reset ntu and ntc to place SW in sync with hardwdare */ - ring->next_to_clean = 0; - ring->next_to_use = 0; -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - ring->next_to_alloc = 0; -#endif - - ixgbe_configure_srrctl(adapter, ring); - /* In ESX, RSCCTL configuration is done by on demand */ - ixgbe_configure_rscctl(adapter, ring); - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - /* - * enable cache line friendly hardware writes: - * PTHRESH=32 descriptors (half the internal cache), - * this also removes ugly rx_no_buffer_count increment - * HTHRESH=4 descriptors (to minimize latency on fetch) - * WTHRESH=8 burst writeback up to two cache lines - */ - rxdctl &= 
~0x3FFFFF; - rxdctl |= 0x080420; - break; - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT -#if (PAGE_SIZE < 8192) - rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | - IXGBE_RXDCTL_RLPML_EN); - - /* Limit the maximum frame size so we don't overrun the skb */ - if (ring_uses_build_skb(ring) && - !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) - rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB | - IXGBE_RXDCTL_RLPML_EN; -#endif -#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ - /* If operating in IOV mode set RLPML */ - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - break; - rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN; -#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ - break; - default: - break; - } - - /* initialize Rx descriptor 0 */ - rx_desc = IXGBE_RX_DESC(ring, 0); - rx_desc->wb.upper.length = 0; - - /* enable receive descriptor ring */ - rxdctl |= IXGBE_RXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); - - ixgbe_rx_desc_queue_enable(adapter, ring); - ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); -} - -static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int rss_i = adapter->ring_feature[RING_F_RSS].indices; - int p; - - /* PSRTYPE must be initialized in non 82598 adapters */ - u32 psrtype = IXGBE_PSRTYPE_TCPHDR | - IXGBE_PSRTYPE_UDPHDR | - IXGBE_PSRTYPE_IPV4HDR | - IXGBE_PSRTYPE_L2HDR | - IXGBE_PSRTYPE_IPV6HDR; - - if (hw->mac.type == ixgbe_mac_82598EB) - return; - - if (rss_i > 3) - psrtype |= 2 << 29; - else if (rss_i > 1) - psrtype |= 1 << 29; - - for (p = 0; p < adapter->num_rx_pools; p++) - IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)), psrtype); -} - -/** - * ixgbe_configure_bridge_mode - common settings for configuring bridge mode - * @adapter - the private structure - * - * This function's purpose is to remove code duplication and configure some - * settings require to switch bridge modes. 
- **/ -static void ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw * hw = &adapter->hw; - unsigned int p; - u32 vmdctl; - - if (adapter->flags & IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE) { - /* disable Tx loopback, rely on switch hairpin mode */ - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, 0); - - /* must enable Rx switching replication to allow multicast - * packet reception on all VFs, and to enable source address - * pruning. - */ - vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); - vmdctl |= IXGBE_VT_CTL_REPLEN; - IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); - - /* enable Rx source address pruning. Note, this requires - * replication to be enabled or else it does nothing. - */ - for (p = 0; p < (adapter->num_vfs + adapter->num_rx_pools); p++) { - if (hw->mac.ops.set_source_address_pruning) - hw->mac.ops.set_source_address_pruning(hw, - true, - p); - } - } else { - /* enable Tx loopback for internal VF/PF communication */ - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); - - /* disable Rx switching replication unless we have SR-IOV - * virtual functions - */ - vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); - if (!adapter->num_vfs) - vmdctl &= ~IXGBE_VT_CTL_REPLEN; - IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); - - /* disable Rx source address pruning, since we don't expect to - * be receiving external loopback of our transmitted frames. 
- */ - for (p = 0; p < (adapter->num_vfs + adapter->num_rx_pools); p++) { - if (hw->mac.ops.set_source_address_pruning) - hw->mac.ops.set_source_address_pruning(hw, - false, - p); - } - } - -} - -static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 reg_offset, vf_shift; - u32 gcr_ext, vmdctl; - int i; - - if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) - return; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); - vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; - IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); - vmdctl |= IXGBE_VT_CTL_VT_ENABLE; - vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; - vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT; - if (adapter->num_vfs) - vmdctl |= IXGBE_VT_CTL_REPLEN; - IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); - - for (i = 1; i < adapter->num_rx_pools; i++) { - u32 vmolr; - int pool = VMDQ_P(i); - - /* accept untagged packets until a vlan tag is - * specifically set for the VMDQ queue/pool - */ - vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); - vmolr |= IXGBE_VMOLR_AUPE; - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); - } - - vf_shift = VMDQ_P(0) % 32; - reg_offset = (VMDQ_P(0) >= 32) ? 
1 : 0; - - /* Enable only the PF pools for Tx/Rx */ - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift); - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); - - /* clear VLAN promisc flag so VFTA - * will be updated if necessary - */ - adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; - break; - default: - break; - } - - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - return; - - /* - * Set up VF register offsets for selected VT Mode, - * i.e. 32 or 64 VFs for SR-IOV - */ - switch (adapter->ring_feature[RING_F_VMDQ].mask) { - case IXGBE_82599_VMDQ_8Q_MASK: - gcr_ext = IXGBE_GCR_EXT_VT_MODE_16; - break; - case IXGBE_82599_VMDQ_4Q_MASK: - gcr_ext = IXGBE_GCR_EXT_VT_MODE_32; - break; - default: - gcr_ext = IXGBE_GCR_EXT_VT_MODE_64; - break; - } - - IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); - - /* configure default bridge settings */ - ixgbe_configure_bridge_mode(adapter); -#if IS_ENABLED(CONFIG_PCI_IOV) - for (i = 0; i < adapter->num_vfs; i++) { - /* configure spoof checking */ - ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, - adapter->vfinfo[i].spoofchk_enabled); - -#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN - /* Enable/Disable RSS query feature */ - ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, - adapter->vfinfo[i].rss_query_enabled); -#endif /* HAVE_NDO_SET_VF_RSS_QUERY_EN */ - } -#endif /* CONFIG_PCI_IOV */ -} - -static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - struct ixgbe_ring *rx_ring; - int i; - u32 mhadd, hlreg0; -#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - int rx_buf_len; -#endif - - switch (hw->mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - max_frame += IXGBE_TS_HDR_LEN; - default: - break; - } - 
-#if IS_ENABLED(CONFIG_FCOE) - /* adjust max frame to be able to do baby jumbo for FCoE */ - if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && - (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) - max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; -#endif /* CONFIG_FCOE */ - - /* adjust max frame to be at least the size of a standard frame */ - if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) - max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); - - mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); - if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { - mhadd &= ~IXGBE_MHADD_MFS_MASK; - mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; - - IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); - } - -#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - /* MHADD will allow an extra 4 bytes past for vlan tagged frames */ - max_frame += VLAN_HLEN; - - if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && - (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) { - rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; - /* - * Make best use of allocation by using all but 1K of a - * power of 2 allocation that will be used for skb->head. 
- */ - } else if (max_frame <= IXGBE_RXBUFFER_3K) { - rx_buf_len = IXGBE_RXBUFFER_3K; - } else if (max_frame <= IXGBE_RXBUFFER_7K) { - rx_buf_len = IXGBE_RXBUFFER_7K; - } else if (max_frame <= IXGBE_RXBUFFER_15K) { - rx_buf_len = IXGBE_RXBUFFER_15K; - } else { - rx_buf_len = IXGBE_MAX_RXBUFFER; - } - -#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ - hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - /* set jumbo enable since MHADD.MFS is keeping size locked at - * max_frame - */ - hlreg0 |= IXGBE_HLREG0_JUMBOEN; - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); - - /* - * Setup the HW Rx Head and Tail Descriptor Pointers and - * the Base and Length of the Rx Descriptor Ring - */ - for (i = 0; i < adapter->num_rx_queues; i++) { - rx_ring = adapter->rx_ring[i]; - - clear_ring_rsc_enabled(rx_ring); - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) - set_ring_rsc_enabled(rx_ring); - -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); - clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); -#if IS_ENABLED(CONFIG_FCOE) - - if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) - set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); -#endif -#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC - - if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) - continue; - - set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); - -#if (PAGE_SIZE < 8192) - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) - set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); - - if (IXGBE_2K_TOO_SMALL_WITH_PADDING || - (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) - set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); -#endif -#else /* !HAVE_SWIOTLB_SKIP_CPU_SYNC */ - - adapter->flags2 |= IXGBE_FLAG2_RX_LEGACY; -#endif /* !HAVE_SWIOTLB_SKIP_CPU_SYNC */ -#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ - - rx_ring->rx_buf_len = rx_buf_len; -#if IS_ENABLED(CONFIG_FCOE) - - if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state) && - (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)) - rx_ring->rx_buf_len = IXGBE_FCOE_JUMBO_FRAME_SIZE; 
-#endif /* CONFIG_FCOE */ -#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ - } -} - -static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); - - switch (hw->mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - if (adapter->num_vfs) - rdrxctl |= IXGBE_RDRXCTL_PSP; - /* fall through */ - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - /* Disable RSC for ACK packets */ - IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, - (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); - rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; - /* hardware requires some bits to be set by default */ - rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); - rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; - break; - case ixgbe_mac_82598EB: - /* - * For VMDq support of different descriptor types or - * buffer sizes through the use of multiple SRRCTL - * registers, RDRXCTL.MVMEN must be set to 1 - * - * also, the manual doesn't mention it clearly but DCA hints - * will only use queue 0's tags unless this bit is set. Side - * effects of setting this bit are only that SRRCTL must be - * fully programmed [0..15] - */ - rdrxctl |= IXGBE_RDRXCTL_MVMEN; - break; - default: - /* We should do nothing since we don't know this hardware */ - return; - } - - IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); -} - -/** - * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset - * @adapter: board private structure - * - * Configure the Rx unit of the MAC after a reset. 
- **/ -static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int i; - u32 rxctrl, rfctl; - - /* disable receives while setting up the descriptors */ - ixgbe_disable_rx(hw); - - ixgbe_setup_psrtype(adapter); - ixgbe_setup_rdrxctl(adapter); - - /* RSC Setup */ - rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); - rfctl &= ~IXGBE_RFCTL_RSC_DIS; - if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) - rfctl |= IXGBE_RFCTL_RSC_DIS; - - /* disable NFS filtering */ - rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS); - IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); - - /* Program registers for the distribution of queues */ - ixgbe_setup_mrqc(adapter); - - /* set_rx_buffer_len must be called before ring initialization */ - ixgbe_set_rx_buffer_len(adapter); - - /* - * Setup the HW Rx Head and Tail Descriptor Pointers and - * the Base and Length of the Rx Descriptor Ring - */ - for (i = 0; i < adapter->num_rx_queues; i++) - ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); - - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - /* disable drop enable for 82598 parts */ - if (hw->mac.type == ixgbe_mac_82598EB) - rxctrl |= IXGBE_RXCTRL_DMBYPS; - - /* enable all receives */ - rxctrl |= IXGBE_RXCTRL_RXEN; - hw->mac.ops.enable_rx_dma(hw, rxctrl); -} - -#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) -#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID -#ifdef NETIF_F_HW_VLAN_CTAG_TX -static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) -#else /* !NETIF_F_HW_VLAN_CTAG_TX */ -static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) -#endif /* NETIF_F_HW_VLAN_CTAG_TX */ -#else /* !HAVE_INT_NDO_VLAN_RX_ADD_VID */ -static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) -#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int pool_ndx = VMDQ_P(0); - - /* add VID to filter table */ - if 
(hw->mac.ops.set_vfta) { -#ifndef HAVE_VLAN_RX_REGISTER - if (vid < VLAN_N_VID) - set_bit(vid, adapter->active_vlans); -#endif - - if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) - hw->mac.ops.set_vfta(hw, vid, pool_ndx, true, !!vid); - - if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED && - adapter->hw.mac.type != ixgbe_mac_82598EB) { - int i; - - /* enable vlan id for all pools */ - for (i = 1; i < adapter->num_rx_pools; i++) - hw->mac.ops.set_vfta(hw, vid, VMDQ_P(i), true, -#ifdef HAVE_VLAN_RX_REGISTER - false); -#else - true); -#endif - } - } -#ifndef HAVE_NETDEV_VLAN_FEATURES - - /* - * Copy feature flags from netdev to the vlan netdev for this vid. - * This allows things like TSO to bubble down to our vlan device. - * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so - * we will not have a netdev that needs updating. - */ - if (adapter->vlgrp) { - struct vlan_group *vlgrp = adapter->vlgrp; - struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid); - if (v_netdev) { - v_netdev->features |= netdev->features; - vlan_group_set_device(vlgrp, vid, v_netdev); - } - } -#endif /* HAVE_NETDEV_VLAN_FEATURES */ -#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID - return 0; -#endif -} - -#if defined(HAVE_VLAN_RX_REGISTER) && defined(CONFIG_PCI_IOV) -int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) -#else -static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) -#endif -{ - u32 vlvf; - int idx; - - /* short cut the special case */ - if (vlan == 0) - return 0; - - /* Search for the vlan id in the VLVF entries */ - for (idx = IXGBE_VLVF_ENTRIES; --idx;) { - vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx)); - if ((vlvf & VLAN_VID_MASK) == vlan) - break; - } - - return idx; -} - -void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 bits, word; - int idx; - - idx = ixgbe_find_vlvf_entry(hw, vid); - if (!idx) - return; - - /* See if any other pools are set for this VLAN filter - * entry 
other than the PF. - */ - word = idx * 2 + (VMDQ_P(0) / 32); - bits = ~(1 << (VMDQ_P(0)) % 32); - bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); - - /* Disable the filter so this falls into the default pool. */ - if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) { - if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) - IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0); - IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0); - } -} - -#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID -#ifdef NETIF_F_HW_VLAN_CTAG_RX -static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) -#else /* !NETIF_F_HW_VLAN_CTAG_RX */ -static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) -#endif /* NETIF_F_HW_VLAN_CTAG_RX */ -#else -static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) -#endif -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int pool_ndx = VMDQ_P(0); - - /* User is not allowed to remove vlan ID 0 */ - if (!vid) -#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID - return 0; -#else - return; -#endif - -#ifdef HAVE_VLAN_RX_REGISTER - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_disable(adapter); - - vlan_group_set_device(adapter->vlgrp, vid, NULL); - - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable(adapter, true, true); - -#endif /* HAVE_VLAN_RX_REGISTER */ - /* remove VID from filter table */ - if (hw->mac.ops.set_vfta) { - if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) - hw->mac.ops.set_vfta(hw, vid, pool_ndx, false, true); - - if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED && - adapter->hw.mac.type != ixgbe_mac_82598EB) { - int i; - - /* remove vlan id from all pools */ - for (i = 1; i < adapter->num_rx_pools; i++) - hw->mac.ops.set_vfta(hw, vid, VMDQ_P(i), false, - true); - } - } -#ifndef HAVE_VLAN_RX_REGISTER - - clear_bit(vid, adapter->active_vlans); -#endif -#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID - return 0; -#endif -} - -#ifdef HAVE_8021P_SUPPORT 
-/** - * ixgbe_vlan_strip_disable - helper to disable vlan tag stripping - * @adapter: driver data - */ -void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vlnctrl; - int i; - - /* leave vlan tag stripping enabled for DCB */ - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) - return; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - vlnctrl &= ~IXGBE_VLNCTRL_VME; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - for (i = 0; i < adapter->num_rx_queues; i++) { - u8 reg_idx = adapter->rx_ring[i]->reg_idx; - vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - vlnctrl &= ~IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), vlnctrl); - } - break; - default: - break; - } -} - -#endif /* HAVE_8021P_SUPPORT */ -/** - * ixgbe_vlan_strip_enable - helper to enable vlan tag stripping - * @adapter: driver data - */ -void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vlnctrl; - int i; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - vlnctrl |= IXGBE_VLNCTRL_VME; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - for (i = 0; i < adapter->num_rx_queues; i++) { - u8 reg_idx = adapter->rx_ring[i]->reg_idx; - vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - vlnctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), vlnctrl); - } - break; - default: - break; - } -} - -#ifndef HAVE_VLAN_RX_REGISTER -static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vlnctrl, i; - - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - - 
if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { - /* we need to keep the VLAN filter on in SRIOV */ - vlnctrl |= IXGBE_VLNCTRL_VFE; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); - } else { - vlnctrl &= ~IXGBE_VLNCTRL_VFE; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); - return; - } - - /* Nothing to do for 82598 */ - if (hw->mac.type == ixgbe_mac_82598EB) - return; - - /* We are already in VLAN promisc, nothing to do */ - if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) - return; - - /* Set flag so we don't redo unnecessary work */ - adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; - - /* Add PF to all active pools */ - for (i = IXGBE_VLVF_ENTRIES; --i;) { - u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); - u32 vlvfb = IXGBE_READ_REG(hw, reg_offset); - - vlvfb |= 1 << (VMDQ_P(0) % 32); - IXGBE_WRITE_REG(hw, reg_offset, vlvfb); - } - - /* Set all bits in the VLAN filter table array */ - for (i = hw->mac.vft_size; i--;) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U); -} - -#define VFTA_BLOCK_SIZE 8 -static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; - u32 vid_start = vfta_offset * 32; - u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); - u32 i, vid, word, bits; - - for (i = IXGBE_VLVF_ENTRIES; --i;) { - u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); - - /* pull VLAN ID from VLVF */ - vid = vlvf & VLAN_VID_MASK; - - /* only concern outselves with a certain range */ - if (vid < vid_start || vid >= vid_end) - continue; - - if (vlvf) { - /* record VLAN ID in VFTA */ - vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); - - /* if PF is part of this then continue */ - if (test_bit(vid, adapter->active_vlans)) - continue; - } - - /* remove PF from the pool */ - word = i * 2 + VMDQ_P(0) / 32; - bits = ~(1 << (VMDQ_P(0) % 32)); - bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); - IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits); - } - - /* extract values from active_vlans and write back 
to VFTA */ - for (i = VFTA_BLOCK_SIZE; i--;) { - vid = (vfta_offset + i) * 32; - word = vid / BITS_PER_LONG; - bits = vid % BITS_PER_LONG; - - vfta[i] |= adapter->active_vlans[word] >> bits; - - IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]); - } -} - -static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vlnctrl, i; - - /* configure vlan filtering */ - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - vlnctrl |= IXGBE_VLNCTRL_VFE; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); - - if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) || - hw->mac.type == ixgbe_mac_82598EB) - return; - - /* We are not in VLAN promisc, nothing to do */ - if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) - return; - - /* Set flag so we don't redo unnecessary work */ - adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; - - for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE) - ixgbe_scrub_vfta(adapter, i); -} -#endif /* HAVE_VLAN_RX_REGISTER */ - -#ifdef HAVE_VLAN_RX_REGISTER -static void ixgbe_vlan_mode(struct net_device *netdev, struct vlan_group *grp) -#else -void ixgbe_vlan_mode(struct net_device *netdev, u32 features) -#endif -{ -#if defined(HAVE_VLAN_RX_REGISTER) || defined(HAVE_8021P_SUPPORT) - struct ixgbe_adapter *adapter = netdev_priv(netdev); -#endif -#ifdef HAVE_8021P_SUPPORT - bool enable; -#endif - -#ifdef HAVE_VLAN_RX_REGISTER - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_disable(adapter); - - adapter->vlgrp = grp; - - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable(adapter, true, true); -#endif -#ifdef HAVE_8021P_SUPPORT -#ifdef HAVE_VLAN_RX_REGISTER - enable = (grp || (adapter->flags & IXGBE_FLAG_DCB_ENABLED)); -#else -#ifdef NETIF_F_HW_VLAN_CTAG_RX - enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); -#else - enable = !!(features & NETIF_F_HW_VLAN_RX); -#endif /* NETIF_F_HW_VLAN_CTAG_RX */ -#endif /* HAVE_VLAN_RX_REGISTER */ - if (enable) - /* enable VLAN tag insert/strip */ 
- ixgbe_vlan_strip_enable(adapter); - else - /* disable VLAN tag insert/strip */ - ixgbe_vlan_strip_disable(adapter); - -#endif /* HAVE_8021P_SUPPORT */ -} - -static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) -{ - u16 vid = 1; -#ifdef HAVE_VLAN_RX_REGISTER - - ixgbe_vlan_mode(adapter->netdev, adapter->vlgrp); - - /* - * add vlan ID 0 and enable vlan tag stripping so we - * always accept priority-tagged traffic - */ -#ifdef NETIF_F_HW_VLAN_CTAG_RX - ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); -#else - ixgbe_vlan_rx_add_vid(adapter->netdev, 0); -#endif -#ifndef HAVE_8021P_SUPPORT - ixgbe_vlan_strip_enable(adapter); -#endif - - if (adapter->vlgrp) { - for (; vid < VLAN_N_VID; vid++) { - if (!vlan_group_get_device(adapter->vlgrp, vid)) - continue; -#ifdef NETIF_F_HW_VLAN_CTAG_RX - ixgbe_vlan_rx_add_vid(adapter->netdev, - htons(ETH_P_8021Q), vid); -#else - ixgbe_vlan_rx_add_vid(adapter->netdev, vid); -#endif - } - } -#else /* !HAVE_VLAN_RX_REGISTER */ - -#ifdef NETIF_F_HW_VLAN_CTAG_RX - ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); -#else - ixgbe_vlan_rx_add_vid(adapter->netdev, 0); -#endif - - for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) -#ifdef NETIF_F_HW_VLAN_CTAG_RX - ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); -#else - ixgbe_vlan_rx_add_vid(adapter->netdev, vid); -#endif -#endif /* HAVE_VLAN_RX_REGISTER */ -} - -#endif -static u8 *ixgbe_addr_list_itr(struct ixgbe_hw __maybe_unused *hw, u8 **mc_addr_ptr, u32 *vmdq) -{ -#ifdef NETDEV_HW_ADDR_T_MULTICAST - struct netdev_hw_addr *mc_ptr; -#else - struct dev_mc_list *mc_ptr; -#endif -#ifdef CONFIG_PCI_IOV - struct ixgbe_adapter *adapter = hw->back; -#endif /* CONFIG_PCI_IOV */ - u8 *addr = *mc_addr_ptr; - - /* VMDQ_P implicitely uses the adapter struct when CONFIG_PCI_IOV is - * defined, so we have to wrap the pointer above correctly to prevent - * a warning. 
- */ - *vmdq = VMDQ_P(0); - -#ifdef NETDEV_HW_ADDR_T_MULTICAST - mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); - if (mc_ptr->list.next) { - struct netdev_hw_addr *ha; - - ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list); - *mc_addr_ptr = ha->addr; - } -#else - mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]); - if (mc_ptr->next) - *mc_addr_ptr = mc_ptr->next->dmi_addr; -#endif - else - *mc_addr_ptr = NULL; - - return addr; -} - -/** - * ixgbe_write_mc_addr_list - write multicast addresses to MTA - * @netdev: network interface device structure - * - * Writes multicast address list to the MTA hash table. - * Returns: -ENOMEM on failure - * 0 on no addresses written - * X on writing X addresses to MTA - **/ -int ixgbe_write_mc_addr_list(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; -#ifdef NETDEV_HW_ADDR_T_MULTICAST - struct netdev_hw_addr *ha; -#endif - u8 *addr_list = NULL; - int addr_count = 0; - - if (!hw->mac.ops.update_mc_addr_list) - return -ENOMEM; - - if (!netif_running(netdev)) - return 0; - - - if (netdev_mc_empty(netdev)) { - hw->mac.ops.update_mc_addr_list(hw, NULL, 0, - ixgbe_addr_list_itr, true); - } else { -#ifdef NETDEV_HW_ADDR_T_MULTICAST - ha = list_first_entry(&netdev->mc.list, - struct netdev_hw_addr, list); - addr_list = ha->addr; -#else - addr_list = netdev->mc_list->dmi_addr; -#endif - addr_count = netdev_mc_count(netdev); - - hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, - ixgbe_addr_list_itr, true); - } - -#ifdef CONFIG_PCI_IOV - ixgbe_restore_vf_multicasts(adapter); -#endif - return addr_count; -} - -#ifdef CONFIG_PCI_IOV -void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter) -{ - struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; - struct ixgbe_hw *hw = &adapter->hw; - int i; - - for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { - mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; - - if 
(mac_table->state & IXGBE_MAC_STATE_IN_USE) - hw->mac.ops.set_rar(hw, i, - mac_table->addr, - mac_table->pool, - IXGBE_RAH_AV); - else - hw->mac.ops.clear_rar(hw, i); - } -} -#endif - -static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) -{ - struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; - struct ixgbe_hw *hw = &adapter->hw; - int i; - - for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { - if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED)) - continue; - - mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; - - if (mac_table->state & IXGBE_MAC_STATE_IN_USE) - hw->mac.ops.set_rar(hw, i, - mac_table->addr, - mac_table->pool, - IXGBE_RAH_AV); - else - hw->mac.ops.clear_rar(hw, i); - } -} - -int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool) -{ - struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; - struct ixgbe_hw *hw = &adapter->hw; - int i, count = 0; - - for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { - /* do not count default RAR as available */ - if (mac_table->state & IXGBE_MAC_STATE_DEFAULT) - continue; - - /* only count unused and addresses that belong to us */ - if (mac_table->state & IXGBE_MAC_STATE_IN_USE) { - if (mac_table->pool != pool) - continue; - } - - count++; - } - - return count; -} - -/* this function destroys the first RAR entry */ -static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter) -{ - struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; - struct ixgbe_hw *hw = &adapter->hw; - - ether_addr_copy(mac_table->addr, hw->mac.addr); - mac_table->pool = VMDQ_P(0); - - mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE; - - hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool, - IXGBE_RAH_AV); -} - -int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, - const u8 *addr, u16 pool) -{ - struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; - struct ixgbe_hw *hw = &adapter->hw; - int i; - - if (is_zero_ether_addr(addr)) - return 
-EINVAL; - - for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { - if (mac_table->state & IXGBE_MAC_STATE_IN_USE) { - continue; - } - - ether_addr_copy(mac_table->addr, addr); - mac_table->pool = pool; - - mac_table->state |= IXGBE_MAC_STATE_MODIFIED | - IXGBE_MAC_STATE_IN_USE; - - ixgbe_sync_mac_table(adapter); - - return i; - } - - return -ENOMEM; -} - -static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) -{ - struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; - struct ixgbe_hw *hw = &adapter->hw; - int i; - - for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { - mac_table->state |= IXGBE_MAC_STATE_MODIFIED; - mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; - } - - ixgbe_sync_mac_table(adapter); -} - -int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, - const u8 *addr, u16 pool) -{ - struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; - struct ixgbe_hw *hw = &adapter->hw; - int i; - - if (is_zero_ether_addr(addr)) - return -EINVAL; - - /* search table for addr, if found clear IN USE flag and sync */ - for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { - /* we can only delete an entry if it is in use */ - if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE)) - continue; - /* we only care about entries that belong to the given pool */ - if (mac_table->pool != pool) - continue; - /* we only care about a specific MAC address */ - if (!ether_addr_equal(addr, mac_table->addr)) - continue; - - mac_table->state |= IXGBE_MAC_STATE_MODIFIED; - mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; - - ixgbe_sync_mac_table(adapter); - - return 0; - } - - return -ENOMEM; -} - -#ifdef HAVE_SET_RX_MODE -/** - * ixgbe_write_uc_addr_list - write unicast addresses to RAR table - * @netdev: network interface device structure - * - * Writes unicast address list to the RAR table. 
- * Returns: -ENOMEM on failure/insufficient address space - * 0 on no addresses written - * X on writing X addresses to the RAR table - **/ -int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int count = 0; - - /* return ENOMEM indicating insufficient memory for addresses */ - if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn)) - return -ENOMEM; - - if (!netdev_uc_empty(netdev)) { -#ifdef NETDEV_HW_ADDR_T_UNICAST - struct netdev_hw_addr *ha; -#else - struct dev_mc_list *ha; -#endif - netdev_for_each_uc_addr(ha, netdev) { -#ifdef NETDEV_HW_ADDR_T_UNICAST - ixgbe_del_mac_filter(adapter, ha->addr, vfn); - ixgbe_add_mac_filter(adapter, ha->addr, vfn); -#else - ixgbe_del_mac_filter(adapter, ha->da_addr, vfn); - ixgbe_add_mac_filter(adapter, ha->da_addr, vfn); -#endif - count++; - } - } - return count; -} - -static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int ret; - - ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0)); - - return min_t(int, ret, 0); -} - -static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0)); - - return 0; -} - -#endif -/** - * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set - * @netdev: network interface device structure - * - * The set_rx_method entry point is called whenever the unicast/multicast - * address list or the network interface flags are updated. This routine is - * responsible for configuring the hardware for proper unicast, multicast and - * promiscuous mode. 
- **/ -void ixgbe_set_rx_mode(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; -#if defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) - u32 vlnctrl; -#endif -#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) || defined(NETIF_F_HW_VLAN_FILTER) - netdev_features_t features = netdev->features; -#endif - int count; - - /* Check for Promiscuous and All Multicast modes */ - fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); -#if defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); -#endif - - /* set all bits that we expect to always be set */ - fctrl |= IXGBE_FCTRL_BAM; - fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ - fctrl |= IXGBE_FCTRL_PMCF; - - /* clear the bits we are changing the status of */ - fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); -#if defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) - vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); -#endif - if (netdev->flags & IFF_PROMISC) { - hw->addr_ctrl.user_set_promisc = true; - fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); - vmolr |= IXGBE_VMOLR_MPE; -#ifdef HAVE_VLAN_RX_REGISTER - /* Only disable hardware filter vlans in promiscuous mode - * if SR-IOV and VMDQ are disabled - otherwise ensure - * that hardware VLAN filters remain enabled. 
- */ - if ((adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | - IXGBE_FLAG_SRIOV_ENABLED))) - vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); -#endif -#ifdef NETIF_F_HW_VLAN_CTAG_FILTER - features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; -#endif -#ifdef NETIF_F_HW_VLAN_FILTER - features &= ~NETIF_F_HW_VLAN_FILTER; -#endif - } else { - if (netdev->flags & IFF_ALLMULTI) { - fctrl |= IXGBE_FCTRL_MPE; - vmolr |= IXGBE_VMOLR_MPE; - } - hw->addr_ctrl.user_set_promisc = false; -#if defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) - /* enable hardware vlan filtering */ - vlnctrl |= IXGBE_VLNCTRL_VFE; -#endif - } - -#ifdef HAVE_SET_RX_MODE - /* - * Write addresses to available RAR registers, if there is not - * sufficient space to store all the addresses then enable - * unicast promiscuous mode - */ - if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) { - fctrl |= IXGBE_FCTRL_UPE; - vmolr |= IXGBE_VMOLR_ROPE; - } - -#endif - /* - * Write addresses to the MTA, if the attempt fails - * then we should just turn on promiscuous mode so - * that we can at least receive multicast traffic - */ - count = ixgbe_write_mc_addr_list(netdev); - if (count < 0) { - fctrl |= IXGBE_FCTRL_MPE; - vmolr |= IXGBE_VMOLR_MPE; - } else if (count) { - vmolr |= IXGBE_VMOLR_ROMPE; - } - - if (hw->mac.type != ixgbe_mac_82598EB) { - vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) & - ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | - IXGBE_VMOLR_ROPE); - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr); - } - - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - -#ifdef HAVE_8021P_SUPPORT -#ifdef NETIF_F_HW_VLAN_CTAG_RX - if (features & NETIF_F_HW_VLAN_CTAG_RX) -#else - if (features & NETIF_F_HW_VLAN_RX) -#endif - ixgbe_vlan_strip_enable(adapter); - else - ixgbe_vlan_strip_disable(adapter); -#endif /* HAVE_8021P_SUPPORT */ - -#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) - if (features & NETIF_F_HW_VLAN_CTAG_FILTER) - ixgbe_vlan_promisc_disable(adapter); - else - ixgbe_vlan_promisc_enable(adapter); -#elif 
defined(NETIF_F_HW_VLAN_FILTER) && !defined(HAVE_VLAN_RX_REGISTER) - if (features & NETIF_F_HW_VLAN_FILTER) - ixgbe_vlan_promisc_disable(adapter); - else - ixgbe_vlan_promisc_enable(adapter); -#elif defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); -#endif /* NETIF_F_HW_VLAN_CTAG_FILTER */ -} - -static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) -{ - struct ixgbe_q_vector *q_vector; - int q_idx; - - for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { - q_vector = adapter->q_vector[q_idx]; -#ifdef HAVE_NDO_BUSY_POLL - ixgbe_qv_init_lock(adapter->q_vector[q_idx]); -#endif - napi_enable(&q_vector->napi); - } -} - -static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) -{ - struct ixgbe_q_vector *q_vector; - int q_idx; - - for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { - q_vector = adapter->q_vector[q_idx]; - napi_disable(&q_vector->napi); -#ifdef HAVE_NDO_BUSY_POLL - while(!ixgbe_qv_disable(adapter->q_vector[q_idx])) { - pr_info("QV %d locked\n", q_idx); - usleep_range(1000, 20000); - } -#endif - } -} - -#ifdef HAVE_DCBNL_IEEE -s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame) -{ - __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; - __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; - int i; - - /* naively give each TC a bwg to map onto CEE hardware */ - __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; - - /* Map TSA onto CEE prio type */ - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - switch (ets->tc_tsa[i]) { - case IEEE_8021QAZ_TSA_STRICT: - prio_type[i] = 2; - break; - case IEEE_8021QAZ_TSA_ETS: - prio_type[i] = 0; - break; - default: - /* Hardware only supports priority strict or - * ETS transmission selection algorithms if - * we receive some other value from dcbnl - * throw an error - */ - return -EINVAL; - } - } - - ixgbe_dcb_calculate_tc_credits(ets->tc_tx_bw, refill, max, max_frame); - return ixgbe_dcb_hw_config(hw, refill, 
max, - bwg_id, prio_type, ets->prio_tc); -} -#endif /* HAVE_DCBNL_IEEE */ - -#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) -void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vxlanctrl; - - if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE | - IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))) - return; - - vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask; - IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl); - - if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK) - adapter->vxlan_port = 0; -#ifdef HAVE_UDP_ENC_RX_OFFLOAD - if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK) - adapter->geneve_port = 0; -#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ -} -#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ - -#ifdef NETIF_F_GSO_PARTIAL -/* NETIF_F_GSO_IPXIP4/6 may not be defined in all distributions */ -#ifndef NETIF_F_GSO_IPXIP4 -#define NETIF_F_GSO_IPXIP4 0 -#endif -#ifndef NETIF_F_GSO_IPXIP6 -#define NETIF_F_GSO_IPXIP6 0 -#endif -#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ - NETIF_F_GSO_GRE_CSUM | \ - NETIF_F_GSO_IPXIP4 | \ - NETIF_F_GSO_IPXIP6 | \ - NETIF_F_GSO_UDP_TUNNEL | \ - NETIF_F_GSO_UDP_TUNNEL_CSUM) -#endif /* NETIF_F_GSO_PARTIAL */ - -static inline unsigned long ixgbe_tso_features(void) -{ - unsigned long features = 0; - -#ifdef NETIF_F_TSO - features |= NETIF_F_TSO; -#endif /* NETIF_F_TSO */ -#ifdef NETIF_F_TSO6 - features |= NETIF_F_TSO6; -#endif /* NETIF_F_TSO6 */ -#ifdef NETIF_F_GSO_PARTIAL - features |= NETIF_F_GSO_PARTIAL | IXGBE_GSO_PARTIAL_FEATURES; -#endif - - return features; -} - -/* - * ixgbe_configure_dcb - Configure DCB hardware support - * @adapter: ixgbe adapter struct - * - * Called when the driver opens or needs to reconfigure DCB related bits. 
- */ -static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - - int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - - /* The following workaround for 82598EB was originaly hidden inside a - * kcompat definition of netif_set_gso_max_size. This workaround is - * necessary as the 82598EB hardware does not support TSO and DCB - * unless the stack TSO maximum segment size can be reduced. Older - * kernels do not support the requisite interface, and thus need TSO - * disabled if we want to support DCB. - */ - if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { - if (hw->mac.type == ixgbe_mac_82598EB) { -#ifdef NETDEV_CAN_SET_GSO_MAX_SIZE - netif_set_gso_max_size(netdev, 65536); -#else - /* We previously disabled TSO, so we should enable it - * now. */ - netdev->features |= ixgbe_tso_features(); -#ifdef NETIF_F_GSO_PARTIAL - netdev->gso_partial_features = - IXGBE_GSO_PARTIAL_FEATURES; -#endif -#endif /* NETDEV_CAN_SET_GSO_MAX_SIZE */ - } - return; - } - - if (hw->mac.type == ixgbe_mac_82598EB) { -#ifdef NETDEV_CAN_SET_GSO_MAX_SIZE - netif_set_gso_max_size(netdev, 32768); -#else - /* Simply disable TSO since we cannot change the maximum - * segment size. 
*/ - netdev->features &= ~ixgbe_tso_features(); -#ifdef NETIF_F_GSO_PARTIAL - netdev->gso_partial_features = 0; -#endif -#endif /* NETDEV_CAN_SET_GSO_MAX_SIZE */ - } - -#if IS_ENABLED(CONFIG_FCOE) - if (netdev->features & NETIF_F_FCOE_MTU) - max_frame = max_t(int, max_frame, - IXGBE_FCOE_JUMBO_FRAME_SIZE); -#endif /* CONFIG_FCOE */ - -#ifdef HAVE_DCBNL_IEEE - if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) { - if (adapter->ixgbe_ieee_ets) - ixgbe_dcb_hw_ets(&adapter->hw, - adapter->ixgbe_ieee_ets, - max_frame); - - if (adapter->ixgbe_ieee_pfc && adapter->ixgbe_ieee_ets) { - struct ieee_pfc *pfc = adapter->ixgbe_ieee_pfc; - u8 *tc = adapter->ixgbe_ieee_ets->prio_tc; - - ixgbe_dcb_config_pfc(&adapter->hw, pfc->pfc_en, tc); - } - } else -#endif /* HAVE_DCBNL_IEEE */ - { - ixgbe_dcb_calculate_tc_credits_cee(hw, - &adapter->dcb_cfg, - max_frame, - IXGBE_DCB_TX_CONFIG); - ixgbe_dcb_calculate_tc_credits_cee(hw, - &adapter->dcb_cfg, - max_frame, - IXGBE_DCB_RX_CONFIG); - ixgbe_dcb_hw_config_cee(hw, &adapter->dcb_cfg); - } - - /* Enable RSS Hash per TC */ - if (hw->mac.type != ixgbe_mac_82598EB) { - u32 msb = 0; - u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1; - - while (rss_i) { - msb++; - rss_i >>= 1; - } - - /* write msb to all 8 TCs in one write */ - IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111); - } -} - -#ifndef IXGBE_NO_LLI -static void ixgbe_configure_lli_82599(struct ixgbe_adapter *adapter) -{ - u16 port; - - if (adapter->lli_etype) { - IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), - (IXGBE_IMIR_LLI_EN_82599 | - IXGBE_IMIR_SIZE_BP_82599 | - IXGBE_IMIR_CTRL_BP_82599)); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQS(0), IXGBE_ETQS_LLI); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQF(0), - (adapter->lli_etype | IXGBE_ETQF_FILTER_EN)); - } - - if (adapter->lli_port) { - port = swab16(adapter->lli_port); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), - (IXGBE_IMIR_LLI_EN_82599 | - IXGBE_IMIR_SIZE_BP_82599 | - IXGBE_IMIR_CTRL_BP_82599)); - 
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0), - (IXGBE_FTQF_POOL_MASK_EN | - (IXGBE_FTQF_PRIORITY_MASK << - IXGBE_FTQF_PRIORITY_SHIFT) | - (IXGBE_FTQF_DEST_PORT_MASK << - IXGBE_FTQF_5TUPLE_MASK_SHIFT))); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_SDPQF(0), (port << 16)); - } - - if (adapter->flags & IXGBE_FLAG_LLI_PUSH) { - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), - (IXGBE_IMIR_LLI_EN_82599 | - IXGBE_IMIR_SIZE_BP_82599 | - IXGBE_IMIR_CTRL_PSH_82599 | - IXGBE_IMIR_CTRL_SYN_82599 | - IXGBE_IMIR_CTRL_URG_82599 | - IXGBE_IMIR_CTRL_ACK_82599 | - IXGBE_IMIR_CTRL_RST_82599 | - IXGBE_IMIR_CTRL_FIN_82599)); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_LLITHRESH, - 0xfc000000); - break; - case ixgbe_mac_X540: - IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), - (IXGBE_IMIR_LLI_EN_82599 | - IXGBE_IMIR_SIZE_BP_82599 | - IXGBE_IMIR_CTRL_PSH_82599)); - break; - default: - break; - } - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0), - (IXGBE_FTQF_POOL_MASK_EN | - (IXGBE_FTQF_PRIORITY_MASK << - IXGBE_FTQF_PRIORITY_SHIFT) | - (IXGBE_FTQF_5TUPLE_MASK_MASK << - IXGBE_FTQF_5TUPLE_MASK_SHIFT))); - - IXGBE_WRITE_REG(&adapter->hw, IXGBE_SYNQF, 0x80000100); - } - - if (adapter->lli_size) { - IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), - (IXGBE_IMIR_LLI_EN_82599 | - IXGBE_IMIR_CTRL_BP_82599)); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_LLITHRESH, - adapter->lli_size); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0), - (IXGBE_FTQF_POOL_MASK_EN | - (IXGBE_FTQF_PRIORITY_MASK << - IXGBE_FTQF_PRIORITY_SHIFT) | - (IXGBE_FTQF_5TUPLE_MASK_MASK << - IXGBE_FTQF_5TUPLE_MASK_SHIFT))); - } - - if (adapter->lli_vlan_pri) { - IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIRVP, - (IXGBE_IMIRVP_PRIORITY_EN | - adapter->lli_vlan_pri)); - } -} - -static void ixgbe_configure_lli(struct ixgbe_adapter *adapter) -{ - u16 port; - - /* lli should only be enabled with MSI-X and MSI */ - if (!(adapter->flags & IXGBE_FLAG_MSI_ENABLED) && - !(adapter->flags & 
IXGBE_FLAG_MSIX_ENABLED)) - return; - /* LLI not supported on X550 and X550EM_x*/ - if ((adapter->hw.mac.type == ixgbe_mac_X550) || - (adapter->hw.mac.type == ixgbe_mac_X550EM_x)) - return; - /* LLI not supported on X550EM_a */ - if (adapter->hw.mac.type == ixgbe_mac_X550EM_a) - return; - if (adapter->hw.mac.type != ixgbe_mac_82598EB) { - ixgbe_configure_lli_82599(adapter); - return; - } - - if (adapter->lli_port) { - /* use filter 0 for port */ - port = swab16(adapter->lli_port); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(0), - (port | IXGBE_IMIR_PORT_IM_EN)); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(0), - (IXGBE_IMIREXT_SIZE_BP | - IXGBE_IMIREXT_CTRL_BP)); - } - - if (adapter->flags & IXGBE_FLAG_LLI_PUSH) { - /* use filter 1 for push flag */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(1), - (IXGBE_IMIR_PORT_BP | IXGBE_IMIR_PORT_IM_EN)); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(1), - (IXGBE_IMIREXT_SIZE_BP | - IXGBE_IMIREXT_CTRL_PSH)); - } - - if (adapter->lli_size) { - /* use filter 2 for size */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(2), - (IXGBE_IMIR_PORT_BP | IXGBE_IMIR_PORT_IM_EN)); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(2), - (adapter->lli_size | IXGBE_IMIREXT_CTRL_BP)); - } -} - -#endif /* IXGBE_NO_LLI */ -/* Additional bittime to account for IXGBE framing */ -#define IXGBE_ETH_FRAMING 20 - -/* - * ixgbe_hpbthresh - calculate high water mark for flow control - * - * @adapter: board private structure to calculate for - * @pb - packet buffer to calculate - */ -static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct net_device *dev = adapter->netdev; - int link, tc, kb, marker; - u32 dv_id, rx_pba; - - /* Calculate max LAN frame size */ - tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING; - -#if IS_ENABLED(CONFIG_FCOE) - /* FCoE traffic class uses FCOE jumbo frames */ - if ((dev->features & NETIF_F_FCOE_MTU) && - (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && - (pb == 
netdev_get_prio_tc_map(dev, adapter->fcoe.up))) - tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; -#endif /* CONFIG_FCOE */ - - /* Calculate delay value for device */ - switch (hw->mac.type) { - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - dv_id = IXGBE_DV_X540(link, tc); - break; - default: - dv_id = IXGBE_DV(link, tc); - break; - } - - /* Loopback switch introduces additional latency */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - dv_id += IXGBE_B2BT(tc); - - /* Delay value is calculated in bit times convert to KB */ - kb = IXGBE_BT2KB(dv_id); - rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10; - - marker = rx_pba - kb; - - /* It is possible that the packet buffer is not large enough - * to provide required headroom. In this case throw an error - * to user and a do the best we can. - */ - if (marker < 0) { - e_warn(drv, "Packet Buffer(%i) can not provide enough" - "headroom to suppport flow control." - "Decrease MTU or number of traffic classes\n", pb); - marker = tc + 1; - } - - return marker; -} - -/* - * ixgbe_lpbthresh - calculate low water mark for for flow control - * - * @adapter: board private structure to calculate for - * @pb - packet buffer to calculate - */ -static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int __maybe_unused pb) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct net_device *dev = adapter->netdev; - int tc; - u32 dv_id; - - /* Calculate max LAN frame size */ - tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; - -#if IS_ENABLED(CONFIG_FCOE) - /* FCoE traffic class uses FCOE jumbo frames */ - if ((dev->features & NETIF_F_FCOE_MTU) && - (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && - (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) - tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; -#endif /* CONFIG_FCOE */ - - /* Calculate delay value for device */ - switch (hw->mac.type) { - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - dv_id = IXGBE_LOW_DV_X540(tc); 
- break; - default: - dv_id = IXGBE_LOW_DV(tc); - break; - } - - /* Delay value is calculated in bit times convert to KB */ - return IXGBE_BT2KB(dv_id); -} - -/* - * ixgbe_pbthresh_setup - calculate and setup high low water marks - */ -static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int num_tc = netdev_get_num_tc(adapter->netdev); - int i; - - if (!num_tc) - num_tc = 1; - - - for (i = 0; i < num_tc; i++) { - hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); - hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i); - - /* Low water marks must not be larger than high water marks */ - if (hw->fc.low_water[i] > hw->fc.high_water[i]) - hw->fc.low_water[i] = 0; - } - - for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) - hw->fc.high_water[i] = 0; -} - -static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int hdrm; - u8 tc = netdev_get_num_tc(adapter->netdev); - - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || - adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) - hdrm = 32 << adapter->fdir_pballoc; - else - hdrm = 0; - - hw->mac.ops.setup_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); - ixgbe_pbthresh_setup(adapter); -} - -static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct hlist_node *node2; - struct ixgbe_fdir_filter *filter; - - spin_lock(&adapter->fdir_perfect_lock); - - if (!hlist_empty(&adapter->fdir_filter_list)) - ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask, - adapter->cloud_mode); - - hlist_for_each_entry_safe(filter, node2, - &adapter->fdir_filter_list, fdir_node) { - ixgbe_fdir_write_perfect_filter_82599(hw, - &filter->filter, - filter->sw_idx, - (filter->action == IXGBE_FDIR_DROP_QUEUE) ? 
- IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[filter->action]->reg_idx, - adapter->cloud_mode); - } - - spin_unlock(&adapter->fdir_perfect_lock); -} - -static void ixgbe_configure(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - - ixgbe_configure_pb(adapter); - ixgbe_configure_dcb(adapter); - - /* - * We must restore virtualization before VLANs or else - * the VLVF registers will not be populated - */ - ixgbe_configure_virtualization(adapter); - - ixgbe_set_rx_mode(adapter->netdev); -#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) - ixgbe_restore_vlan(adapter); -#endif - - if (adapter->hw.mac.type == ixgbe_mac_82599EB || - adapter->hw.mac.type == ixgbe_mac_X540) - hw->mac.ops.disable_sec_rx_path(hw); - - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - ixgbe_init_fdir_signature_82599(&adapter->hw, - adapter->fdir_pballoc); - } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { - ixgbe_init_fdir_perfect_82599(&adapter->hw, - adapter->fdir_pballoc, adapter->cloud_mode); - ixgbe_fdir_filter_restore(adapter); - } - - if (adapter->hw.mac.type == ixgbe_mac_82599EB || - adapter->hw.mac.type == ixgbe_mac_X540) - hw->mac.ops.enable_sec_rx_path(hw); - - /* Enable EEE only when supported and enabled */ - if (hw->mac.ops.setup_eee && - (adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) { - bool eee_enable = !!(adapter->flags2 & IXGBE_FLAG2_EEE_ENABLED); - - hw->mac.ops.setup_eee(hw, eee_enable); - } - -#if IS_ENABLED(CONFIG_DCA) - /* configure DCA */ - if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) - ixgbe_setup_dca(adapter); -#endif - -#if IS_ENABLED(CONFIG_FCOE) - /* configure FCoE L2 filters, redirection table, and Rx control */ - ixgbe_configure_fcoe(adapter); -#endif /* CONFIG_FCOE */ - - ixgbe_configure_tx(adapter); - ixgbe_configure_rx(adapter); -} - -static bool ixgbe_is_sfp(struct ixgbe_hw *hw) -{ - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - if (hw->phy.type == ixgbe_phy_nl) - return true; - return false; - 
case ixgbe_mac_82599EB: - switch (hw->mac.ops.get_media_type(hw)) { - case ixgbe_media_type_fiber: - case ixgbe_media_type_fiber_qsfp: - return true; - default: - return false; - } - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) - return true; - return false; - default: - return false; - } -} - -/** - * ixgbe_sfp_link_config - set up SFP+ link - * @adapter: pointer to private adapter struct - **/ -static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) -{ - /* - * We are assuming the worst case scenerio here, and that - * is that an SFP was inserted/removed after the reset - * but before SFP detection was enabled. As such the best - * solution is to just start searching as soon as we start - */ - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; - - adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; - adapter->sfp_poll_time = 0; -} - -/** - * ixgbe_non_sfp_link_config - set up non-SFP+ link - * @hw: pointer to private hardware struct - * - * Returns 0 on success, negative on failure - **/ -static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) -{ - u32 speed; - bool autoneg, link_up = false; - u32 ret = IXGBE_ERR_LINK_SETUP; - - if (hw->mac.ops.check_link) - ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); - - if (ret) - goto link_cfg_out; - - speed = hw->phy.autoneg_advertised; - if ((!speed) && (hw->mac.ops.get_link_capabilities)) - ret = hw->mac.ops.get_link_capabilities(hw, &speed, - &autoneg); - if (ret) - goto link_cfg_out; - - if (hw->mac.ops.setup_link) - ret = hw->mac.ops.setup_link(hw, speed, link_up); -link_cfg_out: - return ret; -} - -/** - * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset - * @adapter: board private structure - * - * On a reset we need to clear out the VF stats or accounting gets - * messed up because they're not clear on read. 
- **/ -static void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int i; - - for (i = 0; i < adapter->num_vfs; i++) { - adapter->vfinfo[i].last_vfstats.gprc = - IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i)); - adapter->vfinfo[i].saved_rst_vfstats.gprc += - adapter->vfinfo[i].vfstats.gprc; - adapter->vfinfo[i].vfstats.gprc = 0; - adapter->vfinfo[i].last_vfstats.gptc = - IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i)); - adapter->vfinfo[i].saved_rst_vfstats.gptc += - adapter->vfinfo[i].vfstats.gptc; - adapter->vfinfo[i].vfstats.gptc = 0; - adapter->vfinfo[i].last_vfstats.gorc = - IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i)); - adapter->vfinfo[i].saved_rst_vfstats.gorc += - adapter->vfinfo[i].vfstats.gorc; - adapter->vfinfo[i].vfstats.gorc = 0; - adapter->vfinfo[i].last_vfstats.gotc = - IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i)); - adapter->vfinfo[i].saved_rst_vfstats.gotc += - adapter->vfinfo[i].vfstats.gotc; - adapter->vfinfo[i].vfstats.gotc = 0; - adapter->vfinfo[i].last_vfstats.mprc = - IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i)); - adapter->vfinfo[i].saved_rst_vfstats.mprc += - adapter->vfinfo[i].vfstats.mprc; - adapter->vfinfo[i].vfstats.mprc = 0; - } -} - -static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 gpie = 0; - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | - IXGBE_GPIE_OCD; - gpie |= IXGBE_GPIE_EIAME; - /* - * use EIAM to auto-mask when MSI-X interrupt is asserted - * this saves a register write for every interrupt - */ - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - default: - IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); - IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); - break; - } - } else { - /* legacy 
interrupts, use EIAM to auto-mask when reading EICR, - * specifically only auto mask tx and rx interrupts */ - IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); - } - - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - gpie &= ~IXGBE_GPIE_VTMODE_MASK; - - switch (adapter->ring_feature[RING_F_VMDQ].mask) { - case IXGBE_82599_VMDQ_8Q_MASK: - gpie |= IXGBE_GPIE_VTMODE_16; - break; - case IXGBE_82599_VMDQ_4Q_MASK: - gpie |= IXGBE_GPIE_VTMODE_32; - break; - default: - gpie |= IXGBE_GPIE_VTMODE_64; - break; - } - } - - /* Enable Thermal over heat sensor interrupt */ - if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - gpie |= IXGBE_SDP0_GPIEN; - break; - default: - break; - } - - /* Enable fan failure interrupt */ - if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) - gpie |= IXGBE_SDP1_GPIEN; - - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN; - break; - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - gpie |= IXGBE_SDP0_GPIEN_X540; - break; - default: - break; - } - - IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); -} - -static void ixgbe_up_complete(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int err; - u32 ctrl_ext; - - ixgbe_get_hw_control(adapter); - ixgbe_setup_gpie(adapter); - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - ixgbe_configure_msix(adapter); - else - ixgbe_configure_msi_and_legacy(adapter); - - /* enable the optics for 82599 SFP+ fiber */ - if (hw->mac.ops.enable_tx_laser) - hw->mac.ops.enable_tx_laser(hw); - ixgbe_set_phy_power(hw, true); - - smp_mb__before_atomic(); - clear_bit(__IXGBE_DOWN, &adapter->state); - ixgbe_napi_enable_all(adapter); -#ifndef IXGBE_NO_LLI - ixgbe_configure_lli(adapter); -#endif - - if (ixgbe_is_sfp(hw)) { - ixgbe_sfp_link_config(adapter); - } else if (!hw->phy.reset_disable) { - err = ixgbe_non_sfp_link_config(hw); - if (err) - e_err(probe, "link_config FAILED %d\n", err); - } 
- - /* clear any pending interrupts, may auto mask */ - IXGBE_READ_REG(hw, IXGBE_EICR); - ixgbe_irq_enable(adapter, true, true); - - /* - * If this adapter has a fan, check to see if we had a failure - * before we enabled the interrupt. - */ - if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { - u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - if (esdp & IXGBE_ESDP_SDP1) - e_crit(drv, "Fan has stopped, replace the adapter\n"); - } - - /* enable transmits */ - netif_tx_start_all_queues(adapter->netdev); - - /* bring the link up in the watchdog, this could race with our first - * link up interrupt but shouldn't be a problem */ - adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; - adapter->link_check_timeout = jiffies; - mod_timer(&adapter->service_timer, jiffies); - - ixgbe_clear_vf_stats_counters(adapter); - /* Set PF Reset Done bit so PF/VF Mail Ops can work */ - ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); - ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; - IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); -} - -void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) -{ - WARN_ON(in_interrupt()); - /* put off any impending NetWatchDogTimeout */ -#ifdef HAVE_NETIF_TRANS_UPDATE - netif_trans_update(adapter->netdev); -#else - adapter->netdev->trans_start = jiffies; -#endif - - while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) - usleep_range(1000, 2000); - if (adapter->hw.phy.type == ixgbe_phy_fw) - ixgbe_watchdog_link_is_down(adapter); - ixgbe_down(adapter); - /* - * If SR-IOV enabled then wait a bit before bringing the adapter - * back up to give the VFs time to respond to the reset. The - * two second wait is based upon the watchdog timer cycle in - * the VF driver. 
- */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - msleep(2000); - ixgbe_up(adapter); - clear_bit(__IXGBE_RESETTING, &adapter->state); -} - -void ixgbe_up(struct ixgbe_adapter *adapter) -{ - /* hardware has been reset, we need to reload some things */ - ixgbe_configure(adapter); - - ixgbe_up_complete(adapter); -} - -void ixgbe_reset(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; -#ifdef HAVE_SET_RX_MODE - struct net_device *netdev = adapter->netdev; -#endif - int err; - - if (IXGBE_REMOVED(hw->hw_addr)) - return; - /* lock SFP init bit to prevent race conditions with the watchdog */ - while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) - usleep_range(1000, 2000); - - /* clear all SFP and link config related flags while holding SFP_INIT */ - adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP | - IXGBE_FLAG2_SFP_NEEDS_RESET); - adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; - - err = hw->mac.ops.init_hw(hw); - switch (err) { - case IXGBE_SUCCESS: - case IXGBE_ERR_SFP_NOT_PRESENT: - case IXGBE_ERR_SFP_NOT_SUPPORTED: - break; - case IXGBE_ERR_MASTER_REQUESTS_PENDING: - e_dev_err("master disable timed out\n"); - break; - case IXGBE_ERR_EEPROM_VERSION: - /* We are running on a pre-production device, log a warning */ - e_dev_warn("This device is a pre-production adapter/LOM. " - "Please be aware there may be issues associated " - "with your hardware. 
If you are experiencing " - "problems please contact your Intel or hardware " - "representative who provided you with this " - "hardware.\n"); - break; - case IXGBE_ERR_OVERTEMP: - e_crit(drv, "%s\n", ixgbe_overheat_msg); - break; - default: - e_dev_err("Hardware Error: %d\n", err); - } - - clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); - - /* flush entries out of MAC table */ - ixgbe_flush_sw_mac_table(adapter); -#ifdef HAVE_SET_RX_MODE - __dev_uc_unsync(netdev, NULL); -#endif - - /* do not flush user set addresses */ - ixgbe_mac_set_default_filter(adapter); - - /* update SAN MAC vmdq pool selection */ - if (hw->mac.san_mac_rar_index) - hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); - - /* Clear saved DMA coalescing values except for watchdog_timer */ - hw->mac.dmac_config.fcoe_en = false; - hw->mac.dmac_config.link_speed = 0; - hw->mac.dmac_config.fcoe_tc = 0; - hw->mac.dmac_config.num_tcs = 0; - -#ifdef HAVE_PTP_1588_CLOCK - if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) - ixgbe_ptp_reset(adapter); -#endif - - if (!netif_running(adapter->netdev) && !adapter->wol) - ixgbe_set_phy_power(hw, false); - else - ixgbe_set_phy_power(hw, true); -} - -/** - * ixgbe_clean_rx_ring - Free Rx Buffers per Queue - * @rx_ring: ring to free buffers from - **/ -void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) -{ - struct device *dev = rx_ring->dev; - unsigned long size; - u16 i; - - /* ring already cleared, nothing to do */ - if (!rx_ring->rx_buffer_info) - return; - - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; - - if (rx_buffer->skb) { - struct sk_buff *skb = rx_buffer->skb; -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - if (IXGBE_CB(skb)->page_released) - dma_unmap_page_attrs(dev, - IXGBE_CB(skb)->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IXGBE_RX_DMA_ATTR); -#else - /* We need to clean up RSC frag lists */ - skb = ixgbe_merge_active_tail(skb); - if 
(ixgbe_close_active_frag_list(skb)) - dma_unmap_single(dev, - IXGBE_CB(skb)->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - IXGBE_CB(skb)->dma = 0; -#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ - dev_kfree_skb(skb); - rx_buffer->skb = NULL; - } - -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - if (!rx_buffer->page) - continue; - - /* Invalidate cache lines that may have been written to by - * device so that we avoid corrupting memory. - */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); - - /* free resources associated with mapping */ - dma_unmap_page_attrs(dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IXGBE_RX_DMA_ATTR); - - __page_frag_cache_drain(rx_buffer->page, - rx_buffer->pagecnt_bias); - - rx_buffer->page = NULL; -#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ - if (!rx_buffer->dma) - continue; - - dma_unmap_single(dev, - rx_buffer->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_buffer->dma = 0; -#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ - } - - size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; - memset(rx_ring->rx_buffer_info, 0, size); -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT - - rx_ring->next_to_alloc = 0; - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; -#endif -} - -/** - * ixgbe_clean_tx_ring - Free Tx Buffers - * @tx_ring: ring to be cleaned - **/ -static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) -{ - struct ixgbe_tx_buffer *tx_buffer_info; - unsigned long size; - u16 i; - - /* ring already cleared, nothing to do */ - if (!tx_ring->tx_buffer_info) - return; - - /* Free all the Tx ring sk_buffs */ - for (i = 0; i < tx_ring->count; i++) { - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); - } - - netdev_tx_reset_queue(txring_txq(tx_ring)); - - size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; - memset(tx_ring->tx_buffer_info, 0, size); - 
- /* Zero out the descriptor ring */ - memset(tx_ring->desc, 0, tx_ring->size); -} - -/** - * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues - * @adapter: board private structure - **/ -static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_rx_queues; i++) - ixgbe_clean_rx_ring(adapter->rx_ring[i]); -} - -/** - * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues - * @adapter: board private structure - **/ -static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) - ixgbe_clean_tx_ring(adapter->tx_ring[i]); -} - -static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) -{ - struct hlist_node *node2; - struct ixgbe_fdir_filter *filter; - - spin_lock(&adapter->fdir_perfect_lock); - - hlist_for_each_entry_safe(filter, node2, - &adapter->fdir_filter_list, fdir_node) { - hlist_del(&filter->fdir_node); - kfree(filter); - } - adapter->fdir_filter_count = 0; - - spin_unlock(&adapter->fdir_perfect_lock); -} - -void ixgbe_down(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - int i; - - /* signal that we are down to the interrupt handler */ - if (test_and_set_bit(__IXGBE_DOWN, &adapter->state)) - return; /* do nothing if already down */ - - /* disable receives */ - ixgbe_disable_rx(hw); - - /* disable all enabled rx queues */ - for (i = 0; i < adapter->num_rx_queues; i++) - /* this call also flushes the previous write */ - ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); - - usleep_range(10000, 20000); - - netif_tx_stop_all_queues(netdev); - - /* call carrier off first to avoid false dev_watchdog timeouts */ - netif_carrier_off(netdev); - netif_tx_disable(netdev); - - ixgbe_irq_disable(adapter); - - ixgbe_napi_disable_all(adapter); - - adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT); - clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state); - 
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; - - del_timer_sync(&adapter->service_timer); - - if (adapter->num_vfs) { - /* Clear EITR Select mapping */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); - - /* Mark all the VFs as inactive */ - for (i = 0 ; i < adapter->num_vfs; i++) - adapter->vfinfo[i].clear_to_send = 0; - - /* ping all the active vfs to let them know we are going down */ - ixgbe_ping_all_vfs(adapter); - - /* Disable all VFTE/VFRE TX/RX */ - ixgbe_disable_tx_rx(adapter); - } - - /* disable transmits in the hardware now that interrupts are off */ - for (i = 0; i < adapter->num_tx_queues; i++) { - u8 reg_idx = adapter->tx_ring[i]->reg_idx; - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); - } - - /* Disable the Tx DMA engine on 82599 and X540 */ - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, - (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & - ~IXGBE_DMATXCTL_TE)); - break; - default: - break; - } - -#ifdef HAVE_PCI_ERS - if (!pci_channel_offline(adapter->pdev)) -#endif - ixgbe_reset(adapter); - - /* power down the optics for 82599 SFP+ fiber */ - if (hw->mac.ops.disable_tx_laser) - hw->mac.ops.disable_tx_laser(hw); - - ixgbe_clean_all_tx_rings(adapter); - ixgbe_clean_all_rx_rings(adapter); -} - -/** - * ixgbe_eee_capable - helper function to determine EEE support on X550 - * - **/ -static inline void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_A_1G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T_L: - if (!hw->phy.eee_speeds_supported) - break; - adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE; - if (!hw->phy.eee_speeds_advertised) - break; - adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; - break; - default: - adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE; - adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; - break; - } 
-} - -#if IS_ENABLED(CONFIG_DCB) -static void ixgbe_init_dcb(struct ixgbe_adapter *adapter) -{ - struct ixgbe_dcb_tc_config *tc; - int j, bwg_pct; - - /* Configure DCB traffic classes */ - bwg_pct = 100 / adapter->dcb_cfg.num_tcs.pg_tcs; - for (j = 0; j < adapter->dcb_cfg.num_tcs.pg_tcs; j++) { - tc = &adapter->dcb_cfg.tc_config[j]; - tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = 0; - tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bwg_pct; - tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = 0; - tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = bwg_pct; - tc->pfc = ixgbe_dcb_pfc_disabled; - } - - /* reset back to TC 0 */ - tc = &adapter->dcb_cfg.tc_config[0]; - - /* total of all TCs bandwidth needs to be 100 */ - bwg_pct += 100 % adapter->dcb_cfg.num_tcs.pg_tcs; - tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bwg_pct; - tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = bwg_pct; - - /* Initialize default user to priority mapping, UPx->TC0 */ - tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; - tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; - - adapter->dcb_cfg.bw_percentage[IXGBE_DCB_TX_CONFIG][0] = 100; - adapter->dcb_cfg.bw_percentage[IXGBE_DCB_RX_CONFIG][0] = 100; - adapter->dcb_cfg.rx_pba_cfg = ixgbe_dcb_pba_equal; - adapter->dcb_cfg.pfc_mode_enable = false; - adapter->dcb_cfg.round_robin_enable = false; - adapter->dcb_set_bitmap = 0x00; - if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) - adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; - memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, - sizeof(adapter->temp_dcb_cfg)); -} -#endif /*CONFIG_DCB*/ - -/** - * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) - * @adapter: board private structure to initialize - * - * ixgbe_sw_init initializes the Adapter private data structure. - * Fields are initialized based on PCI device information and - * OS network device settings (MTU size). 
- **/ -static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; - int err; - unsigned int fdir; - u32 fwsm; - u16 device_caps; - - /* PCI config space info */ - - hw->vendor_id = pdev->vendor; - hw->device_id = pdev->device; - pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); - if (hw->revision_id == IXGBE_FAILED_READ_CFG_BYTE && - ixgbe_check_cfg_remove(hw, pdev)) { - e_err(probe, "read of revision id failed\n"); - err = -ENODEV; - goto out; - } - hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_device_id = pdev->subsystem_device; - - err = ixgbe_init_shared_code(hw); - if (err) { - e_err(probe, "init_shared_code failed: %d\n", err); - goto out; - } - adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * - hw->mac.num_rar_entries, - GFP_ATOMIC); - if (!adapter->mac_table) { - err = IXGBE_ERR_OUT_OF_MEM; - e_err(probe, "mac_table allocation failed: %d\n", err); - goto out; - } - - if (ixgbe_init_rss_key(adapter)) { - err = IXGBE_ERR_OUT_OF_MEM; - e_err(probe, "rss_key allocation failed: %d\n", err); - goto out; - } - - /* Set common capability flags and settings */ -#if IS_ENABLED(CONFIG_DCA) - adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; -#endif -#if IS_ENABLED(CONFIG_DCB) - adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; - adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; -#endif -#if IS_ENABLED(CONFIG_FCOE) - adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; - adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; -#if IS_ENABLED(CONFIG_DCB) - /* Default traffic class to use for FCoE */ - adapter->fcoe.up = IXGBE_FCOE_DEFUP; - adapter->fcoe.up_set = IXGBE_FCOE_DEFUP; -#endif /* CONFIG_DCB */ -#endif /* CONFIG_FCOE */ - adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; - fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); - adapter->ring_feature[RING_F_FDIR].limit = fdir; - adapter->max_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82599; - - /* Set MAC specific capability 
flags and exceptions */ - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - adapter->flags |= IXGBE_FLAGS_82598_INIT; - adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; - - if (hw->device_id == IXGBE_DEV_ID_82598AT) - adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; - - adapter->max_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82598; - adapter->ring_feature[RING_F_FDIR].limit = 0; -#if IS_ENABLED(CONFIG_FCOE) - adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; -#if IS_ENABLED(CONFIG_DCB) - adapter->fcoe.up = 0; - adapter->fcoe.up_set = 0; -#endif /* IXGBE_DCB */ -#endif /* CONFIG_FCOE */ - break; - case ixgbe_mac_82599EB: - adapter->flags |= IXGBE_FLAGS_82599_INIT; - if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) - adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; -#ifndef IXGBE_NO_SMART_SPEED - hw->phy.smart_speed = ixgbe_smart_speed_on; -#else - hw->phy.smart_speed = ixgbe_smart_speed_off; -#endif - break; - case ixgbe_mac_X540: - adapter->flags |= IXGBE_FLAGS_X540_INIT; - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); - if (fwsm & IXGBE_FWSM_TS_ENABLED) - adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; - break; - case ixgbe_mac_X550EM_a: - adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE; - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_A_1G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T_L: - adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; - break; - default: - break; - } - /* fall through */ - case ixgbe_mac_X550EM_x: -#if IS_ENABLED(CONFIG_DCB) - adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; -#endif -#if IS_ENABLED(CONFIG_FCOE) - adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; -#if IS_ENABLED(CONFIG_DCB) - adapter->fcoe.up = 0; - adapter->fcoe.up_set = 0; -#endif /* CONFIG_DCB */ -#endif /* CONFIG_FCOE */ - /* fall through */ - case ixgbe_mac_X550: - if (hw->mac.type == ixgbe_mac_X550) - adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; - ixgbe_set_eee_capable(adapter); - adapter->flags |= IXGBE_FLAGS_X550_INIT; -#if IS_ENABLED(CONFIG_DCA) - adapter->flags &= 
~IXGBE_FLAG_DCA_CAPABLE; -#endif /* CONFIG_DCA */ - default: - break; - } - -#if IS_ENABLED(CONFIG_FCOE) - /* FCoE support exists, always init the FCoE lock */ - spin_lock_init(&adapter->fcoe.lock); -#endif /* CONFIG_FCOE */ - - /* n-tuple support exists, always init our spinlock */ - spin_lock_init(&adapter->fdir_perfect_lock); - -#if IS_ENABLED(CONFIG_DCB) - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - case ixgbe_mac_82599EB: - adapter->dcb_cfg.num_tcs.pg_tcs = 8; - adapter->dcb_cfg.num_tcs.pfc_tcs = 8; - break; - case ixgbe_mac_X540: - case ixgbe_mac_X550: - adapter->dcb_cfg.num_tcs.pg_tcs = 4; - adapter->dcb_cfg.num_tcs.pfc_tcs = 4; - break; - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - default: - adapter->dcb_cfg.num_tcs.pg_tcs = 1; - adapter->dcb_cfg.num_tcs.pfc_tcs = 1; - break; - } - ixgbe_init_dcb(adapter); - -#endif /* CONFIG_DCB */ - - if (hw->mac.type == ixgbe_mac_82599EB || - hw->mac.type == ixgbe_mac_X550 || - hw->mac.type == ixgbe_mac_X550EM_x || - hw->mac.type == ixgbe_mac_X550EM_a || - hw->mac.type == ixgbe_mac_X540) - hw->mbx.ops.init_params(hw); - - /* default flow control settings */ - hw->fc.requested_mode = ixgbe_fc_full; - hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ - - adapter->last_lfc_mode = hw->fc.current_mode; - ixgbe_pbthresh_setup(adapter); - hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; - hw->fc.send_xon = true; - hw->fc.disable_fc_autoneg = false; - - /* set default ring sizes */ - adapter->tx_ring_count = IXGBE_DEFAULT_TXD; - adapter->rx_ring_count = IXGBE_DEFAULT_RXD; - - /* set default work limits */ - adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; - adapter->rx_work_limit = IXGBE_DEFAULT_RX_WORK; - - /* Cache bit indicating need for crosstalk fix */ - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - hw->mac.ops.get_device_caps(hw, &device_caps); - if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) - adapter->need_crosstalk_fix = 
false; - else - adapter->need_crosstalk_fix = true; - break; - default: - adapter->need_crosstalk_fix = false; - break; - } - set_bit(__IXGBE_DOWN, &adapter->state); -out: - return err; -} - -/** - * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) - * @tx_ring: tx descriptor ring (for a specific queue) to setup - * - * Return 0 on success, negative on failure - **/ -int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) -{ - struct device *dev = tx_ring->dev; - int orig_node = dev_to_node(dev); - int numa_node = -1; - int size; - - size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; - - if (tx_ring->q_vector) - numa_node = tx_ring->q_vector->numa_node; - - tx_ring->tx_buffer_info = vzalloc_node(size, numa_node); - if (!tx_ring->tx_buffer_info) - tx_ring->tx_buffer_info = vzalloc(size); - if (!tx_ring->tx_buffer_info) - goto err; - - /* round up to nearest 4K */ - tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); - tx_ring->size = ALIGN(tx_ring->size, 4096); - - set_dev_node(dev, numa_node); - tx_ring->desc = dma_alloc_coherent(dev, - tx_ring->size, - &tx_ring->dma, - GFP_KERNEL); - set_dev_node(dev, orig_node); - if (!tx_ring->desc) - tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); - if (!tx_ring->desc) - goto err; - - return 0; - -err: - vfree(tx_ring->tx_buffer_info); - tx_ring->tx_buffer_info = NULL; - dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); - return -ENOMEM; -} - -/** - * ixgbe_setup_all_tx_resources - allocate all queues Tx resources - * @adapter: board private structure - * - * If this function returns with an error, then it's possible one or - * more of the rings is populated (while the rest are not). It is the - * callers duty to clean those orphaned rings. 
- * - * Return 0 on success, negative on failure - **/ -static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) -{ - int i, err = 0; - - for (i = 0; i < adapter->num_tx_queues; i++) { - - err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); - if (!err) - continue; - - e_err(probe, "Allocation for Tx Queue %u failed\n", i); - goto err_setup_tx; - } - - return 0; -err_setup_tx: - /* rewind the index freeing the rings as we go */ - while (i--) - ixgbe_free_tx_resources(adapter->tx_ring[i]); - return err; -} - -/** - * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) - * @rx_ring: rx descriptor ring (for a specific queue) to setup - * - * Returns 0 on success, negative on failure - **/ -int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) -{ - struct device *dev = rx_ring->dev; - int orig_node = dev_to_node(dev); - int numa_node = -1; - int size; - - size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; - - if (rx_ring->q_vector) - numa_node = rx_ring->q_vector->numa_node; - - rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); - if (!rx_ring->rx_buffer_info) - rx_ring->rx_buffer_info = vzalloc(size); - if (!rx_ring->rx_buffer_info) - goto err; - - /* Round up to nearest 4K */ - rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); - rx_ring->size = ALIGN(rx_ring->size, 4096); - - set_dev_node(dev, numa_node); - rx_ring->desc = dma_alloc_coherent(dev, - rx_ring->size, - &rx_ring->dma, - GFP_KERNEL); - set_dev_node(dev, orig_node); - if (!rx_ring->desc) - rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); - if (!rx_ring->desc) - goto err; - - return 0; -err: - vfree(rx_ring->rx_buffer_info); - rx_ring->rx_buffer_info = NULL; - dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); - return -ENOMEM; -} - -/** - * ixgbe_setup_all_rx_resources - allocate all queues Rx resources - * @adapter: board private structure - * - * If this function returns with an error, then it's 
possible one or - * more of the rings is populated (while the rest are not). It is the - * callers duty to clean those orphaned rings. - * - * Return 0 on success, negative on failure - **/ -static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) -{ - int i, err = 0; - - for (i = 0; i < adapter->num_rx_queues; i++) { - err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); - if (!err) { - continue; - } - - e_err(probe, "Allocation for Rx Queue %u failed\n", i); - goto err_setup_rx; - } - -#if IS_ENABLED(CONFIG_FCOE) - err = ixgbe_setup_fcoe_ddp_resources(adapter); - if (!err) -#endif - return 0; -err_setup_rx: - /* rewind the index freeing the rings as we go */ - while (i--) - ixgbe_free_rx_resources(adapter->rx_ring[i]); - return err; -} - -/** - * ixgbe_free_tx_resources - Free Tx Resources per Queue - * @tx_ring: Tx descriptor ring for a specific queue - * - * Free all transmit software resources - **/ -void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) -{ - ixgbe_clean_tx_ring(tx_ring); - - vfree(tx_ring->tx_buffer_info); - tx_ring->tx_buffer_info = NULL; - - /* if not set, then don't free */ - if (!tx_ring->desc) - return; - - dma_free_coherent(tx_ring->dev, tx_ring->size, - tx_ring->desc, tx_ring->dma); - tx_ring->desc = NULL; -} - -/** - * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues - * @adapter: board private structure - * - * Free all transmit software resources - **/ -static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) - ixgbe_free_tx_resources(adapter->tx_ring[i]); -} - -/** - * ixgbe_free_rx_resources - Free Rx Resources - * @rx_ring: ring to clean the resources from - * - * Free all receive software resources - **/ -void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) -{ - ixgbe_clean_rx_ring(rx_ring); - - vfree(rx_ring->rx_buffer_info); - rx_ring->rx_buffer_info = NULL; - - /* if not set, then don't free */ - if (!rx_ring->desc) - 
return; - - dma_free_coherent(rx_ring->dev, rx_ring->size, - rx_ring->desc, rx_ring->dma); - - rx_ring->desc = NULL; -} - -/** - * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues - * @adapter: board private structure - * - * Free all receive software resources - **/ -static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) -{ - int i; - -#if IS_ENABLED(CONFIG_FCOE) - ixgbe_free_fcoe_ddp_resources(adapter); -#endif - - for (i = 0; i < adapter->num_rx_queues; i++) - ixgbe_free_rx_resources(adapter->rx_ring[i]); -} - -/** - * ixgbe_change_mtu - Change the Maximum Transfer Unit - * @netdev: network interface device structure - * @new_mtu: new value for maximum frame size - * - * Returns 0 on success, negative on failure - **/ -static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); -#ifndef HAVE_NETDEVICE_MIN_MAX_MTU - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; -#endif - -#ifndef HAVE_NETDEVICE_MIN_MAX_MTU - /* MTU < 68 is an error and causes problems on some kernels */ - if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) - return -EINVAL; - -#endif - /* - * For 82599EB we cannot allow legacy VFs to enable their receive - * paths when MTU greater than 1500 is configured. So display a - * warning that legacy VFs will be disabled. 
- */ - if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && - (adapter->hw.mac.type == ixgbe_mac_82599EB) && -#ifndef HAVE_NETDEVICE_MIN_MAX_MTU - (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) -#else - (new_mtu > ETH_DATA_LEN)) -#endif - e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); - - e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); - - /* must set new MTU before calling down or up */ - netdev->mtu = new_mtu; - - if (netif_running(netdev)) - ixgbe_reinit_locked(adapter); - - return 0; -} - -/** - * ixgbe_open - Called when a network interface is made active - * @netdev: network interface device structure - * - * Returns 0 on success, negative value on failure - * - * The open entry point is called when a network interface is made - * active by the system (IFF_UP). At this point all resources needed - * for transmit and receive operations are allocated, the interrupt - * handler is registered with the OS, the watchdog timer is started, - * and the stack is notified that the interface is ready. - **/ -static int ixgbe_open(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int err; - - /* disallow open during test */ - if (test_bit(__IXGBE_TESTING, &adapter->state)) - return -EBUSY; - - netif_carrier_off(netdev); - - /* allocate transmit descriptors */ - err = ixgbe_setup_all_tx_resources(adapter); - if (err) - goto err_setup_tx; - - /* allocate receive descriptors */ - err = ixgbe_setup_all_rx_resources(adapter); - if (err) - goto err_setup_rx; - - ixgbe_configure(adapter); - - err = ixgbe_request_irq(adapter); - if (err) - goto err_req_irq; - - /* Notify the stack of the actual queue counts. */ - err = netif_set_real_num_tx_queues(netdev, - adapter->num_rx_pools > 1 ? 1 : - adapter->num_tx_queues); - if (err) - goto err_set_queues; - - err = netif_set_real_num_rx_queues(netdev, - adapter->num_rx_pools > 1 ? 
1 : - adapter->num_rx_queues); - if (err) - goto err_set_queues; - -#ifdef HAVE_PTP_1588_CLOCK - ixgbe_ptp_init(adapter); -#endif /* HAVE_PTP_1588_CLOCK*/ - - ixgbe_up_complete(adapter); - -#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) - ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); -#endif -#ifdef HAVE_UDP_ENC_RX_OFFLOAD - udp_tunnel_get_rx_info(netdev); -#elif defined(HAVE_VXLAN_RX_OFFLOAD) - vxlan_get_rx_port(netdev); -#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ - return IXGBE_SUCCESS; - -err_set_queues: - ixgbe_free_irq(adapter); -err_req_irq: - ixgbe_free_all_rx_resources(adapter); - if (!adapter->wol) - ixgbe_set_phy_power(&adapter->hw, false); -err_setup_rx: - ixgbe_free_all_tx_resources(adapter); -err_setup_tx: - ixgbe_reset(adapter); - - return err; -} - -/** - * ixgbe_close_suspend - actions necessary to both suspend and close flows - * @adapter: the private adapter struct - * - * This function should contain the necessary work common to both suspending - * and closing of the device. - */ -static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) -{ -#ifdef HAVE_PTP_1588_CLOCK - ixgbe_ptp_suspend(adapter); -#endif - - if (adapter->hw.phy.ops.enter_lplu) { - adapter->hw.phy.reset_disable = true; - ixgbe_down(adapter); - ixgbe_enter_lplu(&adapter->hw); - adapter->hw.phy.reset_disable = false; - } else { - ixgbe_down(adapter); - } - ixgbe_free_irq(adapter); - - ixgbe_free_all_rx_resources(adapter); - ixgbe_free_all_tx_resources(adapter); -} - -/** - * ixgbe_close - Disables a network interface - * @netdev: network interface device structure - * - * Returns 0, this is not allowed to fail - * - * The close entry point is called when an interface is de-activated - * by the OS. The hardware is still under the drivers control, but - * needs to be disabled. A global MAC reset is issued to stop the - * hardware, and all transmit and receive resources are freed. 
- **/ -static int ixgbe_close(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - -#ifdef HAVE_PTP_1588_CLOCK - ixgbe_ptp_stop(adapter); -#endif - - if (netif_device_present(netdev)) - ixgbe_close_suspend(adapter); - - ixgbe_fdir_filter_exit(adapter); - - ixgbe_release_hw_control(adapter); - - return 0; -} - -#ifdef CONFIG_PM -#ifndef USE_LEGACY_PM_SUPPORT -static int ixgbe_resume(struct device *dev) -#else -static int ixgbe_resume(struct pci_dev *pdev) -#endif /* USE_LEGACY_PM_SUPPORT */ -{ - struct ixgbe_adapter *adapter; - struct net_device *netdev; - u32 err; -#ifndef USE_LEGACY_PM_SUPPORT - struct pci_dev *pdev = to_pci_dev(dev); -#endif - - adapter = pci_get_drvdata(pdev); - netdev = adapter->netdev; - adapter->hw.hw_addr = adapter->io_addr; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - /* - * pci_restore_state clears dev->state_saved so call - * pci_save_state to restore it. - */ - pci_save_state(pdev); - - err = pci_enable_device_mem(pdev); - if (err) { - e_dev_err("Cannot enable PCI device from suspend\n"); - return err; - } - smp_mb__before_atomic(); - clear_bit(__IXGBE_DISABLED, &adapter->state); - pci_set_master(pdev); - - pci_wake_from_d3(pdev, false); - - ixgbe_reset(adapter); - - IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); - - rtnl_lock(); - - err = ixgbe_init_interrupt_scheme(adapter); - if (!err && netif_running(netdev)) - err = ixgbe_open(netdev); - - - if (!err) - netif_device_attach(netdev); - - rtnl_unlock(); - - return err; -} - -#ifndef USE_LEGACY_PM_SUPPORT -/** - * ixgbe_freeze - quiesce the device (no IRQ's or DMA) - * @dev: The port's netdev - */ -static int ixgbe_freeze(struct device *dev) -{ - struct ixgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); - struct net_device *netdev = adapter->netdev; - bool lplu_enabled = !!adapter->hw.phy.ops.enter_lplu; - - rtnl_lock(); - netif_device_detach(netdev); - - if (netif_running(netdev)) { - if (lplu_enabled) { - 
adapter->hw.phy.reset_disable = true; - ixgbe_down(adapter); - adapter->hw.phy.reset_disable = false; - } else { - ixgbe_down(adapter); - } - ixgbe_free_irq(adapter); - } - - ixgbe_reset_interrupt_capability(adapter); - rtnl_unlock(); - - return 0; -} - -/** - * ixgbe_thaw - un-quiesce the device - * @dev: The port's netdev - */ -static int ixgbe_thaw(struct device *dev) -{ - struct ixgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); - struct net_device *netdev = adapter->netdev; - bool lplu_enabled = !!adapter->hw.phy.ops.enter_lplu; - - ixgbe_set_interrupt_capability(adapter); - - if (netif_running(netdev)) { - u32 err = ixgbe_request_irq(adapter); - if (err) - return err; - - if (lplu_enabled) { - adapter->hw.phy.reset_disable = true; - ixgbe_up(adapter); - adapter->hw.phy.reset_disable = false; - } else { - ixgbe_up(adapter); - } - } - - netif_device_attach(netdev); - - return 0; -} -#endif /* USE_LEGACY_PM_SUPPORT */ -#endif /* CONFIG_PM */ - -/* - * __ixgbe_shutdown is not used when power management - * is disabled on older kernels (<2.6.12). causes a compile - * warning/error, because it is defined and not used. 
- */ -#if defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) -static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) -{ - struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - u32 ctrl, fctrl; - u32 wufc = adapter->wol; -#ifdef CONFIG_PM - int retval = 0; -#endif - - rtnl_lock(); - netif_device_detach(netdev); - - if (netif_running(netdev)) - ixgbe_close_suspend(adapter); - - ixgbe_clear_interrupt_scheme(adapter); - rtnl_unlock(); - -#ifdef CONFIG_PM - retval = pci_save_state(pdev); - if (retval) - return retval; - -#endif - - /* this won't stop link of managebility or WoL is enabled */ - if (hw->mac.type == ixgbe_mac_82599EB) - ixgbe_stop_mac_link_on_d3_82599(hw); - - if (wufc) { - ixgbe_set_rx_mode(netdev); - - /* enable the optics for 82599 SFP+ fiber as we can WoL */ - if (hw->mac.ops.enable_tx_laser) - hw->mac.ops.enable_tx_laser(hw); - - /* turn on all-multi mode if wake on multicast is enabled */ - if (wufc & IXGBE_WUFC_MC) { - fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - fctrl |= IXGBE_FCTRL_MPE; - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - } - - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - ctrl |= IXGBE_CTRL_GIO_DIS; - IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); - - IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc); - } else { - IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); - IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); - } - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - pci_wake_from_d3(pdev, false); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - pci_wake_from_d3(pdev, !!wufc); - break; - default: - break; - } - - *enable_wake = !!wufc; - if (!*enable_wake) - ixgbe_set_phy_power(hw, false); - - ixgbe_release_hw_control(adapter); - - if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) - pci_disable_device(pdev); - - return 0; -} -#endif /* defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) */ - 
-#ifdef CONFIG_PM -#ifndef USE_LEGACY_PM_SUPPORT -static int ixgbe_suspend(struct device *dev) -#else -static int ixgbe_suspend(struct pci_dev *pdev, - pm_message_t __always_unused state) -#endif /* USE_LEGACY_PM_SUPPORT */ -{ - int retval; - bool wake; -#ifndef USE_LEGACY_PM_SUPPORT - struct pci_dev *pdev = to_pci_dev(dev); -#endif - - retval = __ixgbe_shutdown(pdev, &wake); - if (retval) - return retval; - - if (wake) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } - - return 0; -} -#endif /* CONFIG_PM */ - -#ifndef USE_REBOOT_NOTIFIER -static void ixgbe_shutdown(struct pci_dev *pdev) -{ - bool wake; - - __ixgbe_shutdown(pdev, &wake); - - if (system_state == SYSTEM_POWER_OFF) { - pci_wake_from_d3(pdev, wake); - pci_set_power_state(pdev, PCI_D3hot); - } -} - -#endif -#ifdef HAVE_NDO_GET_STATS64 -/** - * ixgbe_get_stats64 - Get System Network Statistics - * @netdev: network interface device structure - * @stats: storage space for 64bit statistics - * - * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This - * function replaces ixgbe_get_stats for kernels which support it. 
- */ -#ifdef HAVE_VOID_NDO_GET_STATS64 -static void ixgbe_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) -#else -static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) -#endif -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int i; - - rcu_read_lock(); - for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); - u64 bytes, packets; - unsigned int start; - - if (ring) { - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - packets = ring->stats.packets; - bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - stats->rx_packets += packets; - stats->rx_bytes += bytes; - } - } - - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); - u64 bytes, packets; - unsigned int start; - - if (ring) { - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - packets = ring->stats.packets; - bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - stats->tx_packets += packets; - stats->tx_bytes += bytes; - } - } - rcu_read_unlock(); - /* following stats updated by ixgbe_watchdog_task() */ - stats->multicast = netdev->stats.multicast; - stats->rx_errors = netdev->stats.rx_errors; - stats->rx_length_errors = netdev->stats.rx_length_errors; - stats->rx_crc_errors = netdev->stats.rx_crc_errors; - stats->rx_missed_errors = netdev->stats.rx_missed_errors; -#ifndef HAVE_VOID_NDO_GET_STATS64 - return stats; -#endif -} -#else -/** - * ixgbe_get_stats - Get System Network Statistics - * @netdev: network interface device structure - * - * Returns the address of the device statistics structure. - * The statistics are actually updated from the timer callback. 
- **/ -static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - /* update the stats data */ - ixgbe_update_stats(adapter); - -#ifdef HAVE_NETDEV_STATS_IN_NETDEV - /* only return the current stats */ - return &netdev->stats; -#else - /* only return the current stats */ - return &adapter->net_stats; -#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ -} -#endif -/** - * ixgbe_update_stats - Update the board statistics counters. - * @adapter: board private structure - **/ -void ixgbe_update_stats(struct ixgbe_adapter *adapter) -{ -#ifdef HAVE_NETDEV_STATS_IN_NETDEV - struct net_device_stats *net_stats = &adapter->netdev->stats; -#else - struct net_device_stats *net_stats = &adapter->net_stats; -#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_hw_stats *hwstats = &adapter->stats; - u64 total_mpc = 0; - u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; - u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; - u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; - u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; - - if (test_bit(__IXGBE_DOWN, &adapter->state) || - test_bit(__IXGBE_RESETTING, &adapter->state)) - return; - - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { - u64 rsc_count = 0; - u64 rsc_flush = 0; - for (i = 0; i < adapter->num_rx_queues; i++) { - rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; - rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; - } - adapter->rsc_total_count = rsc_count; - adapter->rsc_total_flush = rsc_flush; - } - - for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; - non_eop_descs += rx_ring->rx_stats.non_eop_descs; - alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; - alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; - hw_csum_rx_error += rx_ring->rx_stats.csum_err; - bytes += rx_ring->stats.bytes; - packets += 
rx_ring->stats.packets; - - } - adapter->non_eop_descs = non_eop_descs; - adapter->alloc_rx_page_failed = alloc_rx_page_failed; - adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; - adapter->hw_csum_rx_error = hw_csum_rx_error; - net_stats->rx_bytes = bytes; - net_stats->rx_packets = packets; - - bytes = 0; - packets = 0; - /* gather some stats to the adapter struct that are per queue */ - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; - restart_queue += tx_ring->tx_stats.restart_queue; - tx_busy += tx_ring->tx_stats.tx_busy; - bytes += tx_ring->stats.bytes; - packets += tx_ring->stats.packets; - } - adapter->restart_queue = restart_queue; - adapter->tx_busy = tx_busy; - net_stats->tx_bytes = bytes; - net_stats->tx_packets = packets; - - hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); - - /* 8 register reads */ - for (i = 0; i < 8; i++) { - /* for packet buffers not used, the register should read 0 */ - mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); - missed_rx += mpc; - hwstats->mpc[i] += mpc; - total_mpc += hwstats->mpc[i]; - hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); - hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); - hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); - hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); - hwstats->pxonrxc[i] += - IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - hwstats->pxonrxc[i] += - IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); - break; - default: - break; - } - } - - /*16 register reads */ - for (i = 0; i < 16; i++) { - hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); - hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); - if ((hw->mac.type == ixgbe_mac_82599EB) || - (hw->mac.type == 
ixgbe_mac_X550) || - (hw->mac.type == ixgbe_mac_X550EM_x) || - (hw->mac.type == ixgbe_mac_X550EM_a) || - (hw->mac.type == ixgbe_mac_X540)) { - hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); - IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ - hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); - IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */ - } - } - - hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); - /* work around hardware counting issue */ - hwstats->gprc -= missed_rx; - - ixgbe_update_xoff_received(adapter); - - /* 82598 hardware only has a 32 bit counter in the high register */ - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); - hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); - hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); - hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); - break; - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - /* OS2BMC stats are X540 only*/ - hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); - hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); - hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); - hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); - /* fall through */ - case ixgbe_mac_82599EB: - for (i = 0; i < 16; i++) - adapter->hw_rx_no_dma_resources += - IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); - hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); - IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ - hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); - IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ - hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); - IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ - hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); -#ifdef HAVE_TX_MQ - hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); - hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); -#endif /* HAVE_TX_MQ */ -#if IS_ENABLED(CONFIG_FCOE) - hwstats->fccrc += IXGBE_READ_REG(hw, 
IXGBE_FCCRC); - hwstats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); - hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); - hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); - hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); - hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); - hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); - /* Add up per cpu counters for total ddp alloc fail */ - if (adapter->fcoe.ddp_pool) { - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - struct ixgbe_fcoe_ddp_pool *ddp_pool; - unsigned int cpu; - u64 noddp = 0, noddp_ext_buff = 0; - for_each_possible_cpu(cpu) { - ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); - noddp += ddp_pool->noddp; - noddp_ext_buff += ddp_pool->noddp_ext_buff; - } - hwstats->fcoe_noddp = noddp; - hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; - } - -#endif /* CONFIG_FCOE */ - break; - default: - break; - } - bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); - hwstats->bprc += bprc; - hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); - if (hw->mac.type == ixgbe_mac_82598EB) - hwstats->mprc -= bprc; - hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); - hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); - hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); - hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); - hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); - hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); - hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); - hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); - lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); - hwstats->lxontxc += lxon; - lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); - hwstats->lxofftxc += lxoff; - hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); - hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); - /* - * 82598 errata - tx of flow control packets is included in tx counters - */ - xon_off_tot = lxon + lxoff; - hwstats->gptc -= xon_off_tot; - hwstats->mptc -= xon_off_tot; - hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); - 
hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); - hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); - hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); - hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); - hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); - hwstats->ptc64 -= xon_off_tot; - hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); - hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); - hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); - hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); - hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); - hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); - /* Fill out the OS statistics structure */ - net_stats->multicast = hwstats->mprc; - - /* Rx Errors */ - net_stats->rx_errors = hwstats->crcerrs + - hwstats->rlec; - net_stats->rx_dropped = 0; - net_stats->rx_length_errors = hwstats->rlec; - net_stats->rx_crc_errors = hwstats->crcerrs; - net_stats->rx_missed_errors = total_mpc; - - /* - * VF Stats Collection - skip while resetting because these - * are not clear on read and otherwise you'll sometimes get - * crazy values. 
- */ - if (!test_bit(__IXGBE_RESETTING, &adapter->state)) { - for (i = 0; i < adapter->num_vfs; i++) { - UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i), \ - adapter->vfinfo[i].last_vfstats.gprc, \ - adapter->vfinfo[i].vfstats.gprc); - UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i), \ - adapter->vfinfo[i].last_vfstats.gptc, \ - adapter->vfinfo[i].vfstats.gptc); - UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i), \ - IXGBE_PVFGORC_MSB(i), \ - adapter->vfinfo[i].last_vfstats.gorc, \ - adapter->vfinfo[i].vfstats.gorc); - UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i), \ - IXGBE_PVFGOTC_MSB(i), \ - adapter->vfinfo[i].last_vfstats.gotc, \ - adapter->vfinfo[i].vfstats.gotc); - UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i), \ - adapter->vfinfo[i].last_vfstats.mprc, \ - adapter->vfinfo[i].vfstats.mprc); - } - } -} - -#ifdef HAVE_TX_MQ -/** - * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table - * @adapter - pointer to the device adapter structure - **/ -static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int i; - - if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) - return; - - adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; - - /* if interface is down do nothing */ - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return; - - /* do nothing if we are not using signature filters */ - if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) - return; - - adapter->fdir_overflow++; - - if (ixgbe_reinit_fdir_tables_82599(hw) == IXGBE_SUCCESS) { - for (i = 0; i < adapter->num_tx_queues; i++) - set_bit(__IXGBE_TX_FDIR_INIT_DONE, - &(adapter->tx_ring[i]->state)); - /* re-enable flow director interrupts */ - IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); - } else { - e_err(probe, "failed to finish FDIR re-initialization, " - "ignored adding FDIR ATR filters\n"); - } -} - -#endif /* HAVE_TX_MQ */ -/** - * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts - * @adapter - pointer to the 
device adapter structure - * - * This function serves two purposes. First it strobes the interrupt lines - * in order to make certain interrupts are occurring. Secondly it sets the - * bits needed to check for TX hangs. As a result we should immediately - * determine if a hang has occurred. - */ -static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u64 eics = 0; - int i; - - /* If we're down or resetting, just bail */ - if (test_bit(__IXGBE_DOWN, &adapter->state) || - test_bit(__IXGBE_REMOVE, &adapter->state) || - test_bit(__IXGBE_RESETTING, &adapter->state)) - return; - - /* Force detection of hung controller */ - if (netif_carrier_ok(adapter->netdev)) { - for (i = 0; i < adapter->num_tx_queues; i++) - set_check_for_tx_hang(adapter->tx_ring[i]); - } - - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { - /* - * for legacy and MSI interrupts don't set any bits - * that are enabled for EIAM, because this operation - * would set *both* EIMS and EICS for any bit in EIAM - */ - IXGBE_WRITE_REG(hw, IXGBE_EICS, - (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); - } else { - /* get one bit for every active tx/rx interrupt vector */ - for (i = 0; i < adapter->num_q_vectors; i++) { - struct ixgbe_q_vector *qv = adapter->q_vector[i]; - if (qv->rx.ring || qv->tx.ring) - eics |= ((u64)1 << i); - } - } - - /* Cause software interrupt to ensure rings are cleaned */ - ixgbe_irq_rearm_queues(adapter, eics); -} - -/** - * ixgbe_watchdog_update_link - update the link status - * @adapter - pointer to the device adapter structure - * @link_speed - pointer to a u32 to store the link_speed - **/ -static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 link_speed = adapter->link_speed; - bool link_up = adapter->link_up; - bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; - - if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) - return; - - if (hw->mac.ops.check_link) { - 
hw->mac.ops.check_link(hw, &link_speed, &link_up, false); - } else { - /* always assume link is up, if no check link function */ - link_speed = IXGBE_LINK_SPEED_10GB_FULL; - link_up = true; - } - - /* If Crosstalk fix enabled do the sanity check of making sure - * the SFP+ cage is empty. - */ - if (adapter->need_crosstalk_fix) { - u32 sfp_cage_full; - - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & - IXGBE_ESDP_SDP2; - break; - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & - IXGBE_ESDP_SDP0; - break; - default: - /* Non-SFP+ system - sanity check */ - sfp_cage_full = false; - break; - } - - if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full) - link_up = false; - } - -#ifdef HAVE_DCBNL_IEEE - if (adapter->ixgbe_ieee_pfc) - pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); - -#endif - if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) { - hw->mac.ops.fc_enable(hw); - ixgbe_set_rx_drop_en(adapter); - } - - if (link_up || - time_after(jiffies, (adapter->link_check_timeout + - IXGBE_TRY_LINK_TIMEOUT))) { - adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; - IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); - IXGBE_WRITE_FLUSH(hw); - } - - adapter->link_up = link_up; - adapter->link_speed = link_speed; - if (hw->mac.ops.dmac_config && hw->mac.dmac_config.watchdog_timer) { - u8 num_tcs = netdev_get_num_tc(adapter->netdev); -#if IS_ENABLED(CONFIG_FCOE) - u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter); - bool fcoe_en = !!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED); -#endif /* CONFIG_FCOE */ - - if (hw->mac.dmac_config.link_speed != link_speed || -#if IS_ENABLED(CONFIG_FCOE) - hw->mac.dmac_config.fcoe_tc != fcoe_tc || - hw->mac.dmac_config.fcoe_en != fcoe_en || -#endif /* CONFIG_FCOE */ - hw->mac.dmac_config.num_tcs != num_tcs) { - hw->mac.dmac_config.link_speed = link_speed; - hw->mac.dmac_config.num_tcs = num_tcs; -#if IS_ENABLED(CONFIG_FCOE) - 
hw->mac.dmac_config.fcoe_en = fcoe_en; - hw->mac.dmac_config.fcoe_tc = fcoe_tc; -#endif /* CONFIG_FCOE */ - hw->mac.ops.dmac_config(hw); - } - } -} - -static void ixgbe_update_default_up(struct ixgbe_adapter *adapter) -{ - u8 up = 0; -#ifdef HAVE_DCBNL_IEEE - struct net_device *netdev = adapter->netdev; - struct dcb_app app = { - .selector = DCB_APP_IDTYPE_ETHTYPE, - .protocol = 0, - }; - up = dcb_getapp(netdev, &app); -#endif - -#if IS_ENABLED(CONFIG_FCOE) - adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0; -#else - adapter->default_up = up; -#endif -} - -/** - * ixgbe_watchdog_link_is_up - update netif_carrier status and - * print link up message - * @adapter - pointer to the device adapter structure - **/ -static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - u32 link_speed = adapter->link_speed; - const char *speed_str; - bool flow_rx, flow_tx; - - /* only continue if link was previously down */ - if (netif_carrier_ok(netdev)) - return; - - adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: { - u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); - flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); - flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); - } - break; - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: { - u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); - u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); - flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); - flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); - } - break; - default: - flow_tx = false; - flow_rx = false; - break; - } - -#ifdef HAVE_PTP_1588_CLOCK - adapter->last_rx_ptp_check = jiffies; - - if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) - ixgbe_ptp_start_cyclecounter(adapter); - -#endif - switch (link_speed) { - case IXGBE_LINK_SPEED_10GB_FULL: - speed_str 
= "10 Gbps"; - break; - case IXGBE_LINK_SPEED_5GB_FULL: - speed_str = "5 Gbps"; - break; - case IXGBE_LINK_SPEED_2_5GB_FULL: - speed_str = "2.5 Gbps"; - break; - case IXGBE_LINK_SPEED_1GB_FULL: - speed_str = "1 Gbps"; - break; - case IXGBE_LINK_SPEED_100_FULL: - speed_str = "100 Mbps"; - break; - case IXGBE_LINK_SPEED_10_FULL: - speed_str = "10 Mbps"; - break; - default: - speed_str = "unknown speed"; - break; - } - e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str, - ((flow_rx && flow_tx) ? "RX/TX" : - (flow_rx ? "RX" : - (flow_tx ? "TX" : "None")))); - - netif_carrier_on(netdev); -#ifdef IFLA_VF_MAX - ixgbe_check_vf_rate_limit(adapter); -#endif /* IFLA_VF_MAX */ - /* Turn on malicious driver detection */ - if ((adapter->num_vfs) && (hw->mac.ops.enable_mdd) && - (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) - hw->mac.ops.enable_mdd(hw); - - netif_tx_wake_all_queues(netdev); - - /* update the default user priority for VFs */ - ixgbe_update_default_up(adapter); - - /* ping all the active vfs to let them know link has changed */ - ixgbe_ping_all_vfs(adapter); -} - -/** - * ixgbe_watchdog_link_is_down - update netif_carrier status and - * print link down message - * @adapter - pointer to the adapter structure - **/ -static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - - adapter->link_up = false; - adapter->link_speed = 0; - - /* only continue if link was up previously */ - if (!netif_carrier_ok(netdev)) - return; - - /* poll for SFP+ cable when link is down */ - if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) - adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; - -#ifdef HAVE_PTP_1588_CLOCK - if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) - ixgbe_ptp_start_cyclecounter(adapter); - -#endif - e_info(drv, "NIC Link is Down\n"); - netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); - - /* ping all the active vfs to let them know link 
has changed */ - ixgbe_ping_all_vfs(adapter); -} - -static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; - - if (tx_ring->next_to_use != tx_ring->next_to_clean) - return true; - } - - return false; -} - -static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; - u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); - - int i, j; - - if (!adapter->num_vfs) - return false; - - /* resetting the PF is only needed for MACs < X550 */ - if (hw->mac.type >= ixgbe_mac_X550) - return false; - for (i = 0; i < adapter->num_vfs; i++) { - for (j = 0; j < q_per_pool; j++) { - u32 h, t; - - h = IXGBE_READ_REG(hw, IXGBE_PVFTDHn(q_per_pool, i, j)); - t = IXGBE_READ_REG(hw, IXGBE_PVFTDTn(q_per_pool, i, j)); - - if (h != t) - return true; - } - } - - return false; -} - -/** - * ixgbe_watchdog_flush_tx - flush queues on link down - * @adapter - pointer to the device adapter structure - **/ -static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) -{ - if (!netif_carrier_ok(adapter->netdev)) { - if (ixgbe_ring_tx_pending(adapter) || - ixgbe_vf_tx_pending(adapter)) { - /* We've lost link, so the controller stops DMA, - * but we've got queued Tx work that's never going - * to get done, so reset controller to flush Tx. - * (Do the reset outside of interrupt context). 
- */ - e_warn(drv, "initiating reset due to lost link with pending Tx work\n"); - set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); - } - } -} - -#ifdef CONFIG_PCI_IOV -static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter, - struct pci_dev *vfdev) -{ - int pos, i; - u16 status; - - /* wait for pending transactions on the bus */ - for (i = 0; i < 4; i++) { - if (i) - msleep((1 << (i - 1)) * 100); - - pcie_capability_read_word(vfdev, PCI_EXP_DEVSTA, &status); - if (!(status & PCI_EXP_DEVSTA_TRPND)) - goto clear; - } - - e_dev_warn("Issuing VFLR with pending transactions\n"); - -clear: - pos = pci_find_capability(vfdev, PCI_CAP_ID_EXP); - if (!pos) - return; - - e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); - pci_write_config_word(vfdev, pos + PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_BCR_FLR); - msleep(100); -} - -static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; - unsigned int vf; - u32 gpc; - - if (!(netif_carrier_ok(adapter->netdev))) - return; - - gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); - if (gpc) /* If incrementing then no need for the check below */ - return; - /* - * Check to see if a bad DMA write target from an errant or - * malicious VF has caused a PCIe error. If so then we can - * issue a VFLR to the offending VF(s) and then resume without - * requesting a full slot reset. 
- */ - - if (!pdev) - return; - - /* check status reg for all VFs owned by this PF */ - for (vf = 0; vf < adapter->num_vfs; ++vf) { - struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; - u16 status_reg; - - if (!vfdev) - continue; - pci_read_config_word(vfdev, PCI_STATUS, &status_reg); - if (status_reg != IXGBE_FAILED_READ_CFG_WORD && - status_reg & PCI_STATUS_REC_MASTER_ABORT) - ixgbe_issue_vf_flr(adapter, vfdev); - } -} - -static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) -{ - u32 ssvpc; - - /* Do not perform spoof check for 82598 or if not in IOV mode */ - if (adapter->hw.mac.type == ixgbe_mac_82598EB || - adapter->num_vfs == 0) - return; - - ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); - - /* - * ssvpc register is cleared on read, if zero then no - * spoofed packets in the last interval. - */ - if (!ssvpc) - return; - - e_warn(drv, "%d Spoofed packets detected\n", ssvpc); -} - -#endif /* CONFIG_PCI_IOV */ - -/** - * ixgbe_watchdog_subtask - check and bring link up - * @adapter - pointer to the device adapter structure - **/ -static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) -{ - /* if interface is down do nothing */ - if (test_bit(__IXGBE_DOWN, &adapter->state) || - test_bit(__IXGBE_REMOVE, &adapter->state) || - test_bit(__IXGBE_RESETTING, &adapter->state)) - return; - - ixgbe_watchdog_update_link(adapter); - - if (adapter->link_up) - ixgbe_watchdog_link_is_up(adapter); - else - ixgbe_watchdog_link_is_down(adapter); -#ifdef CONFIG_PCI_IOV - ixgbe_spoof_check(adapter); - ixgbe_check_for_bad_vf(adapter); -#endif /* CONFIG_PCI_IOV */ - ixgbe_update_stats(adapter); - - ixgbe_watchdog_flush_tx(adapter); -} - -/** - * ixgbe_sfp_detection_subtask - poll for SFP+ cable - * @adapter - the ixgbe adapter structure - **/ -static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - s32 err; - - /* If crosstalk fix enabled verify the SFP+ cage is full */ - if (adapter->need_crosstalk_fix) 
{ - u32 sfp_cage_full; - - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & - IXGBE_ESDP_SDP2; - break; - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & - IXGBE_ESDP_SDP0; - break; - default: - /* Non-SFP+ system - sanity check */ - sfp_cage_full = false; - break; - } - if (!sfp_cage_full) - return; - } - - /* not searching for SFP so there is nothing to do here */ - if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && - !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) - return; - - if (adapter->sfp_poll_time && - time_after(adapter->sfp_poll_time, jiffies)) - return; /* If not yet time to poll for SFP */ - - /* someone else is in init, wait until next service event */ - if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) - return; - - adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1; - - err = hw->phy.ops.identify_sfp(hw); - if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) - goto sfp_out; - - if (err == IXGBE_ERR_SFP_NOT_PRESENT) { - /* If no cable is present, then we need to reset - * the next time we find a good cable. */ - adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; - } - - /* exit on error */ - if (err) - goto sfp_out; - - /* exit if reset not needed */ - if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) - goto sfp_out; - - adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET; - - /* - * A module may be identified correctly, but the EEPROM may not have - * support for that module. setup_sfp() will fail in that case, so - * we should not allow that module to load. 
- */ - if (hw->mac.type == ixgbe_mac_82598EB) - err = hw->phy.ops.reset(hw); - else - err = hw->mac.ops.setup_sfp(hw); - - if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) - goto sfp_out; - - adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; - e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); - -sfp_out: - clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); - - if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && - adapter->netdev_registered) { - e_dev_err("failed to initialize because an unsupported " - "SFP+ module type was detected.\n"); - e_dev_err("Reload the driver after installing a " - "supported module.\n"); - unregister_netdev(adapter->netdev); - adapter->netdev_registered = false; - } -} - -/** - * ixgbe_sfp_link_config_subtask - set up link SFP after module install - * @adapter - the ixgbe adapter structure - **/ -static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 speed; - bool autoneg = false; - - if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) - return; - - /* someone else is in init, wait until next service event */ - if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) - return; - - adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; - - speed = hw->phy.autoneg_advertised; - if ((!speed) && (hw->mac.ops.get_link_capabilities)) { - hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); - /* setup the highest link when no autoneg */ - if (!autoneg) { - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - speed = IXGBE_LINK_SPEED_10GB_FULL; - } - } - - if (hw->mac.ops.setup_link) - hw->mac.ops.setup_link(hw, speed, true); - - adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; - adapter->link_check_timeout = jiffies; - clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); -} - -/** - * ixgbe_service_timer - Timer Call-back - * @data: pointer to adapter cast into an unsigned long - **/ -static void ixgbe_service_timer(unsigned long data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - unsigned 
long next_event_offset; - - /* poll faster when waiting for link */ - if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) - next_event_offset = HZ / 10; - else - next_event_offset = HZ * 2; - - /* Reset the timer */ - mod_timer(&adapter->service_timer, next_event_offset + jiffies); - - ixgbe_service_event_schedule(adapter); -} - -static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter) -{ - u32 status; - - if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT)) - return; - adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT; - status = ixgbe_handle_lasi(&adapter->hw); - if (status != IXGBE_ERR_OVERTEMP) - return; - e_crit(drv, "%s\n", ixgbe_overheat_msg); -} - -static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) -{ - if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state)) - return; - - /* If we're already down or resetting, just bail */ - if (test_bit(__IXGBE_DOWN, &adapter->state) || - test_bit(__IXGBE_REMOVE, &adapter->state) || - test_bit(__IXGBE_RESETTING, &adapter->state)) - return; - - netdev_err(adapter->netdev, "Reset adapter\n"); - adapter->tx_timeout_count++; - - rtnl_lock(); - ixgbe_reinit_locked(adapter); - rtnl_unlock(); -} - -/** - * ixgbe_service_task - manages and runs subtasks - * @work: pointer to work_struct containing our data - **/ -static void ixgbe_service_task(struct work_struct *work) -{ - struct ixgbe_adapter *adapter = container_of(work, - struct ixgbe_adapter, - service_task); - if (IXGBE_REMOVED(adapter->hw.hw_addr)) { - if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - rtnl_lock(); - ixgbe_down(adapter); - rtnl_unlock(); - } - ixgbe_service_event_complete(adapter); - return; - } -#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) - if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) { - rtnl_lock(); - adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; -#ifdef HAVE_UDP_ENC_RX_OFFLOAD - udp_tunnel_get_rx_info(adapter->netdev); -#else - vxlan_get_rx_port(adapter->netdev); -#endif /* 
HAVE_UDP_ENC_RX_OFFLOAD */ - rtnl_unlock(); - } -#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ - ixgbe_reset_subtask(adapter); - ixgbe_phy_interrupt_subtask(adapter); - ixgbe_sfp_detection_subtask(adapter); - ixgbe_sfp_link_config_subtask(adapter); - ixgbe_check_overtemp_subtask(adapter); - ixgbe_watchdog_subtask(adapter); -#ifdef HAVE_TX_MQ - ixgbe_fdir_reinit_subtask(adapter); -#endif - ixgbe_check_hang_subtask(adapter); -#ifdef HAVE_PTP_1588_CLOCK - if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { - ixgbe_ptp_overflow_check(adapter); - if (unlikely(adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)) - ixgbe_ptp_rx_hang(adapter); - ixgbe_ptp_tx_hang(adapter); - } -#endif /* HAVE_PTP_1588_CLOCK */ - - ixgbe_service_event_complete(adapter); -} - -#ifdef NETIF_F_GSO_PARTIAL -static int ixgbe_tso(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *first, - u8 *hdr_len) -{ - u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; - struct sk_buff *skb = first->skb; - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } ip; - union { - struct tcphdr *tcp; - unsigned char *hdr; - } l4; - u32 paylen, l4_offset; - int err; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return 0; - - if (!skb_is_gso(skb)) - return 0; - - err = skb_cow_head(skb, 0); - if (err < 0) - return err; - - ip.hdr = skb_network_header(skb); - l4.hdr = skb_checksum_start(skb); - - /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ - type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; - - /* initialize outer IP header fields */ - if (ip.v4->version == 4) { - /* IP header will have to cancel out any data that - * is not a part of the outer IP header - */ - ip.v4->check = csum_fold(csum_add(lco_csum(skb), - csum_unfold(l4.tcp->check))); - type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; - - ip.v4->tot_len = 0; - first->tx_flags |= IXGBE_TX_FLAGS_TSO | - IXGBE_TX_FLAGS_CSUM | - IXGBE_TX_FLAGS_IPV4; - } else { - ip.v6->payload_len = 0; - first->tx_flags |= IXGBE_TX_FLAGS_TSO | - 
IXGBE_TX_FLAGS_CSUM; - } - - /* determine offset of inner transport header */ - l4_offset = l4.hdr - skb->data; - - /* compute length of segmentation header */ - *hdr_len = (l4.tcp->doff * 4) + l4_offset; - - /* remove payload length from inner checksum */ - paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); - - /* update gso size and bytecount with header size */ - first->gso_segs = skb_shinfo(skb)->gso_segs; - first->bytecount += (first->gso_segs - 1) * *hdr_len; - - /* mss_l4len_id: use 0 as index for TSO */ - mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; - mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; - - /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ - vlan_macip_lens = l4.hdr - ip.hdr; - vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; - vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - - ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, - mss_l4len_idx); - - return 1; -} - -static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb) -{ - unsigned int offset = 0; - - ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); - - return offset == skb_checksum_start_offset(skb); -} - -static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *first) -{ - struct sk_buff *skb = first->skb; - u32 vlan_macip_lens = 0; - u32 type_tucmd = 0; - - if (skb->ip_summed != CHECKSUM_PARTIAL) { -csum_failed: - if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | - IXGBE_TX_FLAGS_CC))) - return; - goto no_csum; - } - - switch (skb->csum_offset) { - case offsetof(struct tcphdr, check): - type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; - /* fall through */ - case offsetof(struct udphdr, check): - break; - case offsetof(struct sctphdr, checksum): - /* validate that this is actually an SCTP request */ - if (((first->protocol == htons(ETH_P_IP)) && - (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || - ((first->protocol == htons(ETH_P_IPV6)) && - 
ixgbe_ipv6_csum_is_sctp(skb))) { - type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; - break; - } - /* fall through */ - default: - skb_checksum_help(skb); - goto csum_failed; - } - - /* update TX checksum flag */ - first->tx_flags |= IXGBE_TX_FLAGS_CSUM; - vlan_macip_lens = skb_checksum_start_offset(skb) - - skb_network_offset(skb); -no_csum: - /* vlan_macip_lens: MACLEN, VLAN tag */ - vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; - vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - - ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0); -} -#else -static int ixgbe_tso(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *first, - u8 *hdr_len) -{ -#ifndef NETIF_F_TSO - return 0; -#else - struct sk_buff *skb = first->skb; - u32 vlan_macip_lens, type_tucmd; - u32 mss_l4len_idx, l4len; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return 0; - - if (!skb_is_gso(skb)) - return 0; - - if (skb_header_cloned(skb)) { - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (err) - return err; - } - - /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ - type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; - - if (first->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); - type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; - first->tx_flags |= IXGBE_TX_FLAGS_TSO | - IXGBE_TX_FLAGS_CSUM | - IXGBE_TX_FLAGS_IPV4; -#ifdef NETIF_F_TSO6 - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); - first->tx_flags |= IXGBE_TX_FLAGS_TSO | - IXGBE_TX_FLAGS_CSUM; -#endif /* NETIF_F_TSO6 */ - } - - /* compute header lengths */ - l4len = tcp_hdrlen(skb); - *hdr_len = skb_transport_offset(skb) + l4len; - - /* update gso size and bytecount with header size */ - first->gso_segs = skb_shinfo(skb)->gso_segs; - 
first->bytecount += (first->gso_segs - 1) * *hdr_len; - - /* mss_l4len_id: use 0 as index for TSO */ - mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; - mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; - - /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ - vlan_macip_lens = skb_network_header_len(skb); - vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; - vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - - ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, - mss_l4len_idx); - - return 1; -#endif /* !NETIF_F_TSO */ -} - -static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *first) -{ - struct sk_buff *skb = first->skb; - u32 vlan_macip_lens = 0; - u32 mss_l4len_idx = 0; - u32 type_tucmd = 0; - - if (skb->ip_summed != CHECKSUM_PARTIAL) { - if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && - !(first->tx_flags & IXGBE_TX_FLAGS_CC)) - return; - vlan_macip_lens = skb_network_offset(skb) << - IXGBE_ADVTXD_MACLEN_SHIFT; - } else { - u8 l4_hdr = 0; -#ifdef HAVE_ENCAP_TSO_OFFLOAD - union { - struct iphdr *ipv4; - struct ipv6hdr *ipv6; - u8 *raw; - } network_hdr; - union { - struct tcphdr *tcphdr; - u8 *raw; - } transport_hdr; - __be16 frag_off; - - if (skb->encapsulation) { - network_hdr.raw = skb_inner_network_header(skb); - transport_hdr.raw = skb_inner_transport_header(skb); - vlan_macip_lens = skb_inner_network_offset(skb) << - IXGBE_ADVTXD_MACLEN_SHIFT; - } else { - network_hdr.raw = skb_network_header(skb); - transport_hdr.raw = skb_transport_header(skb); - vlan_macip_lens = skb_network_offset(skb) << - IXGBE_ADVTXD_MACLEN_SHIFT; - } - - /* use first 4 bits to determine IP version */ - switch (network_hdr.ipv4->version) { - case IPVERSION: - vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; - type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; - l4_hdr = network_hdr.ipv4->protocol; - break; - case 6: - vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; - l4_hdr = 
network_hdr.ipv6->nexthdr; - if (likely((transport_hdr.raw - network_hdr.raw) == - sizeof(struct ipv6hdr))) - break; - ipv6_skip_exthdr(skb, network_hdr.raw - skb->data + - sizeof(struct ipv6hdr), - &l4_hdr, &frag_off); - if (unlikely(frag_off)) - l4_hdr = NEXTHDR_FRAGMENT; - break; - default: - break; - } - -#else /* HAVE_ENCAP_TSO_OFFLOAD */ - switch (first->protocol) { - case __constant_htons(ETH_P_IP): - vlan_macip_lens |= skb_network_header_len(skb); - type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; - l4_hdr = ip_hdr(skb)->protocol; - break; -#ifdef NETIF_F_IPV6_CSUM - case __constant_htons(ETH_P_IPV6): - vlan_macip_lens |= skb_network_header_len(skb); - l4_hdr = ipv6_hdr(skb)->nexthdr; - break; -#endif /* NETIF_F_IPV6_CSUM */ - default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum but proto=%x!\n", - first->protocol); - } - break; - } -#endif /* HAVE_ENCAP_TSO_OFFLOAD */ - - switch (l4_hdr) { - case IPPROTO_TCP: - type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; -#ifdef HAVE_ENCAP_TSO_OFFLOAD - mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << - IXGBE_ADVTXD_L4LEN_SHIFT; -#else - mss_l4len_idx = tcp_hdrlen(skb) << - IXGBE_ADVTXD_L4LEN_SHIFT; -#endif /* HAVE_ENCAP_TSO_OFFLOAD */ - break; -#ifdef HAVE_SCTP - case IPPROTO_SCTP: - type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; - mss_l4len_idx = sizeof(struct sctphdr) << - IXGBE_ADVTXD_L4LEN_SHIFT; - break; -#endif /* HAVE_SCTP */ - case IPPROTO_UDP: - mss_l4len_idx = sizeof(struct udphdr) << - IXGBE_ADVTXD_L4LEN_SHIFT; - break; - default: -#ifdef HAVE_ENCAP_TSO_OFFLOAD - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum, version=%d, l4 proto=%x\n", - network_hdr.ipv4->version, l4_hdr); - } - skb_checksum_help(skb); - goto no_csum; -#else - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum but l4 proto=%x!\n", - l4_hdr); - } -#endif /* HAVE_ENCAP_TSO_OFFLOAD */ - break; - } - - /* update TX checksum flag */ - first->tx_flags |= 
IXGBE_TX_FLAGS_CSUM; - } - -#ifdef HAVE_ENCAP_TSO_OFFLOAD -no_csum: -#endif /* HAVE_ENCAP_TSO_OFFLOAD */ - /* vlan_macip_lens: MACLEN, VLAN tag */ -#ifndef HAVE_ENCAP_TSO_OFFLOAD - vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; -#endif /* !HAVE_ENCAP_TSO_OFFLOAD */ - vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - - ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, - type_tucmd, mss_l4len_idx); -} -#endif /* NETIF_F_GSO_PARTIAL */ - -#define IXGBE_SET_FLAG(_input, _flag, _result) \ - ((_flag <= _result) ? \ - ((u32)(_input & _flag) * (_result / _flag)) : \ - ((u32)(_input & _flag) / (_flag / _result))) - -static u32 ixgbe_tx_cmd_type(u32 tx_flags) -{ - /* set type for advanced descriptor with frame checksum insertion */ - u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA | - IXGBE_ADVTXD_DCMD_DEXT | - IXGBE_ADVTXD_DCMD_IFCS; - - /* set HW vlan bit if vlan is present */ - cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN, - IXGBE_ADVTXD_DCMD_VLE); - - /* set segmentation enable bits for TSO/FSO */ - cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO, - IXGBE_ADVTXD_DCMD_TSE); - - /* set timestamp bit if present */ - cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP, - IXGBE_ADVTXD_MAC_TSTAMP); - - return cmd_type; -} - -static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, - u32 tx_flags, unsigned int paylen) -{ - u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; - - /* enable L4 checksum for TSO and TX checksum offload */ - olinfo_status |= IXGBE_SET_FLAG(tx_flags, - IXGBE_TX_FLAGS_CSUM, - IXGBE_ADVTXD_POPTS_TXSM); - - /* enble IPv4 checksum for TSO */ - olinfo_status |= IXGBE_SET_FLAG(tx_flags, - IXGBE_TX_FLAGS_IPV4, - IXGBE_ADVTXD_POPTS_IXSM); - - /* - * Check Context must be set if Tx switch is enabled, which it - * always is for case where virtual functions are running - */ - olinfo_status |= IXGBE_SET_FLAG(tx_flags, - IXGBE_TX_FLAGS_CC, - IXGBE_ADVTXD_CC); - - tx_desc->read.olinfo_status 
= cpu_to_le32(olinfo_status); -} - -static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) -{ - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - - /* Herbert's original patch had: - * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. - */ - smp_mb(); - - /* We need to check again in a case another CPU has just - * made room available. - */ - if (likely(ixgbe_desc_unused(tx_ring) < size)) - return -EBUSY; - - /* A reprieve! - use start_queue because it doesn't call schedule */ - netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); - ++tx_ring->tx_stats.restart_queue; - return 0; -} - -static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) -{ - if (likely(ixgbe_desc_unused(tx_ring) >= size)) - return 0; - - return __ixgbe_maybe_stop_tx(tx_ring, size); -} - -#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ - IXGBE_TXD_CMD_RS) - -static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *first, - const u8 hdr_len) -{ - struct sk_buff *skb = first->skb; - struct ixgbe_tx_buffer *tx_buffer; - union ixgbe_adv_tx_desc *tx_desc; - struct skb_frag_struct *frag; - dma_addr_t dma; - unsigned int data_len, size; - u32 tx_flags = first->tx_flags; - u32 cmd_type = ixgbe_tx_cmd_type(tx_flags); - u16 i = tx_ring->next_to_use; - - tx_desc = IXGBE_TX_DESC(tx_ring, i); - - ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); - - size = skb_headlen(skb); - data_len = skb->data_len; - -#if IS_ENABLED(CONFIG_FCOE) - if (tx_flags & IXGBE_TX_FLAGS_FCOE) { - if (data_len < sizeof(struct fcoe_crc_eof)) { - size -= sizeof(struct fcoe_crc_eof) - data_len; - data_len = 0; - } else { - data_len -= sizeof(struct fcoe_crc_eof); - } - } -#endif /* CONFIG_FCOE */ - - dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); - - tx_buffer = first; - - for (frag = &skb_shinfo(skb)->frags[0];; frag++) { - if (dma_mapping_error(tx_ring->dev, dma)) - goto dma_error; - - /* 
record length, and DMA address */ - dma_unmap_len_set(tx_buffer, len, size); - dma_unmap_addr_set(tx_buffer, dma, dma); - - tx_desc->read.buffer_addr = cpu_to_le64(dma); - - while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { - tx_desc->read.cmd_type_len = - cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD); - - i++; - tx_desc++; - if (i == tx_ring->count) { - tx_desc = IXGBE_TX_DESC(tx_ring, 0); - i = 0; - } - tx_desc->read.olinfo_status = 0; - - dma += IXGBE_MAX_DATA_PER_TXD; - size -= IXGBE_MAX_DATA_PER_TXD; - - tx_desc->read.buffer_addr = cpu_to_le64(dma); - } - - if (likely(!data_len)) - break; - - tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); - - i++; - tx_desc++; - if (i == tx_ring->count) { - tx_desc = IXGBE_TX_DESC(tx_ring, 0); - i = 0; - } - tx_desc->read.olinfo_status = 0; - -#if IS_ENABLED(CONFIG_FCOE) - size = min_t(unsigned int, data_len, skb_frag_size(frag)); -#else - size = skb_frag_size(frag); -#endif - data_len -= size; - - dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, - DMA_TO_DEVICE); - - tx_buffer = &tx_ring->tx_buffer_info[i]; - } - - /* write last descriptor with RS and EOP bits */ - cmd_type |= size | IXGBE_TXD_CMD; - tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); - - netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); - - /* set the timestamp */ - first->time_stamp = jiffies; - -#ifndef HAVE_TRANS_START_IN_QUEUE - netdev_ring(tx_ring)->trans_start = first->time_stamp; -#endif - /* - * Force memory writes to complete before letting h/w know there - * are new descriptors to fetch. (Only applicable for weak-ordered - * memory model archs, such as IA-64). - * - * We also need this memory barrier to make certain all of the - * status bits have been updated before next_to_watch is written. 
- */ - wmb(); - - /* set next_to_watch value indicating a packet is present */ - first->next_to_watch = tx_desc; - - i++; - if (i == tx_ring->count) - i = 0; - - tx_ring->next_to_use = i; - - ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); - -#ifdef HAVE_SKB_XMIT_MORE - if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { - writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); - } -#else - /* notify HW of packet */ - writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); -#endif /* HAVE_SKB_XMIT_MORE */ - - return 0; -dma_error: - dev_err(tx_ring->dev, "TX DMA map failed\n"); - tx_buffer = &tx_ring->tx_buffer_info[i]; - - /* clear dma mappings for failed tx_buffer_info map */ - for (;;) { - tx_buffer = &tx_ring->tx_buffer_info[i]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); - if (tx_buffer == first) - break; - if (i == 0) - i = tx_ring->count; - i--; - } - - tx_ring->next_to_use = i; - - return -1; -} - -static void ixgbe_atr(struct ixgbe_ring *ring, - struct ixgbe_tx_buffer *first) -{ - struct ixgbe_q_vector *q_vector = ring->q_vector; - union ixgbe_atr_hash_dword input = { .dword = 0 }; - union ixgbe_atr_hash_dword common = { .dword = 0 }; - union { - unsigned char *network; - struct iphdr *ipv4; - struct ipv6hdr *ipv6; - } hdr; - struct tcphdr *th; -#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) - struct sk_buff *skb; -#else -#define IXGBE_NO_VXLAN -#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ -#ifdef IXGBE_NO_VXLAN - unsigned int hlen; -#endif /* IXGBE_NO_VXLAN */ - __be16 vlan_id; - - /* if ring doesn't have a interrupt vector, cannot perform ATR */ - if (!q_vector) - return; - - /* do nothing if sampling is disabled */ - if (!ring->atr_sample_rate) - return; - - 
ring->atr_count++; - - /* snag network header to get L4 type and address */ -#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) - skb = first->skb; - hdr.network = skb_network_header(skb); - th = tcp_hdr(skb); - - if (skb->encapsulation && - first->protocol == htons(ETH_P_IP) && - hdr.ipv4->protocol == IPPROTO_UDP) { - struct ixgbe_adapter *adapter = q_vector->adapter; - - /* verify the port is recognized as VXLAN or GENEVE*/ - if (adapter->vxlan_port && - udp_hdr(skb)->dest == adapter->vxlan_port) { - hdr.network = skb_inner_network_header(skb); - th = inner_tcp_hdr(skb); - } - -#ifdef HAVE_UDP_ENC_RX_OFFLOAD - if (adapter->geneve_port && - udp_hdr(skb)->dest == adapter->geneve_port) { - hdr.network = skb_inner_network_header(skb); - th = inner_tcp_hdr(skb); - } -#endif - } - - /* Currently only IPv4/IPv6 with TCP is supported */ - switch (hdr.ipv4->version) { - case IPVERSION: - if (hdr.ipv4->protocol != IPPROTO_TCP) - return; - break; - case 6: - if (likely((unsigned char *)th - hdr.network == - sizeof(struct ipv6hdr))) { - if (hdr.ipv6->nexthdr != IPPROTO_TCP) - return; - } else { - __be16 frag_off; - u8 l4_hdr; - - ipv6_skip_exthdr(skb, hdr.network - skb->data + - sizeof(struct ipv6hdr), - &l4_hdr, &frag_off); - if (unlikely(frag_off)) - return; - if (l4_hdr != IPPROTO_TCP) - return; - } - break; - default: - return; - } - -#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ -#ifdef IXGBE_NO_VXLAN - hdr.network = skb_network_header(first->skb); - /* Currently only IPv4/IPv6 with TCP is supported */ - if (first->protocol == htons(ETH_P_IP)) { - if (hdr.ipv4->protocol != IPPROTO_TCP) - return; - - /* access ihl as a u8 to avoid unaligned access on ia64 */ - hlen = (hdr.network[0] & 0x0F) << 2; - } else if (first->protocol == htons(ETH_P_IPV6)) { - if (hdr.ipv6->nexthdr != IPPROTO_TCP) - return; - - hlen = sizeof(struct ipv6hdr); - } else { - return; - } - - th = (struct tcphdr *)(hdr.network + hlen); - -#endif /* IXGBE_NO_VXLAN */ - 
/* skip this packet since it is invalid or the socket is closing */ - if (!th || th->fin) - return; - - /* sample on all syn packets or once every atr sample count */ - if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) - return; - - /* reset sample count */ - ring->atr_count = 0; - - vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); - - /* - * src and dst are inverted, think how the receiver sees them - * - * The input is broken into two sections, a non-compressed section - * containing vm_pool, vlan_id, and flow_type. The rest of the data - * is XORed together and stored in the compressed dword. - */ - input.formatted.vlan_id = vlan_id; - - /* - * since src port and flex bytes occupy the same word XOR them together - * and write the value to source port portion of compressed dword - */ - if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN)) - common.port.src ^= th->dest ^ htons(ETH_P_8021Q); - else - common.port.src ^= th->dest ^ first->protocol; - common.port.dst ^= th->source; - -#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) - switch (hdr.ipv4->version) { - case IPVERSION: - input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; - common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; - break; - case 6: - input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; - common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ - hdr.ipv6->saddr.s6_addr32[1] ^ - hdr.ipv6->saddr.s6_addr32[2] ^ - hdr.ipv6->saddr.s6_addr32[3] ^ - hdr.ipv6->daddr.s6_addr32[0] ^ - hdr.ipv6->daddr.s6_addr32[1] ^ - hdr.ipv6->daddr.s6_addr32[2] ^ - hdr.ipv6->daddr.s6_addr32[3]; - break; - default: - break; - } - - if (hdr.network != skb_network_header(skb)) - input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK; -#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ -#ifdef IXGBE_NO_VXLAN - if (first->protocol == htons(ETH_P_IP)) { - input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; - common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; - 
} else { - input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; - common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ - hdr.ipv6->saddr.s6_addr32[1] ^ - hdr.ipv6->saddr.s6_addr32[2] ^ - hdr.ipv6->saddr.s6_addr32[3] ^ - hdr.ipv6->daddr.s6_addr32[0] ^ - hdr.ipv6->daddr.s6_addr32[1] ^ - hdr.ipv6->daddr.s6_addr32[2] ^ - hdr.ipv6->daddr.s6_addr32[3]; - } -#endif /* IXGBE_NO_VXLAN */ - - /* This assumes the Rx queue and Tx queue are bound to the same CPU */ - ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, - input, common, ring->queue_index); -} - -#ifdef HAVE_NETDEV_SELECT_QUEUE -#if IS_ENABLED(CONFIG_FCOE) -#if defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) -static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, - __always_unused void *accel, - select_queue_fallback_t fallback) -#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL) -static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, - __always_unused void *accel) -#else -static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) -#endif /* HAVE_NDO_SELECT_QUEUE_ACCEL */ -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_ring_feature *f; - int txq; - - /* - * only execute the code below if protocol is FCoE - * or FIP and we have FCoE enabled on the adapter - */ - switch (vlan_get_protocol(skb)) { - case __constant_htons(ETH_P_FCOE): - case __constant_htons(ETH_P_FIP): - adapter = netdev_priv(dev); - - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) - break; - /* fall through */ - default: -#ifdef HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK - return fallback(dev, skb); -#else - return __netdev_pick_tx(dev, skb); -#endif - } - - f = &adapter->ring_feature[RING_F_FCOE]; - - txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : - smp_processor_id(); - - while (txq >= f->indices) - txq -= f->indices; - - return txq + f->offset; -} -#endif /* CONFIG_FCOE */ -#endif /* HAVE_NETDEV_SELECT_QUEUE */ - -netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, - struct ixgbe_adapter __maybe_unused *adapter, - struct ixgbe_ring *tx_ring) -{ - struct ixgbe_tx_buffer *first; - int tso; - u32 tx_flags = 0; - unsigned short f; - u16 count = TXD_USE_COUNT(skb_headlen(skb)); - __be16 protocol = skb->protocol; - u8 hdr_len = 0; - - /* - * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, - * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, - * + 2 desc gap to keep tail from touching head, - * + 1 desc for context descriptor, - * otherwise try next time - */ - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); - - if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { - tx_ring->tx_stats.tx_busy++; - return NETDEV_TX_BUSY; - } - - /* record the location of the first descriptor for this packet */ - first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; - first->skb = skb; - first->bytecount = skb->len; - first->gso_segs = 1; - - /* if we have a HW VLAN tag being added default to the HW one */ - if (skb_vlan_tag_present(skb)) { - tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; - tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; - /* else if it is a SW VLAN check the next protocol and store the tag */ - } else if (protocol == htons(ETH_P_8021Q)) { - struct vlan_hdr *vhdr, _vhdr; - vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); - if (!vhdr) - goto out_drop; - - protocol = vhdr->h_vlan_encapsulated_proto; - tx_flags |= ntohs(vhdr->h_vlan_TCI) << - IXGBE_TX_FLAGS_VLAN_SHIFT; - tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; - } - - skb_tx_timestamp(skb); - -#ifdef HAVE_PTP_1588_CLOCK -#ifdef SKB_SHARED_TX_IS_UNION - if (unlikely(skb_tx(skb)->hardware) && - adapter->ptp_clock) { - if 
(!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, - &adapter->state)) { - skb_tx(skb)->in_progress = 1; -#else - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - adapter->ptp_clock) { - if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, - &adapter->state)) { - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; -#endif - tx_flags |= IXGBE_TX_FLAGS_TSTAMP; - - /* schedule check for Tx timestamp */ - adapter->ptp_tx_skb = skb_get(skb); - adapter->ptp_tx_start = jiffies; - schedule_work(&adapter->ptp_tx_work); - } else { - adapter->tx_hwtstamp_skipped++; - } - } - -#endif -#ifdef CONFIG_PCI_IOV - /* - * Use the l2switch_enable flag - would be false if the DMA - * Tx switch had been disabled. - */ - if (adapter->flags & IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE) - tx_flags |= IXGBE_TX_FLAGS_CC; - -#endif -#ifdef HAVE_TX_MQ - if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && - ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) || - (skb->priority != TC_PRIO_CONTROL))) { - tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; -#if IS_ENABLED(CONFIG_FCOE) - /* for FCoE with DCB, we force the priority to what - * was specified by the switch */ - if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && - ((protocol == htons(ETH_P_FCOE)) || - (protocol == htons(ETH_P_FIP)))) - tx_flags |= adapter->fcoe.up << - IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; - else -#endif /* CONFIG_FCOE */ - tx_flags |= skb->priority << - IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; - if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { - struct vlan_ethhdr *vhdr; - if (skb_header_cloned(skb) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) - goto out_drop; - vhdr = (struct vlan_ethhdr *)skb->data; - vhdr->h_vlan_TCI = htons(tx_flags >> - IXGBE_TX_FLAGS_VLAN_SHIFT); - } else { - tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; - } - } - -#endif /* HAVE_TX_MQ */ - /* record initial flags and protocol */ - first->tx_flags = tx_flags; - first->protocol = protocol; - -#if IS_ENABLED(CONFIG_FCOE) - /* setup tx offload for FCoE */ - if ((protocol == 
htons(ETH_P_FCOE)) && - (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { - tso = ixgbe_fso(tx_ring, first, &hdr_len); - if (tso < 0) - goto out_drop; - - goto xmit_fcoe; - } else if (protocol == htons(ETH_P_FIP)) { - /* FCoE stack has a bug where it does not set the network - * header offset for FIP frames sent resulting into MACLEN - * being set to ZERO in the Tx context descriptor. - * This will cause MDD events when trying to Tx such frames. - */ - if (!skb_network_offset(skb)) { - if (tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | - IXGBE_TX_FLAGS_SW_VLAN)) - skb_set_network_header(skb, - sizeof(struct ethhdr) + - sizeof(struct vlan_hdr)); - else - skb_set_network_header(skb, - sizeof(struct ethhdr)); - } - } -#endif /* CONFIG_FCOE */ - - tso = ixgbe_tso(tx_ring, first, &hdr_len); - if (tso < 0) - goto out_drop; - else if (!tso) - ixgbe_tx_csum(tx_ring, first); - - /* add the ATR filter if ATR is on */ - if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) - ixgbe_atr(tx_ring, first); - -#if IS_ENABLED(CONFIG_FCOE) -xmit_fcoe: -#endif /* CONFIG_FCOE */ -#ifdef HAVE_PTP_1588_CLOCK - if (ixgbe_tx_map(tx_ring, first, hdr_len)) - goto cleanup_tx_tstamp; -#else - ixgbe_tx_map(tx_ring, first, hdr_len); -#endif - - return NETDEV_TX_OK; - -out_drop: - dev_kfree_skb_any(first->skb); - first->skb = NULL; -#ifdef HAVE_PTP_1588_CLOCK -cleanup_tx_tstamp: - if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) { - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; - cancel_work_sync(&adapter->ptp_tx_work); - clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); - } -#endif - - return NETDEV_TX_OK; -} - -static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, - struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_ring *tx_ring; -#ifdef HAVE_TX_MQ - unsigned int r_idx = skb->queue_mapping; -#endif - - if (!netif_carrier_ok(netdev)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - /* - * 
The minimum packet size for olinfo paylen is 17 so pad the skb - * in order to meet this minimum size requirement. - */ - if (skb_put_padto(skb, 17)) - return NETDEV_TX_OK; - -#ifdef HAVE_TX_MQ - if (r_idx >= adapter->num_tx_queues) - r_idx = r_idx % adapter->num_tx_queues; - tx_ring = adapter->tx_ring[r_idx]; -#else - tx_ring = adapter->tx_ring[0]; -#endif - return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); -} - -/** - * ixgbe_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - **/ -static int ixgbe_set_mac(struct net_device *netdev, void *p) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); - - ixgbe_mac_set_default_filter(adapter); - - return 0; -} - -#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) -/** - * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding - * netdev->dev_addr_list - * @netdev: network interface device structure - * - * Returns non-zero on failure - **/ -static int ixgbe_add_sanmac_netdev(struct net_device *dev) -{ - int err = IXGBE_SUCCESS; - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_hw *hw = &adapter->hw; - - if (is_valid_ether_addr(hw->mac.san_addr)) { - rtnl_lock(); - err = dev_addr_add(dev, hw->mac.san_addr, - NETDEV_HW_ADDR_T_SAN); - rtnl_unlock(); - - /* update SAN MAC vmdq pool selection */ - hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); - } - return err; -} - -/** - * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding - * netdev->dev_addr_list - * @netdev: network interface device structure - * - * Returns non-zero on failure - **/ -static int 
ixgbe_del_sanmac_netdev(struct net_device *dev) -{ - int err = IXGBE_SUCCESS; - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_mac_info *mac = &adapter->hw.mac; - - if (is_valid_ether_addr(mac->san_addr)) { - rtnl_lock(); - err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); - rtnl_unlock(); - } - return err; -} - -#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) */ - -static int ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, - u16 addr) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u16 value; - int rc; - - if (prtad != hw->phy.addr) - return -EINVAL; - rc = hw->phy.ops.read_reg(hw, addr, devad, &value); - if (!rc) - rc = value; - return rc; -} - -static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, - u16 addr, u16 value) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - - if (prtad != hw->phy.addr) - return -EINVAL; - return hw->phy.ops.write_reg(hw, addr, devad, value); -} - -static int ixgbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, - int cmd) -{ - struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &ifr->ifr_data; - int prtad, devad, ret; - - prtad = (mii->phy_id & MDIO_PHY_ID_PRTAD) >> 5; - devad = (mii->phy_id & MDIO_PHY_ID_DEVAD); - - if (cmd == SIOCGMIIREG) { - ret = ixgbe_mdio_read(netdev, prtad, devad, mii->reg_num); - if (ret < 0) - return ret; - mii->val_out = ret; - return 0; - } else { - return ixgbe_mdio_write(netdev, prtad, devad, mii->reg_num, - mii->val_in); - } -} - -static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ -#ifdef HAVE_PTP_1588_CLOCK - struct ixgbe_adapter *adapter = netdev_priv(netdev); - -#endif - switch (cmd) { -#ifdef HAVE_PTP_1588_CLOCK -#ifdef SIOCGHWTSTAMP - case SIOCGHWTSTAMP: - return ixgbe_ptp_get_ts_config(adapter, ifr); -#endif - case SIOCSHWTSTAMP: - return 
ixgbe_ptp_set_ts_config(adapter, ifr); -#endif -#ifdef ETHTOOL_OPS_COMPAT - case SIOCETHTOOL: - return ethtool_ioctl(ifr); -#endif - case SIOCGMIIREG: - case SIOCSMIIREG: - return ixgbe_mii_ioctl(netdev, ifr, cmd); - default: - return -EOPNOTSUPP; - } -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. - */ -static void ixgbe_netpoll(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - /* if interface is down do nothing */ - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return; - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - int i; - for (i = 0; i < adapter->num_q_vectors; i++) { - adapter->q_vector[i]->netpoll_rx = true; - ixgbe_msix_clean_rings(0, adapter->q_vector[i]); - adapter->q_vector[i]->netpoll_rx = false; - } - } else { - ixgbe_intr(0, adapter); - } -} -#endif /* CONFIG_NET_POLL_CONTROLLER */ - -/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. - * @adapter: pointer to ixgbe_adapter - * @tc: number of traffic classes currently enabled - * - * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm - * 802.1Q priority maps to a packet buffer that exists. - */ -static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 reg, rsave; - int i; - - /* 82598 have a static priority to TC mapping that can not - * be changed so no validation is needed. 
- */ - if (hw->mac.type == ixgbe_mac_82598EB) - return; - - reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); - rsave = reg; - - for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { - u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT); - - /* If up2tc is out of bounds default to zero */ - if (up2tc > tc) - reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT); - } - - if (reg != rsave) - IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); - - return; -} - -/** - * ixgbe_set_prio_tc_map - Configure netdev prio tc map - * @adapter: Pointer to adapter struct - * - * Populate the netdev user priority to tc map - */ -static void ixgbe_set_prio_tc_map(struct ixgbe_adapter __maybe_unused *adapter) -{ -#ifdef HAVE_DCBNL_IEEE - struct net_device *dev = adapter->netdev; - struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; - struct ieee_ets *ets = adapter->ixgbe_ieee_ets; - u8 prio; - - for (prio = 0; prio < IXGBE_DCB_MAX_USER_PRIORITY; prio++) { - u8 tc = 0; - - if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) - tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio); - else if (ets) - tc = ets->prio_tc[prio]; - - netdev_set_prio_tc_map(dev, prio, tc); - } -#endif -} - -#ifdef NETIF_F_HW_TC -static int -__ixgbe_setup_tc(struct net_device *dev, __always_unused u32 handle, - __always_unused __be16 proto, struct tc_to_netdev *tc) -{ - if (tc->type != TC_SETUP_MQPRIO) - return -EINVAL; - -#ifdef TC_MQPRIO_HW_OFFLOAD_MAX - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - - return ixgbe_setup_tc(dev, tc->mqprio->num_tc); -#else - return ixgbe_setup_tc(dev, tc->tc); -#endif -} -#endif /* NETIF_F_HW_TC */ - -/** - * ixgbe_setup_tc - routine to configure net_device for multiple traffic - * classes. 
- * - * @netdev: net device to configure - * @tc: number of traffic classes to enable - */ -int ixgbe_setup_tc(struct net_device *dev, u8 tc) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_hw *hw = &adapter->hw; - - /* Hardware supports up to 8 traffic classes */ - if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) - return -EINVAL; - - if (tc && hw->mac.type == ixgbe_mac_82598EB && - tc < IXGBE_DCB_MAX_TRAFFIC_CLASS) - return -EINVAL; - - /* Hardware has to reinitialize queues and interrupts to - * match packet buffer alignment. Unfortunately, the - * hardware is not flexible enough to do this dynamically. - */ - if (netif_running(dev)) - ixgbe_close(dev); - else - ixgbe_reset(adapter); - - ixgbe_clear_interrupt_scheme(adapter); - - if (tc) { - netdev_set_num_tc(dev, tc); - ixgbe_set_prio_tc_map(adapter); - - adapter->flags |= IXGBE_FLAG_DCB_ENABLED; - - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { - adapter->last_lfc_mode = adapter->hw.fc.requested_mode; - adapter->hw.fc.requested_mode = ixgbe_fc_none; - } - } else { - netdev_reset_tc(dev); - - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - adapter->hw.fc.requested_mode = adapter->last_lfc_mode; - - adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; - - adapter->temp_dcb_cfg.pfc_mode_enable = false; - adapter->dcb_cfg.pfc_mode_enable = false; - } - - ixgbe_validate_rtr(adapter, tc); - - ixgbe_init_interrupt_scheme(adapter); - if (netif_running(dev)) - ixgbe_open(dev); - - return 0; -} - -#ifdef CONFIG_PCI_IOV -void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - - rtnl_lock(); - ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev)); - rtnl_unlock(); -} -#endif - -void ixgbe_do_reset(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - if (netif_running(netdev)) - ixgbe_reinit_locked(adapter); - else - ixgbe_reset(adapter); -} - -#ifdef HAVE_NDO_SET_FEATURES -#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT -static u32 
ixgbe_fix_features(struct net_device *netdev, u32 features) -#else -static netdev_features_t ixgbe_fix_features(struct net_device *netdev, - netdev_features_t features) -#endif -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - -#if IS_ENABLED(CONFIG_DCB) - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) -#ifdef NETIF_F_HW_VLAN_CTAG_RX - features |= NETIF_F_HW_VLAN_CTAG_RX; -#else - features |= NETIF_F_HW_VLAN_RX; -#endif -#endif /* CONFIG_DCB */ - - /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ - if (!(features & NETIF_F_RXCSUM)) - features &= ~NETIF_F_LRO; - - /* Turn off LRO if not RSC capable */ - if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) - features &= ~NETIF_F_LRO; - - return features; -} - -#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT -static int ixgbe_set_features(struct net_device *netdev, u32 features) -#else -static int ixgbe_set_features(struct net_device *netdev, - netdev_features_t features) -#endif -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - bool need_reset = false; - netdev_features_t changed = netdev->features ^ features; - - /* Make sure RSC matches LRO, reset if change */ - if (!(features & NETIF_F_LRO)) { - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) - need_reset = true; - adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; - } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && - !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { - if (adapter->rx_itr_setting == 1 || - adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { - adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; - need_reset = true; - } else if (changed & NETIF_F_LRO) { - e_info(probe, "rx-usecs set too low, " - "disabling RSC\n"); - } - } - - /* - * Check if Flow Director n-tuple support was enabled or disabled. If - * the state changed, we need to reset. 
- */ - switch (features & NETIF_F_NTUPLE) { - case NETIF_F_NTUPLE: - /* turn off ATR, enable perfect filters and reset */ - if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) - need_reset = true; - - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - break; - default: - /* turn off perfect filters, enable ATR and reset */ - if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) - need_reset = true; - - adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - - /* We cannot enable ATR if VMDq is enabled */ - if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) - break; - - /* We cannot enable ATR if we have 2 or more traffic classes */ - if (netdev_get_num_tc(netdev) > 1) - break; - - /* We cannot enable ATR if RSS is disabled */ - if (adapter->ring_feature[RING_F_RSS].limit <= 1) - break; - - /* A sample rate of 0 indicates ATR disabled */ - if (!adapter->atr_sample_rate) - break; - - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - break; - } - - netdev->features = features; - -#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) - if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && - features & NETIF_F_RXCSUM) { - if (!need_reset) - adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; - } else { - u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; - - ixgbe_clear_udp_tunnel_port(adapter, port_mask); - } -#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ - -#ifdef HAVE_UDP_ENC_RX_OFFLOAD - if (adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE && - features & NETIF_F_RXCSUM) { - if (!need_reset) - adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; - } else { - u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; - - ixgbe_clear_udp_tunnel_port(adapter, port_mask); - } -#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ - if (need_reset) - ixgbe_do_reset(netdev); -#ifdef NETIF_F_HW_VLAN_CTAG_FILTER - else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER)) - 
ixgbe_set_rx_mode(netdev); -#endif -#ifdef NETIF_F_HW_VLAN_FILTER - else if (changed & (NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER)) - ixgbe_set_rx_mode(netdev); -#endif - return 0; - -} -#endif /* HAVE_NDO_SET_FEATURES */ - -#ifdef HAVE_UDP_ENC_RX_OFFLOAD -/** - * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports - * @dev: The port's netdev - * @ti: Tunnel endpoint information - **/ -static void ixgbe_add_udp_tunnel_port(struct net_device *dev, - struct udp_tunnel_info *ti) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_hw *hw = &adapter->hw; - __be16 port = ti->port; - u32 port_shift = 0; - u32 reg; - - if (ti->sa_family != AF_INET) - return; - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) - return; - - if (adapter->vxlan_port == port) - return; - - if (adapter->vxlan_port) { - netdev_info(dev, - "VXLAN port %d set, not adding port %d\n", - ntohs(adapter->vxlan_port), - ntohs(port)); - return; - } - - adapter->vxlan_port = port; - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) - return; - - if (adapter->geneve_port == port) - return; - - if (adapter->geneve_port) { - netdev_info(dev, - "GENEVE port %d set, not adding port %d\n", - ntohs(adapter->geneve_port), - ntohs(port)); - return; - } - - port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT; - adapter->geneve_port = port; - break; - default: - return; - } - - reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift; - IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg); -} - -/** - * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports - * @dev: The port's netdev - * @ti: Tunnel endpoint information - **/ -static void ixgbe_del_udp_tunnel_port(struct net_device *dev, - struct udp_tunnel_info *ti) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - u32 port_mask; - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN && - 
ti->type != UDP_TUNNEL_TYPE_GENEVE) - return; - - if (ti->sa_family != AF_INET) - return; - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) - return; - - if (adapter->vxlan_port != ti->port) { - netdev_info(dev, "VXLAN port %d not found\n", - ntohs(ti->port)); - return; - } - - port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) - return; - - if (adapter->geneve_port != ti->port) { - netdev_info(dev, "GENEVE port %d not found\n", - ntohs(ti->port)); - return; - } - - port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; - break; - default: - return; - } - - ixgbe_clear_udp_tunnel_port(adapter, port_mask); - adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; -} -#elif defined(HAVE_VXLAN_RX_OFFLOAD) -/** - * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up - * @dev: The port's netdev - * @sa_family: Socket Family that VXLAN is notifiying us about - * @port: New UDP port number that VXLAN started listening to - * @type: Enumerated type specifying UDP tunnel type - */ -static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, - __be16 port) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_hw *hw = &adapter->hw; - - if (sa_family != AF_INET) - return; - - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE)) - return; - - if (adapter->vxlan_port == port) - return; - - if (adapter->vxlan_port) { - netdev_info(dev, - "Hit Max num of VXLAN ports, not adding port %d\n", - ntohs(port)); - return; - } - - adapter->vxlan_port = port; - IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port)); -} - -/** - * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away - * @dev: The port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: UDP port number that VXLAN stopped listening to - * @type: Enumerated type specifying 
UDP tunnel type - */ -static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, - __be16 port) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE)) - return; - - if (sa_family != AF_INET) - return; - - if (adapter->vxlan_port != port) { - netdev_info(dev, "Port %d was not found, not deleting\n", - ntohs(port)); - return; - } - - ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); - adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; -} -#endif /* HAVE_VXLAN_RX_OFFLOAD */ - -#ifdef HAVE_NDO_GSO_CHECK -static bool -ixgbe_gso_check(struct sk_buff *skb, __always_unused struct net_device *dev) -{ - return vxlan_gso_check(skb); -} -#endif /* HAVE_NDO_GSO_CHECK */ - -#ifdef HAVE_FDB_OPS -#ifdef USE_CONST_DEV_UC_CHAR -static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, -#ifdef HAVE_NDO_FDB_ADD_VID - u16 vid, -#endif - u16 flags) -#else -static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, - struct net_device *dev, - unsigned char *addr, - u16 flags) -#endif /* USE_CONST_DEV_UC_CHAR */ -{ - /* guarantee we can provide a unique filter for the unicast address */ - if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { - struct ixgbe_adapter *adapter = netdev_priv(dev); - u16 pool = VMDQ_P(0); - - if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool)) - return -ENOMEM; - } - -#ifdef USE_CONST_DEV_UC_CHAR -#ifdef HAVE_NDO_FDB_ADD_VID - return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); -#else - return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); -#endif /* HAVE_NDO_FDB_ADD_VID */ -#else - return ndo_dflt_fdb_add(ndm, dev, addr, flags); -#endif /* USE_CONST_DEV_UC_CHAR */ -} - -#ifdef HAVE_BRIDGE_ATTRIBS -#ifdef HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS -static int ixgbe_ndo_bridge_setlink(struct net_device *dev, - struct nlmsghdr *nlh, - __always_unused u16 flags) -#else -static int 
ixgbe_ndo_bridge_setlink(struct net_device *dev, - struct nlmsghdr *nlh) -#endif /* HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS */ -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct nlattr *attr, *br_spec; - int rem; - - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - return -EOPNOTSUPP; - - br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); - - nla_for_each_nested(attr, br_spec, rem) { - __u16 mode; - - if (nla_type(attr) != IFLA_BRIDGE_MODE) - continue; - - mode = nla_get_u16(attr); - if (mode == BRIDGE_MODE_VEPA) { - adapter->flags |= IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; - } else if (mode == BRIDGE_MODE_VEB) { - adapter->flags &= ~IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; - } else { - return -EINVAL; - } - - adapter->bridge_mode = mode; - - /* re-configure settings related to bridge mode */ - ixgbe_configure_bridge_mode(adapter); - - e_info(drv, "enabling bridge mode: %s\n", - mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); - } - - return 0; -} - -#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS -static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, - u32 __maybe_unused filter_mask, - int nlflags) -#elif defined(HAVE_BRIDGE_FILTER) -static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, - u32 __always_unused filter_mask) -#else -static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev) -#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - u16 mode; - - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - return 0; - - mode = adapter->bridge_mode; -#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, - filter_mask, NULL); -#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS) - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags); -#elif defined(HAVE_NDO_FDB_ADD_VID) || \ - defined 
NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); -#else - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); -#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ -} -#endif /* HAVE_BRIDGE_ATTRIBS */ -#endif /* HAVE_FDB_OPS */ - -#ifdef HAVE_NDO_FEATURES_CHECK -#define IXGBE_MAX_TUNNEL_HDR_LEN 80 -#ifdef NETIF_F_GSO_PARTIAL -#define IXGBE_MAX_MAC_HDR_LEN 127 -#define IXGBE_MAX_NETWORK_HDR_LEN 511 - -static netdev_features_t -ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, - netdev_features_t features) -{ - unsigned int network_hdr_len, mac_hdr_len; - - /* Make certain the headers can be described by a context descriptor */ - mac_hdr_len = skb_network_header(skb) - skb->data; - if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN)) - return features & ~(NETIF_F_HW_CSUM | - NETIF_F_SCTP_CRC | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_TSO | - NETIF_F_TSO6); - - network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); - if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN)) - return features & ~(NETIF_F_HW_CSUM | - NETIF_F_SCTP_CRC | - NETIF_F_TSO | - NETIF_F_TSO6); - - /* We can only support IPV4 TSO in tunnels if we can mangle the - * inner IP ID field, so strip TSO if MANGLEID is not supported. 
- */ - if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) - features &= ~NETIF_F_TSO; - - return features; -} -#else -static netdev_features_t -ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, - netdev_features_t features) -{ - if (!skb->encapsulation) - return features; - - if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) > - IXGBE_MAX_TUNNEL_HDR_LEN)) - return features & ~NETIF_F_CSUM_MASK; - - return features; -} -#endif /* NETIF_F_GSO_PARTIAL */ -#endif /* HAVE_NDO_FEATURES_CHECK */ - -#ifdef HAVE_NET_DEVICE_OPS -static const struct net_device_ops ixgbe_netdev_ops = { - .ndo_open = ixgbe_open, - .ndo_stop = ixgbe_close, - .ndo_start_xmit = ixgbe_xmit_frame, -#if IS_ENABLED(CONFIG_FCOE) - .ndo_select_queue = ixgbe_select_queue, -#else -#ifndef HAVE_MQPRIO - .ndo_select_queue = __netdev_pick_tx, -#endif -#endif - .ndo_set_rx_mode = ixgbe_set_rx_mode, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = ixgbe_set_mac, - .ndo_change_mtu = ixgbe_change_mtu, - .ndo_tx_timeout = ixgbe_tx_timeout, -#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) - .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, -#endif - .ndo_do_ioctl = ixgbe_ioctl, -#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT -/* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the - * function get_ndo_ext to retrieve offsets for extended fields from with the - * net_device_ops struct and ndo_size is checked to determine whether or not - * the offset is valid. 
- */ - .ndo_size = sizeof(const struct net_device_ops), -#endif -#ifdef IFLA_VF_MAX - .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, -#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN - .extended.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, -#else - .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, -#endif -#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE - .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, -#else - .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, -#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ -#if defined(HAVE_VF_SPOOFCHK_CONFIGURE) && IS_ENABLED(CONFIG_PCI_IOV) - .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, -#endif -#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN - .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, -#endif -#ifdef HAVE_NDO_SET_VF_TRUST -#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT - .extended.ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, -#else - .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, -#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */ -#endif /* HAVE_NDO_SET_VF_TRUST */ - .ndo_get_vf_config = ixgbe_ndo_get_vf_config, -#endif /* IFLA_VF_MAX */ -#ifdef HAVE_NDO_GET_STATS64 - .ndo_get_stats64 = ixgbe_get_stats64, -#else - .ndo_get_stats = ixgbe_get_stats, -#endif /* HAVE_NDO_GET_STATS64 */ -#ifdef HAVE_SETUP_TC -#ifdef NETIF_F_HW_TC - .ndo_setup_tc = __ixgbe_setup_tc, -#else - .ndo_setup_tc = ixgbe_setup_tc, -#endif /* NETIF_F_HW_TC */ -#endif -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ixgbe_netpoll, -#endif -#ifndef HAVE_RHEL6_NET_DEVICE_EXTENDED -#ifdef HAVE_NDO_BUSY_POLL - .ndo_busy_poll = ixgbe_busy_poll_recv, -#endif /* HAVE_NDO_BUSY_POLL */ -#endif /* !HAVE_RHEL6_NET_DEVICE_EXTENDED */ -#if IS_ENABLED(CONFIG_FCOE) - .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, -#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET - .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, -#endif - .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, -#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE - .ndo_fcoe_enable = ixgbe_fcoe_enable, - .ndo_fcoe_disable = ixgbe_fcoe_disable, -#endif -#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN - 
.ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, -#endif -#endif /* CONFIG_FCOE */ -#ifdef HAVE_VLAN_RX_REGISTER - .ndo_vlan_rx_register = &ixgbe_vlan_mode, -#endif -#ifdef HAVE_FDB_OPS - .ndo_fdb_add = ixgbe_ndo_fdb_add, -#ifndef USE_DEFAULT_FDB_DEL_DUMP - .ndo_fdb_del = ndo_dflt_fdb_del, - .ndo_fdb_dump = ndo_dflt_fdb_dump, -#endif -#ifdef HAVE_BRIDGE_ATTRIBS - .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, - .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, -#endif /* HAVE_BRIDGE_ATTRIBS */ -#endif -#ifdef HAVE_UDP_ENC_RX_OFFLOAD -#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL - .extended.ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, - .extended.ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, -#else - .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, - .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, -#endif -#elif defined(HAVE_VXLAN_RX_OFFLOAD) - .ndo_add_vxlan_port = ixgbe_add_vxlan_port, - .ndo_del_vxlan_port = ixgbe_del_vxlan_port, -#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ -#ifdef HAVE_NDO_GSO_CHECK - .ndo_gso_check = ixgbe_gso_check, -#endif /* HAVE_NDO_GSO_CHECK */ -#ifdef HAVE_NDO_FEATURES_CHECK - .ndo_features_check = ixgbe_features_check, -#endif /* HAVE_NDO_FEATURES_CHECK */ -#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT -}; - -/* RHEL6 keeps these operations in a separate structure */ -static const struct net_device_ops_ext ixgbe_netdev_ops_ext = { - .size = sizeof(struct net_device_ops_ext), -#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ -#ifdef HAVE_NDO_SET_FEATURES - .ndo_set_features = ixgbe_set_features, - .ndo_fix_features = ixgbe_fix_features, -#endif /* HAVE_NDO_SET_FEATURES */ -}; -#endif /* HAVE_NET_DEVICE_OPS */ - -void ixgbe_assign_netdev_ops(struct net_device *dev) -{ -#ifdef HAVE_NET_DEVICE_OPS - dev->netdev_ops = &ixgbe_netdev_ops; -#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT - set_netdev_ops_ext(dev, &ixgbe_netdev_ops_ext); -#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ -#else /* HAVE_NET_DEVICE_OPS */ - dev->open = &ixgbe_open; - dev->stop = &ixgbe_close; - 
dev->hard_start_xmit = &ixgbe_xmit_frame; - dev->get_stats = &ixgbe_get_stats; -#ifdef HAVE_SET_RX_MODE - dev->set_rx_mode = &ixgbe_set_rx_mode; -#endif - dev->set_multicast_list = &ixgbe_set_rx_mode; - dev->set_mac_address = &ixgbe_set_mac; - dev->change_mtu = &ixgbe_change_mtu; - dev->do_ioctl = &ixgbe_ioctl; -#ifdef HAVE_TX_TIMEOUT - dev->tx_timeout = &ixgbe_tx_timeout; -#endif -#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) - dev->vlan_rx_register = &ixgbe_vlan_mode; - dev->vlan_rx_add_vid = &ixgbe_vlan_rx_add_vid; - dev->vlan_rx_kill_vid = &ixgbe_vlan_rx_kill_vid; -#endif -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = &ixgbe_netpoll; -#endif -#ifdef HAVE_NETDEV_SELECT_QUEUE -#if IS_ENABLED(CONFIG_FCOE) - dev->select_queue = &ixgbe_select_queue; -#else - dev->select_queue = &__netdev_pick_tx; -#endif -#endif /* HAVE_NETDEV_SELECT_QUEUE */ -#endif /* HAVE_NET_DEVICE_OPS */ - -#ifdef HAVE_RHEL6_NET_DEVICE_EXTENDED -#ifdef HAVE_NDO_BUSY_POLL - netdev_extended(dev)->ndo_busy_poll = ixgbe_busy_poll_recv; -#endif /* HAVE_NDO_BUSY_POLL */ -#endif /* HAVE_RHEL6_NET_DEVICE_EXTENDED */ - - ixgbe_set_ethtool_ops(dev); - dev->watchdog_timeo = 5 * HZ; -} - -/** - * ixgbe_wol_supported - Check whether device supports WoL - * @adapter: the adapter private structure - * @device_id: the device ID - * @subdev_id: the subsystem device ID - * - * This function is used by probe and ethtool to determine - * which devices have WoL support - * - **/ -bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, - u16 subdevice_id) -{ - struct ixgbe_hw *hw = &adapter->hw; - u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; - - /* WOL not supported on 82598 */ - if (hw->mac.type == ixgbe_mac_82598EB) - return false; - - /* check eeprom to see if WOL is enabled for X540 and newer */ - if (hw->mac.type >= ixgbe_mac_X540) { - if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || - ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && - 
(hw->bus.func == 0))) - return true; - } - - /* WOL is determined based on device IDs for 82599 MACs */ - switch (device_id) { - case IXGBE_DEV_ID_82599_SFP: - /* Only these subdevices could supports WOL */ - switch (subdevice_id) { - case IXGBE_SUBDEV_ID_82599_560FLR: - case IXGBE_SUBDEV_ID_82599_LOM_SNAP6: - case IXGBE_SUBDEV_ID_82599_SFP_WOL0: - case IXGBE_SUBDEV_ID_82599_SFP_2OCP: - /* only support first port */ - if (hw->bus.func != 0) - break; - /* fall through */ - case IXGBE_SUBDEV_ID_82599_SP_560FLR: - case IXGBE_SUBDEV_ID_82599_SFP: - case IXGBE_SUBDEV_ID_82599_RNDC: - case IXGBE_SUBDEV_ID_82599_ECNA_DP: - case IXGBE_SUBDEV_ID_82599_SFP_1OCP: - case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1: - case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2: - return true; - } - break; - case IXGBE_DEV_ID_82599EN_SFP: - /* Only these subdevices support WoL */ - switch (subdevice_id) { - case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1: - return true; - } - break; - case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: - /* All except this subdevice support WOL */ - if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) - return true; - break; - case IXGBE_DEV_ID_82599_KX4: - return true; - default: - break; - } - - return false; -} - -/** - * ixgbe_set_fw_version - Set FW version - * @adapter: the adapter private structure - * - * This function is used by probe and ethtool to determine the FW version to - * format to display. The FW version is taken from the EEPROM/NVM. 
- * - **/ -static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u16 eeprom_verh = 0, eeprom_verl = 0; - u16 offset = 0; - u32 etrack_id; - - /* Check for OEM Product Version block format */ - hw->eeprom.ops.read(hw, 0x1b, &offset); - - /* Make sure offset to OEM Product Version block is valid */ - if (!(offset == 0x0) && !(offset == 0xffff)) { - u16 mod_len = 0, cap = 0, prod_ver = 0, rel_num = 0; - u16 build, major, patch; - - /* Read product version block */ - hw->eeprom.ops.read(hw, offset, &mod_len); - hw->eeprom.ops.read(hw, offset + 0x1, &cap); - hw->eeprom.ops.read(hw, offset + 0x2, &prod_ver); - hw->eeprom.ops.read(hw, offset + 0x3, &rel_num); - - /* Only display OEM product version if valid block */ - if (mod_len == 0x3 && (cap & 0xf) == 0x0) { - major = prod_ver >> 8; - build = prod_ver & 0xff; - patch = rel_num; - - snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), - "%x.%x.%x", major, build, patch); - return; - } - } - - /* - * Save off EEPROM version number and Option Rom version which - * together make a unique identify for the eeprom - */ - hw->eeprom.ops.read(hw, 0x2e, &eeprom_verh); - hw->eeprom.ops.read(hw, 0x2d, &eeprom_verl); - etrack_id = (eeprom_verh << 16) | eeprom_verl; - - /* Check for SCSI block version format */ - hw->eeprom.ops.read(hw, 0x17, &offset); - - /* Make sure offset to SCSI block is valid */ - if (!(offset == 0x0) && !(offset == 0xffff)) { - u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0; - u16 build, major, patch; - - hw->eeprom.ops.read(hw, offset + 0x84, &eeprom_cfg_blkh); - hw->eeprom.ops.read(hw, offset + 0x83, &eeprom_cfg_blkl); - - /* Only display Option Rom if exist */ - if (eeprom_cfg_blkl && eeprom_cfg_blkh) { - major = eeprom_cfg_blkl >> 8; - build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8); - patch = eeprom_cfg_blkh & 0x00ff; - - snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), - "0x%08x, %d.%d.%d", etrack_id, major, build, - patch); - return; 
- } - } - - /* Set ETrack ID format */ - snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), - "0x%08x", etrack_id); -} - -/** - * ixgbe_probe - Device Initialization Routine - * @pdev: PCI device information struct - * @ent: entry in ixgbe_pci_tbl - * - * Returns 0 on success, negative on failure - * - * ixgbe_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the adapter private structure, - * and a hardware reset occur. - **/ -static int __devinit ixgbe_probe(struct pci_dev *pdev, - const struct pci_device_id __always_unused *ent) -{ - struct net_device *netdev; - struct ixgbe_adapter *adapter = NULL; - struct ixgbe_hw *hw = NULL; - static int cards_found; - int err, pci_using_dac, expected_gts; - char *info_string, *i_s_var; - u8 part_str[IXGBE_PBANUM_LENGTH]; - enum ixgbe_mac_type mac_type = ixgbe_mac_unknown; -#ifdef HAVE_TX_MQ - unsigned int indices = MAX_TX_QUEUES; -#endif /* HAVE_TX_MQ */ - bool disable_dev = false; -#if IS_ENABLED(CONFIG_FCOE) - u16 device_caps; -#endif -#ifndef NETIF_F_GSO_PARTIAL -#ifdef HAVE_NDO_SET_FEATURES -#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT - netdev_features_t hw_features; -#else - u32 hw_features; -#endif -#endif -#endif /* NETIF_F_GSO_PARTIAL */ - - err = pci_enable_device_mem(pdev); - if (err) - return err; - - if (!dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)) && - !dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64))) { - pci_using_dac = 1; - } else { - err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); - if (err) { - err = dma_set_coherent_mask(pci_dev_to_dev(pdev), - DMA_BIT_MASK(32)); - if (err) { - dev_err(pci_dev_to_dev(pdev), "No usable DMA " - "configuration, aborting\n"); - goto err_dma; - } - } - pci_using_dac = 0; - } - - err = pci_request_mem_regions(pdev, ixgbe_driver_name); - if (err) { - dev_err(pci_dev_to_dev(pdev), - "pci_request_selected_regions failed 0x%x\n", err); - goto err_pci_reg; - } - - /* - * The mac_type is needed 
before we have the adapter is set up - * so rather than maintain two devID -> MAC tables we dummy up - * an ixgbe_hw stuct and use ixgbe_set_mac_type. - */ - hw = vmalloc(sizeof(struct ixgbe_hw)); - if (!hw) { - pr_info("Unable to allocate memory for early mac " - "check\n"); - } else { - hw->vendor_id = pdev->vendor; - hw->device_id = pdev->device; - ixgbe_set_mac_type(hw); - mac_type = hw->mac.type; - vfree(hw); - } - - /* - * Workaround of Silicon errata on 82598. Disable LOs in the PCI switch - * port to which the 82598 is connected to prevent duplicate - * completions caused by LOs. We need the mac type so that we only - * do this on 82598 devices, ixgbe_set_mac_type does this for us if - * we set it's device ID. - */ - if (mac_type == ixgbe_mac_82598EB) - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); - - pci_enable_pcie_error_reporting(pdev); - - pci_set_master(pdev); - -#ifdef HAVE_TX_MQ - if (mac_type == ixgbe_mac_82598EB) { -#if IS_ENABLED(CONFIG_DCB) - indices = IXGBE_MAX_DCB_INDICES * 4; -#else /* CONFIG_DCB */ - indices = IXGBE_MAX_RSS_INDICES; -#endif /* !CONFIG_DCB */ - } - - netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); -#else /* HAVE_TX_MQ */ - netdev = alloc_etherdev(sizeof(struct ixgbe_adapter)); -#endif /* HAVE_TX_MQ */ - if (!netdev) { - err = -ENOMEM; - goto err_alloc_etherdev; - } - - SET_MODULE_OWNER(netdev); - SET_NETDEV_DEV(netdev, pci_dev_to_dev(pdev)); - - adapter = netdev_priv(netdev); -#ifdef HAVE_TX_MQ -#ifndef HAVE_NETDEV_SELECT_QUEUE - adapter->indices = indices; -#endif -#endif - adapter->netdev = netdev; - adapter->pdev = pdev; - hw = &adapter->hw; - hw->back = adapter; - adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - - hw->hw_addr = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - adapter->io_addr = hw->hw_addr; - if (!hw->hw_addr) { - err = -EIO; - goto err_ioremap; - } - - ixgbe_assign_netdev_ops(netdev); - - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 
1); - - adapter->bd_number = cards_found; - - ixgbe_get_hw_control(adapter); - /* setup the private structure */ - err = ixgbe_sw_init(adapter); - if (err) - goto err_sw_init; - - /* Make sure the SWFW semaphore is in a valid state */ - if (hw->mac.ops.init_swfw_sync) - hw->mac.ops.init_swfw_sync(hw); - - /* Make it possible the adapter to be woken up via WOL */ - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); - break; - default: - break; - } - - /* - * If we have a fan, this is as early we know, warn if we - * have had a failure. - */ - if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { - u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - if (esdp & IXGBE_ESDP_SDP1) - e_crit(probe, "Fan has stopped, replace the adapter\n"); - } - - /* - * check_options must be called before setup_link to set up - * hw->fc completely - */ - ixgbe_check_options(adapter); - - /* reset_hw fills in the perm_addr as well */ - hw->phy.reset_if_overtemp = true; - err = hw->mac.ops.reset_hw(hw); - hw->phy.reset_if_overtemp = false; - if (err == IXGBE_ERR_SFP_NOT_PRESENT) { - err = IXGBE_SUCCESS; - } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { - e_dev_err("failed to load because an unsupported SFP+ or QSFP " - "module type was detected.\n"); - e_dev_err("Reload the driver after installing a supported " - "module.\n"); - goto err_sw_init; - } else if (err) { - e_dev_err("HW Init failed: %d\n", err); - goto err_sw_init; - } - -#ifdef CONFIG_PCI_IOV -#if defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE) - if (adapter->max_vfs > 0) { - e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated.\n"); - e_dev_warn("Please use the pci sysfs interface instead. 
Ex:\n"); - e_dev_warn("echo '%d' > /sys/bus/pci/devices/%04x:%02x:%02x.%1x/sriov_numvfs\n", - adapter->max_vfs, - pci_domain_nr(pdev->bus), - pdev->bus->number, - PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn) - ); - } - -#endif - if (adapter->flags & IXGBE_FLAG_SRIOV_CAPABLE) { - pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT); - ixgbe_enable_sriov(adapter); - } - -#endif /* CONFIG_PCI_IOV */ - -#ifdef NETIF_F_GSO_PARTIAL - netdev->features = NETIF_F_SG | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_RXHASH | - NETIF_F_RXCSUM | - NETIF_F_HW_CSUM; - - netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES; - netdev->features |= NETIF_F_GSO_PARTIAL | - IXGBE_GSO_PARTIAL_FEATURES; - - if (hw->mac.type >= ixgbe_mac_82599EB) - netdev->features |= NETIF_F_SCTP_CRC; - - /* copy netdev features into list of user selectable features */ - netdev->hw_features |= netdev->features | - NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_RXALL | - NETIF_F_HW_L2FW_DOFFLOAD; - - if (hw->mac.type >= ixgbe_mac_82599EB) - netdev->hw_features |= NETIF_F_NTUPLE | - NETIF_F_HW_TC; - - if (pci_using_dac) - netdev->features |= NETIF_F_HIGHDMA; - - netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; - netdev->hw_enc_features |= netdev->vlan_features; - netdev->mpls_features |= NETIF_F_HW_CSUM; - - /* set this bit last since it cannot be part of vlan_features */ - netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_TX; - - netdev->priv_flags |= IFF_UNICAST_FLT; - netdev->priv_flags |= IFF_SUPP_NOFCS; - - /* give us the option of enabling RSC/LRO later */ - if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) - netdev->hw_features |= NETIF_F_LRO; - -#else /* NETIF_F_GSO_PARTIAL */ - netdev->features |= NETIF_F_SG | - NETIF_F_IP_CSUM; - -#ifdef NETIF_F_IPV6_CSUM - netdev->features |= NETIF_F_IPV6_CSUM; -#endif - -#ifdef NETIF_F_HW_VLAN_CTAG_TX - netdev->features |= 
NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_HW_VLAN_CTAG_RX; -#endif - -#ifdef NETIF_F_HW_VLAN_TX - netdev->features |= NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_FILTER | - NETIF_F_HW_VLAN_RX; -#endif - netdev->features |= ixgbe_tso_features(); -#ifdef NETIF_F_RXHASH - netdev->features |= NETIF_F_RXHASH; -#endif /* NETIF_F_RXHASH */ - netdev->features |= NETIF_F_RXCSUM; - -#ifdef HAVE_NDO_SET_FEATURES - /* copy netdev features into list of user selectable features */ -#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT - hw_features = netdev->hw_features; -#else - hw_features = get_netdev_hw_features(netdev); -#endif - hw_features |= netdev->features; - - /* give us the option of enabling RSC/LRO later */ - if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) - hw_features |= NETIF_F_LRO; - -#else -#ifdef NETIF_F_GRO - - /* this is only needed on kernels prior to 2.6.39 */ - netdev->features |= NETIF_F_GRO; -#endif /* NETIF_F_GRO */ -#endif /* HAVE_NDO_SET_FEATURES */ - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - netdev->features |= NETIF_F_SCTP_CSUM; -#ifdef HAVE_NDO_SET_FEATURES - hw_features |= NETIF_F_SCTP_CSUM | - NETIF_F_NTUPLE; -#endif - break; - default: - break; - } -#ifdef HAVE_NDO_SET_FEATURES -#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT - set_netdev_hw_features(netdev, hw_features); -#else - netdev->hw_features = hw_features; -#endif -#endif - -#ifdef HAVE_NETDEV_VLAN_FEATURES - netdev->vlan_features |= NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6; - -#endif /* HAVE_NETDEV_VLAN_FEATURES */ -#ifdef HAVE_ENCAP_CSUM_OFFLOAD - netdev->hw_enc_features |= NETIF_F_SG; -#endif /* HAVE_ENCAP_CSUM_OFFLOAD */ -#ifdef HAVE_VXLAN_RX_OFFLOAD - if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE) { - netdev->hw_enc_features |= NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM; - } -#endif /* NETIF_F_GSO_PARTIAL */ - -#endif /* 
HAVE_VXLAN_RX_OFFLOAD */ - if (netdev->features & NETIF_F_LRO) { - if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && - ((adapter->rx_itr_setting == 1) || - (adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR))) { - adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; - } else if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { - e_dev_info("InterruptThrottleRate set too high, " - "disabling RSC\n"); - } - } -#ifdef IFF_UNICAST_FLT - netdev->priv_flags |= IFF_UNICAST_FLT; -#endif -#ifdef IFF_SUPP_NOFCS - netdev->priv_flags |= IFF_SUPP_NOFCS; -#endif - -#ifdef HAVE_NETDEVICE_MIN_MAX_MTU - /* MTU range: 68 - 9710 */ - netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); - -#endif -#if IS_ENABLED(CONFIG_DCB) - if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) - netdev->dcbnl_ops = &ixgbe_dcbnl_ops; - -#endif /* CONFIG_DCB */ -#if IS_ENABLED(CONFIG_FCOE) -#ifdef NETIF_F_FSO - if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { - unsigned int fcoe_l; - - hw->mac.ops.get_device_caps(hw, &device_caps); - if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) { - adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; - adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; - e_dev_info("FCoE offload feature is not available. 
" - "Disabling FCoE offload feature\n"); - } else { - netdev->features |= NETIF_F_FSO | - NETIF_F_FCOE_CRC; -#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE - ixgbe_fcoe_ddp_enable(adapter); - adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; - netdev->features |= NETIF_F_FCOE_MTU; -#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ - } - - fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus()); - adapter->ring_feature[RING_F_FCOE].limit = fcoe_l; - -#ifdef HAVE_NETDEV_VLAN_FEATURES - netdev->vlan_features |= NETIF_F_FSO | - NETIF_F_FCOE_CRC | - NETIF_F_FCOE_MTU; -#endif /* HAVE_NETDEV_VLAN_FEATURES */ - } -#endif /* NETIF_F_FSO */ -#endif /* CONFIG_FCOE */ - if (pci_using_dac) { - netdev->features |= NETIF_F_HIGHDMA; -#ifdef HAVE_NETDEV_VLAN_FEATURES - netdev->vlan_features |= NETIF_F_HIGHDMA; -#endif /* HAVE_NETDEV_VLAN_FEATURES */ - } - - /* make sure the EEPROM is good */ - if (hw->eeprom.ops.validate_checksum && - (hw->eeprom.ops.validate_checksum(hw, NULL) < 0)) { - e_dev_err("The EEPROM Checksum Is Not Valid\n"); - err = -EIO; - goto err_sw_init; - } - - memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); -#ifdef ETHTOOL_GPERMADDR - memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); -#endif - - if (!is_valid_ether_addr(netdev->dev_addr)) { - e_dev_err("invalid MAC address\n"); - err = -EIO; - goto err_sw_init; - } - - /* Set hw->mac.addr to permanent MAC address */ - ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); - ixgbe_mac_set_default_filter(adapter); - - setup_timer(&adapter->service_timer, &ixgbe_service_timer, - (unsigned long) adapter); - - if (IXGBE_REMOVED(hw->hw_addr)) { - err = -EIO; - goto err_sw_init; - } - INIT_WORK(&adapter->service_task, ixgbe_service_task); - set_bit(__IXGBE_SERVICE_INITED, &adapter->state); - clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); - - err = ixgbe_init_interrupt_scheme(adapter); - if (err) - goto err_sw_init; - - /* WOL not supported for all devices */ - adapter->wol = 0; - hw->eeprom.ops.read(hw, 0x2c, 
&adapter->eeprom_cap); - if (ixgbe_wol_supported(adapter, pdev->device, pdev->subsystem_device)) - adapter->wol = IXGBE_WUFC_MAG; - - hw->wol_enabled = !!(adapter->wol); - - device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); - - ixgbe_set_fw_version(adapter); - - /* reset the hardware with the new settings */ - err = hw->mac.ops.start_hw(hw); - if (err == IXGBE_ERR_EEPROM_VERSION) { - /* We are running on a pre-production device, log a warning */ - e_dev_warn("This device is a pre-production adapter/LOM. " - "Please be aware there may be issues associated " - "with your hardware. If you are experiencing " - "problems please contact your Intel or hardware " - "representative who provided you with this " - "hardware.\n"); - } else if (err == IXGBE_ERR_OVERTEMP) { - e_crit(drv, "%s\n", ixgbe_overheat_msg); - goto err_register; - } else if (err) { - e_dev_err("HW init failed\n"); - goto err_register; - } - - /* pick up the PCI bus settings for reporting later */ - if (ixgbe_pcie_from_parent(hw)) - ixgbe_get_parent_bus_info(hw); - else - if (hw->mac.ops.get_bus_info) - hw->mac.ops.get_bus_info(hw); - - if(!strcmp("0000:03:00.0", pci_name(pdev))) - strcpy(netdev->name, "eth0"); - else if(!strcmp("0000:03:00.1", pci_name(pdev))) - strcpy(netdev->name, "eth1"); - else if(!strcmp("0000:02:00.0", pci_name(pdev))) - strcpy(netdev->name, "eth2"); - else if(!strcmp("0000:02:00.1", pci_name(pdev))) - strcpy(netdev->name, "eth3"); - - err = register_netdev(netdev); - if (err) - goto err_register; - - pci_set_drvdata(pdev, adapter); - adapter->netdev_registered = true; -#ifdef HAVE_PCI_ERS - /* - * call save state here in standalone driver because it relies on - * adapter struct to exist, and needs to call netdev_priv - */ - pci_save_state(pdev); - -#endif - - /* power down the optics for 82599 SFP+ fiber */ - if (hw->mac.ops.disable_tx_laser) - hw->mac.ops.disable_tx_laser(hw); - - /* carrier off reporting is important to ethtool even BEFORE open */ - 
netif_carrier_off(netdev); - /* keep stopping all the transmit queues for older kernels */ - netif_tx_stop_all_queues(netdev); - -#if IS_ENABLED(CONFIG_DCA) - if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) { - err = dca_add_requester(pci_dev_to_dev(pdev)); - switch (err) { - case IXGBE_SUCCESS: - adapter->flags |= IXGBE_FLAG_DCA_ENABLED; - ixgbe_setup_dca(adapter); - break; - /* -19 is returned from the kernel when no provider is found */ - case -19: - e_info(rx_err, "No DCA provider found. Please " - "start ioatdma for DCA functionality.\n"); - break; - default: - e_info(probe, "DCA registration failed: %d\n", err); - break; - } - } -#endif - - /* print all messages at the end so that we use our eth%d name */ - - /* calculate the expected PCIe bandwidth required for optimal - * performance. Note that some older parts will never have enough - * bandwidth due to being older generation PCIe parts. We clamp these - * parts to ensure that no warning is displayed, as this could confuse - * users otherwise. 
*/ - switch(hw->mac.type) { - case ixgbe_mac_82598EB: - expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16); - break; - default: - expected_gts = ixgbe_enumerate_functions(adapter) * 10; - break; - } - - /* don't check link if we failed to enumerate functions */ - if (expected_gts > 0) - ixgbe_check_minimum_link(adapter, expected_gts); - - /* First try to read PBA as a string */ - err = ixgbe_read_pba_string(hw, part_str, IXGBE_PBANUM_LENGTH); - if (err) - strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH); - if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) - e_info(probe, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", - hw->mac.type, hw->phy.type, hw->phy.sfp_type, part_str); - else - e_info(probe, "MAC: %d, PHY: %d, PBA No: %s\n", - hw->mac.type, hw->phy.type, part_str); - - e_dev_info("%02x:%02x:%02x:%02x:%02x:%02x\n", - netdev->dev_addr[0], netdev->dev_addr[1], - netdev->dev_addr[2], netdev->dev_addr[3], - netdev->dev_addr[4], netdev->dev_addr[5]); - -#define INFO_STRING_LEN 255 - info_string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); - if (!info_string) { - e_err(probe, "allocation for info string failed\n"); - goto no_info_string; - } - i_s_var = info_string; - i_s_var += sprintf(info_string, "Enabled Features: "); - i_s_var += sprintf(i_s_var, "RxQ: %d TxQ: %d ", - adapter->num_rx_queues, adapter->num_tx_queues); -#if IS_ENABLED(CONFIG_FCOE) - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) - i_s_var += sprintf(i_s_var, "FCoE "); -#endif - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) - i_s_var += sprintf(i_s_var, "FdirHash "); - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) - i_s_var += sprintf(i_s_var, "DCB "); - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - i_s_var += sprintf(i_s_var, "DCA "); - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) - i_s_var += sprintf(i_s_var, "RSC "); - if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE) - i_s_var += sprintf(i_s_var, "vxlan_rx "); - - BUG_ON(i_s_var > (info_string + 
INFO_STRING_LEN)); - /* end features printing */ - e_info(probe, "%s\n", info_string); - kfree(info_string); -no_info_string: -#ifdef CONFIG_PCI_IOV - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - int i; - for (i = 0; i < adapter->num_vfs; i++) - ixgbe_vf_configuration(pdev, (i | 0x10000000)); - } -#endif - - /* Initialize the LED link active for LED blink support */ - if (hw->mac.ops.init_led_link_act) - hw->mac.ops.init_led_link_act(hw); - - /* firmware requires blank numerical version */ - if (hw->mac.ops.set_fw_drv_ver) - hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, - sizeof(ixgbe_driver_version) - 1, - ixgbe_driver_version); - -#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) - /* add san mac addr to netdev */ - ixgbe_add_sanmac_netdev(netdev); - -#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */ - e_info(probe, "Intel(R) 10 Gigabit Network Connection\n"); - cards_found++; - -#ifdef IXGBE_SYSFS - if (ixgbe_sysfs_init(adapter)) - e_err(probe, "failed to allocate sysfs resources\n"); -#else -#ifdef IXGBE_PROCFS - if (ixgbe_procfs_init(adapter)) - e_err(probe, "failed to allocate procfs resources\n"); -#endif /* IXGBE_PROCFS */ -#endif /* IXGBE_SYSFS */ -#ifdef HAVE_IXGBE_DEBUG_FS - - ixgbe_dbg_adapter_init(adapter); -#endif /* HAVE_IXGBE_DEBUG_FS */ - - /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */ - if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link) - hw->mac.ops.setup_link(hw, - IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, - true); - - if (hw->mac.ops.setup_eee && - (adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) { - bool eee_enable = !!(adapter->flags2 & IXGBE_FLAG2_EEE_ENABLED); - - hw->mac.ops.setup_eee(hw, eee_enable); - } - - return 0; - -err_register: - ixgbe_clear_interrupt_scheme(adapter); -err_sw_init: - ixgbe_release_hw_control(adapter); -#ifdef CONFIG_PCI_IOV - ixgbe_disable_sriov(adapter); -#endif /* CONFIG_PCI_IOV */ - adapter->flags2 &= 
~IXGBE_FLAG2_SEARCH_FOR_SFP; - kfree(adapter->mac_table); - kfree(adapter->rss_key); - iounmap(adapter->io_addr); -err_ioremap: - disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); - free_netdev(netdev); -err_alloc_etherdev: - pci_release_mem_regions(pdev); -err_pci_reg: -err_dma: - if (!adapter || disable_dev) - pci_disable_device(pdev); - return err; -} - -/** - * ixgbe_remove - Device Removal Routine - * @pdev: PCI device information struct - * - * ixgbe_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. The could be caused by a - * Hot-Plug event, or because the driver is going to be removed from - * memory. - **/ -static void __devexit ixgbe_remove(struct pci_dev *pdev) -{ - struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev; - bool disable_dev; - - /* if !adapter then we already cleaned up in probe */ - if (!adapter) - return; - - netdev = adapter->netdev; -#ifdef HAVE_IXGBE_DEBUG_FS - ixgbe_dbg_adapter_exit(adapter); - -#endif /*HAVE_IXGBE_DEBUG_FS */ - set_bit(__IXGBE_REMOVE, &adapter->state); - cancel_work_sync(&adapter->service_task); - -#if IS_ENABLED(CONFIG_DCA) - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { - adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; - dca_remove_requester(pci_dev_to_dev(pdev)); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, - IXGBE_DCA_CTRL_DCA_DISABLE); - } -#endif /* CONFIG_DCA */ - -#ifdef IXGBE_SYSFS - ixgbe_sysfs_exit(adapter); -#else -#ifdef IXGBE_PROCFS - ixgbe_procfs_exit(adapter); -#endif /* IXGBE_PROCFS */ -#endif /* IXGBE-SYSFS */ - -#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) - /* remove the added san mac */ - ixgbe_del_sanmac_netdev(netdev); - -#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */ - -#ifdef CONFIG_PCI_IOV - ixgbe_disable_sriov(adapter); -#endif /* CONFIG_PCI_IOV */ - if (adapter->netdev_registered) { - unregister_netdev(netdev); - adapter->netdev_registered = 
false; - } - -#if IS_ENABLED(CONFIG_FCOE) -#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE - ixgbe_fcoe_ddp_disable(adapter); -#endif -#endif /* CONFIG_FCOE */ - ixgbe_clear_interrupt_scheme(adapter); - ixgbe_release_hw_control(adapter); - -#ifdef HAVE_DCBNL_IEEE - kfree(adapter->ixgbe_ieee_pfc); - kfree(adapter->ixgbe_ieee_ets); - -#endif - iounmap(adapter->io_addr); - pci_release_mem_regions(pdev); - - kfree(adapter->mac_table); - kfree(adapter->rss_key); - disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); - free_netdev(netdev); - - pci_disable_pcie_error_reporting(pdev); - - if (disable_dev) - pci_disable_device(pdev); - -} - -static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev) -{ - u16 value; - - pci_read_config_word(pdev, PCI_VENDOR_ID, &value); - if (value == IXGBE_FAILED_READ_CFG_WORD) { - ixgbe_remove_adapter(hw); - return true; - } - return false; -} - -u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg) -{ - struct ixgbe_adapter *adapter = hw->back; - u16 value; - - if (IXGBE_REMOVED(hw->hw_addr)) - return IXGBE_FAILED_READ_CFG_WORD; - pci_read_config_word(adapter->pdev, reg, &value); - if (value == IXGBE_FAILED_READ_CFG_WORD && - ixgbe_check_cfg_remove(hw, adapter->pdev)) - return IXGBE_FAILED_READ_CFG_WORD; - return value; -} - -#ifdef HAVE_PCI_ERS -#ifdef CONFIG_PCI_IOV -static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg) -{ - struct ixgbe_adapter *adapter = hw->back; - u32 value; - - if (IXGBE_REMOVED(hw->hw_addr)) - return IXGBE_FAILED_READ_CFG_DWORD; - pci_read_config_dword(adapter->pdev, reg, &value); - if (value == IXGBE_FAILED_READ_CFG_DWORD && - ixgbe_check_cfg_remove(hw, adapter->pdev)) - return IXGBE_FAILED_READ_CFG_DWORD; - return value; -} -#endif /* CONFIG_PCI_IOV */ -#endif /* HAVE_PCI_ERS */ - -void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value) -{ - struct ixgbe_adapter *adapter = hw->back; - - if (IXGBE_REMOVED(hw->hw_addr)) - return; - 
pci_write_config_word(adapter->pdev, reg, value); -} - -void ewarn(struct ixgbe_hw *hw, const char *st) -{ - struct ixgbe_adapter *adapter = hw->back; - - netif_warn(adapter, drv, adapter->netdev, "%s", st); -} - -#ifdef HAVE_PCI_ERS -/** - * ixgbe_io_error_detected - called when PCI error is detected - * @pdev: Pointer to PCI device - * @state: The current pci connection state - * - * This function is called after a PCI bus error affecting - * this device has been detected. - */ -static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - -#ifdef CONFIG_PCI_IOV - struct ixgbe_hw *hw = &adapter->hw; - struct pci_dev *bdev, *vfdev; - u32 dw0, dw1, dw2, dw3; - int vf, pos; - u16 req_id, pf_func; - - if (adapter->hw.mac.type == ixgbe_mac_82598EB || - adapter->num_vfs == 0) - goto skip_bad_vf_detection; - - bdev = pdev->bus->self; - while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) - bdev = bdev->bus->self; - - if (!bdev) - goto skip_bad_vf_detection; - - pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR); - if (!pos) - goto skip_bad_vf_detection; - - dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG); - dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4); - dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8); - dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12); - if (IXGBE_REMOVED(hw->hw_addr)) - goto skip_bad_vf_detection; - - req_id = dw1 >> 16; - /* On the 82599 if bit 7 of the requestor ID is set then it's a VF */ - if (!(req_id & 0x0080)) - goto skip_bad_vf_detection; - - pf_func = req_id & 0x01; - if ((pf_func & 1) == (pdev->devfn & 1)) { - unsigned int device_id; - - vf = (req_id & 0x7F) >> 1; - e_dev_err("VF %d has caused a PCIe error\n", vf); - e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: " - "%8.8x\tdw3: %8.8x\n", - dw0, dw1, dw2, dw3); - 
switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - device_id = IXGBE_DEV_ID_82599_VF; - break; - case ixgbe_mac_X540: - device_id = IXGBE_DEV_ID_X540_VF; - break; - case ixgbe_mac_X550: - device_id = IXGBE_DEV_ID_X550_VF; - break; - case ixgbe_mac_X550EM_x: - device_id = IXGBE_DEV_ID_X550EM_X_VF; - break; - case ixgbe_mac_X550EM_a: - device_id = IXGBE_DEV_ID_X550EM_A_VF; - break; - default: - device_id = 0; - break; - } - - /* Find the pci device of the offending VF */ - vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL); - while (vfdev) { - if (vfdev->devfn == (req_id & 0xFF)) - break; - vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, - device_id, vfdev); - } - /* - * There's a slim chance the VF could have been hot plugged, - * so if it is no longer present we don't need to issue the - * VFLR. Just clean up the AER in that case. - */ - if (vfdev) { - ixgbe_issue_vf_flr(adapter, vfdev); - /* Free device reference count */ - pci_dev_put(vfdev); - } - - pci_cleanup_aer_uncorrect_error_status(pdev); - } - - /* - * Even though the error may have occurred on the other port - * we still need to increment the vf error reference count for - * both ports because the I/O resume function will be called - * for both of them. - */ - adapter->vferr_refcount++; - - return PCI_ERS_RESULT_RECOVERED; - -skip_bad_vf_detection: -#endif /* CONFIG_PCI_IOV */ - if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) - return PCI_ERS_RESULT_DISCONNECT; - - rtnl_lock(); - netif_device_detach(netdev); - - if (state == pci_channel_io_perm_failure) { - rtnl_unlock(); - return PCI_ERS_RESULT_DISCONNECT; - } - - if (netif_running(netdev)) - ixgbe_close_suspend(adapter); - - if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) - pci_disable_device(pdev); - rtnl_unlock(); - - /* Request a slot reset. */ - return PCI_ERS_RESULT_NEED_RESET; -} - -/** - * ixgbe_io_slot_reset - called after the pci bus has been reset. 
- * @pdev: Pointer to PCI device - * - * Restart the card from scratch, as if from a cold-boot. - */ -static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) -{ - struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - pci_ers_result_t result; - - if (pci_enable_device_mem(pdev)) { - e_err(probe, "Cannot re-enable PCI device after reset.\n"); - result = PCI_ERS_RESULT_DISCONNECT; - } else { - smp_mb__before_atomic(); - clear_bit(__IXGBE_DISABLED, &adapter->state); - adapter->hw.hw_addr = adapter->io_addr; - pci_set_master(pdev); - pci_restore_state(pdev); - /* - * After second error pci->state_saved is false, this - * resets it so EEH doesn't break. - */ - pci_save_state(pdev); - - pci_wake_from_d3(pdev, false); - - set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); - ixgbe_service_event_schedule(adapter); - - IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); - result = PCI_ERS_RESULT_RECOVERED; - } - - pci_cleanup_aer_uncorrect_error_status(pdev); - - return result; -} - -/** - * ixgbe_io_resume - called when traffic can start flowing again. - * @pdev: Pointer to PCI device - * - * This callback is called when the error recovery driver tells us that - * its OK to resume normal operation. 
- */ -static void ixgbe_io_resume(struct pci_dev *pdev) -{ - struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - -#ifdef CONFIG_PCI_IOV - if (adapter->vferr_refcount) { - e_info(drv, "Resuming after VF err\n"); - adapter->vferr_refcount--; - return; - } - -#endif - rtnl_lock(); - if (netif_running(netdev)) - ixgbe_open(netdev); - - netif_device_attach(netdev); - rtnl_unlock(); -} - -#ifdef HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS -static const struct pci_error_handlers ixgbe_err_handler = { -#else -static struct pci_error_handlers ixgbe_err_handler = { -#endif - .error_detected = ixgbe_io_error_detected, - .slot_reset = ixgbe_io_slot_reset, - .resume = ixgbe_io_resume, -}; -#endif /* HAVE_PCI_ERS */ - -struct net_device *ixgbe_hw_to_netdev(const struct ixgbe_hw *hw) -{ - return ((struct ixgbe_adapter *)hw->back)->netdev; -} -struct ixgbe_msg *ixgbe_hw_to_msg(const struct ixgbe_hw *hw) -{ - struct ixgbe_adapter *adapter = - container_of(hw, struct ixgbe_adapter, hw); - return (struct ixgbe_msg *)&adapter->msg_enable; -} - -#ifdef HAVE_RHEL6_SRIOV_CONFIGURE -static struct pci_driver_rh ixgbe_driver_rh = { - .sriov_configure = ixgbe_pci_sriov_configure, -}; -#endif - -#ifdef CONFIG_PM -#ifndef USE_LEGACY_PM_SUPPORT -static const struct dev_pm_ops ixgbe_pm_ops = { - .suspend = ixgbe_suspend, - .resume = ixgbe_resume, - .freeze = ixgbe_freeze, - .thaw = ixgbe_thaw, - .poweroff = ixgbe_suspend, - .restore = ixgbe_resume, -}; -#endif /* USE_LEGACY_PM_SUPPORT */ -#endif - -static struct pci_driver ixgbe_driver = { - .name = ixgbe_driver_name, - .id_table = ixgbe_pci_tbl, - .probe = ixgbe_probe, - .remove = __devexit_p(ixgbe_remove), -#ifdef CONFIG_PM -#ifndef USE_LEGACY_PM_SUPPORT - .driver = { - .pm = &ixgbe_pm_ops, - }, -#else - .suspend = ixgbe_suspend, - .resume = ixgbe_resume, -#endif /* USE_LEGACY_PM_SUPPORT */ -#endif -#ifndef USE_REBOOT_NOTIFIER - .shutdown = ixgbe_shutdown, -#endif -#if defined(HAVE_SRIOV_CONFIGURE) 
- .sriov_configure = ixgbe_pci_sriov_configure, -#elif defined(HAVE_RHEL6_SRIOV_CONFIGURE) - .rh_reserved = &ixgbe_driver_rh, -#endif /* HAVE_SRIOV_CONFIGURE */ -#ifdef HAVE_PCI_ERS - .err_handler = &ixgbe_err_handler -#endif -}; - -bool ixgbe_is_ixgbe(struct pci_dev *pcidev) -{ - if (pci_dev_driver(pcidev) != &ixgbe_driver) - return false; - else - return true; -} - -/** - * ixgbe_init_module - Driver Registration Routine - * - * ixgbe_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - **/ -static int __init ixgbe_init_module(void) -{ - int ret; - pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); - pr_info("%s\n", ixgbe_copyright); - - ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name); - if (!ixgbe_wq) { - pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name); - return -ENOMEM; - } - -#ifdef IXGBE_PROCFS - if (ixgbe_procfs_topdir_init()) - pr_info("Procfs failed to initialize topdir\n"); -#endif - -#ifdef HAVE_IXGBE_DEBUG_FS - ixgbe_dbg_init(); -#endif /* HAVE_IXGBE_DEBUG_FS */ - - ret = pci_register_driver(&ixgbe_driver); - if (ret) { - destroy_workqueue(ixgbe_wq); -#ifdef HAVE_IXGBE_DEBUG_FS - ixgbe_dbg_exit(); -#endif /* HAVE_IXGBE_DEBUG_FS */ -#ifdef IXGBE_PROCFS - ixgbe_procfs_topdir_exit(); -#endif - return ret; -} -#if IS_ENABLED(CONFIG_DCA) - - dca_register_notify(&dca_notifier); -#endif - - return ret; -} - -module_init(ixgbe_init_module); - -/** - * ixgbe_exit_module - Driver Exit Cleanup Routine - * - * ixgbe_exit_module is called just before the driver is removed - * from memory. 
- **/ -static void __exit ixgbe_exit_module(void) -{ -#if IS_ENABLED(CONFIG_DCA) - dca_unregister_notify(&dca_notifier); -#endif - pci_unregister_driver(&ixgbe_driver); -#ifdef IXGBE_PROCFS - ixgbe_procfs_topdir_exit(); -#endif - destroy_workqueue(ixgbe_wq); -#ifdef HAVE_IXGBE_DEBUG_FS - ixgbe_dbg_exit(); -#endif /* HAVE_IXGBE_DEBUG_FS */ -} - -#if IS_ENABLED(CONFIG_DCA) -static int ixgbe_notify_dca(struct notifier_block __always_unused *nb, unsigned long event, - void __always_unused *p) -{ - int ret_val; - - ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, - __ixgbe_notify_dca); - - return ret_val ? NOTIFY_BAD : NOTIFY_DONE; -} -#endif -module_exit(ixgbe_exit_module); - -/* ixgbe_main.c */ - diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.c deleted file mode 100644 index ab3aa32489d5..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.c +++ /dev/null @@ -1,760 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe_type.h" -#include "ixgbe_mbx.h" - -/** - * ixgbe_read_mbx - Reads a message from the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to read - * - * returns SUCCESS if it successfuly read message from buffer - **/ -s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; - - DEBUGFUNC("ixgbe_read_mbx"); - - /* limit read to size of mailbox */ - if (size > mbx->size) - size = mbx->size; - - if (mbx->ops.read) - ret_val = mbx->ops.read(hw, msg, size, mbx_id); - - return ret_val; -} - -/** - * ixgbe_write_mbx - Write a message to the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully copied message into the buffer - **/ -s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_write_mbx"); - - if (size > mbx->size) { - ret_val = IXGBE_ERR_MBX; - ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, - "Invalid mailbox message size %d", size); - } else if (mbx->ops.write) - ret_val = mbx->ops.write(hw, msg, size, mbx_id); - - return ret_val; -} - -/** - * ixgbe_check_for_msg - checks to see if someone sent us mail - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check - * - * returns SUCCESS if the Status bit was found or else ERR_MBX - **/ -s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; - - DEBUGFUNC("ixgbe_check_for_msg"); - - if (mbx->ops.check_for_msg) - ret_val = mbx->ops.check_for_msg(hw, mbx_id); - - return ret_val; -} - -/** - * 
ixgbe_check_for_ack - checks to see if someone sent us ACK - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check - * - * returns SUCCESS if the Status bit was found or else ERR_MBX - **/ -s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; - - DEBUGFUNC("ixgbe_check_for_ack"); - - if (mbx->ops.check_for_ack) - ret_val = mbx->ops.check_for_ack(hw, mbx_id); - - return ret_val; -} - -/** - * ixgbe_check_for_rst - checks to see if other side has reset - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check - * - * returns SUCCESS if the Status bit was found or else ERR_MBX - **/ -s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; - - DEBUGFUNC("ixgbe_check_for_rst"); - - if (mbx->ops.check_for_rst) - ret_val = mbx->ops.check_for_rst(hw, mbx_id); - - return ret_val; -} - -/** - * ixgbe_poll_for_msg - Wait for message notification - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully received a message notification - **/ -STATIC s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - int countdown = mbx->timeout; - - DEBUGFUNC("ixgbe_poll_for_msg"); - - if (!countdown || !mbx->ops.check_for_msg) - goto out; - - while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { - countdown--; - if (!countdown) - break; - usec_delay(mbx->usec_delay); - } - - if (countdown == 0) - ERROR_REPORT2(IXGBE_ERROR_POLLING, - "Polling for VF%d mailbox message timedout", mbx_id); - -out: - return countdown ? 
IXGBE_SUCCESS : IXGBE_ERR_MBX; -} - -/** - * ixgbe_poll_for_ack - Wait for message acknowledgement - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully received a message acknowledgement - **/ -STATIC s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - int countdown = mbx->timeout; - - DEBUGFUNC("ixgbe_poll_for_ack"); - - if (!countdown || !mbx->ops.check_for_ack) - goto out; - - while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { - countdown--; - if (!countdown) - break; - usec_delay(mbx->usec_delay); - } - - if (countdown == 0) - ERROR_REPORT2(IXGBE_ERROR_POLLING, - "Polling for VF%d mailbox ack timedout", mbx_id); - -out: - return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX; -} - -/** - * ixgbe_read_posted_mbx - Wait for message notification and receive message - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully received a message notification and - * copied it into the receive buffer. 
- **/ -s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; - - DEBUGFUNC("ixgbe_read_posted_mbx"); - - if (!mbx->ops.read) - goto out; - - ret_val = ixgbe_poll_for_msg(hw, mbx_id); - - /* if ack received read message, otherwise we timed out */ - if (!ret_val) - ret_val = mbx->ops.read(hw, msg, size, mbx_id); -out: - return ret_val; -} - -/** - * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully copied message into the buffer and - * received an ack to that message within delay * timeout period - **/ -s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, - u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; - - DEBUGFUNC("ixgbe_write_posted_mbx"); - - /* exit if either we can't write or there isn't a defined timeout */ - if (!mbx->ops.write || !mbx->timeout) - goto out; - - /* send msg */ - ret_val = mbx->ops.write(hw, msg, size, mbx_id); - - /* if msg sent wait until we receive an ack */ - if (!ret_val) - ret_val = ixgbe_poll_for_ack(hw, mbx_id); -out: - return ret_val; -} - -/** - * ixgbe_init_mbx_ops_generic - Initialize MB function pointers - * @hw: pointer to the HW structure - * - * Setups up the mailbox read and write message function pointers - **/ -void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - - mbx->ops.read_posted = ixgbe_read_posted_mbx; - mbx->ops.write_posted = ixgbe_write_posted_mbx; -} - -/** - * ixgbe_read_v2p_mailbox - read v2p mailbox - * @hw: pointer to the HW structure - * - * This function is used to read the v2p mailbox without losing the read to - * clear status bits. 
- **/ -STATIC u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw) -{ - u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); - - v2p_mailbox |= hw->mbx.v2p_mailbox; - hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS; - - return v2p_mailbox; -} - -/** - * ixgbe_check_for_bit_vf - Determine if a status bit was set - * @hw: pointer to the HW structure - * @mask: bitmask for bits to be tested and cleared - * - * This function is used to check for the read to clear bits within - * the V2P mailbox. - **/ -STATIC s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask) -{ - u32 v2p_mailbox = ixgbe_read_v2p_mailbox(hw); - s32 ret_val = IXGBE_ERR_MBX; - - if (v2p_mailbox & mask) - ret_val = IXGBE_SUCCESS; - - hw->mbx.v2p_mailbox &= ~mask; - - return ret_val; -} - -/** - * ixgbe_check_for_msg_vf - checks to see if the PF has sent mail - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check - * - * returns SUCCESS if the PF has set the Status bit or else ERR_MBX - **/ -STATIC s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id) -{ - s32 ret_val = IXGBE_ERR_MBX; - - UNREFERENCED_1PARAMETER(mbx_id); - DEBUGFUNC("ixgbe_check_for_msg_vf"); - - if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) { - ret_val = IXGBE_SUCCESS; - hw->mbx.stats.reqs++; - } - - return ret_val; -} - -/** - * ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check - * - * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX - **/ -STATIC s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id) -{ - s32 ret_val = IXGBE_ERR_MBX; - - UNREFERENCED_1PARAMETER(mbx_id); - DEBUGFUNC("ixgbe_check_for_ack_vf"); - - if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) { - ret_val = IXGBE_SUCCESS; - hw->mbx.stats.acks++; - } - - return ret_val; -} - -/** - * ixgbe_check_for_rst_vf - checks to see if the PF has reset - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check - 
* - * returns true if the PF has set the reset done bit or else false - **/ -STATIC s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id) -{ - s32 ret_val = IXGBE_ERR_MBX; - - UNREFERENCED_1PARAMETER(mbx_id); - DEBUGFUNC("ixgbe_check_for_rst_vf"); - - if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | - IXGBE_VFMAILBOX_RSTI))) { - ret_val = IXGBE_SUCCESS; - hw->mbx.stats.rsts++; - } - - return ret_val; -} - -/** - * ixgbe_obtain_mbx_lock_vf - obtain mailbox lock - * @hw: pointer to the HW structure - * - * return SUCCESS if we obtained the mailbox lock - **/ -STATIC s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_ERR_MBX; - - DEBUGFUNC("ixgbe_obtain_mbx_lock_vf"); - - /* Take ownership of the buffer */ - IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); - - /* reserve mailbox for vf use */ - if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) - ret_val = IXGBE_SUCCESS; - - return ret_val; -} - -/** - * ixgbe_write_mbx_vf - Write a message to the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully copied message into the buffer - **/ -STATIC s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, - u16 mbx_id) -{ - s32 ret_val; - u16 i; - - UNREFERENCED_1PARAMETER(mbx_id); - - DEBUGFUNC("ixgbe_write_mbx_vf"); - - /* lock the mailbox to prevent pf/vf race condition */ - ret_val = ixgbe_obtain_mbx_lock_vf(hw); - if (ret_val) - goto out_no_write; - - /* flush msg and acks as we are overwriting the message buffer */ - ixgbe_check_for_msg_vf(hw, 0); - ixgbe_check_for_ack_vf(hw, 0); - - /* copy the caller specified message to the mailbox memory buffer */ - for (i = 0; i < size; i++) - IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); - - /* update stats */ - hw->mbx.stats.msgs_tx++; - - /* Drop VFU and interrupt the PF to tell it a message has been sent */ - IXGBE_WRITE_REG(hw, 
IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ); - -out_no_write: - return ret_val; -} - -/** - * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to read - * - * returns SUCCESS if it successfuly read message from buffer - **/ -STATIC s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, - u16 mbx_id) -{ - s32 ret_val = IXGBE_SUCCESS; - u16 i; - - DEBUGFUNC("ixgbe_read_mbx_vf"); - UNREFERENCED_1PARAMETER(mbx_id); - - /* lock the mailbox to prevent pf/vf race condition */ - ret_val = ixgbe_obtain_mbx_lock_vf(hw); - if (ret_val) - goto out_no_read; - - /* copy the message from the mailbox memory buffer */ - for (i = 0; i < size; i++) - msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); - - /* Acknowledge receipt and release mailbox, then we're done */ - IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK); - - /* update stats */ - hw->mbx.stats.msgs_rx++; - -out_no_read: - return ret_val; -} - -/** - * ixgbe_init_mbx_params_vf - set initial values for vf mailbox - * @hw: pointer to the HW structure - * - * Initializes the hw->mbx struct to correct values for vf mailbox - */ -void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - - /* start mailbox as timed out and let the reset_hw call set the timeout - * value to begin communications */ - mbx->timeout = 0; - mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY; - - mbx->size = IXGBE_VFMAILBOX_SIZE; - - mbx->ops.read = ixgbe_read_mbx_vf; - mbx->ops.write = ixgbe_write_mbx_vf; - mbx->ops.read_posted = ixgbe_read_posted_mbx; - mbx->ops.write_posted = ixgbe_write_posted_mbx; - mbx->ops.check_for_msg = ixgbe_check_for_msg_vf; - mbx->ops.check_for_ack = ixgbe_check_for_ack_vf; - mbx->ops.check_for_rst = ixgbe_check_for_rst_vf; - - mbx->stats.msgs_tx = 0; - mbx->stats.msgs_rx = 0; - mbx->stats.reqs = 0; - mbx->stats.acks = 0; - mbx->stats.rsts = 0; 
-} - -STATIC s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) -{ - u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); - s32 ret_val = IXGBE_ERR_MBX; - - if (mbvficr & mask) { - ret_val = IXGBE_SUCCESS; - IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); - } - - return ret_val; -} - -/** - * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * returns SUCCESS if the VF has set the Status bit or else ERR_MBX - **/ -STATIC s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) -{ - s32 ret_val = IXGBE_ERR_MBX; - s32 index = IXGBE_MBVFICR_INDEX(vf_number); - u32 vf_bit = vf_number % 16; - - DEBUGFUNC("ixgbe_check_for_msg_pf"); - - if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, - index)) { - ret_val = IXGBE_SUCCESS; - hw->mbx.stats.reqs++; - } - - return ret_val; -} - -/** - * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * returns SUCCESS if the VF has set the Status bit or else ERR_MBX - **/ -STATIC s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) -{ - s32 ret_val = IXGBE_ERR_MBX; - s32 index = IXGBE_MBVFICR_INDEX(vf_number); - u32 vf_bit = vf_number % 16; - - DEBUGFUNC("ixgbe_check_for_ack_pf"); - - if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, - index)) { - ret_val = IXGBE_SUCCESS; - hw->mbx.stats.acks++; - } - - return ret_val; -} - -/** - * ixgbe_check_for_rst_pf - checks to see if the VF has reset - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * returns SUCCESS if the VF has set the Status bit or else ERR_MBX - **/ -STATIC s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) -{ - u32 reg_offset = (vf_number < 32) ? 
0 : 1; - u32 vf_shift = vf_number % 32; - u32 vflre = 0; - s32 ret_val = IXGBE_ERR_MBX; - - DEBUGFUNC("ixgbe_check_for_rst_pf"); - - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); - break; - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - case ixgbe_mac_X540: - vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); - break; - default: - break; - } - - if (vflre & (1 << vf_shift)) { - ret_val = IXGBE_SUCCESS; - IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); - hw->mbx.stats.rsts++; - } - - return ret_val; -} - -/** - * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * return SUCCESS if we obtained the mailbox lock - **/ -STATIC s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) -{ - s32 ret_val = IXGBE_ERR_MBX; - u32 p2v_mailbox; - - DEBUGFUNC("ixgbe_obtain_mbx_lock_pf"); - - /* Take ownership of the buffer */ - IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU); - - /* reserve mailbox for vf use */ - p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); - if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) - ret_val = IXGBE_SUCCESS; - else - ERROR_REPORT2(IXGBE_ERROR_POLLING, - "Failed to obtain mailbox lock for VF%d", vf_number); - - - return ret_val; -} - -/** - * ixgbe_write_mbx_pf - Places a message in the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @vf_number: the VF index - * - * returns SUCCESS if it successfully copied message into the buffer - **/ -STATIC s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, - u16 vf_number) -{ - s32 ret_val; - u16 i; - - DEBUGFUNC("ixgbe_write_mbx_pf"); - - /* lock the mailbox to prevent pf/vf race condition */ - ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); - if (ret_val) - goto out_no_write; - - /* flush msg and acks as we are overwriting the 
message buffer */ - ixgbe_check_for_msg_pf(hw, vf_number); - ixgbe_check_for_ack_pf(hw, vf_number); - - /* copy the caller specified message to the mailbox memory buffer */ - for (i = 0; i < size; i++) - IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]); - - /* Interrupt VF to tell it a message has been sent and release buffer*/ - IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS); - - /* update stats */ - hw->mbx.stats.msgs_tx++; - -out_no_write: - return ret_val; - -} - -/** - * ixgbe_read_mbx_pf - Read a message from the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @vf_number: the VF index - * - * This function copies a message from the mailbox buffer to the caller's - * memory buffer. The presumption is that the caller knows that there was - * a message due to a VF request so no polling for message is needed. - **/ -STATIC s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, - u16 vf_number) -{ - s32 ret_val; - u16 i; - - DEBUGFUNC("ixgbe_read_mbx_pf"); - - /* lock the mailbox to prevent pf/vf race condition */ - ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); - if (ret_val) - goto out_no_read; - - /* copy the message to the mailbox memory buffer */ - for (i = 0; i < size; i++) - msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i); - - /* Acknowledge the message and release buffer */ - IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK); - - /* update stats */ - hw->mbx.stats.msgs_rx++; - -out_no_read: - return ret_val; -} - -/** - * ixgbe_init_mbx_params_pf - set initial values for pf mailbox - * @hw: pointer to the HW structure - * - * Initializes the hw->mbx struct to correct values for pf mailbox - */ -void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - - if (hw->mac.type != ixgbe_mac_82599EB && - hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x && - 
hw->mac.type != ixgbe_mac_X550EM_a && - hw->mac.type != ixgbe_mac_X540) - return; - - mbx->timeout = 0; - mbx->usec_delay = 0; - - mbx->size = IXGBE_VFMAILBOX_SIZE; - - mbx->ops.read = ixgbe_read_mbx_pf; - mbx->ops.write = ixgbe_write_mbx_pf; - mbx->ops.read_posted = ixgbe_read_posted_mbx; - mbx->ops.write_posted = ixgbe_write_posted_mbx; - mbx->ops.check_for_msg = ixgbe_check_for_msg_pf; - mbx->ops.check_for_ack = ixgbe_check_for_ack_pf; - mbx->ops.check_for_rst = ixgbe_check_for_rst_pf; - - mbx->stats.msgs_tx = 0; - mbx->stats.msgs_rx = 0; - mbx->stats.reqs = 0; - mbx->stats.acks = 0; - mbx->stats.rsts = 0; -} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.h deleted file mode 100644 index b990c321209d..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_mbx.h +++ /dev/null @@ -1,155 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_MBX_H_ -#define _IXGBE_MBX_H_ - -#include "ixgbe_type.h" - -#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ -#define IXGBE_ERR_MBX -100 - -#define IXGBE_VFMAILBOX 0x002FC -#define IXGBE_VFMBMEM 0x00200 - -/* Define mailbox register bits */ -#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ -#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ -#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ -#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ -#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ -#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ -#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ - -#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ -#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ -#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ - -#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ -#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ -#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ -#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ - -/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the - * PF. The reverse is true if it is IXGBE_PF_*. 
- * Message ACK's are the value or'd with 0xF0000000 - */ -#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - * clear to send requests */ -#define IXGBE_VT_MSGINFO_SHIFT 16 -/* bits 23:16 are used for extra info for certain messages */ -#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) - -/* definitions to support mailbox API version negotiation */ - -/* - * each element denotes a version of the API; existing numbers may not - * change; any additions must go at the end - */ -enum ixgbe_pfvf_api_rev { - ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ - ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ - ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ - ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ - ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ - /* This value should always be last */ - ixgbe_mbox_api_unknown, /* indicates that API version is not known */ -}; - -/* mailbox API, legacy requests */ -#define IXGBE_VF_RESET 0x01 /* VF requests reset */ -#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ -#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ -#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ - -/* mailbox API, version 1.0 VF requests */ -#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ -#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ -#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ - -/* mailbox API, version 1.1 VF requests */ -#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ - -/* mailbox API, version 1.2 VF requests */ -#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ -#define IXGBE_VF_GET_RSS_KEY 0x0b /* get 
RSS key */ -#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c - -/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */ -enum ixgbevf_xcast_modes { - IXGBEVF_XCAST_MODE_NONE = 0, - IXGBEVF_XCAST_MODE_MULTI, - IXGBEVF_XCAST_MODE_ALLMULTI, - IXGBEVF_XCAST_MODE_PROMISC, -}; - -/* GET_QUEUES return data indices within the mailbox */ -#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ -#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ -#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ -#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ - -/* length of permanent address message returned from PF */ -#define IXGBE_VF_PERMADDR_MSG_LEN 4 -/* word in permanent address message with the current multicast type */ -#define IXGBE_VF_MC_TYPE_WORD 3 - -#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ - -/* mailbox API, version 2.0 VF requests */ -#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ -#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ -#define IXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */ -#define IXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */ -#define IXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */ -#define IXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */ -#define IXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */ -#define IXGBE_VF_SET_MTU 0x0F /* set a specific MTU */ - -/* mailbox API, version 2.0 PF requests */ -#define IXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */ - -#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ -#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ - -s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); -s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); -s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); -s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); -s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); -s32 ixgbe_check_for_ack(struct ixgbe_hw 
*, u16); -s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); -void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw); -void ixgbe_init_mbx_params_vf(struct ixgbe_hw *); -void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); - -#endif /* _IXGBE_MBX_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep.h deleted file mode 100644 index 2e40048edcb6..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep.h +++ /dev/null @@ -1,200 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - - -/* glue for the OS independent part of ixgbe - * includes register access macros - */ - -#ifndef _IXGBE_OSDEP_H_ -#define _IXGBE_OSDEP_H_ - -#include -#include -#include -#include -#include -#include "kcompat.h" - -#define IXGBE_CPU_TO_BE16(_x) cpu_to_be16(_x) -#define IXGBE_BE16_TO_CPU(_x) be16_to_cpu(_x) -#define IXGBE_CPU_TO_BE32(_x) cpu_to_be32(_x) -#define IXGBE_BE32_TO_CPU(_x) be32_to_cpu(_x) - -#define msec_delay(_x) msleep(_x) - -#define usec_delay(_x) udelay(_x) - -#define STATIC static - -#define IOMEM __iomem - -#ifdef DBG -#define ASSERT(_x) BUG_ON(!(_x)) -#define DEBUGOUT(S) printk(KERN_DEBUG S) -#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S, ## A) -#define DEBUGOUT2(S, A...) printk(KERN_DEBUG S, ## A) -#define DEBUGOUT3(S, A...) printk(KERN_DEBUG S, ## A) -#define DEBUGOUT4(S, A...) printk(KERN_DEBUG S, ## A) -#define DEBUGOUT5(S, A...) printk(KERN_DEBUG S, ## A) -#define DEBUGOUT6(S, A...) printk(KERN_DEBUG S, ## A) -#else -#define ASSERT(_x) do {} while (0) -#define DEBUGOUT(S) do {} while (0) -#define DEBUGOUT1(S, A...) do {} while (0) -#define DEBUGOUT2(S, A...) do {} while (0) -#define DEBUGOUT3(S, A...) do {} while (0) -#define DEBUGOUT4(S, A...) do {} while (0) -#define DEBUGOUT5(S, A...) do {} while (0) -#define DEBUGOUT6(S, A...) do {} while (0) -#endif - -#define DEBUGFUNC(S) do {} while (0) - -#define IXGBE_SFP_DETECT_RETRIES 2 - -struct ixgbe_hw; -struct ixgbe_msg { - u16 msg_enable; -}; -struct net_device *ixgbe_hw_to_netdev(const struct ixgbe_hw *hw); -struct ixgbe_msg *ixgbe_hw_to_msg(const struct ixgbe_hw *hw); - -#define hw_dbg(hw, format, arg...) \ - netdev_dbg(ixgbe_hw_to_netdev(hw), format, ## arg) -#define hw_err(hw, format, arg...) \ - netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg) -#define e_dev_info(format, arg...) 
\ - dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg) -#define e_dev_warn(format, arg...) \ - dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg) -#define e_dev_err(format, arg...) \ - dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg) -#define e_dev_notice(format, arg...) \ - dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg) -#define e_dbg(msglvl, format, arg...) \ - netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_info(msglvl, format, arg...) \ - netif_info(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_err(msglvl, format, arg...) \ - netif_err(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_warn(msglvl, format, arg...) \ - netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_crit(msglvl, format, arg...) \ - netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) - -#define IXGBE_DEAD_READ_RETRIES 10 -#define IXGBE_DEAD_READ_REG 0xdeadbeefU -#define IXGBE_FAILED_READ_REG 0xffffffffU -#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU -#define IXGBE_FAILED_READ_CFG_WORD 0xffffU -#define IXGBE_FAILED_READ_CFG_BYTE 0xffU - -#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \ - IXGBE_WRITE_REG((a), (reg) + ((offset) << 2), (value)) - -#define IXGBE_READ_REG(h, r) ixgbe_read_reg(h, r, false) -#define IXGBE_R32_Q(h, r) ixgbe_read_reg(h, r, true) - -#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \ - IXGBE_READ_REG((a), (reg) + ((offset) << 2))) - -#ifndef writeq -#define writeq(val, addr) do { writel((u32) (val), addr); \ - writel((u32) (val >> 32), (addr + 4)); \ - } while (0); -#endif - -#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) - -u32 ixgbe_read_reg(struct ixgbe_hw *, u32 reg, bool quiet); -extern u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg); -extern void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value); -extern void ewarn(struct ixgbe_hw *hw, const char *str); - -#define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg_word -#define 
IXGBE_WRITE_PCIE_WORD ixgbe_write_pci_cfg_word -#define IXGBE_EEPROM_GRANT_ATTEMPS 100 -#define IXGBE_HTONL(_i) htonl(_i) -#define IXGBE_NTOHL(_i) ntohl(_i) -#define IXGBE_NTOHS(_i) ntohs(_i) -#define IXGBE_CPU_TO_LE32(_i) cpu_to_le32(_i) -#define IXGBE_CPU_TO_LE16(_i) cpu_to_le16(_i) -#define IXGBE_LE32_TO_CPU(_i) le32_to_cpu(_i) -#define IXGBE_LE32_TO_CPUS(_i) le32_to_cpus(_i) -#define EWARN(H, W) ewarn(H, W) - -enum { - IXGBE_ERROR_SOFTWARE, - IXGBE_ERROR_POLLING, - IXGBE_ERROR_INVALID_STATE, - IXGBE_ERROR_UNSUPPORTED, - IXGBE_ERROR_ARGUMENT, - IXGBE_ERROR_CAUTION, -}; - -#define ERROR_REPORT(level, format, arg...) do { \ - switch (level) { \ - case IXGBE_ERROR_SOFTWARE: \ - case IXGBE_ERROR_CAUTION: \ - case IXGBE_ERROR_POLLING: \ - netif_warn(ixgbe_hw_to_msg(hw), drv, ixgbe_hw_to_netdev(hw), \ - format, ## arg); \ - break; \ - case IXGBE_ERROR_INVALID_STATE: \ - case IXGBE_ERROR_UNSUPPORTED: \ - case IXGBE_ERROR_ARGUMENT: \ - netif_err(ixgbe_hw_to_msg(hw), hw, ixgbe_hw_to_netdev(hw), \ - format, ## arg); \ - break; \ - default: \ - break; \ - } \ -} while (0) - -#define ERROR_REPORT1 ERROR_REPORT -#define ERROR_REPORT2 ERROR_REPORT -#define ERROR_REPORT3 ERROR_REPORT - -#define UNREFERENCED_XPARAMETER -#define UNREFERENCED_1PARAMETER(_p) do { \ - uninitialized_var(_p); \ -} while (0) -#define UNREFERENCED_2PARAMETER(_p, _q) do { \ - uninitialized_var(_p); \ - uninitialized_var(_q); \ -} while (0) -#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { \ - uninitialized_var(_p); \ - uninitialized_var(_q); \ - uninitialized_var(_r); \ -} while (0) -#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do { \ - uninitialized_var(_p); \ - uninitialized_var(_q); \ - uninitialized_var(_r); \ - uninitialized_var(_s); \ -} while (0) - -#endif /* _IXGBE_OSDEP_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep2.h 
b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep2.h deleted file mode 100644 index 549b35350611..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_osdep2.h +++ /dev/null @@ -1,68 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_OSDEP2_H_ -#define _IXGBE_OSDEP2_H_ - -static inline bool ixgbe_removed(void __iomem *addr) -{ - return unlikely(!addr); -} -#define IXGBE_REMOVED(a) ixgbe_removed(a) - -static inline void IXGBE_WRITE_REG(struct ixgbe_hw *hw, u32 reg, u32 value) -{ - u8 __iomem *reg_addr; - - reg_addr = ACCESS_ONCE(hw->hw_addr); - if (IXGBE_REMOVED(reg_addr)) - return; -#ifdef DBG - switch (reg) { - case IXGBE_EIMS: - case IXGBE_EIMC: - case IXGBE_EIAM: - case IXGBE_EIAC: - case IXGBE_EICR: - case IXGBE_EICS: - printk("%s: Reg - 0x%05X, value - 0x%08X\n", __func__, - reg, value); - default: - break; - } -#endif /* DBG */ - writel(value, reg_addr + reg); -} - -static inline void IXGBE_WRITE_REG64(struct ixgbe_hw *hw, u32 reg, u64 value) -{ - u8 __iomem *reg_addr; - - reg_addr = ACCESS_ONCE(hw->hw_addr); - if (IXGBE_REMOVED(reg_addr)) - return; - writeq(value, reg_addr + reg); -} - -#endif /* _IXGBE_OSDEP2_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_param.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_param.c deleted file mode 100644 index 5efd0163ccb0..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_param.c +++ /dev/null @@ -1,1256 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. 
- - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include -#include - -#include "ixgbe.h" - -/* This is the only thing that needs to be changed to adjust the - * maximum number of ports that the driver can manage. - */ - -#define IXGBE_MAX_NIC 32 - -#define OPTION_UNSET -1 -#define OPTION_DISABLED 0 -#define OPTION_ENABLED 1 - -#define STRINGIFY(foo) #foo /* magic for getting defines into strings */ -#define XSTRINGIFY(bar) STRINGIFY(bar) - -/* All parameters are treated the same, as an integer array of values. - * This macro just reduces the need to repeat the same declaration code - * over and over (plus this helps to avoid typo bugs). - */ - -#define IXGBE_PARAM_INIT { [0 ... IXGBE_MAX_NIC] = OPTION_UNSET } -#ifndef module_param_array -/* Module Parameters are always initialized to -1, so that the driver - * can tell the difference between no user specified value or the - * user asking for the default value. - * The true default values are loaded in when ixgbe_check_options is called. - * - * This is a GCC extension to ANSI C. - * See the item "Labelled Elements in Initializers" in the section - * "Extensions to the C Language Family" of the GCC documentation. 
- */ - -#define IXGBE_PARAM(X, desc) \ - static const int __devinitdata X[IXGBE_MAX_NIC+1] = IXGBE_PARAM_INIT; \ - MODULE_PARM(X, "1-" __MODULE_STRING(IXGBE_MAX_NIC) "i"); \ - MODULE_PARM_DESC(X, desc); -#else -#define IXGBE_PARAM(X, desc) \ - static int __devinitdata X[IXGBE_MAX_NIC+1] = IXGBE_PARAM_INIT; \ - static unsigned int num_##X; \ - module_param_array_named(X, X, int, &num_##X, 0); \ - MODULE_PARM_DESC(X, desc); -#endif - -IXGBE_PARAM(EEE, "Energy Efficient Ethernet (EEE) ,0=disabled, 1=enabled )" - "default EEE disable"); -/* IntMode (Interrupt Mode) - * - * Valid Range: 0-2 - * - 0 - Legacy Interrupt - * - 1 - MSI Interrupt - * - 2 - MSI-X Interrupt(s) - * - * Default Value: 2 - */ -IXGBE_PARAM(InterruptType, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " - "default IntMode (deprecated)"); -IXGBE_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " - "default 2"); -#define IXGBE_INT_LEGACY 0 -#define IXGBE_INT_MSI 1 -#define IXGBE_INT_MSIX 2 - -/* MQ - Multiple Queue enable/disable - * - * Valid Range: 0, 1 - * - 0 - disables MQ - * - 1 - enables MQ - * - * Default Value: 1 - */ - -IXGBE_PARAM(MQ, "Disable or enable Multiple Queues, default 1"); - -#if IS_ENABLED(CONFIG_DCA) -/* DCA - Direct Cache Access (DCA) Control - * - * This option allows the device to hint to DCA enabled processors - * which CPU should have its cache warmed with the data being - * transferred over PCIe. This can increase performance by reducing - * cache misses. 
ixgbe hardware supports DCA for: - * tx descriptor writeback - * rx descriptor writeback - * rx data - * rx data header only (in packet split mode) - * - * enabling option 2 can cause cache thrash in some tests, particularly - * if the CPU is completely utilized - * - * Valid Range: 0 - 2 - * - 0 - disables DCA - * - 1 - enables DCA - * - 2 - enables DCA with rx data included - * - * Default Value: 2 - */ - -#define IXGBE_MAX_DCA 2 - -IXGBE_PARAM(DCA, "Disable or enable Direct Cache Access, 0=disabled, " - "1=descriptor only, 2=descriptor and data"); -#endif /* CONFIG_DCA */ - -/* RSS - Receive-Side Scaling (RSS) Descriptor Queues - * - * Valid Range: 0-16 - * - 0 - enables RSS and sets the Desc. Q's to min(16, num_online_cpus()). - * - 1-16 - enables RSS and sets the Desc. Q's to the specified value. - * - * Default Value: 0 - */ - -IXGBE_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues, " - "default 0=number of cpus"); - -/* VMDQ - Virtual Machine Device Queues (VMDQ) - * - * Valid Range: 1-16 - * - 0/1 Disables VMDQ by allocating only a single queue. - * - 2-16 - enables VMDQ and sets the Desc. Q's to the specified value. 
- * - * Default Value: 8 - */ - -#define IXGBE_DEFAULT_NUM_VMDQ 8 - -IXGBE_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0/1 = disable (1 queue) " - "2-16 enable (default=" XSTRINGIFY(IXGBE_DEFAULT_NUM_VMDQ) ")"); - -#ifdef CONFIG_PCI_IOV -/* max_vfs - SR I/O Virtualization - * - * Valid Range: 0-63 - * - 0 Disables SR-IOV - * - 1-63 - enables SR-IOV and sets the number of VFs enabled - * - * Default Value: 0 - */ - -#define MAX_SRIOV_VFS 63 - -IXGBE_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable (default), " - "1-" XSTRINGIFY(MAX_SRIOV_VFS) " = enable " - "this many VFs"); - -/* VEPA - Set internal bridge to VEPA mode - * - * Valid Range: 0-1 - * - 0 Set bridge to VEB mode - * - 1 Set bridge to VEPA mode - * - * Default Value: 0 - */ -/* - *Note: - *===== - * This provides ability to ensure VEPA mode on the internal bridge even if - * the kernel does not support the netdev bridge setting operations. -*/ -IXGBE_PARAM(VEPA, "VEPA Bridge Mode: 0 = VEB (default), 1 = VEPA"); -#endif - -/* Interrupt Throttle Rate (interrupts/sec) - * - * Valid Range: 956-488281 (0=off, 1=dynamic) - * - * Default Value: 1 - */ -#define DEFAULT_ITR 1 -IXGBE_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, " - "(0,1,956-488281), default 1"); -#define MAX_ITR IXGBE_MAX_INT_RATE -#define MIN_ITR IXGBE_MIN_INT_RATE - -#ifndef IXGBE_NO_LLI - -/* LLIPort (Low Latency Interrupt TCP Port) - * - * Valid Range: 0 - 65535 - * - * Default Value: 0 (disabled) - */ -IXGBE_PARAM(LLIPort, "Low Latency Interrupt TCP Port (0-65535)"); - -#define DEFAULT_LLIPORT 0 -#define MAX_LLIPORT 0xFFFF -#define MIN_LLIPORT 0 - -/* LLIPush (Low Latency Interrupt on TCP Push flag) - * - * Valid Range: 0,1 - * - * Default Value: 0 (disabled) - */ -IXGBE_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1)"); - -#define DEFAULT_LLIPUSH 0 -#define MAX_LLIPUSH 1 -#define MIN_LLIPUSH 0 - -/* LLISize (Low Latency Interrupt on Packet Size) - * - * Valid Range: 0 - 1500 - * - * 
Default Value: 0 (disabled) - */ -IXGBE_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500)"); - -#define DEFAULT_LLISIZE 0 -#define MAX_LLISIZE 1500 -#define MIN_LLISIZE 0 - -/* LLIEType (Low Latency Interrupt Ethernet Type) - * - * Valid Range: 0 - 0x8fff - * - * Default Value: 0 (disabled) - */ -IXGBE_PARAM(LLIEType, "Low Latency Interrupt Ethernet Protocol Type"); - -#define DEFAULT_LLIETYPE 0 -#define MAX_LLIETYPE 0x8fff -#define MIN_LLIETYPE 0 - -/* LLIVLANP (Low Latency Interrupt on VLAN priority threshold) - * - * Valid Range: 0 - 7 - * - * Default Value: 0 (disabled) - */ -IXGBE_PARAM(LLIVLANP, "Low Latency Interrupt on VLAN priority threshold"); - -#define DEFAULT_LLIVLANP 0 -#define MAX_LLIVLANP 7 -#define MIN_LLIVLANP 0 - -#endif /* IXGBE_NO_LLI */ -#ifdef HAVE_TX_MQ -/* Flow Director packet buffer allocation level - * - * Valid Range: 1-3 - * 1 = 8k hash/2k perfect, - * 2 = 16k hash/4k perfect, - * 3 = 32k hash/8k perfect - * - * Default Value: 0 - */ -IXGBE_PARAM(FdirPballoc, "Flow Director packet buffer allocation level:\n" - "\t\t\t1 = 8k hash filters or 2k perfect filters\n" - "\t\t\t2 = 16k hash filters or 4k perfect filters\n" - "\t\t\t3 = 32k hash filters or 8k perfect filters"); - -#define IXGBE_DEFAULT_FDIR_PBALLOC IXGBE_FDIR_PBALLOC_64K - -/* Software ATR packet sample rate - * - * Valid Range: 0-255 0 = off, 1-255 = rate of Tx packet inspection - * - * Default Value: 20 - */ -IXGBE_PARAM(AtrSampleRate, "Software ATR Tx packet sample rate"); - -#define IXGBE_MAX_ATR_SAMPLE_RATE 255 -#define IXGBE_MIN_ATR_SAMPLE_RATE 1 -#define IXGBE_ATR_SAMPLE_RATE_OFF 0 -#define IXGBE_DEFAULT_ATR_SAMPLE_RATE 20 -#endif /* HAVE_TX_MQ */ - -#if IS_ENABLED(CONFIG_FCOE) -/* FCoE - Fibre Channel over Ethernet Offload Enable/Disable - * - * Valid Range: 0, 1 - * - 0 - disables FCoE Offload - * - 1 - enables FCoE Offload - * - * Default Value: 1 - */ -IXGBE_PARAM(FCoE, "Disable or enable FCoE Offload, default 1"); -#endif /* CONFIG_FCOE */ - -/* 
Enable/disable Malicious Driver Detection - * - * Valid Values: 0(off), 1(on) - * - * Default Value: 1 - */ -IXGBE_PARAM(MDD, "Malicious Driver Detection: (0,1), default 1 = on"); - -/* Enable/disable Large Receive Offload - * - * Valid Values: 0(off), 1(on) - * - * Default Value: 1 - */ -IXGBE_PARAM(LRO, "Large Receive Offload (0,1), default 0 = off"); - -/* Enable/disable support for untested SFP+ modules on 82599-based adapters - * - * Valid Values: 0(Disable), 1(Enable) - * - * Default Value: 0 - */ -IXGBE_PARAM(allow_unsupported_sfp, "Allow unsupported and untested " - "SFP+ modules on 82599 based adapters, default 0 = Disable"); - -/* Enable/disable support for DMA coalescing - * - * Valid Values: 0(off), 41 - 10000(on) - * - * Default Value: 0 - */ -IXGBE_PARAM(dmac_watchdog, - "DMA coalescing watchdog in microseconds (0,41-10000), default 0 = off"); - -/* Enable/disable support for VXLAN rx checksum offload - * - * Valid Values: 0(Disable), 1(Enable) - * - * Default Value: 1 on hardware that supports it - */ -IXGBE_PARAM(vxlan_rx, - "VXLAN receive checksum offload (0,1), default 1 = Enable"); - - -struct ixgbe_option { - enum { enable_option, range_option, list_option } type; - const char *name; - const char *err; - const char *msg; - int def; - union { - struct { /* range_option info */ - int min; - int max; - } r; - struct { /* list_option info */ - int nr; - const struct ixgbe_opt_list { - int i; - char *str; - } *p; - } l; - } arg; -}; - -#ifndef IXGBE_NO_LLI -#ifdef module_param_array -/** - * helper function to determine LLI support - * - * LLI is only supported for 82599 and X540 - * LLIPush is not supported on 82599 - **/ -static bool __devinit ixgbe_lli_supported(struct ixgbe_adapter *adapter, - struct ixgbe_option *opt) -{ - struct ixgbe_hw *hw = &adapter->hw; - - if (hw->mac.type == ixgbe_mac_82599EB) { - - if (LLIPush[adapter->bd_number] > 0) - goto not_supp; - - return true; - } - - if (hw->mac.type == ixgbe_mac_X540) - return true; - 
-not_supp: - DPRINTK(PROBE, INFO, "%s not supported on this HW\n", opt->name); - return false; -} -#endif /* module_param_array */ -#endif /* IXGBE_NO_LLI */ - -static int __devinit ixgbe_validate_option(unsigned int *value, - struct ixgbe_option *opt) -{ - if (*value == OPTION_UNSET) { - printk(KERN_INFO "ixgbe: Invalid %s specified (%d), %s\n", - opt->name, *value, opt->err); - *value = opt->def; - return 0; - } - - switch (opt->type) { - case enable_option: - switch (*value) { - case OPTION_ENABLED: - printk(KERN_INFO "ixgbe: %s Enabled\n", opt->name); - return 0; - case OPTION_DISABLED: - printk(KERN_INFO "ixgbe: %s Disabled\n", opt->name); - return 0; - } - break; - case range_option: - if ((*value >= opt->arg.r.min && *value <= opt->arg.r.max) || - *value == opt->def) { - if (opt->msg) - printk(KERN_INFO "ixgbe: %s set to %d, %s\n", - opt->name, *value, opt->msg); - else - printk(KERN_INFO "ixgbe: %s set to %d\n", - opt->name, *value); - return 0; - } - break; - case list_option: { - int i; - - for (i = 0; i < opt->arg.l.nr; i++) { - const struct ixgbe_opt_list *ent = &opt->arg.l.p[i]; - if (*value == ent->i) { - if (ent->str[0] != '\0') - printk(KERN_INFO "%s\n", ent->str); - return 0; - } - } - } - break; - default: - BUG(); - } - - printk(KERN_INFO "ixgbe: Invalid %s specified (%d), %s\n", - opt->name, *value, opt->err); - *value = opt->def; - return -1; -} - -#define LIST_LEN(l) (sizeof(l) / sizeof(l[0])) - -/** - * ixgbe_check_options - Range Checking for Command Line Parameters - * @adapter: board private structure - * - * This routine checks all command line parameters for valid user - * input. If an invalid value is given, or if no user specified - * value exists, a default value is used. The final value is stored - * in a variable in the adapter structure. 
- **/ -void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) -{ - unsigned int mdd; - int bd = adapter->bd_number; - u32 *aflags = &adapter->flags; - struct ixgbe_ring_feature *feature = adapter->ring_feature; - unsigned int vmdq; - - if (bd >= IXGBE_MAX_NIC) { - printk(KERN_NOTICE - "Warning: no configuration for board #%d\n", bd); - printk(KERN_NOTICE "Using defaults for all values\n"); -#ifndef module_param_array - bd = IXGBE_MAX_NIC; -#endif - } - - { /* Interrupt Mode */ - unsigned int int_mode; - static struct ixgbe_option opt = { - .type = range_option, - .name = "Interrupt Mode", - .err = - "using default of " __MODULE_STRING(IXGBE_INT_MSIX), - .def = IXGBE_INT_MSIX, - .arg = { .r = { .min = IXGBE_INT_LEGACY, - .max = IXGBE_INT_MSIX} } - }; - -#ifdef module_param_array - if (num_IntMode > bd || num_InterruptType > bd) { -#endif - int_mode = IntMode[bd]; - if (int_mode == OPTION_UNSET) - int_mode = InterruptType[bd]; - ixgbe_validate_option(&int_mode, &opt); - switch (int_mode) { - case IXGBE_INT_MSIX: - if (!(*aflags & IXGBE_FLAG_MSIX_CAPABLE)) - printk(KERN_INFO - "Ignoring MSI-X setting; " - "support unavailable\n"); - break; - case IXGBE_INT_MSI: - if (!(*aflags & IXGBE_FLAG_MSI_CAPABLE)) { - printk(KERN_INFO - "Ignoring MSI setting; " - "support unavailable\n"); - } else { - *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; - } - break; - case IXGBE_INT_LEGACY: - default: - *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; - *aflags &= ~IXGBE_FLAG_MSI_CAPABLE; - break; - } -#ifdef module_param_array - } else { - /* default settings */ - if (*aflags & IXGBE_FLAG_MSIX_CAPABLE) { - *aflags |= IXGBE_FLAG_MSI_CAPABLE; - } else { - *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; - *aflags &= ~IXGBE_FLAG_MSI_CAPABLE; - } - } -#endif - } - { /* Multiple Queue Support */ - static struct ixgbe_option opt = { - .type = enable_option, - .name = "Multiple Queue Support", - .err = "defaulting to Enabled", - .def = OPTION_ENABLED - }; - -#ifdef module_param_array - if (num_MQ > bd) { -#endif 
- unsigned int mq = MQ[bd]; - ixgbe_validate_option(&mq, &opt); - if (mq) - *aflags |= IXGBE_FLAG_MQ_CAPABLE; - else - *aflags &= ~IXGBE_FLAG_MQ_CAPABLE; -#ifdef module_param_array - } else { - *aflags |= IXGBE_FLAG_MQ_CAPABLE; - } -#endif - /* Check Interoperability */ - if ((*aflags & IXGBE_FLAG_MQ_CAPABLE) && - !(*aflags & IXGBE_FLAG_MSIX_CAPABLE)) { - DPRINTK(PROBE, INFO, - "Multiple queues are not supported while MSI-X " - "is disabled. Disabling Multiple Queues.\n"); - *aflags &= ~IXGBE_FLAG_MQ_CAPABLE; - } - } -#if IS_ENABLED(CONFIG_DCA) - { /* Direct Cache Access (DCA) */ - static struct ixgbe_option opt = { - .type = range_option, - .name = "Direct Cache Access (DCA)", - .err = "defaulting to Enabled", - .def = IXGBE_MAX_DCA, - .arg = { .r = { .min = OPTION_DISABLED, - .max = IXGBE_MAX_DCA} } - }; - unsigned int dca = opt.def; - -#ifdef module_param_array - if (num_DCA > bd) { -#endif - dca = DCA[bd]; - ixgbe_validate_option(&dca, &opt); - if (!dca) - *aflags &= ~IXGBE_FLAG_DCA_CAPABLE; - - /* Check Interoperability */ - if (!(*aflags & IXGBE_FLAG_DCA_CAPABLE)) { - DPRINTK(PROBE, INFO, "DCA is disabled\n"); - *aflags &= ~IXGBE_FLAG_DCA_ENABLED; - } - - if (dca == IXGBE_MAX_DCA) { - DPRINTK(PROBE, INFO, - "DCA enabled for rx data\n"); - adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA; - } -#ifdef module_param_array - } else { - /* make sure to clear the capability flag if the - * option is disabled by default above */ - if (opt.def == OPTION_DISABLED) - *aflags &= ~IXGBE_FLAG_DCA_CAPABLE; - } -#endif - if (dca == IXGBE_MAX_DCA) - adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA; - } -#endif /* CONFIG_DCA */ - { /* Receive-Side Scaling (RSS) */ - static struct ixgbe_option opt = { - .type = range_option, - .name = "Receive-Side Scaling (RSS)", - .err = "using default.", - .def = 0, - .arg = { .r = { .min = 0, - .max = 16} } - }; - unsigned int rss = RSS[bd]; - /* adjust Max allowed RSS queues based on MAC type */ - opt.arg.r.max = ixgbe_max_rss_indices(adapter); 
- -#ifdef module_param_array - if (num_RSS > bd) { -#endif - ixgbe_validate_option(&rss, &opt); - /* base it off num_online_cpus() with hardware limit */ - if (!rss) - rss = min_t(int, opt.arg.r.max, - num_online_cpus()); - else - feature[RING_F_FDIR].limit = rss; - - feature[RING_F_RSS].limit = rss; -#ifdef module_param_array - } else if (opt.def == 0) { - rss = min_t(int, ixgbe_max_rss_indices(adapter), - num_online_cpus()); - feature[RING_F_RSS].limit = rss; - } -#endif - /* Check Interoperability */ - if (rss > 1) { - if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { - DPRINTK(PROBE, INFO, - "Multiqueue is disabled. " - "Limiting RSS.\n"); - feature[RING_F_RSS].limit = 1; - } - } - } - { /* Virtual Machine Device Queues (VMDQ) */ - static struct ixgbe_option opt = { - .type = range_option, - .name = "Virtual Machine Device Queues (VMDQ)", - .err = "defaulting to Disabled", - .def = OPTION_DISABLED, - .arg = { .r = { .min = OPTION_DISABLED, - .max = IXGBE_MAX_VMDQ_INDICES - } } - }; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - /* 82598 only supports up to 16 pools */ - opt.arg.r.max = 16; - break; - default: - break; - } - -#ifdef module_param_array - if (num_VMDQ > bd) { -#endif - vmdq = VMDQ[bd]; - - ixgbe_validate_option(&vmdq, &opt); - - /* zero or one both mean disabled from our driver's - * perspective */ - if (vmdq > 1) { - *aflags |= IXGBE_FLAG_VMDQ_ENABLED; - } else - *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; - - feature[RING_F_VMDQ].limit = vmdq; -#ifdef module_param_array - } else { - if (opt.def == OPTION_DISABLED) - *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; - else - *aflags |= IXGBE_FLAG_VMDQ_ENABLED; - - feature[RING_F_VMDQ].limit = opt.def; - } -#endif - /* Check Interoperability */ - if (*aflags & IXGBE_FLAG_VMDQ_ENABLED) { - if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { - DPRINTK(PROBE, INFO, - "VMDQ is not supported while multiple " - "queues are disabled. 
" - "Disabling VMDQ.\n"); - *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; - feature[RING_F_VMDQ].limit = 0; - } - } - } -#ifdef CONFIG_PCI_IOV - { /* Single Root I/O Virtualization (SR-IOV) */ - static struct ixgbe_option opt = { - .type = range_option, - .name = "I/O Virtualization (IOV)", - .err = "defaulting to Disabled", - .def = OPTION_DISABLED, - .arg = { .r = { .min = OPTION_DISABLED, - .max = MAX_SRIOV_VFS} } - }; - -#ifdef module_param_array - if (num_max_vfs > bd) { -#endif - unsigned int vfs = max_vfs[bd]; - if (ixgbe_validate_option(&vfs, &opt)) { - vfs = 0; - DPRINTK(PROBE, INFO, - "max_vfs out of range " - "Disabling SR-IOV.\n"); - } - - adapter->max_vfs = vfs; - - if (vfs) - *aflags |= IXGBE_FLAG_SRIOV_ENABLED; - else - *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; -#ifdef module_param_array - } else { - if (opt.def == OPTION_DISABLED) { - adapter->max_vfs = 0; - *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; - } else { - adapter->max_vfs = opt.def; - *aflags |= IXGBE_FLAG_SRIOV_ENABLED; - } - } -#endif - - /* Check Interoperability */ - if (*aflags & IXGBE_FLAG_SRIOV_ENABLED) { - if (!(*aflags & IXGBE_FLAG_SRIOV_CAPABLE)) { - DPRINTK(PROBE, INFO, - "IOV is not supported on this " - "hardware. Disabling IOV.\n"); - *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; - adapter->max_vfs = 0; - } else if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { - DPRINTK(PROBE, INFO, - "IOV is not supported while multiple " - "queues are disabled. 
" - "Disabling IOV.\n"); - *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; - adapter->max_vfs = 0; - } - } - } - { /* VEPA Bridge Mode enable for SR-IOV mode */ - static struct ixgbe_option opt = { - .type = range_option, - .name = "VEPA Bridge Mode Enable", - .err = "defaulting to disabled", - .def = OPTION_DISABLED, - .arg = { .r = { .min = OPTION_DISABLED, - .max = OPTION_ENABLED} } - }; - -#ifdef module_param_array - if (num_VEPA > bd) { -#endif - unsigned int vepa = VEPA[bd]; - ixgbe_validate_option(&vepa, &opt); - if (vepa) - adapter->flags |= - IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; -#ifdef module_param_array - } else { - if (opt.def == OPTION_ENABLED) - adapter->flags |= - IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; - } -#endif - } -#endif /* CONFIG_PCI_IOV */ - { /* Interrupt Throttling Rate */ - static struct ixgbe_option opt = { - .type = range_option, - .name = "Interrupt Throttling Rate (ints/sec)", - .err = "using default of "__MODULE_STRING(DEFAULT_ITR), - .def = DEFAULT_ITR, - .arg = { .r = { .min = MIN_ITR, - .max = MAX_ITR } } - }; - -#ifdef module_param_array - if (num_InterruptThrottleRate > bd) { -#endif - u32 itr = InterruptThrottleRate[bd]; - switch (itr) { - case 0: - DPRINTK(PROBE, INFO, "%s turned off\n", - opt.name); - adapter->rx_itr_setting = 0; - break; - case 1: - DPRINTK(PROBE, INFO, "dynamic interrupt " - "throttling enabled\n"); - adapter->rx_itr_setting = 1; - break; - default: - ixgbe_validate_option(&itr, &opt); - /* the first bit is used as control */ - adapter->rx_itr_setting = (1000000/itr) << 2; - break; - } - adapter->tx_itr_setting = adapter->rx_itr_setting; -#ifdef module_param_array - } else { - adapter->rx_itr_setting = opt.def; - adapter->tx_itr_setting = opt.def; - } -#endif - } -#ifndef IXGBE_NO_LLI - { /* Low Latency Interrupt TCP Port*/ - static struct ixgbe_option opt = { - .type = range_option, - .name = "Low Latency Interrupt TCP Port", - .err = "using default of " - __MODULE_STRING(DEFAULT_LLIPORT), - .def = DEFAULT_LLIPORT, - .arg = 
{ .r = { .min = MIN_LLIPORT, - .max = MAX_LLIPORT } } - }; - -#ifdef module_param_array - if (num_LLIPort > bd && ixgbe_lli_supported(adapter, &opt)) { -#endif - adapter->lli_port = LLIPort[bd]; - if (adapter->lli_port) { - ixgbe_validate_option(&adapter->lli_port, &opt); - } else { - DPRINTK(PROBE, INFO, "%s turned off\n", - opt.name); - } -#ifdef module_param_array - } else { - adapter->lli_port = opt.def; - } -#endif - } - { /* Low Latency Interrupt on Packet Size */ - static struct ixgbe_option opt = { - .type = range_option, - .name = "Low Latency Interrupt on Packet Size", - .err = "using default of " - __MODULE_STRING(DEFAULT_LLISIZE), - .def = DEFAULT_LLISIZE, - .arg = { .r = { .min = MIN_LLISIZE, - .max = MAX_LLISIZE } } - }; - -#ifdef module_param_array - if (num_LLISize > bd && ixgbe_lli_supported(adapter, &opt)) { -#endif - adapter->lli_size = LLISize[bd]; - if (adapter->lli_size) { - ixgbe_validate_option(&adapter->lli_size, &opt); - } else { - DPRINTK(PROBE, INFO, "%s turned off\n", - opt.name); - } -#ifdef module_param_array - } else { - adapter->lli_size = opt.def; - } -#endif - } - { /*Low Latency Interrupt on TCP Push flag*/ - static struct ixgbe_option opt = { - .type = enable_option, - .name = "Low Latency Interrupt on TCP Push flag", - .err = "defaulting to Disabled", - .def = OPTION_DISABLED - }; - -#ifdef module_param_array - if (num_LLIPush > bd && ixgbe_lli_supported(adapter, &opt)) { -#endif - unsigned int lli_push = LLIPush[bd]; - - ixgbe_validate_option(&lli_push, &opt); - if (lli_push) - *aflags |= IXGBE_FLAG_LLI_PUSH; - else - *aflags &= ~IXGBE_FLAG_LLI_PUSH; -#ifdef module_param_array - } else { - *aflags &= ~IXGBE_FLAG_LLI_PUSH; - } -#endif - } - { /* Low Latency Interrupt EtherType*/ - static struct ixgbe_option opt = { - .type = range_option, - .name = "Low Latency Interrupt on Ethernet Protocol " - "Type", - .err = "using default of " - __MODULE_STRING(DEFAULT_LLIETYPE), - .def = DEFAULT_LLIETYPE, - .arg = { .r = { .min = 
MIN_LLIETYPE, - .max = MAX_LLIETYPE } } - }; - -#ifdef module_param_array - if (num_LLIEType > bd && ixgbe_lli_supported(adapter, &opt)) { -#endif - adapter->lli_etype = LLIEType[bd]; - if (adapter->lli_etype) { - ixgbe_validate_option(&adapter->lli_etype, - &opt); - } else { - DPRINTK(PROBE, INFO, "%s turned off\n", - opt.name); - } -#ifdef module_param_array - } else { - adapter->lli_etype = opt.def; - } -#endif - } - { /* LLI VLAN Priority */ - static struct ixgbe_option opt = { - .type = range_option, - .name = "Low Latency Interrupt on VLAN priority " - "threshold", - .err = "using default of " - __MODULE_STRING(DEFAULT_LLIVLANP), - .def = DEFAULT_LLIVLANP, - .arg = { .r = { .min = MIN_LLIVLANP, - .max = MAX_LLIVLANP } } - }; - -#ifdef module_param_array - if (num_LLIVLANP > bd && ixgbe_lli_supported(adapter, &opt)) { -#endif - adapter->lli_vlan_pri = LLIVLANP[bd]; - if (adapter->lli_vlan_pri) { - ixgbe_validate_option(&adapter->lli_vlan_pri, - &opt); - } else { - DPRINTK(PROBE, INFO, "%s turned off\n", - opt.name); - } -#ifdef module_param_array - } else { - adapter->lli_vlan_pri = opt.def; - } -#endif - } -#endif /* IXGBE_NO_LLI */ -#ifdef HAVE_TX_MQ - { /* Flow Director packet buffer allocation */ - unsigned int fdir_pballoc_mode; - static struct ixgbe_option opt = { - .type = range_option, - .name = "Flow Director packet buffer allocation", - .err = "using default of " - __MODULE_STRING(IXGBE_DEFAULT_FDIR_PBALLOC), - .def = IXGBE_DEFAULT_FDIR_PBALLOC, - .arg = {.r = {.min = IXGBE_FDIR_PBALLOC_64K, - .max = IXGBE_FDIR_PBALLOC_256K} } - }; - char pstring[10]; - - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { - adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_NONE; - } else if (num_FdirPballoc > bd) { - fdir_pballoc_mode = FdirPballoc[bd]; - ixgbe_validate_option(&fdir_pballoc_mode, &opt); - switch (fdir_pballoc_mode) { - case IXGBE_FDIR_PBALLOC_256K: - adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_256K; - sprintf(pstring, "256kB"); - break; - case 
IXGBE_FDIR_PBALLOC_128K: - adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_128K; - sprintf(pstring, "128kB"); - break; - case IXGBE_FDIR_PBALLOC_64K: - default: - adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; - sprintf(pstring, "64kB"); - break; - } - DPRINTK(PROBE, INFO, "Flow Director will be allocated " - "%s of packet buffer\n", pstring); - } else { - adapter->fdir_pballoc = opt.def; - } - - } - { /* Flow Director ATR Tx sample packet rate */ - static struct ixgbe_option opt = { - .type = range_option, - .name = "Software ATR Tx packet sample rate", - .err = "using default of " - __MODULE_STRING(IXGBE_DEFAULT_ATR_SAMPLE_RATE), - .def = IXGBE_DEFAULT_ATR_SAMPLE_RATE, - .arg = {.r = {.min = IXGBE_ATR_SAMPLE_RATE_OFF, - .max = IXGBE_MAX_ATR_SAMPLE_RATE} } - }; - static const char atr_string[] = - "ATR Tx Packet sample rate set to"; - - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { - adapter->atr_sample_rate = IXGBE_ATR_SAMPLE_RATE_OFF; - } else if (num_AtrSampleRate > bd) { - adapter->atr_sample_rate = AtrSampleRate[bd]; - - if (adapter->atr_sample_rate) { - ixgbe_validate_option(&adapter->atr_sample_rate, - &opt); - DPRINTK(PROBE, INFO, "%s %d\n", atr_string, - adapter->atr_sample_rate); - } - } else { - adapter->atr_sample_rate = opt.def; - } - } -#endif /* HAVE_TX_MQ */ -#if IS_ENABLED(CONFIG_FCOE) - { - *aflags &= ~IXGBE_FLAG_FCOE_CAPABLE; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_82599EB: { - struct ixgbe_option opt = { - .type = enable_option, - .name = "Enabled/Disable FCoE offload", - .err = "defaulting to Enabled", - .def = OPTION_ENABLED - }; -#ifdef module_param_array - if (num_FCoE > bd) { -#endif - unsigned int fcoe = FCoE[bd]; - - ixgbe_validate_option(&fcoe, &opt); - if (fcoe) - *aflags |= IXGBE_FLAG_FCOE_CAPABLE; -#ifdef module_param_array - } else { - if (opt.def == OPTION_ENABLED) - *aflags |= IXGBE_FLAG_FCOE_CAPABLE; - } -#endif - DPRINTK(PROBE, INFO, "FCoE Offload feature %sabled\n", - 
(*aflags & IXGBE_FLAG_FCOE_CAPABLE) ? - "en" : "dis"); - } - break; - default: - break; - } - } -#endif /* CONFIG_FCOE */ - { /* LRO - Set Large Receive Offload */ - struct ixgbe_option opt = { - .type = enable_option, - .name = "LRO - Large Receive Offload", - .err = "defaulting to Disabled", - .def = OPTION_DISABLED - }; - struct net_device *netdev = adapter->netdev; - - if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) - opt.def = OPTION_DISABLED; - -#ifdef module_param_array - if (num_LRO > bd) { -#endif - unsigned int lro = LRO[bd]; - ixgbe_validate_option(&lro, &opt); - if (lro) - netdev->features |= NETIF_F_LRO; - else - netdev->features &= ~NETIF_F_LRO; -#ifdef module_param_array - } else { - netdev->features &= ~NETIF_F_LRO; - } -#endif - if ((netdev->features & NETIF_F_LRO) && - !(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) { - DPRINTK(PROBE, INFO, - "RSC is not supported on this " - "hardware. Disabling RSC.\n"); - netdev->features &= ~NETIF_F_LRO; - } - } - { /* - * allow_unsupported_sfp - Enable/Disable support for unsupported - * and untested SFP+ modules. 
- */ - struct ixgbe_option opt = { - .type = enable_option, - .name = "allow_unsupported_sfp", - .err = "defaulting to Disabled", - .def = OPTION_DISABLED - }; -#ifdef module_param_array - if (num_allow_unsupported_sfp > bd) { -#endif - unsigned int enable_unsupported_sfp = - allow_unsupported_sfp[bd]; - ixgbe_validate_option(&enable_unsupported_sfp, &opt); - if (enable_unsupported_sfp) { - adapter->hw.allow_unsupported_sfp = true; - } else { - adapter->hw.allow_unsupported_sfp = false; - } -#ifdef module_param_array - } else { - adapter->hw.allow_unsupported_sfp = false; - } -#endif - } - { /* DMA Coalescing */ - struct ixgbe_option opt = { - .type = range_option, - .name = "dmac_watchdog", - .err = "defaulting to 0 (disabled)", - .def = 0, - .arg = { .r = { .min = 41, .max = 10000 } }, - }; - const char *cmsg = "DMA coalescing not supported on this hardware"; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - if (adapter->rx_itr_setting || adapter->tx_itr_setting) - break; - opt.err = "interrupt throttling disabled also disables DMA coalescing"; - opt.arg.r.min = 0; - opt.arg.r.max = 0; - break; - default: - opt.err = cmsg; - opt.msg = cmsg; - opt.arg.r.min = 0; - opt.arg.r.max = 0; - } -#ifdef module_param_array - if (num_dmac_watchdog > bd) { -#endif - unsigned int dmac_wd = dmac_watchdog[bd]; - - ixgbe_validate_option(&dmac_wd, &opt); - adapter->hw.mac.dmac_config.watchdog_timer = dmac_wd; -#ifdef module_param_array - } else { - adapter->hw.mac.dmac_config.watchdog_timer = opt.def; - } -#endif - } - { /* VXLAN rx offload */ - struct ixgbe_option opt = { - .type = range_option, - .name = "vxlan_rx", - .err = "defaulting to 1 (enabled)", - .def = 1, - .arg = { .r = { .min = 0, .max = 1 } }, - }; - const char *cmsg = "VXLAN rx offload not supported on this hardware"; - const u32 flag = IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE; - - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { - opt.err = cmsg; - 
opt.msg = cmsg; - opt.def = 0; - opt.arg.r.max = 0; - } -#ifdef module_param_array - if (num_vxlan_rx > bd) { -#endif - unsigned int enable_vxlan_rx = vxlan_rx[bd]; - - ixgbe_validate_option(&enable_vxlan_rx, &opt); - if (enable_vxlan_rx) - adapter->flags |= flag; - else - adapter->flags &= ~flag; -#ifdef module_param_array - } else if (opt.def) { - adapter->flags |= flag; - } else { - adapter->flags &= ~flag; - } -#endif - } - - { /* MDD support */ - struct ixgbe_option opt = { - .type = enable_option, - .name = "Malicious Driver Detection", - .err = "defaulting to Enabled", - .def = OPTION_ENABLED, - }; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: -#ifdef module_param_array - if (num_MDD > bd) { -#endif - mdd = MDD[bd]; - ixgbe_validate_option(&mdd, &opt); - - if (mdd){ - *aflags |= IXGBE_FLAG_MDD_ENABLED; - - } else{ - *aflags &= ~IXGBE_FLAG_MDD_ENABLED; - } -#ifdef module_param_array - } else { - *aflags |= IXGBE_FLAG_MDD_ENABLED; - } -#endif - break; - default: - *aflags &= ~IXGBE_FLAG_MDD_ENABLED; - break; - } - } - -} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.c deleted file mode 100644 index 442f9a9c174a..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.c +++ /dev/null @@ -1,2685 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. 
- - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe_api.h" -#include "ixgbe_common.h" -#include "ixgbe_phy.h" - -STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw); -STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw); -STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); -STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); -STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); -STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); -STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); -STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); -STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); -STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); -STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); -STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 *sff8472_data); - -/** - * ixgbe_out_i2c_byte_ack - Send I2C byte with ack - * @hw: pointer to the hardware structure - * @byte: byte to send - * - * Returns an error code on error. 
- */ -STATIC s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) -{ - s32 status; - - status = ixgbe_clock_out_i2c_byte(hw, byte); - if (status) - return status; - return ixgbe_get_i2c_ack(hw); -} - -/** - * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack - * @hw: pointer to the hardware structure - * @byte: pointer to a u8 to receive the byte - * - * Returns an error code on error. - */ -STATIC s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) -{ - s32 status; - - status = ixgbe_clock_in_i2c_byte(hw, byte); - if (status) - return status; - /* ACK */ - return ixgbe_clock_out_i2c_bit(hw, false); -} - -/** - * ixgbe_ones_comp_byte_add - Perform one's complement addition - * @add1 - addend 1 - * @add2 - addend 2 - * - * Returns one's complement 8-bit sum. - */ -STATIC u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) -{ - u16 sum = add1 + add2; - - sum = (sum & 0xFF) + (sum >> 8); - return sum & 0xFF; -} - -/** - * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to read from - * @reg: I2C device register to read from - * @val: pointer to location to receive read value - * @lock: true if to take and release semaphore - * - * Returns an error code on error. 
- */ -s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, - u16 *val, bool lock) -{ - u32 swfw_mask = hw->phy.phy_semaphore_mask; - int max_retry = 3; - int retry = 0; - u8 csum_byte; - u8 high_bits; - u8 low_bits; - u8 reg_high; - u8 csum; - - reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */ - csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); - csum = ~csum; - do { - if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) - return IXGBE_ERR_SWFW_SYNC; - ixgbe_i2c_start(hw); - /* Device Address and write indication */ - if (ixgbe_out_i2c_byte_ack(hw, addr)) - goto fail; - /* Write bits 14:8 */ - if (ixgbe_out_i2c_byte_ack(hw, reg_high)) - goto fail; - /* Write bits 7:0 */ - if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) - goto fail; - /* Write csum */ - if (ixgbe_out_i2c_byte_ack(hw, csum)) - goto fail; - /* Re-start condition */ - ixgbe_i2c_start(hw); - /* Device Address and read indication */ - if (ixgbe_out_i2c_byte_ack(hw, addr | 1)) - goto fail; - /* Get upper bits */ - if (ixgbe_in_i2c_byte_ack(hw, &high_bits)) - goto fail; - /* Get low bits */ - if (ixgbe_in_i2c_byte_ack(hw, &low_bits)) - goto fail; - /* Get csum */ - if (ixgbe_clock_in_i2c_byte(hw, &csum_byte)) - goto fail; - /* NACK */ - if (ixgbe_clock_out_i2c_bit(hw, false)) - goto fail; - ixgbe_i2c_stop(hw); - if (lock) - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - *val = (high_bits << 8) | low_bits; - return 0; - -fail: - ixgbe_i2c_bus_clear(hw); - if (lock) - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - retry++; - if (retry < max_retry) - DEBUGOUT("I2C byte read combined error - Retrying.\n"); - else - DEBUGOUT("I2C byte read combined error.\n"); - } while (retry < max_retry); - - return IXGBE_ERR_I2C; -} - -/** - * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to write to - * @reg: I2C device register to write to - * @val: value to write - * @lock: 
true if to take and release semaphore - * - * Returns an error code on error. - */ -s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, - u16 val, bool lock) -{ - u32 swfw_mask = hw->phy.phy_semaphore_mask; - int max_retry = 1; - int retry = 0; - u8 reg_high; - u8 csum; - - reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */ - csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); - csum = ixgbe_ones_comp_byte_add(csum, val >> 8); - csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF); - csum = ~csum; - do { - if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) - return IXGBE_ERR_SWFW_SYNC; - ixgbe_i2c_start(hw); - /* Device Address and write indication */ - if (ixgbe_out_i2c_byte_ack(hw, addr)) - goto fail; - /* Write bits 14:8 */ - if (ixgbe_out_i2c_byte_ack(hw, reg_high)) - goto fail; - /* Write bits 7:0 */ - if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) - goto fail; - /* Write data 15:8 */ - if (ixgbe_out_i2c_byte_ack(hw, val >> 8)) - goto fail; - /* Write data 7:0 */ - if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF)) - goto fail; - /* Write csum */ - if (ixgbe_out_i2c_byte_ack(hw, csum)) - goto fail; - ixgbe_i2c_stop(hw); - if (lock) - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - return 0; - -fail: - ixgbe_i2c_bus_clear(hw); - if (lock) - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - retry++; - if (retry < max_retry) - DEBUGOUT("I2C byte write combined error - Retrying.\n"); - else - DEBUGOUT("I2C byte write combined error.\n"); - } while (retry < max_retry); - - return IXGBE_ERR_I2C; -} - -/** - * ixgbe_init_phy_ops_generic - Inits PHY function ptrs - * @hw: pointer to the hardware structure - * - * Initialize the function pointers. 
- **/ -s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw) -{ - struct ixgbe_phy_info *phy = &hw->phy; - - DEBUGFUNC("ixgbe_init_phy_ops_generic"); - - /* PHY */ - phy->ops.identify = ixgbe_identify_phy_generic; - phy->ops.reset = ixgbe_reset_phy_generic; - phy->ops.read_reg = ixgbe_read_phy_reg_generic; - phy->ops.write_reg = ixgbe_write_phy_reg_generic; - phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi; - phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi; - phy->ops.setup_link = ixgbe_setup_phy_link_generic; - phy->ops.setup_link_speed = ixgbe_setup_phy_link_speed_generic; - phy->ops.check_link = NULL; - phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic; - phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_generic; - phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_generic; - phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_generic; - phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_generic; - phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_generic; - phy->ops.i2c_bus_clear = ixgbe_i2c_bus_clear; - phy->ops.identify_sfp = ixgbe_identify_module_generic; - phy->sfp_type = ixgbe_sfp_type_unknown; - phy->ops.read_i2c_byte_unlocked = ixgbe_read_i2c_byte_generic_unlocked; - phy->ops.write_i2c_byte_unlocked = - ixgbe_write_i2c_byte_generic_unlocked; - phy->ops.check_overtemp = ixgbe_tn_check_overtemp; - return IXGBE_SUCCESS; -} - -/** - * ixgbe_probe_phy - Probe a single address for a PHY - * @hw: pointer to hardware structure - * @phy_addr: PHY address to probe - * - * Returns true if PHY found - */ -static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr) -{ - u16 ext_ability = 0; - - if (!ixgbe_validate_phy_addr(hw, phy_addr)) { - DEBUGOUT1("Unable to validate PHY address 0x%04X\n", - phy_addr); - return false; - } - - if (ixgbe_get_phy_id(hw)) - return false; - - hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); - - if (hw->phy.type == ixgbe_phy_unknown) { - hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, - 
IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); - if (ext_ability & - (IXGBE_MDIO_PHY_10GBASET_ABILITY | - IXGBE_MDIO_PHY_1000BASET_ABILITY)) - hw->phy.type = ixgbe_phy_cu_unknown; - else - hw->phy.type = ixgbe_phy_generic; - } - - return true; -} - -/** - * ixgbe_identify_phy_generic - Get physical layer module - * @hw: pointer to hardware structure - * - * Determines the physical layer module found on the current adapter. - **/ -s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_ERR_PHY_ADDR_INVALID; - u16 phy_addr; - - DEBUGFUNC("ixgbe_identify_phy_generic"); - - if (!hw->phy.phy_semaphore_mask) { - if (hw->bus.lan_id) - hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; - else - hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; - } - - if (hw->phy.type != ixgbe_phy_unknown) - return IXGBE_SUCCESS; - - if (hw->phy.nw_mng_if_sel) { - phy_addr = (hw->phy.nw_mng_if_sel & - IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> - IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; - if (ixgbe_probe_phy(hw, phy_addr)) - return IXGBE_SUCCESS; - else - return IXGBE_ERR_PHY_ADDR_INVALID; - } - - for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { - if (ixgbe_probe_phy(hw, phy_addr)) { - status = IXGBE_SUCCESS; - break; - } - } - - /* Certain media types do not have a phy so an address will not - * be found and the code will take this path. Caller has to - * decide if it is an error or not. - */ - if (status != IXGBE_SUCCESS) - hw->phy.addr = 0; - - return status; -} - -/** - * ixgbe_check_reset_blocked - check status of MNG FW veto bit - * @hw: pointer to the hardware structure - * - * This function checks the MMNGC.MNG_VETO bit to see if there are - * any constraints on link from manageability. For MAC's that don't - * have this bit just return faluse since the link can not be blocked - * via this method. 
- **/ -s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw) -{ - u32 mmngc; - - DEBUGFUNC("ixgbe_check_reset_blocked"); - - /* If we don't have this bit, it can't be blocking */ - if (hw->mac.type == ixgbe_mac_82598EB) - return false; - - mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC); - if (mmngc & IXGBE_MMNGC_MNG_VETO) { - ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, - "MNG_VETO bit detected.\n"); - return true; - } - - return false; -} - -/** - * ixgbe_validate_phy_addr - Determines phy address is valid - * @hw: pointer to hardware structure - * - **/ -bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr) -{ - u16 phy_id = 0; - bool valid = false; - - DEBUGFUNC("ixgbe_validate_phy_addr"); - - hw->phy.addr = phy_addr; - hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id); - - if (phy_id != 0xFFFF && phy_id != 0x0) - valid = true; - - DEBUGOUT1("PHY ID HIGH is 0x%04X\n", phy_id); - - return valid; -} - -/** - * ixgbe_get_phy_id - Get the phy type - * @hw: pointer to hardware structure - * - **/ -s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) -{ - u32 status; - u16 phy_id_high = 0; - u16 phy_id_low = 0; - - DEBUGFUNC("ixgbe_get_phy_id"); - - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - &phy_id_high); - - if (status == IXGBE_SUCCESS) { - hw->phy.id = (u32)(phy_id_high << 16); - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - &phy_id_low); - hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); - hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); - } - DEBUGOUT2("PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X\n", - phy_id_high, phy_id_low); - - return status; -} - -/** - * ixgbe_get_phy_type_from_id - Get the phy type - * @phy_id: PHY ID information - * - **/ -enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) -{ - enum ixgbe_phy_type phy_type; - - DEBUGFUNC("ixgbe_get_phy_type_from_id"); - - switch (phy_id) { - case TN1010_PHY_ID: - 
phy_type = ixgbe_phy_tn; - break; - case X550_PHY_ID2: - case X550_PHY_ID3: - case X540_PHY_ID: - phy_type = ixgbe_phy_aq; - break; - case QT2022_PHY_ID: - phy_type = ixgbe_phy_qt; - break; - case ATH_PHY_ID: - phy_type = ixgbe_phy_nl; - break; - case X557_PHY_ID: - case X557_PHY_ID2: - phy_type = ixgbe_phy_x550em_ext_t; - break; - case IXGBE_M88E1500_E_PHY_ID: - case IXGBE_M88E1543_E_PHY_ID: - phy_type = ixgbe_phy_ext_1g_t; - break; - default: - phy_type = ixgbe_phy_unknown; - break; - } - return phy_type; -} - -/** - * ixgbe_reset_phy_generic - Performs a PHY reset - * @hw: pointer to hardware structure - **/ -s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) -{ - u32 i; - u16 ctrl = 0; - s32 status = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_reset_phy_generic"); - - if (hw->phy.type == ixgbe_phy_unknown) - status = ixgbe_identify_phy_generic(hw); - - if (status != IXGBE_SUCCESS || hw->phy.type == ixgbe_phy_none) - goto out; - - /* Don't reset PHY if it's shut down due to overtemp. */ - if (!hw->phy.reset_if_overtemp && - (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) - goto out; - - /* Blocked by MNG FW so bail */ - if (ixgbe_check_reset_blocked(hw)) - goto out; - - /* - * Perform soft PHY reset to the PHY_XS. - * This will cause a soft reset to the PHY - */ - hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, - IXGBE_MDIO_PHY_XS_DEV_TYPE, - IXGBE_MDIO_PHY_XS_RESET); - - /* - * Poll for reset bit to self-clear indicating reset is complete. - * Some PHYs could take up to 3 seconds to complete and need about - * 1.7 usec delay after the reset is complete. 
- */ - for (i = 0; i < 30; i++) { - msec_delay(100); - if (hw->phy.type == ixgbe_phy_x550em_ext_t) { - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_TX_VENDOR_ALARMS_3, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - &ctrl); - if (status != IXGBE_SUCCESS) - return status; - - if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { - usec_delay(2); - break; - } - } else { - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_PHY_XS_CONTROL, - IXGBE_MDIO_PHY_XS_DEV_TYPE, - &ctrl); - if (status != IXGBE_SUCCESS) - return status; - - if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) { - usec_delay(2); - break; - } - } - } - - if (ctrl & IXGBE_MDIO_PHY_XS_RESET) { - status = IXGBE_ERR_RESET_FAILED; - ERROR_REPORT1(IXGBE_ERROR_POLLING, - "PHY reset polling failed to complete.\n"); - } - -out: - return status; -} - -/** - * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without - * the SWFW lock - * @hw: pointer to hardware structure - * @reg_addr: 32 bit address of PHY register to read - * @phy_data: Pointer to read data from PHY register - **/ -s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, - u16 *phy_data) -{ - u32 i, data, command; - - /* Setup and write the address cycle command */ - command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); - - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - - /* - * Check every 10 usec to see if the address cycle completed. 
- * The MDI Command bit will clear when the operation is - * complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - usec_delay(10); - - command = IXGBE_READ_REG(hw, IXGBE_MSCA); - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; - } - - - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n"); - DEBUGOUT("PHY address command did not complete, returning IXGBE_ERR_PHY\n"); - return IXGBE_ERR_PHY; - } - - /* - * Address cycle complete, setup and write the read - * command - */ - command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); - - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - - /* - * Check every 10 usec to see if the address cycle - * completed. The MDI Command bit will clear when the - * operation is complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - usec_delay(10); - - command = IXGBE_READ_REG(hw, IXGBE_MSCA); - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; - } - - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n"); - DEBUGOUT("PHY read command didn't complete, returning IXGBE_ERR_PHY\n"); - return IXGBE_ERR_PHY; - } - - /* - * Read operation is complete. 
Get the data - * from MSRWD - */ - data = IXGBE_READ_REG(hw, IXGBE_MSRWD); - data >>= IXGBE_MSRWD_READ_DATA_SHIFT; - *phy_data = (u16)(data); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register - * using the SWFW lock - this function is needed in most cases - * @hw: pointer to hardware structure - * @reg_addr: 32 bit address of PHY register to read - * @phy_data: Pointer to read data from PHY register - **/ -s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data) -{ - s32 status; - u32 gssr = hw->phy.phy_semaphore_mask; - - DEBUGFUNC("ixgbe_read_phy_reg_generic"); - - if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) - return IXGBE_ERR_SWFW_SYNC; - - status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); - - hw->mac.ops.release_swfw_sync(hw, gssr); - - return status; -} - -/** - * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register - * without SWFW lock - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 5 bit device type - * @phy_data: Data to write to the PHY register - **/ -s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data) -{ - u32 i, command; - - /* Put the data in the MDI single read and write data register*/ - IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); - - /* Setup and write the address cycle command */ - command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); - - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - - /* - * Check every 10 usec to see if the address cycle completed. 
- * The MDI Command bit will clear when the operation is - * complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - usec_delay(10); - - command = IXGBE_READ_REG(hw, IXGBE_MSCA); - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; - } - - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address cmd didn't complete\n"); - return IXGBE_ERR_PHY; - } - - /* - * Address cycle complete, setup and write the write - * command - */ - command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); - - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - - /* - * Check every 10 usec to see if the address cycle - * completed. The MDI Command bit will clear when the - * operation is complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - usec_delay(10); - - command = IXGBE_READ_REG(hw, IXGBE_MSCA); - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; - } - - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY write cmd didn't complete\n"); - return IXGBE_ERR_PHY; - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register - * using SWFW lock- this function is needed in most cases - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 5 bit device type - * @phy_data: Data to write to the PHY register - **/ -s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data) -{ - s32 status; - u32 gssr = hw->phy.phy_semaphore_mask; - - DEBUGFUNC("ixgbe_write_phy_reg_generic"); - - if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) { - status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, - phy_data); - hw->mac.ops.release_swfw_sync(hw, gssr); - } else { - status = IXGBE_ERR_SWFW_SYNC; - } - - return 
status; -} - -/** - * ixgbe_setup_phy_link_generic - Set and restart auto-neg - * @hw: pointer to hardware structure - * - * Restart auto-negotiation and PHY and waits for completion. - **/ -s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_SUCCESS; - u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; - bool autoneg = false; - ixgbe_link_speed speed; - - DEBUGFUNC("ixgbe_setup_phy_link_generic"); - - ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); - - /* Set or unset auto-negotiation 10G advertisement */ - hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_reg); - - autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; - if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) && - (speed & IXGBE_LINK_SPEED_10GB_FULL)) - autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; - - hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - autoneg_reg); - - hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_reg); - - if (hw->mac.type == ixgbe_mac_X550) { - /* Set or unset auto-negotiation 5G advertisement */ - autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE; - if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) && - (speed & IXGBE_LINK_SPEED_5GB_FULL)) - autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE; - - /* Set or unset auto-negotiation 2.5G advertisement */ - autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE; - if ((hw->phy.autoneg_advertised & - IXGBE_LINK_SPEED_2_5GB_FULL) && - (speed & IXGBE_LINK_SPEED_2_5GB_FULL)) - autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE; - } - - /* Set or unset auto-negotiation 1G advertisement */ - autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; - if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) && - (speed & IXGBE_LINK_SPEED_1GB_FULL)) - autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; - - hw->phy.ops.write_reg(hw, 
IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - autoneg_reg); - - /* Set or unset auto-negotiation 100M advertisement */ - hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_reg); - - autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE | - IXGBE_MII_100BASE_T_ADVERTISE_HALF); - if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) && - (speed & IXGBE_LINK_SPEED_100_FULL)) - autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; - - hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - autoneg_reg); - - /* Blocked by MNG FW so don't reset PHY */ - if (ixgbe_check_reset_blocked(hw)) - return status; - - /* Restart PHY auto-negotiation. */ - hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); - - autoneg_reg |= IXGBE_MII_RESTART; - - hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); - - return status; -} - -/** - * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities - * @hw: pointer to hardware structure - * @speed: new link speed - **/ -s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) -{ - UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); - - DEBUGFUNC("ixgbe_setup_phy_link_speed_generic"); - - /* - * Clear autoneg_advertised and set new values based on input link - * speed. 
- */ - hw->phy.autoneg_advertised = 0; - - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; - - if (speed & IXGBE_LINK_SPEED_5GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL; - - if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; - - if (speed & IXGBE_LINK_SPEED_100_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; - - if (speed & IXGBE_LINK_SPEED_10_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL; - - /* Setup link based on the new speed settings */ - ixgbe_setup_phy_link(hw); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_get_copper_speeds_supported - Get copper link speeds from phy - * @hw: pointer to hardware structure - * - * Determines the supported link capabilities by reading the PHY auto - * negotiation register. - **/ -static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) -{ - s32 status; - u16 speed_ability; - - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - &speed_ability); - if (status) - return status; - - if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) - hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL; - if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G) - hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL; - if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M) - hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; - - switch (hw->mac.type) { - case ixgbe_mac_X550: - hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL; - hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; - break; - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; - break; - default: - break; - } - - return status; -} - -/** - * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities - * @hw: 
pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: boolean auto-negotiation value - **/ -s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) -{ - s32 status = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic"); - - *autoneg = true; - if (!hw->phy.speeds_supported) - status = ixgbe_get_copper_speeds_supported(hw); - - *speed = hw->phy.speeds_supported; - return status; -} - -/** - * ixgbe_check_phy_link_tnx - Determine link and speed status - * @hw: pointer to hardware structure - * - * Reads the VS1 register to determine if link is up and the current speed for - * the PHY. - **/ -s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up) -{ - s32 status = IXGBE_SUCCESS; - u32 time_out; - u32 max_time_out = 10; - u16 phy_link = 0; - u16 phy_speed = 0; - u16 phy_data = 0; - - DEBUGFUNC("ixgbe_check_phy_link_tnx"); - - /* Initialize speed and link to default case */ - *link_up = false; - *speed = IXGBE_LINK_SPEED_10GB_FULL; - - /* - * Check current speed and link status of the PHY register. - * This is a vendor specific register and may have to - * be changed for other copper PHYs. - */ - for (time_out = 0; time_out < max_time_out; time_out++) { - usec_delay(10); - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - &phy_data); - phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; - phy_speed = phy_data & - IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; - if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { - *link_up = true; - if (phy_speed == - IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) - *speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - } - } - - return status; -} - -/** - * ixgbe_setup_phy_link_tnx - Set and restart auto-neg - * @hw: pointer to hardware structure - * - * Restart auto-negotiation and PHY and waits for completion. 
- **/ -s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_SUCCESS; - u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; - bool autoneg = false; - ixgbe_link_speed speed; - - DEBUGFUNC("ixgbe_setup_phy_link_tnx"); - - ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); - - if (speed & IXGBE_LINK_SPEED_10GB_FULL) { - /* Set or unset auto-negotiation 10G advertisement */ - hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_reg); - - autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; - - hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - autoneg_reg); - } - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) { - /* Set or unset auto-negotiation 1G advertisement */ - hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_reg); - - autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) - autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; - - hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - autoneg_reg); - } - - if (speed & IXGBE_LINK_SPEED_100_FULL) { - /* Set or unset auto-negotiation 100M advertisement */ - hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_reg); - - autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) - autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; - - hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - autoneg_reg); - } - - /* Blocked by MNG FW so don't reset PHY */ - if (ixgbe_check_reset_blocked(hw)) - return status; - - /* Restart PHY auto-negotiation. 
*/ - hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); - - autoneg_reg |= IXGBE_MII_RESTART; - - hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); - - return status; -} - -/** - * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version - * @hw: pointer to hardware structure - * @firmware_version: pointer to the PHY Firmware Version - **/ -s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, - u16 *firmware_version) -{ - s32 status; - - DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx"); - - status = hw->phy.ops.read_reg(hw, TNX_FW_REV, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - firmware_version); - - return status; -} - -/** - * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version - * @hw: pointer to hardware structure - * @firmware_version: pointer to the PHY Firmware Version - **/ -s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, - u16 *firmware_version) -{ - s32 status; - - DEBUGFUNC("ixgbe_get_phy_firmware_version_generic"); - - status = hw->phy.ops.read_reg(hw, AQ_FW_REV, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - firmware_version); - - return status; -} - -/** - * ixgbe_reset_phy_nl - Performs a PHY reset - * @hw: pointer to hardware structure - **/ -s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) -{ - u16 phy_offset, control, eword, edata, block_crc; - bool end_data = false; - u16 list_offset, data_offset; - u16 phy_data = 0; - s32 ret_val = IXGBE_SUCCESS; - u32 i; - - DEBUGFUNC("ixgbe_reset_phy_nl"); - - /* Blocked by MNG FW so bail */ - if (ixgbe_check_reset_blocked(hw)) - goto out; - - hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, - IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); - - /* reset the PHY and poll for completion */ - hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, - IXGBE_MDIO_PHY_XS_DEV_TYPE, - (phy_data | IXGBE_MDIO_PHY_XS_RESET)); - - for (i = 0; i < 100; i++) { - hw->phy.ops.read_reg(hw, 
IXGBE_MDIO_PHY_XS_CONTROL, - IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); - if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0) - break; - msec_delay(10); - } - - if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) { - DEBUGOUT("PHY reset did not complete.\n"); - ret_val = IXGBE_ERR_PHY; - goto out; - } - - /* Get init offsets */ - ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, - &data_offset); - if (ret_val != IXGBE_SUCCESS) - goto out; - - ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); - data_offset++; - while (!end_data) { - /* - * Read control word from PHY init contents offset - */ - ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); - if (ret_val) - goto err_eeprom; - control = (eword & IXGBE_CONTROL_MASK_NL) >> - IXGBE_CONTROL_SHIFT_NL; - edata = eword & IXGBE_DATA_MASK_NL; - switch (control) { - case IXGBE_DELAY_NL: - data_offset++; - DEBUGOUT1("DELAY: %d MS\n", edata); - msec_delay(edata); - break; - case IXGBE_DATA_NL: - DEBUGOUT("DATA:\n"); - data_offset++; - ret_val = hw->eeprom.ops.read(hw, data_offset, - &phy_offset); - if (ret_val) - goto err_eeprom; - data_offset++; - for (i = 0; i < edata; i++) { - ret_val = hw->eeprom.ops.read(hw, data_offset, - &eword); - if (ret_val) - goto err_eeprom; - hw->phy.ops.write_reg(hw, phy_offset, - IXGBE_TWINAX_DEV, eword); - DEBUGOUT2("Wrote %4.4x to %4.4x\n", eword, - phy_offset); - data_offset++; - phy_offset++; - } - break; - case IXGBE_CONTROL_NL: - data_offset++; - DEBUGOUT("CONTROL:\n"); - if (edata == IXGBE_CONTROL_EOL_NL) { - DEBUGOUT("EOL\n"); - end_data = true; - } else if (edata == IXGBE_CONTROL_SOL_NL) { - DEBUGOUT("SOL\n"); - } else { - DEBUGOUT("Bad control value\n"); - ret_val = IXGBE_ERR_PHY; - goto out; - } - break; - default: - DEBUGOUT("Bad control type\n"); - ret_val = IXGBE_ERR_PHY; - goto out; - } - } - -out: - return ret_val; - -err_eeprom: - ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, - "eeprom read at offset %d failed", data_offset); - return IXGBE_ERR_PHY; -} - -/** - * 
ixgbe_identify_module_generic - Identifies module type - * @hw: pointer to hardware structure - * - * Determines HW type and calls appropriate function. - **/ -s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_ERR_SFP_NOT_PRESENT; - - DEBUGFUNC("ixgbe_identify_module_generic"); - - switch (hw->mac.ops.get_media_type(hw)) { - case ixgbe_media_type_fiber: - status = ixgbe_identify_sfp_module_generic(hw); - break; - - case ixgbe_media_type_fiber_qsfp: - status = ixgbe_identify_qsfp_module_generic(hw); - break; - - default: - hw->phy.sfp_type = ixgbe_sfp_type_not_present; - status = IXGBE_ERR_SFP_NOT_PRESENT; - break; - } - - return status; -} - -/** - * ixgbe_identify_sfp_module_generic - Identifies SFP modules - * @hw: pointer to hardware structure - * - * Searches for and identifies the SFP module and assigns appropriate PHY type. - **/ -s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_ERR_PHY_ADDR_INVALID; - u32 vendor_oui = 0; - enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; - u8 identifier = 0; - u8 comp_codes_1g = 0; - u8 comp_codes_10g = 0; - u8 oui_bytes[3] = {0, 0, 0}; - u8 cable_tech = 0; - u8 cable_spec = 0; - u16 enforce_sfp = 0; - - DEBUGFUNC("ixgbe_identify_sfp_module_generic"); - - if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { - hw->phy.sfp_type = ixgbe_sfp_type_not_present; - status = IXGBE_ERR_SFP_NOT_PRESENT; - goto out; - } - - /* LAN ID is needed for I2C access */ - hw->mac.ops.set_lan_id(hw); - - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_IDENTIFIER, - &identifier); - - if (status != IXGBE_SUCCESS) - goto err_read_i2c_eeprom; - - if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { - hw->phy.type = ixgbe_phy_sfp_unsupported; - status = IXGBE_ERR_SFP_NOT_SUPPORTED; - } else { - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_1GBE_COMP_CODES, - &comp_codes_1g); - - if (status != IXGBE_SUCCESS) - goto err_read_i2c_eeprom; - - status = 
hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_10GBE_COMP_CODES, - &comp_codes_10g); - - if (status != IXGBE_SUCCESS) - goto err_read_i2c_eeprom; - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_CABLE_TECHNOLOGY, - &cable_tech); - - if (status != IXGBE_SUCCESS) - goto err_read_i2c_eeprom; - - /* ID Module - * ========= - * 0 SFP_DA_CU - * 1 SFP_SR - * 2 SFP_LR - * 3 SFP_DA_CORE0 - 82599-specific - * 4 SFP_DA_CORE1 - 82599-specific - * 5 SFP_SR/LR_CORE0 - 82599-specific - * 6 SFP_SR/LR_CORE1 - 82599-specific - * 7 SFP_act_lmt_DA_CORE0 - 82599-specific - * 8 SFP_act_lmt_DA_CORE1 - 82599-specific - * 9 SFP_1g_cu_CORE0 - 82599-specific - * 10 SFP_1g_cu_CORE1 - 82599-specific - * 11 SFP_1g_sx_CORE0 - 82599-specific - * 12 SFP_1g_sx_CORE1 - 82599-specific - */ - if (hw->mac.type == ixgbe_mac_82598EB) { - if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) - hw->phy.sfp_type = ixgbe_sfp_type_da_cu; - else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) - hw->phy.sfp_type = ixgbe_sfp_type_sr; - else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) - hw->phy.sfp_type = ixgbe_sfp_type_lr; - else - hw->phy.sfp_type = ixgbe_sfp_type_unknown; - } else { - if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_da_cu_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_da_cu_core1; - } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { - hw->phy.ops.read_i2c_eeprom( - hw, IXGBE_SFF_CABLE_SPEC_COMP, - &cable_spec); - if (cable_spec & - IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_da_act_lmt_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_da_act_lmt_core1; - } else { - hw->phy.sfp_type = - ixgbe_sfp_type_unknown; - } - } else if (comp_codes_10g & - (IXGBE_SFF_10GBASESR_CAPABLE | - IXGBE_SFF_10GBASELR_CAPABLE)) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_srlr_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_srlr_core1; - } else if 
(comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_1g_cu_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_1g_cu_core1; - } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_1g_sx_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_1g_sx_core1; - } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_1g_lx_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_1g_lx_core1; - } else { - hw->phy.sfp_type = ixgbe_sfp_type_unknown; - } - } - - if (hw->phy.sfp_type != stored_sfp_type) - hw->phy.sfp_setup_needed = true; - - /* Determine if the SFP+ PHY is dual speed or not. */ - hw->phy.multispeed_fiber = false; - if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && - (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || - ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && - (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) - hw->phy.multispeed_fiber = true; - - /* Determine PHY vendor */ - if (hw->phy.type != ixgbe_phy_nl) { - hw->phy.id = identifier; - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_VENDOR_OUI_BYTE0, - &oui_bytes[0]); - - if (status != IXGBE_SUCCESS) - goto err_read_i2c_eeprom; - - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_VENDOR_OUI_BYTE1, - &oui_bytes[1]); - - if (status != IXGBE_SUCCESS) - goto err_read_i2c_eeprom; - - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_VENDOR_OUI_BYTE2, - &oui_bytes[2]); - - if (status != IXGBE_SUCCESS) - goto err_read_i2c_eeprom; - - vendor_oui = - ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | - (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | - (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); - - switch (vendor_oui) { - case IXGBE_SFF_VENDOR_OUI_TYCO: - if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) - hw->phy.type = - ixgbe_phy_sfp_passive_tyco; - break; - case IXGBE_SFF_VENDOR_OUI_FTL: - if 
(cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) - hw->phy.type = ixgbe_phy_sfp_ftl_active; - else - hw->phy.type = ixgbe_phy_sfp_ftl; - break; - case IXGBE_SFF_VENDOR_OUI_AVAGO: - hw->phy.type = ixgbe_phy_sfp_avago; - break; - case IXGBE_SFF_VENDOR_OUI_INTEL: - hw->phy.type = ixgbe_phy_sfp_intel; - break; - default: - if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) - hw->phy.type = - ixgbe_phy_sfp_passive_unknown; - else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) - hw->phy.type = - ixgbe_phy_sfp_active_unknown; - else - hw->phy.type = ixgbe_phy_sfp_unknown; - break; - } - } - - /* Allow any DA cable vendor */ - if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | - IXGBE_SFF_DA_ACTIVE_CABLE)) { - status = IXGBE_SUCCESS; - goto out; - } - - /* Verify supported 1G SFP modules */ - if (comp_codes_10g == 0 && - !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { - hw->phy.type = ixgbe_phy_sfp_unsupported; - status = IXGBE_ERR_SFP_NOT_SUPPORTED; - goto out; - } - - /* Anything else 82598-based is supported */ - if (hw->mac.type == ixgbe_mac_82598EB) { - status = IXGBE_SUCCESS; - goto out; - } - - ixgbe_get_device_caps(hw, &enforce_sfp); - if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && - !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { - /* Make sure we're a supported PHY type */ - if (hw->phy.type == ixgbe_phy_sfp_intel) { - status = IXGBE_SUCCESS; - } else { - if (hw->allow_unsupported_sfp == true) { - EWARN(hw, "WARNING: Intel (R) Network Connections are 
quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); - status = IXGBE_SUCCESS; - } else { - DEBUGOUT("SFP+ module not supported\n"); - hw->phy.type = - ixgbe_phy_sfp_unsupported; - status = IXGBE_ERR_SFP_NOT_SUPPORTED; - } - } - } else { - status = IXGBE_SUCCESS; - } - } - -out: - return status; - -err_read_i2c_eeprom: - hw->phy.sfp_type = ixgbe_sfp_type_not_present; - if (hw->phy.type != ixgbe_phy_nl) { - hw->phy.id = 0; - hw->phy.type = ixgbe_phy_unknown; - } - return IXGBE_ERR_SFP_NOT_PRESENT; -} - -/** - * ixgbe_get_supported_phy_sfp_layer_generic - Returns physical layer type - * @hw: pointer to hardware structure - * - * Determines physical layer capabilities of the current SFP. - */ -u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw) -{ - u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; - u8 comp_codes_10g = 0; - u8 comp_codes_1g = 0; - - DEBUGFUNC("ixgbe_get_supported_phy_sfp_layer_generic"); - - hw->phy.ops.identify_sfp(hw); - if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) - return physical_layer; - - switch (hw->phy.type) { - case ixgbe_phy_sfp_passive_tyco: - case ixgbe_phy_sfp_passive_unknown: - case ixgbe_phy_qsfp_passive_unknown: - physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; - break; - case ixgbe_phy_sfp_ftl_active: - case ixgbe_phy_sfp_active_unknown: - case ixgbe_phy_qsfp_active_unknown: - physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; - break; - case ixgbe_phy_sfp_avago: - case ixgbe_phy_sfp_ftl: - case ixgbe_phy_sfp_intel: - case ixgbe_phy_sfp_unknown: - hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); - hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); - if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; - 
else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; - else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) - physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; - else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) - physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX; - break; - case ixgbe_phy_qsfp_intel: - case ixgbe_phy_qsfp_unknown: - hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g); - if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; - else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; - break; - default: - break; - } - - return physical_layer; -} - -/** - * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules - * @hw: pointer to hardware structure - * - * Searches for and identifies the QSFP module and assigns appropriate PHY type - **/ -s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_ERR_PHY_ADDR_INVALID; - u32 vendor_oui = 0; - enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; - u8 identifier = 0; - u8 comp_codes_1g = 0; - u8 comp_codes_10g = 0; - u8 oui_bytes[3] = {0, 0, 0}; - u16 enforce_sfp = 0; - u8 connector = 0; - u8 cable_length = 0; - u8 device_tech = 0; - bool active_cable = false; - - DEBUGFUNC("ixgbe_identify_qsfp_module_generic"); - - if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) { - hw->phy.sfp_type = ixgbe_sfp_type_not_present; - status = IXGBE_ERR_SFP_NOT_PRESENT; - goto out; - } - - /* LAN ID is needed for I2C access */ - hw->mac.ops.set_lan_id(hw); - - status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, - &identifier); - - if (status != IXGBE_SUCCESS) - goto err_read_i2c_eeprom; - - if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) { - hw->phy.type = ixgbe_phy_sfp_unsupported; - status = IXGBE_ERR_SFP_NOT_SUPPORTED; - goto out; - } - - hw->phy.id = identifier; - - status = 
hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP, - &comp_codes_10g); - - if (status != IXGBE_SUCCESS) - goto err_read_i2c_eeprom; - - status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP, - &comp_codes_1g); - - if (status != IXGBE_SUCCESS) - goto err_read_i2c_eeprom; - - if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) { - hw->phy.type = ixgbe_phy_qsfp_passive_unknown; - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0; - else - hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1; - } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | - IXGBE_SFF_10GBASELR_CAPABLE)) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0; - else - hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1; - } else { - if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE) - active_cable = true; - - if (!active_cable) { - /* check for active DA cables that pre-date - * SFF-8436 v3.6 */ - hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_QSFP_CONNECTOR, - &connector); - - hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_QSFP_CABLE_LENGTH, - &cable_length); - - hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_QSFP_DEVICE_TECH, - &device_tech); - - if ((connector == - IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) && - (cable_length > 0) && - ((device_tech >> 4) == - IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL)) - active_cable = true; - } - - if (active_cable) { - hw->phy.type = ixgbe_phy_qsfp_active_unknown; - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_da_act_lmt_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_da_act_lmt_core1; - } else { - /* unsupported module type */ - hw->phy.type = ixgbe_phy_sfp_unsupported; - status = IXGBE_ERR_SFP_NOT_SUPPORTED; - goto out; - } - } - - if (hw->phy.sfp_type != stored_sfp_type) - hw->phy.sfp_setup_needed = true; - - /* Determine if the QSFP+ PHY is dual speed or not. 
*/ - hw->phy.multispeed_fiber = false; - if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && - (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || - ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && - (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) - hw->phy.multispeed_fiber = true; - - /* Determine PHY vendor for optical modules */ - if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | - IXGBE_SFF_10GBASELR_CAPABLE)) { - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0, - &oui_bytes[0]); - - if (status != IXGBE_SUCCESS) - goto err_read_i2c_eeprom; - - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1, - &oui_bytes[1]); - - if (status != IXGBE_SUCCESS) - goto err_read_i2c_eeprom; - - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2, - &oui_bytes[2]); - - if (status != IXGBE_SUCCESS) - goto err_read_i2c_eeprom; - - vendor_oui = - ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | - (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | - (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); - - if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL) - hw->phy.type = ixgbe_phy_qsfp_intel; - else - hw->phy.type = ixgbe_phy_qsfp_unknown; - - ixgbe_get_device_caps(hw, &enforce_sfp); - if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { - /* Make sure we're a supported PHY type */ - if (hw->phy.type == ixgbe_phy_qsfp_intel) { - status = IXGBE_SUCCESS; - } else { - if (hw->allow_unsupported_sfp == true) { - EWARN(hw, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. 
Intel Corporation is not responsible for any harm caused by using untested modules.\n"); - status = IXGBE_SUCCESS; - } else { - DEBUGOUT("QSFP module not supported\n"); - hw->phy.type = - ixgbe_phy_sfp_unsupported; - status = IXGBE_ERR_SFP_NOT_SUPPORTED; - } - } - } else { - status = IXGBE_SUCCESS; - } - } - -out: - return status; - -err_read_i2c_eeprom: - hw->phy.sfp_type = ixgbe_sfp_type_not_present; - hw->phy.id = 0; - hw->phy.type = ixgbe_phy_unknown; - - return IXGBE_ERR_SFP_NOT_PRESENT; -} - -/** - * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence - * @hw: pointer to hardware structure - * @list_offset: offset to the SFP ID list - * @data_offset: offset to the SFP data block - * - * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if - * so it returns the offsets to the phy init sequence block. - **/ -s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, - u16 *list_offset, - u16 *data_offset) -{ - u16 sfp_id; - u16 sfp_type = hw->phy.sfp_type; - - DEBUGFUNC("ixgbe_get_sfp_init_sequence_offsets"); - - if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) - return IXGBE_ERR_SFP_NOT_SUPPORTED; - - if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) - return IXGBE_ERR_SFP_NOT_PRESENT; - - if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) && - (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)) - return IXGBE_ERR_SFP_NOT_SUPPORTED; - - /* - * Limiting active cables and 1G Phys must be initialized as - * SR modules - */ - if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || - sfp_type == ixgbe_sfp_type_1g_lx_core0 || - sfp_type == ixgbe_sfp_type_1g_cu_core0 || - sfp_type == ixgbe_sfp_type_1g_sx_core0) - sfp_type = ixgbe_sfp_type_srlr_core0; - else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || - sfp_type == ixgbe_sfp_type_1g_lx_core1 || - sfp_type == ixgbe_sfp_type_1g_cu_core1 || - sfp_type == ixgbe_sfp_type_1g_sx_core1) - sfp_type = ixgbe_sfp_type_srlr_core1; - - /* Read offset to PHY init contents */ - if 
(hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) { - ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, - "eeprom read at offset %d failed", - IXGBE_PHY_INIT_OFFSET_NL); - return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; - } - - if ((!*list_offset) || (*list_offset == 0xFFFF)) - return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; - - /* Shift offset to first ID word */ - (*list_offset)++; - - /* - * Find the matching SFP ID in the EEPROM - * and program the init sequence - */ - if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) - goto err_phy; - - while (sfp_id != IXGBE_PHY_INIT_END_NL) { - if (sfp_id == sfp_type) { - (*list_offset)++; - if (hw->eeprom.ops.read(hw, *list_offset, data_offset)) - goto err_phy; - if ((!*data_offset) || (*data_offset == 0xFFFF)) { - DEBUGOUT("SFP+ module not supported\n"); - return IXGBE_ERR_SFP_NOT_SUPPORTED; - } else { - break; - } - } else { - (*list_offset) += 2; - if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) - goto err_phy; - } - } - - if (sfp_id == IXGBE_PHY_INIT_END_NL) { - DEBUGOUT("No matching SFP+ module found\n"); - return IXGBE_ERR_SFP_NOT_SUPPORTED; - } - - return IXGBE_SUCCESS; - -err_phy: - ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, - "eeprom read at offset %d failed", *list_offset); - return IXGBE_ERR_PHY; -} - -/** - * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface - * @hw: pointer to hardware structure - * @byte_offset: EEPROM byte offset to read - * @eeprom_data: value read - * - * Performs byte read operation to SFP module's EEPROM over I2C interface. 
- **/ -s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 *eeprom_data) -{ - DEBUGFUNC("ixgbe_read_i2c_eeprom_generic"); - - return hw->phy.ops.read_i2c_byte(hw, byte_offset, - IXGBE_I2C_EEPROM_DEV_ADDR, - eeprom_data); -} - -/** - * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface - * @hw: pointer to hardware structure - * @byte_offset: byte offset at address 0xA2 - * @eeprom_data: value read - * - * Performs byte read operation to SFP module's SFF-8472 data over I2C - **/ -STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 *sff8472_data) -{ - return hw->phy.ops.read_i2c_byte(hw, byte_offset, - IXGBE_I2C_EEPROM_DEV_ADDR2, - sff8472_data); -} - -/** - * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface - * @hw: pointer to hardware structure - * @byte_offset: EEPROM byte offset to write - * @eeprom_data: value to write - * - * Performs byte write operation to SFP module's EEPROM over I2C interface. 
- **/ -s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 eeprom_data) -{ - DEBUGFUNC("ixgbe_write_i2c_eeprom_generic"); - - return hw->phy.ops.write_i2c_byte(hw, byte_offset, - IXGBE_I2C_EEPROM_DEV_ADDR, - eeprom_data); -} - -/** - * ixgbe_is_sfp_probe - Returns true if SFP is being detected - * @hw: pointer to hardware structure - * @offset: eeprom offset to be read - * @addr: I2C address to be read - */ -STATIC bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) -{ - if (addr == IXGBE_I2C_EEPROM_DEV_ADDR && - offset == IXGBE_SFF_IDENTIFIER && - hw->phy.sfp_type == ixgbe_sfp_type_not_present) - return true; - return false; -} - -/** - * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to read - * @data: value read - * @lock: true if to take and release semaphore - * - * Performs byte read operation to SFP module's EEPROM over I2C interface at - * a specified device address. 
- **/ -STATIC s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data, bool lock) -{ - s32 status; - u32 max_retry = 10; - u32 retry = 0; - u32 swfw_mask = hw->phy.phy_semaphore_mask; - bool nack = 1; - *data = 0; - - DEBUGFUNC("ixgbe_read_i2c_byte_generic"); - - if (hw->mac.type >= ixgbe_mac_X550) - max_retry = 3; - if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) - max_retry = IXGBE_SFP_DETECT_RETRIES; - - do { - if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) - return IXGBE_ERR_SWFW_SYNC; - - ixgbe_i2c_start(hw); - - /* Device Address and write indication */ - status = ixgbe_clock_out_i2c_byte(hw, dev_addr); - if (status != IXGBE_SUCCESS) - goto fail; - - status = ixgbe_get_i2c_ack(hw); - if (status != IXGBE_SUCCESS) - goto fail; - - status = ixgbe_clock_out_i2c_byte(hw, byte_offset); - if (status != IXGBE_SUCCESS) - goto fail; - - status = ixgbe_get_i2c_ack(hw); - if (status != IXGBE_SUCCESS) - goto fail; - - ixgbe_i2c_start(hw); - - /* Device Address and read indication */ - status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1)); - if (status != IXGBE_SUCCESS) - goto fail; - - status = ixgbe_get_i2c_ack(hw); - if (status != IXGBE_SUCCESS) - goto fail; - - status = ixgbe_clock_in_i2c_byte(hw, data); - if (status != IXGBE_SUCCESS) - goto fail; - - status = ixgbe_clock_out_i2c_bit(hw, nack); - if (status != IXGBE_SUCCESS) - goto fail; - - ixgbe_i2c_stop(hw); - if (lock) - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - return IXGBE_SUCCESS; - -fail: - ixgbe_i2c_bus_clear(hw); - if (lock) { - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - msec_delay(100); - } - retry++; - if (retry < max_retry) - DEBUGOUT("I2C byte read error - Retrying.\n"); - else - DEBUGOUT("I2C byte read error.\n"); - - } while (retry < max_retry); - - return status; -} - -/** - * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to read - * @data: value read - * - 
* Performs byte read operation to SFP module's EEPROM over I2C interface at - * a specified device address. - **/ -s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data) -{ - return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, - data, true); -} - -/** - * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to read - * @data: value read - * - * Performs byte read operation to SFP module's EEPROM over I2C interface at - * a specified device address. - **/ -s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data) -{ - return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, - data, false); -} - -/** - * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to write - * @data: value to write - * @lock: true if to take and release semaphore - * - * Performs byte write operation to SFP module's EEPROM over I2C interface at - * a specified device address. 
- **/ -STATIC s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data, bool lock) -{ - s32 status; - u32 max_retry = 1; - u32 retry = 0; - u32 swfw_mask = hw->phy.phy_semaphore_mask; - - DEBUGFUNC("ixgbe_write_i2c_byte_generic"); - - if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != - IXGBE_SUCCESS) - return IXGBE_ERR_SWFW_SYNC; - - do { - ixgbe_i2c_start(hw); - - status = ixgbe_clock_out_i2c_byte(hw, dev_addr); - if (status != IXGBE_SUCCESS) - goto fail; - - status = ixgbe_get_i2c_ack(hw); - if (status != IXGBE_SUCCESS) - goto fail; - - status = ixgbe_clock_out_i2c_byte(hw, byte_offset); - if (status != IXGBE_SUCCESS) - goto fail; - - status = ixgbe_get_i2c_ack(hw); - if (status != IXGBE_SUCCESS) - goto fail; - - status = ixgbe_clock_out_i2c_byte(hw, data); - if (status != IXGBE_SUCCESS) - goto fail; - - status = ixgbe_get_i2c_ack(hw); - if (status != IXGBE_SUCCESS) - goto fail; - - ixgbe_i2c_stop(hw); - if (lock) - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - return IXGBE_SUCCESS; - -fail: - ixgbe_i2c_bus_clear(hw); - retry++; - if (retry < max_retry) - DEBUGOUT("I2C byte write error - Retrying.\n"); - else - DEBUGOUT("I2C byte write error.\n"); - } while (retry < max_retry); - - if (lock) - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - - return status; -} - -/** - * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to write - * @data: value to write - * - * Performs byte write operation to SFP module's EEPROM over I2C interface at - * a specified device address. 
- **/ -s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data) -{ - return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, - data, true); -} - -/** - * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to write - * @data: value to write - * - * Performs byte write operation to SFP module's EEPROM over I2C interface at - * a specified device address. - **/ -s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data) -{ - return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, - data, false); -} - -/** - * ixgbe_i2c_start - Sets I2C start condition - * @hw: pointer to hardware structure - * - * Sets I2C start condition (High -> Low on SDA while SCL is High) - * Set bit-bang mode on X550 hardware. - **/ -STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw) -{ - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); - - DEBUGFUNC("ixgbe_i2c_start"); - - i2cctl |= IXGBE_I2C_BB_EN_BY_MAC(hw); - - /* Start condition must begin with data and clock high */ - ixgbe_set_i2c_data(hw, &i2cctl, 1); - ixgbe_raise_i2c_clk(hw, &i2cctl); - - /* Setup time for start condition (4.7us) */ - usec_delay(IXGBE_I2C_T_SU_STA); - - ixgbe_set_i2c_data(hw, &i2cctl, 0); - - /* Hold time for start condition (4us) */ - usec_delay(IXGBE_I2C_T_HD_STA); - - ixgbe_lower_i2c_clk(hw, &i2cctl); - - /* Minimum low period of clock is 4.7 us */ - usec_delay(IXGBE_I2C_T_LOW); - -} - -/** - * ixgbe_i2c_stop - Sets I2C stop condition - * @hw: pointer to hardware structure - * - * Sets I2C stop condition (Low -> High on SDA while SCL is High) - * Disables bit-bang mode and negates data output enable on X550 - * hardware. 
- **/ -STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw) -{ - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); - u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); - u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); - u32 bb_en_bit = IXGBE_I2C_BB_EN_BY_MAC(hw); - - DEBUGFUNC("ixgbe_i2c_stop"); - - /* Stop condition must begin with data low and clock high */ - ixgbe_set_i2c_data(hw, &i2cctl, 0); - ixgbe_raise_i2c_clk(hw, &i2cctl); - - /* Setup time for stop condition (4us) */ - usec_delay(IXGBE_I2C_T_SU_STO); - - ixgbe_set_i2c_data(hw, &i2cctl, 1); - - /* bus free time between stop and start (4.7us)*/ - usec_delay(IXGBE_I2C_T_BUF); - - if (bb_en_bit || data_oe_bit || clk_oe_bit) { - i2cctl &= ~bb_en_bit; - i2cctl |= data_oe_bit | clk_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); - IXGBE_WRITE_FLUSH(hw); - } -} - -/** - * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C - * @hw: pointer to hardware structure - * @data: data byte to clock in - * - * Clocks in one byte data via I2C data/clock - **/ -STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) -{ - s32 i; - bool bit = 0; - - DEBUGFUNC("ixgbe_clock_in_i2c_byte"); - - *data = 0; - for (i = 7; i >= 0; i--) { - ixgbe_clock_in_i2c_bit(hw, &bit); - *data |= bit << i; - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C - * @hw: pointer to hardware structure - * @data: data byte clocked out - * - * Clocks out one byte data via I2C data/clock - **/ -STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) -{ - s32 status = IXGBE_SUCCESS; - s32 i; - u32 i2cctl; - bool bit; - - DEBUGFUNC("ixgbe_clock_out_i2c_byte"); - - for (i = 7; i >= 0; i--) { - bit = (data >> i) & 0x1; - status = ixgbe_clock_out_i2c_bit(hw, bit); - - if (status != IXGBE_SUCCESS) - break; - } - - /* Release SDA line (set high) */ - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); - i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); - i2cctl |= 
IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); - IXGBE_WRITE_FLUSH(hw); - - return status; -} - -/** - * ixgbe_get_i2c_ack - Polls for I2C ACK - * @hw: pointer to hardware structure - * - * Clocks in/out one bit via I2C data/clock - **/ -STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) -{ - u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); - s32 status = IXGBE_SUCCESS; - u32 i = 0; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); - u32 timeout = 10; - bool ack = 1; - - DEBUGFUNC("ixgbe_get_i2c_ack"); - - if (data_oe_bit) { - i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); - i2cctl |= data_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); - IXGBE_WRITE_FLUSH(hw); - } - ixgbe_raise_i2c_clk(hw, &i2cctl); - - /* Minimum high period of clock is 4us */ - usec_delay(IXGBE_I2C_T_HIGH); - - /* Poll for ACK. Note that ACK in I2C spec is - * transition from 1 to 0 */ - for (i = 0; i < timeout; i++) { - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); - ack = ixgbe_get_i2c_data(hw, &i2cctl); - - usec_delay(1); - if (!ack) - break; - } - - if (ack) { - DEBUGOUT("I2C ack was not received.\n"); - status = IXGBE_ERR_I2C; - } - - ixgbe_lower_i2c_clk(hw, &i2cctl); - - /* Minimum low period of clock is 4.7 us */ - usec_delay(IXGBE_I2C_T_LOW); - - return status; -} - -/** - * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock - * @hw: pointer to hardware structure - * @data: read data value - * - * Clocks in one bit via I2C data/clock - **/ -STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) -{ - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); - u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); - - DEBUGFUNC("ixgbe_clock_in_i2c_bit"); - - if (data_oe_bit) { - i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); - i2cctl |= data_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); - IXGBE_WRITE_FLUSH(hw); - } - ixgbe_raise_i2c_clk(hw, &i2cctl); - - /* Minimum high period of 
clock is 4us */ - usec_delay(IXGBE_I2C_T_HIGH); - - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); - *data = ixgbe_get_i2c_data(hw, &i2cctl); - - ixgbe_lower_i2c_clk(hw, &i2cctl); - - /* Minimum low period of clock is 4.7 us */ - usec_delay(IXGBE_I2C_T_LOW); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock - * @hw: pointer to hardware structure - * @data: data value to write - * - * Clocks out one bit via I2C data/clock - **/ -STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) -{ - s32 status; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); - - DEBUGFUNC("ixgbe_clock_out_i2c_bit"); - - status = ixgbe_set_i2c_data(hw, &i2cctl, data); - if (status == IXGBE_SUCCESS) { - ixgbe_raise_i2c_clk(hw, &i2cctl); - - /* Minimum high period of clock is 4us */ - usec_delay(IXGBE_I2C_T_HIGH); - - ixgbe_lower_i2c_clk(hw, &i2cctl); - - /* Minimum low period of clock is 4.7 us. - * This also takes care of the data hold time. - */ - usec_delay(IXGBE_I2C_T_LOW); - } else { - status = IXGBE_ERR_I2C; - ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, - "I2C data was not set to %X\n", data); - } - - return status; -} - -/** - * ixgbe_raise_i2c_clk - Raises the I2C SCL clock - * @hw: pointer to hardware structure - * @i2cctl: Current value of I2CCTL register - * - * Raises the I2C clock line '0'->'1' - * Negates the I2C clock output enable on X550 hardware. 
- **/ -STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) -{ - u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); - u32 i = 0; - u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT; - u32 i2cctl_r = 0; - - DEBUGFUNC("ixgbe_raise_i2c_clk"); - - if (clk_oe_bit) { - *i2cctl |= clk_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); - } - - for (i = 0; i < timeout; i++) { - *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw); - - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); - IXGBE_WRITE_FLUSH(hw); - /* SCL rise time (1000ns) */ - usec_delay(IXGBE_I2C_T_RISE); - - i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); - if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw)) - break; - } -} - -/** - * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock - * @hw: pointer to hardware structure - * @i2cctl: Current value of I2CCTL register - * - * Lowers the I2C clock line '1'->'0' - * Asserts the I2C clock output enable on X550 hardware. - **/ -STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) -{ - DEBUGFUNC("ixgbe_lower_i2c_clk"); - - *i2cctl &= ~(IXGBE_I2C_CLK_OUT_BY_MAC(hw)); - *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); - - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); - IXGBE_WRITE_FLUSH(hw); - - /* SCL fall time (300ns) */ - usec_delay(IXGBE_I2C_T_FALL); -} - -/** - * ixgbe_set_i2c_data - Sets the I2C data bit - * @hw: pointer to hardware structure - * @i2cctl: Current value of I2CCTL register - * @data: I2C data value (0 or 1) to set - * - * Sets the I2C data bit - * Asserts the I2C data output enable on X550 hardware. 
- **/ -STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) -{ - u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); - s32 status = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_set_i2c_data"); - - if (data) - *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); - else - *i2cctl &= ~(IXGBE_I2C_DATA_OUT_BY_MAC(hw)); - *i2cctl &= ~data_oe_bit; - - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); - IXGBE_WRITE_FLUSH(hw); - - /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ - usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); - - if (!data) /* Can't verify data in this case */ - return IXGBE_SUCCESS; - if (data_oe_bit) { - *i2cctl |= data_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); - IXGBE_WRITE_FLUSH(hw); - } - - /* Verify data was set correctly */ - *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); - if (data != ixgbe_get_i2c_data(hw, i2cctl)) { - status = IXGBE_ERR_I2C; - ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, - "Error - I2C data was not set to %X.\n", - data); - } - - return status; -} - -/** - * ixgbe_get_i2c_data - Reads the I2C SDA data bit - * @hw: pointer to hardware structure - * @i2cctl: Current value of I2CCTL register - * - * Returns the I2C data bit value - * Negates the I2C data output enable on X550 hardware. - **/ -STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) -{ - u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); - bool data; - - DEBUGFUNC("ixgbe_get_i2c_data"); - - if (data_oe_bit) { - *i2cctl |= data_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); - IXGBE_WRITE_FLUSH(hw); - usec_delay(IXGBE_I2C_T_FALL); - } - - if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw)) - data = 1; - else - data = 0; - - return data; -} - -/** - * ixgbe_i2c_bus_clear - Clears the I2C bus - * @hw: pointer to hardware structure - * - * Clears the I2C bus by sending nine clock pulses. - * Used when data line is stuck low. 
- **/ -void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) -{ - u32 i2cctl; - u32 i; - - DEBUGFUNC("ixgbe_i2c_bus_clear"); - - ixgbe_i2c_start(hw); - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); - - ixgbe_set_i2c_data(hw, &i2cctl, 1); - - for (i = 0; i < 9; i++) { - ixgbe_raise_i2c_clk(hw, &i2cctl); - - /* Min high period of clock is 4us */ - usec_delay(IXGBE_I2C_T_HIGH); - - ixgbe_lower_i2c_clk(hw, &i2cctl); - - /* Min low period of clock is 4.7us*/ - usec_delay(IXGBE_I2C_T_LOW); - } - - ixgbe_i2c_start(hw); - - /* Put the i2c bus back to default state */ - ixgbe_i2c_stop(hw); -} - -/** - * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. - * @hw: pointer to hardware structure - * - * Checks if the LASI temp alarm status was triggered due to overtemp - **/ -s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_SUCCESS; - u16 phy_data = 0; - - DEBUGFUNC("ixgbe_tn_check_overtemp"); - - if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) - goto out; - - /* Check that the LASI temp alarm status was triggered */ - hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data); - - if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) - goto out; - - status = IXGBE_ERR_OVERTEMP; - ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Device over temperature"); -out: - return status; -} - -/** - * ixgbe_set_copper_phy_power - Control power for copper phy - * @hw: pointer to hardware structure - * @on: true for on, false for off - */ -s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) -{ - u32 status; - u16 reg; - - if (!on && ixgbe_mng_present(hw)) - return 0; - - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - ®); - if (status) - return status; - - if (on) { - reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; - } else { - if (ixgbe_check_reset_blocked(hw)) - return 0; - reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; - } - - status = hw->phy.ops.write_reg(hw, 
IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - reg); - return status; -} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.h deleted file mode 100644 index 445394ff9a32..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_phy.h +++ /dev/null @@ -1,209 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_PHY_H_ -#define _IXGBE_PHY_H_ - -#include "ixgbe_type.h" -#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 -#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 -#define IXGBE_I2C_EEPROM_BANK_LEN 0xFF - -/* EEPROM byte offsets */ -#define IXGBE_SFF_IDENTIFIER 0x0 -#define IXGBE_SFF_IDENTIFIER_SFP 0x3 -#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 -#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 -#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 -#define IXGBE_SFF_1GBE_COMP_CODES 0x6 -#define IXGBE_SFF_10GBE_COMP_CODES 0x3 -#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 -#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C -#define IXGBE_SFF_SFF_8472_SWAP 0x5C -#define IXGBE_SFF_SFF_8472_COMP 0x5E -#define IXGBE_SFF_SFF_8472_OSCB 0x6E -#define IXGBE_SFF_SFF_8472_ESCB 0x76 -#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD -#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 -#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 -#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 -#define IXGBE_SFF_QSFP_CONNECTOR 0x82 -#define IXGBE_SFF_QSFP_10GBE_COMP 0x83 -#define IXGBE_SFF_QSFP_1GBE_COMP 0x86 -#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92 -#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93 - -/* Bitmasks */ -#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 -#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 -#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 -#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 -#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 -#define IXGBE_SFF_1GBASET_CAPABLE 0x8 -#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 -#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 -#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 -#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 -#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 -#define IXGBE_SFF_ADDRESSING_MODE 0x4 -#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 -#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 -#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 -#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 -#define IXGBE_I2C_EEPROM_READ_MASK 0x100 -#define 
IXGBE_I2C_EEPROM_STATUS_MASK 0x3 -#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 -#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 -#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 -#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 - -#define IXGBE_CS4227 0xBE /* CS4227 address */ -#define IXGBE_CS4227_GLOBAL_ID_LSB 0 -#define IXGBE_CS4227_GLOBAL_ID_MSB 1 -#define IXGBE_CS4227_SCRATCH 2 -#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5 -#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F -#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */ -#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */ -#define IXGBE_CS4227_RESET_PENDING 0x1357 -#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 -#define IXGBE_CS4227_RETRIES 15 -#define IXGBE_CS4227_EFUSE_STATUS 0x0181 -#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */ -#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */ -#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */ -#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ -#define IXGBE_CS4227_EEPROM_STATUS 0x5001 -#define IXGBE_CS4227_EEPROM_LOAD_OK 0x0001 -#define IXGBE_CS4227_SPEED_1G 0x8000 -#define IXGBE_CS4227_SPEED_10G 0 -#define IXGBE_CS4227_EDC_MODE_CX1 0x0002 -#define IXGBE_CS4227_EDC_MODE_SR 0x0004 -#define IXGBE_CS4227_EDC_MODE_DIAG 0x0008 -#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */ -#define IXGBE_CS4227_RESET_DELAY 450 /* milliseconds */ -#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ -#define IXGBE_PE 0xE0 /* Port expander address */ -#define IXGBE_PE_OUTPUT 1 /* Output register offset */ -#define IXGBE_PE_CONFIG 3 /* Config register offset */ -#define IXGBE_PE_BIT1 (1 << 1) - -/* Flow control defines */ -#define IXGBE_TAF_SYM_PAUSE 0x400 -#define IXGBE_TAF_ASM_PAUSE 0x800 - -/* Bit-shift macros */ -#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 -#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 -#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 - -/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ -#define 
IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 -#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 -#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 -#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 - -/* I2C SDA and SCL timing parameters for standard mode */ -#define IXGBE_I2C_T_HD_STA 4 -#define IXGBE_I2C_T_LOW 5 -#define IXGBE_I2C_T_HIGH 4 -#define IXGBE_I2C_T_SU_STA 5 -#define IXGBE_I2C_T_HD_DATA 5 -#define IXGBE_I2C_T_SU_DATA 1 -#define IXGBE_I2C_T_RISE 1 -#define IXGBE_I2C_T_FALL 1 -#define IXGBE_I2C_T_SU_STO 4 -#define IXGBE_I2C_T_BUF 5 - -#ifndef IXGBE_SFP_DETECT_RETRIES -#define IXGBE_SFP_DETECT_RETRIES 10 - -#endif /* IXGBE_SFP_DETECT_RETRIES */ -#define IXGBE_TN_LASI_STATUS_REG 0x9005 -#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 - -/* SFP+ SFF-8472 Compliance */ -#define IXGBE_SFF_SFF_8472_UNSUP 0x00 - -s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); -bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); -enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); -s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); -s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); -s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); -s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, - u16 *phy_data); -s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, - u16 phy_data); -s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data); -s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data); -s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); -s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg); -s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw); - -/* PHY specific */ -s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up); 
-s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); -s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, - u16 *firmware_version); -s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, - u16 *firmware_version); - -s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); -s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); -s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); -s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); -u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw); -s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); -s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, - u16 *list_offset, - u16 *data_offset); -s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); -s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data); -s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data); -s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data); -s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data); -s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 *eeprom_data); -s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 eeprom_data); -void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); -s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, - u16 *val, bool lock); -s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, - u16 val, bool lock); -#endif /* _IXGBE_PHY_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_procfs.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_procfs.c deleted file mode 100644 index 54f0940a0ac8..000000000000 --- 
a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_procfs.c +++ /dev/null @@ -1,938 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe.h" -#include "ixgbe_common.h" -#include "ixgbe_type.h" - -#ifdef IXGBE_PROCFS -#ifndef IXGBE_SYSFS - -#include -#include -#include -#include -#include - -static struct proc_dir_entry *ixgbe_top_dir = NULL; - -static struct net_device_stats *procfs_get_stats(struct net_device *netdev) -{ -#ifndef HAVE_NETDEV_STATS_IN_NETDEV - struct ixgbe_adapter *adapter; -#endif - if (netdev == NULL) - return NULL; - -#ifdef HAVE_NETDEV_STATS_IN_NETDEV - /* only return the current stats */ - return &netdev->stats; -#else - adapter = netdev_priv(netdev); - - /* only return the current stats */ - return &adapter->net_stats; -#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ -} - -bool ixgbe_thermal_present(struct ixgbe_adapter *adapter) -{ - s32 status; - if (adapter == NULL) - return false; - status = ixgbe_init_thermal_sensor_thresh_generic(&(adapter->hw)); - if (status != IXGBE_SUCCESS) - 
return false; - - return true; -} - -static int ixgbe_fwbanner(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - - return snprintf(page, count, "%s\n", adapter->eeprom_id); -} - -static int ixgbe_porttype(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - return snprintf(page, count, "%d\n", - test_bit(__IXGBE_DOWN, &adapter->state)); -} - -static int ixgbe_portspeed(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - int speed = 0; - - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - - switch (adapter->link_speed) { - case IXGBE_LINK_SPEED_100_FULL: - speed = 1; - break; - case IXGBE_LINK_SPEED_1GB_FULL: - speed = 10; - break; - case IXGBE_LINK_SPEED_10GB_FULL: - speed = 100; - break; - } - return snprintf(page, count, "%d\n", speed); -} - -static int ixgbe_wqlflag(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - - return snprintf(page, count, "%d\n", adapter->wol); -} - -static int ixgbe_xflowctl(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - struct ixgbe_hw *hw; - - if (!adapter) - return snprintf(page, count, 
"error: no adapter\n"); - - hw = &adapter->hw; - - return snprintf(page, count, "%d\n", hw->fc.current_mode); -} - -static int ixgbe_rxdrops(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - struct net_device_stats *net_stats; - - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - net_stats = procfs_get_stats(adapter->netdev); - if (net_stats == NULL) - return snprintf(page, count, "error: no net stats\n"); - - return snprintf(page, count, "%lu\n", - net_stats->rx_dropped); -} - -static int ixgbe_rxerrors(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - struct net_device_stats *net_stats; - - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - net_stats = procfs_get_stats(adapter->netdev); - if (net_stats == NULL) - return snprintf(page, count, "error: no net stats\n"); - - return snprintf(page, count, "%lu\n", net_stats->rx_errors); -} - -static int ixgbe_rxupacks(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_hw *hw; - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - - if (!adapter) - return snprintf(page, count, "error: no adapter\n"); - - hw = &adapter->hw; - - return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_TPR)); -} - -static int ixgbe_rxmpacks(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_hw *hw; - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - - if (!adapter) - return snprintf(page, count, "error: no adapter\n"); - - hw = &adapter->hw; - - return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, 
IXGBE_MPRC)); -} - -static int ixgbe_rxbpacks(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_hw *hw; - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - - if (!adapter) - return snprintf(page, count, "error: no adapter\n"); - - hw = &adapter->hw; - - return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_BPRC)); -} - -static int ixgbe_txupacks(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_hw *hw; - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - - if (!adapter) - return snprintf(page, count, "error: no adapter\n"); - - hw = &adapter->hw; - - return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_TPT)); -} - -static int ixgbe_txmpacks(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_hw *hw; - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - - if (!adapter) - return snprintf(page, count, "error: no adapter\n"); - - hw = &adapter->hw; - - return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_MPTC)); -} - -static int ixgbe_txbpacks(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_hw *hw; - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - - if (!adapter) - return snprintf(page, count, "error: no adapter\n"); - - hw = &adapter->hw; - - return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_BPTC)); -} - -static int ixgbe_txerrors(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - struct net_device_stats *net_stats; - - if (adapter == NULL) - return snprintf(page, count, "error: no 
adapter\n"); - net_stats = procfs_get_stats(adapter->netdev); - if (net_stats == NULL) - return snprintf(page, count, "error: no net stats\n"); - - return snprintf(page, count, "%lu\n", - net_stats->tx_errors); -} - -static int ixgbe_txdrops(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - struct net_device_stats *net_stats; - - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - net_stats = procfs_get_stats(adapter->netdev); - if (net_stats == NULL) - return snprintf(page, count, "error: no net stats\n"); - - return snprintf(page, count, "%lu\n", - net_stats->tx_dropped); -} - -static int ixgbe_rxframes(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - struct net_device_stats *net_stats; - - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - net_stats = procfs_get_stats(adapter->netdev); - if (net_stats == NULL) - return snprintf(page, count, "error: no net stats\n"); - - return snprintf(page, count, "%lu\n", - net_stats->rx_packets); -} - -static int ixgbe_rxbytes(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - struct net_device_stats *net_stats; - - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - net_stats = procfs_get_stats(adapter->netdev); - if (net_stats == NULL) - return snprintf(page, count, "error: no net stats\n"); - - return snprintf(page, count, "%lu\n", - net_stats->rx_bytes); -} - -static int ixgbe_txframes(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct 
ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - struct net_device_stats *net_stats; - - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - net_stats = procfs_get_stats(adapter->netdev); - if (net_stats == NULL) - return snprintf(page, count, "error: no net stats\n"); - - return snprintf(page, count, "%lu\n", - net_stats->tx_packets); -} - -static int ixgbe_txbytes(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - struct net_device_stats *net_stats; - - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - net_stats = procfs_get_stats(adapter->netdev); - if (net_stats == NULL) - return snprintf(page, count, "error: no net stats\n"); - - return snprintf(page, count, "%lu\n", - net_stats->tx_bytes); -} - -static int ixgbe_linkstat(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_hw *hw; - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - int bitmask = 0; - u32 link_speed; - bool link_up = false; - - if (!adapter) - return snprintf(page, count, "error: no adapter\n"); - - hw = &adapter->hw; - - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - bitmask |= 1; - - if (hw->mac.ops.check_link) - hw->mac.ops.check_link(hw, &link_speed, &link_up, false); - else - /* always assume link is up, if no check link function */ - link_up = true; - if (link_up) - bitmask |= 2; - - if (adapter->old_lsc != adapter->lsc_int) { - bitmask |= 4; - adapter->old_lsc = adapter->lsc_int; - } - - return snprintf(page, count, "0x%X\n", bitmask); -} - -static int ixgbe_funcid(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - struct ixgbe_hw *hw; - - if 
(!adapter) - return snprintf(page, count, "error: no adapter\n"); - - hw = &adapter->hw; - - return snprintf(page, count, "0x%X\n", hw->bus.func); -} - -static int ixgbe_funcvers(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void __always_unused *data) -{ - return snprintf(page, count, "%s\n", ixgbe_driver_version); -} - -static int ixgbe_macburn(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_hw *hw; - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - - if (!adapter) - return snprintf(page, count, "error: no adapter\n"); - - hw = &adapter->hw; - - return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", - (unsigned int)hw->mac.perm_addr[0], - (unsigned int)hw->mac.perm_addr[1], - (unsigned int)hw->mac.perm_addr[2], - (unsigned int)hw->mac.perm_addr[3], - (unsigned int)hw->mac.perm_addr[4], - (unsigned int)hw->mac.perm_addr[5]); -} - -static int ixgbe_macadmn(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_hw *hw; - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - - if (!adapter) - return snprintf(page, count, "error: no adapter\n"); - - hw = &adapter->hw; - - return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", - (unsigned int)hw->mac.addr[0], - (unsigned int)hw->mac.addr[1], - (unsigned int)hw->mac.addr[2], - (unsigned int)hw->mac.addr[3], - (unsigned int)hw->mac.addr[4], - (unsigned int)hw->mac.addr[5]); -} - -static int ixgbe_maclla1(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - struct ixgbe_hw *hw; - int rc; - u16 eeprom_buff[6]; - u16 first_word = 0x37; - const u16 word_count = ARRAY_SIZE(eeprom_buff); - - if (!adapter) - return 
snprintf(page, count, "error: no adapter\n"); - - hw = &adapter->hw; - - rc = hw->eeprom.ops.read_buffer(hw, first_word, 1, &first_word); - if (rc != 0) - return snprintf(page, count, "error: reading pointer to the EEPROM\n"); - - if (first_word != 0x0000 && first_word != 0xFFFF) { - rc = hw->eeprom.ops.read_buffer(hw, first_word, word_count, - eeprom_buff); - if (rc != 0) - return snprintf(page, count, "error: reading buffer\n"); - } else { - memset(eeprom_buff, 0, sizeof(eeprom_buff)); - } - - switch (hw->bus.func) { - case 0: - return snprintf(page, count, "0x%04X%04X%04X\n", - eeprom_buff[0], - eeprom_buff[1], - eeprom_buff[2]); - case 1: - return snprintf(page, count, "0x%04X%04X%04X\n", - eeprom_buff[3], - eeprom_buff[4], - eeprom_buff[5]); - } - return snprintf(page, count, "unexpected port %d\n", hw->bus.func); -} - -static int ixgbe_mtusize(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - struct net_device *netdev; - - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - netdev = adapter->netdev; - if (netdev == NULL) - return snprintf(page, count, "error: no net device\n"); - - return snprintf(page, count, "%d\n", netdev->mtu); -} - -static int ixgbe_featflag(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - int bitmask = 0; - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - struct net_device *netdev; - - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - netdev = adapter->netdev; - if (netdev == NULL) - return snprintf(page, count, "error: no net device\n"); - if (adapter->netdev->features & NETIF_F_RXCSUM) - bitmask |= 1; - return snprintf(page, count, "%d\n", bitmask); -} - -static int ixgbe_lsominct(char *page, char __always_unused **start, - off_t __always_unused off, 
int count, - int __always_unused *eof, void __always_unused *data) -{ - return snprintf(page, count, "%d\n", 1); -} - -static int ixgbe_prommode(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - struct net_device *netdev; - - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - netdev = adapter->netdev; - if (netdev == NULL) - return snprintf(page, count, "error: no net device\n"); - - return snprintf(page, count, "%d\n", - netdev->flags & IFF_PROMISC); -} - -static int ixgbe_txdscqsz(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - - return snprintf(page, count, "%d\n", adapter->tx_ring[0]->count); -} - -static int ixgbe_rxdscqsz(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - - return snprintf(page, count, "%d\n", adapter->rx_ring[0]->count); -} - -static int ixgbe_rxqavg(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - int index; - int diff = 0; - u16 ntc; - u16 ntu; - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - - for (index = 0; index < adapter->num_rx_queues; index++) { - ntc = adapter->rx_ring[index]->next_to_clean; - ntu = adapter->rx_ring[index]->next_to_use; - - if (ntc >= ntu) - diff += (ntc - ntu); - else - diff += (adapter->rx_ring[index]->count - ntu + ntc); - } - if 
(adapter->num_rx_queues <= 0) - return snprintf(page, count, - "can't calculate, number of queues %d\n", - adapter->num_rx_queues); - return snprintf(page, count, "%d\n", diff/adapter->num_rx_queues); -} - -static int ixgbe_txqavg(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - int index; - int diff = 0; - u16 ntc; - u16 ntu; - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - - for (index = 0; index < adapter->num_tx_queues; index++) { - ntc = adapter->tx_ring[index]->next_to_clean; - ntu = adapter->tx_ring[index]->next_to_use; - - if (ntc >= ntu) - diff += (ntc - ntu); - else - diff += (adapter->tx_ring[index]->count - ntu + ntc); - } - if (adapter->num_tx_queues <= 0) - return snprintf(page, count, - "can't calculate, number of queues %d\n", - adapter->num_tx_queues); - return snprintf(page, count, "%d\n", - diff/adapter->num_tx_queues); -} - -static int ixgbe_iovotype(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void __always_unused *data) -{ - return snprintf(page, count, "2\n"); -} - -static int ixgbe_funcnbr(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - - return snprintf(page, count, "%d\n", adapter->num_vfs); -} - -static int ixgbe_pciebnbr(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - if (adapter == NULL) - return snprintf(page, count, "error: no adapter\n"); - - return snprintf(page, count, "%d\n", adapter->pdev->bus->number); -} - -static int 
ixgbe_therm_location(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_therm_proc_data *therm_data = - (struct ixgbe_therm_proc_data *)data; - - if (therm_data == NULL) - return snprintf(page, count, "error: no therm_data\n"); - - return snprintf(page, count, "%d\n", therm_data->sensor_data->location); -} - - -static int ixgbe_therm_maxopthresh(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_therm_proc_data *therm_data = - (struct ixgbe_therm_proc_data *)data; - - if (therm_data == NULL) - return snprintf(page, count, "error: no therm_data\n"); - - return snprintf(page, count, "%d\n", - therm_data->sensor_data->max_op_thresh); -} - - -static int ixgbe_therm_cautionthresh(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - struct ixgbe_therm_proc_data *therm_data = - (struct ixgbe_therm_proc_data *)data; - - if (therm_data == NULL) - return snprintf(page, count, "error: no therm_data\n"); - - return snprintf(page, count, "%d\n", - therm_data->sensor_data->caution_thresh); -} - -static int ixgbe_therm_temp(char *page, char __always_unused **start, - off_t __always_unused off, int count, - int __always_unused *eof, void *data) -{ - s32 status; - struct ixgbe_therm_proc_data *therm_data = - (struct ixgbe_therm_proc_data *)data; - - if (therm_data == NULL) - return snprintf(page, count, "error: no therm_data\n"); - - status = ixgbe_get_thermal_sensor_data_generic(therm_data->hw); - if (status != IXGBE_SUCCESS) - snprintf(page, count, "error: status %d returned\n", status); - - return snprintf(page, count, "%d\n", therm_data->sensor_data->temp); -} - - -struct ixgbe_proc_type { - char name[32]; - int (*read)(char*, char**, off_t, int, int*, void*); -}; - -struct ixgbe_proc_type ixgbe_proc_entries[] = { - {"fwbanner", 
&ixgbe_fwbanner}, - {"porttype", &ixgbe_porttype}, - {"portspeed", &ixgbe_portspeed}, - {"wqlflag", &ixgbe_wqlflag}, - {"xflowctl", &ixgbe_xflowctl}, - {"rxdrops", &ixgbe_rxdrops}, - {"rxerrors", &ixgbe_rxerrors}, - {"rxupacks", &ixgbe_rxupacks}, - {"rxmpacks", &ixgbe_rxmpacks}, - {"rxbpacks", &ixgbe_rxbpacks}, - {"txdrops", &ixgbe_txdrops}, - {"txerrors", &ixgbe_txerrors}, - {"txupacks", &ixgbe_txupacks}, - {"txmpacks", &ixgbe_txmpacks}, - {"txbpacks", &ixgbe_txbpacks}, - {"rxframes", &ixgbe_rxframes}, - {"rxbytes", &ixgbe_rxbytes}, - {"txframes", &ixgbe_txframes}, - {"txbytes", &ixgbe_txbytes}, - {"linkstat", &ixgbe_linkstat}, - {"funcid", &ixgbe_funcid}, - {"funcvers", &ixgbe_funcvers}, - {"macburn", &ixgbe_macburn}, - {"macadmn", &ixgbe_macadmn}, - {"maclla1", &ixgbe_maclla1}, - {"mtusize", &ixgbe_mtusize}, - {"featflag", &ixgbe_featflag}, - {"lsominct", &ixgbe_lsominct}, - {"prommode", &ixgbe_prommode}, - {"txdscqsz", &ixgbe_txdscqsz}, - {"rxdscqsz", &ixgbe_rxdscqsz}, - {"txqavg", &ixgbe_txqavg}, - {"rxqavg", &ixgbe_rxqavg}, - {"iovotype", &ixgbe_iovotype}, - {"funcnbr", &ixgbe_funcnbr}, - {"pciebnbr", &ixgbe_pciebnbr}, - {"", NULL} -}; - -struct ixgbe_proc_type ixgbe_internal_entries[] = { - {"location", &ixgbe_therm_location}, - {"temp", &ixgbe_therm_temp}, - {"cautionthresh", &ixgbe_therm_cautionthresh}, - {"maxopthresh", &ixgbe_therm_maxopthresh}, - {"", NULL} -}; - -void ixgbe_del_proc_entries(struct ixgbe_adapter *adapter) -{ - int index; - int i; - char buf[16]; /* much larger than the sensor number will ever be */ - - if (ixgbe_top_dir == NULL) - return; - - for (i = 0; i < IXGBE_MAX_SENSORS; i++) { - if (adapter->therm_dir[i] == NULL) - continue; - - for (index = 0; ; index++) { - if (ixgbe_internal_entries[index].read == NULL) - break; - - remove_proc_entry(ixgbe_internal_entries[index].name, - adapter->therm_dir[i]); - } - snprintf(buf, sizeof(buf), "sensor_%d", i); - remove_proc_entry(buf, adapter->info_dir); - } - - if (adapter->info_dir != NULL) 
{ - for (index = 0; ; index++) { - if (ixgbe_proc_entries[index].read == NULL) - break; - remove_proc_entry(ixgbe_proc_entries[index].name, - adapter->info_dir); - } - remove_proc_entry("info", adapter->eth_dir); - } - - if (adapter->eth_dir != NULL) - remove_proc_entry(pci_name(adapter->pdev), ixgbe_top_dir); -} - -/* called from ixgbe_main.c */ -void ixgbe_procfs_exit(struct ixgbe_adapter *adapter) -{ - ixgbe_del_proc_entries(adapter); -} - -int ixgbe_procfs_topdir_init() -{ - ixgbe_top_dir = proc_mkdir("driver/ixgbe", NULL); - if (ixgbe_top_dir == NULL) - return -ENOMEM; - - return 0; -} - -void ixgbe_procfs_topdir_exit() -{ - remove_proc_entry("driver/ixgbe", NULL); -} - -/* called from ixgbe_main.c */ -int ixgbe_procfs_init(struct ixgbe_adapter *adapter) -{ - int rc = 0; - int index; - int i; - char buf[16]; /* much larger than the sensor number will ever be */ - - adapter->eth_dir = NULL; - adapter->info_dir = NULL; - for (i = 0; i < IXGBE_MAX_SENSORS; i++) - adapter->therm_dir[i] = NULL; - - if (ixgbe_top_dir == NULL) { - rc = -ENOMEM; - goto fail; - } - - adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), ixgbe_top_dir); - if (adapter->eth_dir == NULL) { - rc = -ENOMEM; - goto fail; - } - - adapter->info_dir = proc_mkdir("info", adapter->eth_dir); - if (adapter->info_dir == NULL) { - rc = -ENOMEM; - goto fail; - } - for (index = 0; ; index++) { - if (ixgbe_proc_entries[index].read == NULL) - break; - if (!(create_proc_read_entry(ixgbe_proc_entries[index].name, - 0444, - adapter->info_dir, - ixgbe_proc_entries[index].read, - adapter))) { - - rc = -ENOMEM; - goto fail; - } - } - if (ixgbe_thermal_present(adapter) == false) - goto exit; - - for (i = 0; i < IXGBE_MAX_SENSORS; i++) { - - if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == - 0) - continue; - - snprintf(buf, sizeof(buf), "sensor_%d", i); - adapter->therm_dir[i] = proc_mkdir(buf, adapter->info_dir); - if (adapter->therm_dir[i] == NULL) { - rc = -ENOMEM; - goto fail; - } - for (index = 
0; ; index++) { - if (ixgbe_internal_entries[index].read == NULL) - break; - /* - * therm_data struct contains pointer the read func - * will be needing - */ - adapter->therm_data[i].hw = &adapter->hw; - adapter->therm_data[i].sensor_data = - &adapter->hw.mac.thermal_sensor_data.sensor[i]; - - if (!(create_proc_read_entry( - ixgbe_internal_entries[index].name, - 0444, - adapter->therm_dir[i], - ixgbe_internal_entries[index].read, - &adapter->therm_data[i]))) { - rc = -ENOMEM; - goto fail; - } - } - } - goto exit; - -fail: - ixgbe_del_proc_entries(adapter); -exit: - return rc; -} - -#endif /* !IXGBE_SYSFS */ -#endif /* IXGBE_PROCFS */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ptp.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ptp.c deleted file mode 100644 index 0fe14217d403..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_ptp.c +++ /dev/null @@ -1,1437 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe.h" -#include - -/* - * The 82599 and the X540 do not have true 64bit nanosecond scale - * counter registers. Instead, SYSTIME is defined by a fixed point - * system which allows the user to define the scale counter increment - * value at every level change of the oscillator driving the SYSTIME - * value. For both devices the TIMINCA:IV field defines this - * increment. On the X540 device, 31 bits are provided. However on the - * 82599 only provides 24 bits. The time unit is determined by the - * clock frequency of the oscillator in combination with the TIMINCA - * register. When these devices link at 10Gb the oscillator has a - * period of 6.4ns. In order to convert the scale counter into - * nanoseconds the cyclecounter and timecounter structures are - * used. The SYSTIME registers need to be converted to ns values by use - * of only a right shift (division by power of 2). The following math - * determines the largest incvalue that will fit into the available - * bits in the TIMINCA register. - * - * PeriodWidth: Number of bits to store the clock period - * MaxWidth: The maximum width value of the TIMINCA register - * Period: The clock period for the oscillator - * round(): discard the fractional portion of the calculation - * - * Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ] - * - * For the X540, MaxWidth is 31 bits, and the base period is 6.4 ns - * For the 82599, MaxWidth is 24 bits, and the base period is 6.4 ns - * - * The period also changes based on the link speed: - * At 10Gb link or no link, the period remains the same. - * At 1Gb link, the period is multiplied by 10. (64ns) - * At 100Mb link, the period is multiplied by 100. 
(640ns) - * - * The calculated value allows us to right shift the SYSTIME register - * value in order to quickly convert it into a nanosecond clock, - * while allowing for the maximum possible adjustment value. - * - * These diagrams are only for the 10Gb link period - * - * SYSTIMEH SYSTIMEL - * +--------------+ +--------------+ - * X540 | 32 | | 1 | 3 | 28 | - * *--------------+ +--------------+ - * \________ 36 bits ______/ fract - * - * +--------------+ +--------------+ - * 82599 | 32 | | 8 | 3 | 21 | - * *--------------+ +--------------+ - * \________ 43 bits ______/ fract - * - * The 36 bit X540 SYSTIME overflows every - * 2^36 * 10^-9 / 60 = 1.14 minutes or 69 seconds - * - * The 43 bit 82599 SYSTIME overflows every - * 2^43 * 10^-9 / 3600 = 2.4 hours - */ -#define IXGBE_INCVAL_10GB 0x66666666 -#define IXGBE_INCVAL_1GB 0x40000000 -#define IXGBE_INCVAL_100 0x50000000 - -#define IXGBE_INCVAL_SHIFT_10GB 28 -#define IXGBE_INCVAL_SHIFT_1GB 24 -#define IXGBE_INCVAL_SHIFT_100 21 - -#define IXGBE_INCVAL_SHIFT_82599 7 -#define IXGBE_INCPER_SHIFT_82599 24 - -#define IXGBE_OVERFLOW_PERIOD (HZ * 30) -#define IXGBE_PTP_TX_TIMEOUT (HZ) - -/* half of a one second clock period, for use with PPS signal. We have to use - * this instead of something pre-defined like IXGBE_PTP_PPS_HALF_SECOND, in - * order to force at least 64bits of precision for shifting - */ -#define IXGBE_PTP_PPS_HALF_SECOND 500000000ULL - -/* In contrast, the X550 controller has two registers, SYSTIMEH and SYSTIMEL - * which contain measurements of seconds and nanoseconds respectively. This - * matches the standard linux representation of time in the kernel. In addition, - * the X550 also has a SYSTIMER register which represents residue, or - * subnanosecond overflow adjustments. To control clock adjustment, the TIMINCA - * register is used, but it is unlike the X540 and 82599 devices. 
TIMINCA - * represents units of 2^-32 nanoseconds, and uses 31 bits for this, with the - * high bit representing whether the adjustent is positive or negative. Every - * clock cycle, the X550 will add 12.5 ns + TIMINCA which can result in a range - * of 12 to 13 nanoseconds adjustment. Unlike the 82599 and X540 devices, the - * X550's clock for purposes of SYSTIME generation is constant and not dependant - * on the link speed. - * - * SYSTIMEH SYSTIMEL SYSTIMER - * +--------------+ +--------------+ +-------------+ - * X550 | 32 | | 32 | | 32 | - * *--------------+ +--------------+ +-------------+ - * \____seconds___/ \_nanoseconds_/ \__2^-32 ns__/ - * - * This results in a full 96 bits to represent the clock, with 32 bits for - * seconds, 32 bits for nanoseconds (largest value is 0d999999999 or just under - * 1 second) and an additional 32 bits to measure sub nanosecond adjustments for - * underflow of adjustments. - * - * The 32 bits of seconds for the X550 overflows every - * 2^32 / ( 365.25 * 24 * 60 * 60 ) = ~136 years. - * - * In order to adjust the clock frequency for the X550, the TIMINCA register is - * provided. This register represents a + or minus nearly 0.5 ns adjustment to - * the base frequency. It is measured in 2^-32 ns units, with the high bit being - * the sign bit. This register enables software to calculate frequency - * adjustments and apply them directly to the clock rate. - * - * The math for converting ppb into TIMINCA values is fairly straightforward. - * TIMINCA value = ( Base_Frequency * ppb ) / 1000000000ULL - * - * This assumes that ppb is never high enough to create a value bigger than - * TIMINCA's 31 bits can store. This is ensured by the stack. Calculating this - * value is also simple. - * Max ppb = ( Max Adjustment / Base Frequency ) / 1000000000ULL - * - * For the X550, the Max adjustment is +/- 0.5 ns, and the base frequency is - * 12.5 nanoseconds. 
This means that the Max ppb is 39999999 - * Note: We subtract one in order to ensure no overflow, because the TIMINCA - * register can only hold slightly under 0.5 nanoseconds. - * - * Because TIMINCA is measured in 2^-32 ns units, we have to convert 12.5 ns - * into 2^-32 units, which is - * - * 12.5 * 2^32 = C80000000 - * - * Some revisions of hardware have a faster base frequency than the registers - * were defined for. To fix this, we use a timecounter structure with the - * proper mult and shift to convert the cycles into nanoseconds of time. - */ -#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL -#define INCVALUE_MASK 0x7FFFFFFF -#define ISGN 0x80000000 -#define MAX_TIMADJ 0x7FFFFFFF - -/** - * ixgbe_ptp_setup_sdp_X540 - * @adapter: the adapter private structure - * - * this function enables or disables the clock out feature on SDP0 for - * the X540 device. It will create a 1second periodic output that can - * be used as the PPS (via an interrupt). - * - * It calculates when the systime will be on an exact second, and then - * aligns the start of the PPS signal to that value. The shift is - * necessary because it can change based on the link speed. 
- */ -static void ixgbe_ptp_setup_sdp_X540(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int shift = adapter->hw_cc.shift; - u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem; - u64 ns = 0, clock_edge = 0; - - /* disable the pin first */ - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); - IXGBE_WRITE_FLUSH(hw); - - if (!(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED)) - return; - - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - - /* - * enable the SDP0 pin as output, and connected to the - * native function for Timesync (ClockOut) - */ - esdp |= IXGBE_ESDP_SDP0_DIR | - IXGBE_ESDP_SDP0_NATIVE; - - /* - * enable the Clock Out feature on SDP0, and allow - * interrupts to occur when the pin changes - */ - tsauxc = IXGBE_TSAUXC_EN_CLK | - IXGBE_TSAUXC_SYNCLK | - IXGBE_TSAUXC_SDP0_INT; - - /* set to half clock period */ - clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift); - clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32); - - /* - * Account for the cyclecounter wrap-around value by - * using the converted ns value of the current time to - * check for when the next aligned second would occur. 
- */ - clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); - clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; - ns = timecounter_cyc2time(&adapter->hw_tc, clock_edge); - - div_u64_rem(ns, IXGBE_PTP_PPS_HALF_SECOND, &rem); - clock_edge += ((IXGBE_PTP_PPS_HALF_SECOND - (u64)rem) << shift); - - /* specify the initial clock start time */ - trgttiml = (u32)clock_edge; - trgttimh = (u32)(clock_edge >> 32); - - IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); - IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); - IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); - IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); - - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); - - IXGBE_WRITE_FLUSH(hw); -} - -/** - * ixgbe_ptp_read_X550 - read cycle counter value - * @hw_cc: cyclecounter structure - * - * This function reads SYSTIME registers. It is called by the cyclecounter - * structure to convert from internal representation into nanoseconds. We need - * this for X550 since some skews do not have expected clock frequency and - * result of SYSTIME is 32bits of "billions of cycles" and 32 bits of - * "cycles", rather than seconds and nanoseconds. - */ -static u64 ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc) -{ - struct ixgbe_adapter *adapter = - container_of(hw_cc, struct ixgbe_adapter, hw_cc); - struct ixgbe_hw *hw = &adapter->hw; - struct timespec64 ts; - - /* storage is 32 bits of 'billions of cycles' and 32 bits of 'cycles'. - * Some revisions of hardware run at a higher frequency and so the - * cycles are not guaranteed to be nanoseconds. The timespec64 created - * here is used for its math/conversions but does not necessarily - * represent nominal time. - * - * It should be noted that this cyclecounter will overflow at a - * non-bitmask field since we have to convert our billions of cycles - * into an actual cycles count. This results in some possible weird - * situations at high cycle counter stamps. 
However given that 32 bits - * of "seconds" is ~138 years this isn't a problem. Even at the - * increased frequency of some revisions, this is still ~103 years. - * Since the SYSTIME values start at 0 and we never write them, it is - * highly unlikely for the cyclecounter to overflow in practice. - */ - IXGBE_READ_REG(hw, IXGBE_SYSTIMR); - ts.tv_nsec = IXGBE_READ_REG(hw, IXGBE_SYSTIML); - ts.tv_sec = IXGBE_READ_REG(hw, IXGBE_SYSTIMH); - - return (u64)timespec64_to_ns(&ts); -} - -/** - * ixgbe_ptp_read_82599 - read raw cycle counter (to be used by time counter) - * @hw_cc: the cyclecounter structure - * - * this function reads the cyclecounter registers and is called by the - * cyclecounter structure used to construct a ns counter from the - * arbitrary fixed point registers - */ -static u64 ixgbe_ptp_read_82599(const struct cyclecounter *hw_cc) -{ - struct ixgbe_adapter *adapter = - container_of(hw_cc, struct ixgbe_adapter, hw_cc); - struct ixgbe_hw *hw = &adapter->hw; - u64 stamp = 0; - - stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); - stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; - - return stamp; -} - -/** - * ixgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp - * @adapter: private adapter structure - * @hwtstamp: stack timestamp structure - * @systim: unsigned 64bit system time value - * - * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value - * which can be used by the stack's ptp functions. - * - * The lock is used to protect consistency of the cyclecounter and the SYSTIME - * registers. However, it does not need to protect against the Rx or Tx - * timestamp registers, as there can't be a new timestamp until the old one is - * unlatched by reading. - * - * In addition to the timestamp in hardware, some controllers need a software - * overflow cyclecounter, and this function takes this into account as well. 
- **/ -static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter, - struct skb_shared_hwtstamps *hwtstamp, - u64 timestamp) -{ - unsigned long flags; - struct timespec64 systime; - u64 ns; - - memset(hwtstamp, 0, sizeof(*hwtstamp)); - - switch (adapter->hw.mac.type) { - /* X550 and later hardware supposedly represent time using a seconds - * and nanoseconds counter, instead of raw 64bits nanoseconds. We need - * to convert the timestamp into cycles before it can be fed to the - * cyclecounter. We need an actual cyclecounter because some revisions - * of hardware run at a higher frequency and thus the counter does - * not represent seconds/nanoseconds. Instead it can be thought of as - * cycles and billions of cycles. - */ - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - /* Upper 32 bits represent billions of cycles, lower 32 bits - * represent cycles. However, we use timespec64_to_ns for the - * correct math even though the units haven't been corrected - * yet. - */ - systime.tv_sec = timestamp >> 32; - systime.tv_nsec = timestamp & 0xFFFFFFFF; - - timestamp = timespec64_to_ns(&systime); - break; - default: - break; - } - - spin_lock_irqsave(&adapter->tmreg_lock, flags); - ns = timecounter_cyc2time(&adapter->hw_tc, timestamp); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - - hwtstamp->hwtstamp = ns_to_ktime(ns); -} - -/** - * ixgbe_ptp_adjfreq_82599 - * @ptp: the ptp clock structure - * @ppb: parts per billion adjustment from base - * - * adjust the frequency of the ptp cycle counter by the - * indicated ppb from the base frequency. 
- */ -static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) -{ - struct ixgbe_adapter *adapter = - container_of(ptp, struct ixgbe_adapter, ptp_caps); - struct ixgbe_hw *hw = &adapter->hw; - u64 freq, incval; - u32 diff; - int neg_adj = 0; - - if (ppb < 0) { - neg_adj = 1; - ppb = -ppb; - } - - smp_mb(); - incval = ACCESS_ONCE(adapter->base_incval); - - freq = incval; - freq *= ppb; - diff = div_u64(freq, 1000000000ULL); - - incval = neg_adj ? (incval - diff) : (incval + diff); - - switch (hw->mac.type) { - case ixgbe_mac_X540: - if (incval > 0xFFFFFFFFULL) - e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); - IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, (u32)incval); - break; - case ixgbe_mac_82599EB: - if (incval > 0x00FFFFFFULL) - e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); - IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, - (1 << IXGBE_INCPER_SHIFT_82599) | - ((u32)incval & 0x00FFFFFFUL)); - break; - default: - break; - } - - return 0; -} - -/** - * ixgbe_ptp_adjfreq_X550 - * @ptp: the ptp clock structure - * @ppb: parts per billion adjustment from base - * - * adjust the frequency of the SYSTIME registers by the indicated ppb from base - * frequency - */ -static int ixgbe_ptp_adjfreq_X550(struct ptp_clock_info *ptp, s32 ppb) -{ - struct ixgbe_adapter *adapter = - container_of(ptp, struct ixgbe_adapter, ptp_caps); - struct ixgbe_hw *hw = &adapter->hw; - int neg_adj = 0; - u64 rate = IXGBE_X550_BASE_PERIOD; - u32 inca; - - if (ppb < 0) { - neg_adj = 1; - ppb = -ppb; - } - rate *= ppb; - rate = div_u64(rate, 1000000000ULL); - - /* warn if rate is too large */ - if (rate >= INCVALUE_MASK) - e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); - - inca = rate & INCVALUE_MASK; - if (neg_adj) - inca |= ISGN; - - IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, inca); - - return 0; -} - -/** - * ixgbe_ptp_adjtime_timecounter - * @ptp: the ptp clock structure - * @delta: offset to adjust the cycle counter by - * - * adjust the timer by resetting the 
timecounter structure. - */ -static int ixgbe_ptp_adjtime_timecounter(struct ptp_clock_info *ptp, - s64 delta) -{ - struct ixgbe_adapter *adapter = - container_of(ptp, struct ixgbe_adapter, ptp_caps); - unsigned long flags; - - spin_lock_irqsave(&adapter->tmreg_lock, flags); - timecounter_adjtime(&adapter->hw_tc, delta); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - - if (adapter->ptp_setup_sdp) - adapter->ptp_setup_sdp(adapter); - - return 0; -} - -/** - * ixgbe_ptp_gettime64_timecounter - * @ptp: the ptp clock structure - * @ts: timespec64 structure to hold the current time value - * - * read the timecounter and return the correct value on ns, - * after converting it into a struct timespec64. - */ -static int ixgbe_ptp_gettime64_timecounter(struct ptp_clock_info *ptp, struct timespec64 *ts) -{ - struct ixgbe_adapter *adapter = - container_of(ptp, struct ixgbe_adapter, ptp_caps); - unsigned long flags; - u64 ns; - - spin_lock_irqsave(&adapter->tmreg_lock, flags); - ns = timecounter_read(&adapter->hw_tc); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - - *ts = ns_to_timespec64(ns); - - return 0; -} - -/** - * ixgbe_ptp_settime64_timecounter - * @ptp: the ptp clock structure - * @ts: the timespec64 containing the new time for the cycle counter - * - * reset the timecounter to use a new base value instead of the kernel - * wall timer value. 
- */ -static int ixgbe_ptp_settime64_timecounter(struct ptp_clock_info *ptp, - const struct timespec64 *ts) -{ - struct ixgbe_adapter *adapter = - container_of(ptp, struct ixgbe_adapter, ptp_caps); - u64 ns; - unsigned long flags; - - ns = timespec64_to_ns(ts); - - /* reset the timecounter */ - spin_lock_irqsave(&adapter->tmreg_lock, flags); - timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - - if (adapter->ptp_setup_sdp) - adapter->ptp_setup_sdp(adapter); - return 0; -} - -#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64 -static int ixgbe_ptp_gettime_timecounter(struct ptp_clock_info *ptp, struct timespec *ts) -{ - struct timespec64 ts64; - int err; - - err = ixgbe_ptp_gettime64_timecounter(ptp, &ts64); - if (err) - return err; - - *ts = timespec64_to_timespec(ts64); - - return 0; -} - -static int ixgbe_ptp_settime_timecounter(struct ptp_clock_info *ptp, - const struct timespec *ts) -{ - struct timespec64 ts64; - - ts64 = timespec_to_timespec64(*ts); - return ixgbe_ptp_settime64_timecounter(ptp, &ts64); -} -#endif - -/** - * ixgbe_ptp_feature_enable - * @ptp: the ptp clock structure - * @rq: the requested feature to change - * @on: whether to enable or disable the feature - * - * enable (or disable) ancillary features of the phc subsystem. - * our driver only supports the PPS feature on the X540 - */ -static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) -{ - struct ixgbe_adapter *adapter = - container_of(ptp, struct ixgbe_adapter, ptp_caps); - - /** - * When PPS is enabled, unmask the interrupt for the ClockOut - * feature, so that the interrupt handler can send the PPS - * event when the clock SDP triggers. 
Clear mask when PPS is - * disabled - */ - if (rq->type == PTP_CLK_REQ_PPS && adapter->ptp_setup_sdp) { - if (on) - adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; - else - adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; - - adapter->ptp_setup_sdp(adapter); - return 0; - } - - return -ENOTSUPP; -} - -/** - * ixgbe_ptp_check_pps_event - * @adapter: the private adapter structure - * @eicr: the interrupt cause register value - * - * This function is called by the interrupt routine when checking for - * interrupts. It will check and handle a pps event. - */ -void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct ptp_clock_event event; - - event.type = PTP_CLOCK_PPS; - - /* this check is necessary in case the interrupt was enabled via some - * alternative means (ex. debug_fs). Better to check here than - * everywhere that calls this function. - */ - if (!adapter->ptp_clock) - return; - - switch (hw->mac.type) { - case ixgbe_mac_X540: - ptp_clock_event(adapter->ptp_clock, &event); - break; - default: - break; - } -} - -/** - * ixgbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow - * @adapter: private adapter struct - * - * this watchdog task periodically reads the timecounter - * in order to prevent missing when the system time registers wrap - * around. This needs to be run approximately twice a minute for the fastest - * overflowing hardware. We run it for all hardware since it shouldn't have a - * large impact. 
- */ -void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) -{ - bool timeout = time_is_before_jiffies(adapter->last_overflow_check + - IXGBE_OVERFLOW_PERIOD); - struct timespec64 ts; - - if (timeout) { - ixgbe_ptp_gettime64_timecounter(&adapter->ptp_caps, &ts); - adapter->last_overflow_check = jiffies; - } -} - -/** - * ixgbe_ptp_rx_hang - detect error case when Rx timestamp registers latched - * @adapter: private network adapter structure - * - * this watchdog task is scheduled to detect error case where hardware has - * dropped an Rx packet that was timestamped when the ring is full. The - * particular error is rare but leaves the device in a state unable to timestamp - * any future packets. - */ -void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_ring *rx_ring; - u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); - unsigned long rx_event; - int n; - - /* if we don't have a valid timestamp in the registers, just update the - * timeout counter and exit - */ - if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) { - adapter->last_rx_ptp_check = jiffies; - return; - } - - /* determine the most recent watchdog or rx_timestamp event */ - rx_event = adapter->last_rx_ptp_check; - for (n = 0; n < adapter->num_rx_queues; n++) { - rx_ring = adapter->rx_ring[n]; - if (time_after(rx_ring->last_rx_timestamp, rx_event)) - rx_event = rx_ring->last_rx_timestamp; - } - - /* only need to read the high RXSTMP register to clear the lock */ - if (time_is_before_jiffies(rx_event + 5*HZ)) { - IXGBE_READ_REG(hw, IXGBE_RXSTMPH); - adapter->last_rx_ptp_check = jiffies; - - adapter->rx_hwtstamp_cleared++; - e_warn(drv, "clearing RX Timestamp hang"); - } -} - -/** - * ixgbe_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state - * @adapter: the private adapter structure - * - * This function should be called whenever the state related to a Tx timestamp - * needs to be cleared. 
This helps ensure that all related bits are reset for - * the next Tx timestamp event. - */ -static void ixgbe_ptp_clear_tx_timestamp(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - - IXGBE_READ_REG(hw, IXGBE_TXSTMPH); - if (adapter->ptp_tx_skb) { - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; - } - clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); -} - -/** - * ixgbe_ptp_tx_hang - detect error case where Tx timestamp never finishes - * @adapter: private network adapter structure - */ -void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter) -{ - bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + - IXGBE_PTP_TX_TIMEOUT); - - if (!adapter->ptp_tx_skb) - return; - - if (!test_bit(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state)) - return; - - /* If we haven't received a timestamp within the timeout, it is - * reasonable to assume that it will never occur, so we can unlock the - * timestamp bit when this occurs. - */ - if (timeout) { - cancel_work_sync(&adapter->ptp_tx_work); - ixgbe_ptp_clear_tx_timestamp(adapter); - adapter->tx_hwtstamp_timeouts++; - e_warn(drv, "clearing Tx timestamp hang\n"); - } -} - -/** - * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp - * @adapter: the private adapter struct - * - * if the timestamp is valid, we convert it into the timecounter ns - * value, then store that result into the shhwtstamps structure which - * is passed up the network stack - */ -static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter) -{ - struct sk_buff *skb = adapter->ptp_tx_skb; - struct ixgbe_hw *hw = &adapter->hw; - struct skb_shared_hwtstamps shhwtstamps; - u64 regval = 0; - - regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); - regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32; - ixgbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval); - - /* Handle cleanup of the ptp_tx_skb ourselves, and unlock the state - * bit prior to notifying the stack via 
skb_tstamp_tx(). This prevents - * well behaved applications from attempting to timestamp again prior - * to the lock bit being clear. - */ - adapter->ptp_tx_skb = NULL; - clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); - - /* Notify the stack and then free the skb after we've unlocked */ - skb_tstamp_tx(skb, &shhwtstamps); - dev_kfree_skb_any(skb); -} - -/** - * ixgbe_ptp_tx_hwtstamp_work - * @work: pointer to the work struct - * - * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware - * timestamp has been taken for the current skb. It is necesary, because the - * descriptor's "done" bit does not correlate with the timestamp event. - */ -static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) -{ - struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter, - ptp_tx_work); - struct ixgbe_hw *hw = &adapter->hw; - bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + - IXGBE_PTP_TX_TIMEOUT); - u32 tsynctxctl; - - /* we have to have a valid skb to poll for a timestamp */ - if (!adapter->ptp_tx_skb) { - ixgbe_ptp_clear_tx_timestamp(adapter); - return; - } - - /* stop polling once we have a valid timestamp */ - tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); - if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID) { - ixgbe_ptp_tx_hwtstamp(adapter); - return; - } - - /* check timeout last in case timestamp event just occurred */ - if (timeout) { - ixgbe_ptp_clear_tx_timestamp(adapter); - adapter->tx_hwtstamp_timeouts++; - e_warn(drv, "clearing Tx Timestamp hang"); - } else { - /* reschedule to keep checking until we timeout */ - schedule_work(&adapter->ptp_tx_work); - } -} - -/** - * ixgbe_ptp_rx_pktstamp - utility function to get RX time stamp from buffer - * @q_vector: structure containing interrupt and ring information - * @skb: the packet - * - * This function will be called by the Rx routine of the timestamp for this - * packet is stored in the buffer. 
The value is stored in little endian format - * starting at the end of the packet data. - */ -void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb) -{ - __le64 regval; - - /* copy the bits out of the skb, and then trim the skb length */ - skb_copy_bits(skb, skb->len - IXGBE_TS_HDR_LEN, ®val, IXGBE_TS_HDR_LEN); - __pskb_trim(skb, skb->len - IXGBE_TS_HDR_LEN); - - /* The timestamp is recorded in little endian format, and is stored at - * the end of the packet. - * - * DWORD: N N + 1 N + 2 - * Field: End of Packet SYSTIMH SYSTIML - */ - ixgbe_ptp_convert_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb), - le64_to_cpu(regval)); -} - -/** - * ixgbe_ptp_rx_rgtstamp - utility function which checks for RX time stamp - * @q_vector: structure containing interrupt and ring information - * @skb: particular skb to send timestamp with - * - * if the timestamp is valid, we convert it into the timecounter ns - * value, then store that result into the shhwtstamps structure which - * is passed up the network stack - */ -void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb) -{ - struct ixgbe_adapter *adapter; - struct ixgbe_hw *hw; - u64 regval = 0; - u32 tsyncrxctl; - - /* we cannot process timestamps on a ring without a q_vector */ - if (!q_vector || !q_vector->adapter) - return; - - adapter = q_vector->adapter; - hw = &adapter->hw; - - /* - * Read the tsyncrxctl register afterwards in order to prevent taking an - * I/O hit on every packet. 
- */ - tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); - if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) - return; - - regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); - regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; - - ixgbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); -} - -/** - * ixgbe_ptp_get_ts_config - get current hardware timestamping configuration - * @adapter: pointer to adapter structure - * @ifreq: ioctl data - * - * This function returns the current timestamping settings. Rather than - * attempt to deconstruct registers to fill in the values, simply keep a copy - * of the old settings around, and return a copy when requested. - */ -int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) -{ - struct hwtstamp_config *config = &adapter->tstamp_config; - - return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? - -EFAULT : 0; -} - -/** - * ixgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode - * @adapter: the private ixgbe adapter structure - * @config: the hwtstamp configuration requested - * - * Outgoing time stamping can be enabled and disabled. Play nice and - * disable it when requested, although it shouldn't cause any overhead - * when no packet needs it. At most one packet in the queue may be - * marked for time stamping, otherwise it would be impossible to tell - * for sure to which packet the hardware time stamp belongs. - * - * Incoming time stamping has to be configured via the hardware - * filters. Not all combinations are supported, in particular event - * type has to be specified. Matching the kind of event packet is - * not supported, with the exception of "all V2 events regardless of - * level 2 or 4". - * - * Since hardware always timestamps Path delay packets when timestamping V2 - * packets, regardless of the type specified in the register, only use V2 - * Event mode. This more accurately tells the user what the hardware is going - * to do anyways. 
- * - * Note: this may modify the hwtstamp configuration towards a more general - * mode, if required to support the specifically requested mode. - */ -static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, - struct hwtstamp_config *config) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; - u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED; - u32 tsync_rx_mtrl = PTP_EV_PORT << 16; - bool is_l2 = false; - u32 regval; - - /* reserved for future extensions */ - if (config->flags) - return -EINVAL; - - switch (config->tx_type) { - case HWTSTAMP_TX_OFF: - tsync_tx_ctl = 0; - case HWTSTAMP_TX_ON: - break; - default: - return -ERANGE; - } - - switch (config->rx_filter) { - case HWTSTAMP_FILTER_NONE: - tsync_rx_ctl = 0; - tsync_rx_mtrl = 0; - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); - break; - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; - tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; - adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); - break; - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; - tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; - adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); - break; - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; - is_l2 = true; - config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); - break; - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: -#ifdef 
HAVE_HWTSTAMP_FILTER_NTP_ALL - case HWTSTAMP_FILTER_NTP_ALL: -#endif /* HAVE_HWTSTAMP_FILTER_NTP_ALL */ - case HWTSTAMP_FILTER_ALL: - /* The X550 controller is capable of timestamping all packets, - * which allows it to accept any filter. - */ - if (hw->mac.type >= ixgbe_mac_X550) { - tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL; - config->rx_filter = HWTSTAMP_FILTER_ALL; - adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; - break; - } - /* fall through */ - default: - /* register RXMTRL must be set in order to do V1 packets, - * therefore it is not possible to time stamp both V1 Sync and - * Delay_Req messages unless hardware supports timestamping all - * packets => return error - */ - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); - config->rx_filter = HWTSTAMP_FILTER_NONE; - return -ERANGE; - } - - if (hw->mac.type == ixgbe_mac_82598EB) { - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); - if (tsync_rx_ctl | tsync_tx_ctl) - return -ERANGE; - return 0; - } - - /* Per-packet timestamping only works if the filter is set to all - * packets. Since this is desired, always timestamp all packets as long - * as any Rx filter was configured. - */ - switch (hw->mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - /* enable timestamping all packets only if at least some - * packets were requested. 
Otherwise, play nice and disable - * timestamping */ - if (config->rx_filter == HWTSTAMP_FILTER_NONE) - break; - - tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED | - IXGBE_TSYNCRXCTL_TYPE_ALL | - IXGBE_TSYNCRXCTL_TSIP_UT_EN; - config->rx_filter = HWTSTAMP_FILTER_ALL; - adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; - adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER; - is_l2 = true; - break; - default: - break; - } - - /* define ethertype filter for timestamping L2 packets */ - if (is_l2) - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), - (IXGBE_ETQF_FILTER_EN | /* enable filter */ - IXGBE_ETQF_1588 | /* enable timestamping */ - ETH_P_1588)); /* 1588 eth protocol type */ - else - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); - - /* enable/disable TX */ - regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); - regval &= ~IXGBE_TSYNCTXCTL_ENABLED; - regval |= tsync_tx_ctl; - IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, regval); - - /* enable/disable RX */ - regval = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); - regval &= ~(IXGBE_TSYNCRXCTL_ENABLED | IXGBE_TSYNCRXCTL_TYPE_MASK); - regval |= tsync_rx_ctl; - IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, regval); - - /* define which PTP packets are time stamped */ - IXGBE_WRITE_REG(hw, IXGBE_RXMTRL, tsync_rx_mtrl); - - IXGBE_WRITE_FLUSH(hw); - - /* clear TX/RX timestamp state, just to be sure */ - ixgbe_ptp_clear_tx_timestamp(adapter); - IXGBE_READ_REG(hw, IXGBE_RXSTMPH); - - return 0; -} - -/** - * ixgbe_ptp_set_ts_config - user entry point for timestamp mode - * @adapter: pointer to adapter struct - * @ifreq: ioctl data - * - * Set hardware to requested mode. If unsupported, return an error with no - * changes. Otherwise, store the mode for future reference. 
- */ -int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) -{ - struct hwtstamp_config config; - int err; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - err = ixgbe_ptp_set_timestamp_mode(adapter, &config); - if (err) - return err; - - /* save these settings for future reference */ - memcpy(&adapter->tstamp_config, &config, - sizeof(adapter->tstamp_config)); - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; -} - -static void ixgbe_ptp_link_speed_adjust(struct ixgbe_adapter *adapter, - u32 *shift, u32 *incval) -{ - /** - * Scale the NIC cycle counter by a large factor so that - * relatively small corrections to the frequency can be added - * or subtracted. The drawbacks of a large factor include - * (a) the clock register overflows more quickly, (b) the cycle - * counter structure must be able to convert the systime value - * to nanoseconds using only a multiplier and a right-shift, - * and (c) the value must fit within the timinca register space - * => math based on internal DMA clock rate and available bits - * - * Note that when there is no link, internal DMA clock is same as when - * link speed is 10Gb. Set the registers correctly even when link is - * down to preserve the clock setting - */ - switch (adapter->link_speed) { - case IXGBE_LINK_SPEED_100_FULL: - *shift = IXGBE_INCVAL_SHIFT_100; - *incval = IXGBE_INCVAL_100; - break; - case IXGBE_LINK_SPEED_1GB_FULL: - *shift = IXGBE_INCVAL_SHIFT_1GB; - *incval = IXGBE_INCVAL_1GB; - break; - case IXGBE_LINK_SPEED_10GB_FULL: - default: - *shift = IXGBE_INCVAL_SHIFT_10GB; - *incval = IXGBE_INCVAL_10GB; - break; - } - - return; -} - -/** - * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw - * @adapter: pointer to the adapter structure - * - * This function should be called to set the proper values for the TIMINCA - * register and tell the cyclecounter structure what the tick rate of SYSTIME - * is. 
It does not directly modify SYSTIME registers or the timecounter - * structure. It should be called whenever a new TIMINCA value is necessary, - * such as during initialization or when the link speed changes. - */ -void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - unsigned long flags; - struct cyclecounter cc; - u32 incval = 0; - u32 tsauxc = 0, fuse0 = 0; - - /* For some of the boards below this mask is technically incorrect. - * The timestamp mask overflows at approximately 61bits. However the - * particular hardware does not overflow on an even bitmask value. - * Instead, it overflows due to conversion of upper 32bits billions of - * cycles. Timecounters are not really intended for this purpose so - * they do not properly function if the overflow point isn't 2^N-1. - * However, the actual SYSTIME values in question take ~138 years to - * overflow. In practice this means they won't actually overflow. A - * proper fix to this problem would require modification of the - * timecounter delta calculations. - */ - cc.mask = CLOCKSOURCE_MASK(64); - cc.mult = 1; - cc.shift = 0; - - switch (hw->mac.type) { - case ixgbe_mac_X550EM_x: - /* SYSTIME assumes X550EM_x board frequency is 300Mhz, and is - * designed to represent seconds and nanoseconds when this is - * the case. However, some revisions of hardware have a 400Mhz - * clock and we have to compensate for this frequency - * variation using corrected mult and shift values. 
- */ - fuse0 = IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)); - if (!(fuse0 & IXGBE_FUSES0_300MHZ)) { - cc.mult = 3; - cc.shift = 2; - } - /* fallthrough */ - case ixgbe_mac_X550EM_a: - case ixgbe_mac_X550: - cc.read = ixgbe_ptp_read_X550; - - /* enable SYSTIME counter */ - IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0); - IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0); - IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0); - tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, - tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME); - IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS); - IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC); - - IXGBE_WRITE_FLUSH(hw); - break; - case ixgbe_mac_X540: - cc.read = ixgbe_ptp_read_82599; - - ixgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); - IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); - break; - case ixgbe_mac_82599EB: - cc.read = ixgbe_ptp_read_82599; - - ixgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); - incval >>= IXGBE_INCVAL_SHIFT_82599; - cc.shift -= IXGBE_INCVAL_SHIFT_82599; - IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, - (1 << IXGBE_INCPER_SHIFT_82599) | - incval); - break; - default: - /* other devices aren't supported */ - return; - } - - /* update the base incval used to calculate frequency adjustment */ - ACCESS_ONCE(adapter->base_incval) = incval; - smp_mb(); - - /* need lock to prevent incorrect read while modifying cyclecounter */ - spin_lock_irqsave(&adapter->tmreg_lock, flags); - memcpy(&adapter->hw_cc, &cc, sizeof(adapter->hw_cc)); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); -} - -/** - * ixgbe_ptp_reset - * @adapter: the ixgbe private board structure - * - * When the MAC resets, all of the hardware configuration for timesync is - * reset. This function should be called to re-enable the device for PTP, - * using the last known settings. However, we do lose the current clock time, - * so we fallback to resetting it based on the kernel's realtime clock. 
- * - * This function will maintain the hwtstamp_config settings, and it retriggers - * the SDP output if it's enabled. - */ -void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - unsigned long flags; - - /* reset the hardware timestamping mode */ - ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); - - /* 82598 does not support PTP */ - if (hw->mac.type == ixgbe_mac_82598EB) - return; - - ixgbe_ptp_start_cyclecounter(adapter); - - spin_lock_irqsave(&adapter->tmreg_lock, flags); - timecounter_init(&adapter->hw_tc, &adapter->hw_cc, - ktime_to_ns(ktime_get_real())); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - - adapter->last_overflow_check = jiffies; - - /* - * Now that the shift has been calculated and the systime - * registers reset, (re-)enable the Clock out feature - */ - if (adapter->ptp_setup_sdp) - adapter->ptp_setup_sdp(adapter); -} - -/** - * ixgbe_ptp_create_clock - * @adapter: the ixgbe private adapter structure - * - * This functino performs setup of the user entry point function table and - * initalizes the PTP clock device used by userspace to access the clock-like - * features of the PTP core. It will be called by ixgbe_ptp_init, and may - * re-use a previously initialized clock (such as during a suspend/resume - * cycle). 
- */ - -static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - long err; - - /* do nothing if we already have a clock device */ - if (!IS_ERR_OR_NULL(adapter->ptp_clock)) - return 0; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_X540: - snprintf(adapter->ptp_caps.name, - sizeof(adapter->ptp_caps.name), - "%s", netdev->name); - adapter->ptp_caps.owner = THIS_MODULE; - adapter->ptp_caps.max_adj = 250000000; - adapter->ptp_caps.n_alarm = 0; - adapter->ptp_caps.n_ext_ts = 0; - adapter->ptp_caps.n_per_out = 0; - adapter->ptp_caps.pps = 1; - adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; - adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime_timecounter; -#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 - adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime64_timecounter; - adapter->ptp_caps.settime64 = ixgbe_ptp_settime64_timecounter; -#else - adapter->ptp_caps.gettime = ixgbe_ptp_gettime_timecounter; - adapter->ptp_caps.settime = ixgbe_ptp_settime_timecounter; -#endif - adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; - adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_X540; - break; - case ixgbe_mac_82599EB: - snprintf(adapter->ptp_caps.name, - sizeof(adapter->ptp_caps.name), - "%s", netdev->name); - adapter->ptp_caps.owner = THIS_MODULE; - adapter->ptp_caps.max_adj = 250000000; - adapter->ptp_caps.n_alarm = 0; - adapter->ptp_caps.n_ext_ts = 0; - adapter->ptp_caps.n_per_out = 0; - adapter->ptp_caps.pps = 0; - adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; - adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime_timecounter; -#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 - adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime64_timecounter; - adapter->ptp_caps.settime64 = ixgbe_ptp_settime64_timecounter; -#else - adapter->ptp_caps.gettime = ixgbe_ptp_gettime_timecounter; - adapter->ptp_caps.settime = ixgbe_ptp_settime_timecounter; -#endif - adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; - break; - case ixgbe_mac_X550: - case 
ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); - adapter->ptp_caps.owner = THIS_MODULE; - adapter->ptp_caps.max_adj = 30000000; - adapter->ptp_caps.n_alarm = 0; - adapter->ptp_caps.n_ext_ts = 0; - adapter->ptp_caps.n_per_out = 0; - adapter->ptp_caps.pps = 0; - adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550; - adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime_timecounter; -#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 - adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime64_timecounter; - adapter->ptp_caps.settime64 = ixgbe_ptp_settime64_timecounter; -#else - adapter->ptp_caps.gettime = ixgbe_ptp_gettime_timecounter; - adapter->ptp_caps.settime = ixgbe_ptp_settime_timecounter; -#endif - adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; - adapter->ptp_setup_sdp = NULL; - break; - default: - adapter->ptp_clock = NULL; - adapter->ptp_setup_sdp = NULL; - return -EOPNOTSUPP; - } - - adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, - pci_dev_to_dev(adapter->pdev)); - if (IS_ERR(adapter->ptp_clock)) { - err = PTR_ERR(adapter->ptp_clock); - adapter->ptp_clock = NULL; - e_dev_err("ptp_clock_register failed\n"); - return err; - } else if (adapter->ptp_clock) - e_dev_info("registered PHC device on %s\n", netdev->name); - - /* Set the default timestamp mode to disabled here. We do this in - * create_clock instead of initialization, because we don't want to - * override the previous settings during a suspend/resume cycle. - */ - adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; - adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; - - return 0; -} - -/** - * ixgbe_ptp_init - * @adapter: the ixgbe private adapter structure - * - * This function performs the required steps for enabling ptp - * support. If ptp support has already been loaded it simply calls the - * cyclecounter init routine and exits. 
- */ -void ixgbe_ptp_init(struct ixgbe_adapter *adapter) -{ - /* initialize the spin lock first, since the user might call the clock - * functions any time after we've initialized the ptp clock device. - */ - spin_lock_init(&adapter->tmreg_lock); - - /* obtain a ptp clock device, or re-use an existing device */ - if (ixgbe_ptp_create_clock(adapter)) - return; - - /* we have a clock, so we can intialize work for timestamps now */ - INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work); - - /* reset the ptp related hardware bits */ - ixgbe_ptp_reset(adapter); - - /* enter the IXGBE_PTP_RUNNING state */ - set_bit(__IXGBE_PTP_RUNNING, &adapter->state); - - return; -} - -/** - * ixgbe_ptp_suspend - stop ptp work items - * @adapter: pointer to adapter struct - * - * This function suspends ptp activity, and prevents more work from being - * generated, but does not destroy the clock device. - */ -void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter) -{ - /* leave the IXGBE_PTP_RUNNING STATE */ - if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state)) - return; - - adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; - if (adapter->ptp_setup_sdp) - adapter->ptp_setup_sdp(adapter); - - cancel_work_sync(&adapter->ptp_tx_work); - ixgbe_ptp_clear_tx_timestamp(adapter); -} - -/** - * ixgbe_ptp_stop - destroy the ptp_clock device - * @adapter: pointer to adapter struct - * - * Completely destroy the ptp_clock device, and disable all PTP related - * features. Intended to be run when the device is being closed. 
- */ -void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) -{ - /* first, suspend ptp activity */ - ixgbe_ptp_suspend(adapter); - - /* now destroy the ptp clock device */ - if (adapter->ptp_clock) { - ptp_clock_unregister(adapter->ptp_clock); - adapter->ptp_clock = NULL; - e_dev_info("removed PHC on %s\n", - adapter->netdev->name); - } -} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.c deleted file mode 100644 index 4b996de9d2b4..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.c +++ /dev/null @@ -1,1881 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ixgbe.h" -#include "ixgbe_type.h" -#include "ixgbe_sriov.h" - -#ifdef CONFIG_PCI_IOV -static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, - unsigned int num_vfs) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct vf_macvlans *mv_list; - int num_vf_macvlans, i; - - num_vf_macvlans = hw->mac.num_rar_entries - - (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs); - if (!num_vf_macvlans) - return; - - mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans), - GFP_KERNEL); - if (mv_list) { - /* Initialize list of VF macvlans */ - INIT_LIST_HEAD(&adapter->vf_mvs.l); - for (i = 0; i < num_vf_macvlans; i++) { - mv_list[i].vf = -1; - mv_list[i].free = true; - list_add(&mv_list[i].l, &adapter->vf_mvs.l); - } - adapter->mv_list = mv_list; - } -} - -static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, - unsigned int num_vfs) -{ - struct ixgbe_hw *hw = &adapter->hw; - int i; - - adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; - - /* Enable VMDq flag so device will be set in VM mode */ - adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED; - if (!adapter->ring_feature[RING_F_VMDQ].limit) - adapter->ring_feature[RING_F_VMDQ].limit = 1; - - /* Allocate memory for per VF control structures */ - adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), - GFP_KERNEL); - if (!adapter->vfinfo) - return -ENOMEM; - - /* Initialize default switching mode VEB */ - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); - - /* set adapter->num_vfs only after allocating vfinfo to avoid - * NULL pointer issues when accessing adapter->vfinfo - */ - adapter->num_vfs = num_vfs; - - ixgbe_alloc_vf_macvlans(adapter, num_vfs); - - adapter->ring_feature[RING_F_VMDQ].offset = num_vfs; - - /* enable L2 switch and replication */ - 
adapter->flags |= IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE | - IXGBE_FLAG_SRIOV_REPLICATION_ENABLE; - - /* limit traffic classes based on VFs enabled */ - if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && - (adapter->num_vfs < 16)) { - adapter->dcb_cfg.num_tcs.pg_tcs = - IXGBE_DCB_MAX_TRAFFIC_CLASS; - adapter->dcb_cfg.num_tcs.pfc_tcs = - IXGBE_DCB_MAX_TRAFFIC_CLASS; - } else if (adapter->num_vfs < 32) { - adapter->dcb_cfg.num_tcs.pg_tcs = 4; - adapter->dcb_cfg.num_tcs.pfc_tcs = 4; - } else { - adapter->dcb_cfg.num_tcs.pg_tcs = 1; - adapter->dcb_cfg.num_tcs.pfc_tcs = 1; - } - adapter->dcb_cfg.vt_mode = true; - -#ifdef IXGBE_DISABLE_VF_MQ - /* We do not support RSS w/ SR-IOV */ - adapter->ring_feature[RING_F_RSS].limit = 1; -#endif - - /* Disable RSC when in SR-IOV mode */ - adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | - IXGBE_FLAG2_RSC_ENABLED); - - for (i = 0; i < adapter->num_vfs; i++) { - /* enable spoof checking for all VFs */ - adapter->vfinfo[i].spoofchk_enabled = true; - -#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN - /* We support VF RSS querying only for 82599 and x540 - * devices at the moment. These devices share RSS - * indirection table and RSS hash key with PF therefore - * we want to disable the querying by default. 
- */ - adapter->vfinfo[i].rss_query_enabled = 0; - -#endif - /* Untrust all VFs */ - adapter->vfinfo[i].trusted = false; - - /* set the default xcast mode */ - adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE; - } - - e_dev_info("SR-IOV enabled with %d VFs\n", num_vfs); - if (hw->mac.type < ixgbe_mac_X550) - e_dev_info("configure port vlans to keep your VFs secure\n"); - - return 0; -} - -/** - * ixgbe_get_vfs - Find and take references to all vf devices - * @adapter: Pointer to adapter struct - */ -static void ixgbe_get_vfs(struct ixgbe_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - u16 vendor = pdev->vendor; - struct pci_dev *vfdev; - int vf = 0; - u16 vf_id; - int pos; - - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); - if (!pos) - return; - pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); - - vfdev = pci_get_device(vendor, vf_id, NULL); - for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) { - if (!vfdev->is_virtfn) - continue; - if (vfdev->physfn != pdev) - continue; - if (vf >= adapter->num_vfs) - continue; - pci_dev_get(vfdev); - adapter->vfinfo[vf].vfdev = vfdev; - ++vf; - } -} - -/* Note this function is called when the user wants to enable SR-IOV - * VFs using the now deprecated module parameter - */ -void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) -{ - int pre_existing_vfs = 0; - unsigned int num_vfs; - - pre_existing_vfs = pci_num_vf(adapter->pdev); - if (!pre_existing_vfs && !adapter->max_vfs) - return; - - /* If there are pre-existing VFs then we have to force - * use of that many - over ride any module parameter value. - * This may result from the user unloading the PF driver - * while VFs were assigned to guest VMs or because the VFs - * have been created via the new PCI SR-IOV sysfs interface. 
- */ - if (pre_existing_vfs) { - num_vfs = pre_existing_vfs; - dev_warn(&adapter->pdev->dev, - "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n"); - } else { - int err; - /* - * The 82599 supports up to 64 VFs per physical function - * but this implementation limits allocation to 63 so that - * basic networking resources are still available to the - * physical function. If the user requests greater thn - * 63 VFs then it is an error - reset to default of zero. - */ - num_vfs = min_t(unsigned int, adapter->max_vfs, - IXGBE_MAX_VFS_DRV_LIMIT); - - err = pci_enable_sriov(adapter->pdev, num_vfs); - if (err) { - e_err(probe, "Failed to enable PCI sriov: %d\n", err); - return; - } - } - - if (!__ixgbe_enable_sriov(adapter, num_vfs)) { - ixgbe_get_vfs(adapter); - return; - } - - /* If we have gotten to this point then there is no memory available - * to manage the VF devices - print message and bail. - */ - e_err(probe, "Unable to allocate memory for VF Data Storage - SRIOV disabled\n"); - ixgbe_disable_sriov(adapter); -} - -#endif /* CONFIG_PCI_IOV */ -int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) -{ - unsigned int num_vfs = adapter->num_vfs, vf; - struct ixgbe_hw *hw = &adapter->hw; - u32 gpie; - u32 vmdctl; - - /* set num VFs to 0 to prevent access to vfinfo */ - adapter->num_vfs = 0; - - /* put the reference to all of the vf devices */ - for (vf = 0; vf < num_vfs; ++vf) { - struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; - - if (!vfdev) - continue; - adapter->vfinfo[vf].vfdev = NULL; - pci_dev_put(vfdev); - } - - /* free VF control structures */ - kfree(adapter->vfinfo); - adapter->vfinfo = NULL; - - /* free macvlan list */ - kfree(adapter->mv_list); - adapter->mv_list = NULL; - - /* if SR-IOV is already disabled then there is nothing to do */ - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - return 0; - - /* Turn off malicious driver detection */ - if ((hw->mac.ops.disable_mdd) && - 
(!(adapter->flags & IXGBE_FLAG_MDD_ENABLED))) - hw->mac.ops.disable_mdd(hw); - -#ifdef CONFIG_PCI_IOV - /* - * If our VFs are assigned we cannot shut down SR-IOV - * without causing issues, so just leave the hardware - * available but disabled - */ - if (pci_vfs_assigned(adapter->pdev)) { - e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); - return -EPERM; - } - /* disable iov and allow time for transactions to clear */ - pci_disable_sriov(adapter->pdev); -#endif - - /* turn off device IOV mode */ - IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0); - gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); - gpie &= ~IXGBE_GPIE_VTMODE_MASK; - IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); - - /* set default pool back to 0 */ - vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); - vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; - IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); - IXGBE_WRITE_FLUSH(hw); - - /* Disable VMDq flag so device will be set in VM mode */ - if (adapter->ring_feature[RING_F_VMDQ].limit == 1) - adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; - - adapter->ring_feature[RING_F_VMDQ].offset = 0; - - /* take a breather then clean up driver data */ - msleep(100); - - adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; - return 0; -} - -static int ixgbe_pci_sriov_enable(struct pci_dev __maybe_unused *dev, int __maybe_unused num_vfs) -{ -#ifdef CONFIG_PCI_IOV - struct ixgbe_adapter *adapter = pci_get_drvdata(dev); - int err = 0; - u8 num_tc; - int i; - int pre_existing_vfs = pci_num_vf(dev); - - if (!(adapter->flags & IXGBE_FLAG_SRIOV_CAPABLE)) { - e_dev_warn("SRIOV not supported on this device\n"); - return -EOPNOTSUPP; - } - - if (adapter->num_vfs == num_vfs) - return -EINVAL; - - if (pre_existing_vfs && pre_existing_vfs != num_vfs) - err = ixgbe_disable_sriov(adapter); - else if (pre_existing_vfs && pre_existing_vfs == num_vfs) - goto out; - - if (err) - goto err_out; - - /* While the SR-IOV capability structure reports total VFs to be - * 64 we limit the actual number that can be 
allocated as below - * so that some transmit/receive resources can be reserved to the - * PF. The PCI bus driver already checks for other values out of - * range. - * Num_TCs MAX_VFs - * 1 63 - * <=4 31 - * >4 15 - */ - num_tc = netdev_get_num_tc(adapter->netdev); - - if (num_tc > 4) { - if (num_vfs > IXGBE_MAX_VFS_8TC) { - e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC); - err = -EPERM; - goto err_out; - } - } else if ((num_tc > 1) && (num_tc <= 4)) { - if (num_vfs > IXGBE_MAX_VFS_4TC) { - e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC); - err = -EPERM; - goto err_out; - } - } else { - if (num_vfs > IXGBE_MAX_VFS_1TC) { - e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC); - err = -EPERM; - goto err_out; - } - } - - err = __ixgbe_enable_sriov(adapter, num_vfs); - if (err) - goto err_out; - - for (i = 0; i < adapter->num_vfs; i++) - ixgbe_vf_configuration(dev, (i | 0x10000000)); - - /* reset before enabling SRIOV to avoid mailbox issues */ - ixgbe_sriov_reinit(adapter); - - err = pci_enable_sriov(dev, num_vfs); - if (err) { - e_dev_warn("Failed to enable PCI sriov: %d\n", err); - goto err_out; - } - ixgbe_get_vfs(adapter); - -out: - return num_vfs; - -err_out: - return err; -#endif - return 0; -} - -static int ixgbe_pci_sriov_disable(struct pci_dev *dev) -{ - struct ixgbe_adapter *adapter = pci_get_drvdata(dev); - int err; -#ifdef CONFIG_PCI_IOV - u32 current_flags = adapter->flags; -#endif - - if (!adapter->num_vfs && !pci_num_vf(dev)) - return -EINVAL; - - err = ixgbe_disable_sriov(adapter); - - /* Only reinit if no error and state changed */ -#ifdef CONFIG_PCI_IOV - if (!err && current_flags != adapter->flags) - ixgbe_sriov_reinit(adapter); -#endif - - return err; -} - -int ixgbe_pci_sriov_configure(struct pci_dev *dev, 
int num_vfs) -{ - if (num_vfs == 0) - return ixgbe_pci_sriov_disable(dev); - else - return ixgbe_pci_sriov_enable(dev, num_vfs); -} - -static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, - u32 *msgbuf, u32 vf) -{ - int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) - >> IXGBE_VT_MSGINFO_SHIFT; - u16 *hash_list = (u16 *)&msgbuf[1]; - struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; - struct ixgbe_hw *hw = &adapter->hw; - int i; - u32 vector_bit; - u32 vector_reg; - u32 mta_reg; - u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); - - /* only so many hash values supported */ - entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); - - /* salt away the number of multi cast addresses assigned - * to this VF for later use to restore when the PF multi cast - * list changes - */ - vfinfo->num_vf_mc_hashes = entries; - - /* VFs are limited to using the MTA hash table for their multicast - * addresses */ - for (i = 0; i < entries; i++) - vfinfo->vf_mc_hashes[i] = hash_list[i]; - - for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { - vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; - vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; - mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); - mta_reg |= (1 << vector_bit); - IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); - } - vmolr |= IXGBE_VMOLR_ROMPE; - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); - - return 0; -} - -#ifdef CONFIG_PCI_IOV -void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct vf_data_storage *vfinfo; - int i, j; - u32 vector_bit; - u32 vector_reg; - u32 mta_reg; - - for (i = 0; i < adapter->num_vfs; i++) { - u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i)); - vfinfo = &adapter->vfinfo[i]; - for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { - hw->addr_ctrl.mta_in_use++; - vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; - vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; - mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); - mta_reg |= (1 << 
vector_bit); - IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); - } - if (vfinfo->num_vf_mc_hashes) - vmolr |= IXGBE_VMOLR_ROMPE; - else - vmolr &= ~IXGBE_VMOLR_ROMPE; - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr); - } - - /* Restore any VF macvlans */ - ixgbe_full_sync_mac_table(adapter); -} -#endif /* CONFIG_PCI_IOV */ - -int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - int err; - -#ifndef HAVE_VLAN_RX_REGISTER - /* If VLAN overlaps with one the PF is currently monitoring make - * sure that we are able to allocate a VLVF entry. This may be - * redundant but it guarantees PF will maintain visibility to - * the VLAN. - */ - if (add && test_bit(vid, adapter->active_vlans)) { - err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false); - if (err) - return err; - } -#endif - - err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false); -#ifndef HAVE_VLAN_RX_REGISTER - - if (add && !err) - return err; - - /* If we failed to add the VF VLAN or we are removing the VF VLAN - * we may need to drop the PF pool bit in order to allow us to free - * up the VLVF resources. - */ - if (test_bit(vid, adapter->active_vlans) || - (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) - ixgbe_update_pf_promisc_vlvf(adapter, vid); -#endif - - return err; -} -static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 max_frs; - - /* - * For 82599EB we have to keep all PFs and VFs operating with - * the same max_frame value in order to avoid sending an oversize - * frame to a VF. 
In order to guarantee this is handled correctly - * for all cases we have several special exceptions to take into - * account before we can enable the VF for receive - */ - if (adapter->hw.mac.type == ixgbe_mac_82599EB) { - - struct net_device *dev = adapter->netdev; - int pf_max_frame = dev->mtu + ETH_HLEN; - u32 reg_offset, vf_shift, vfre; - s32 err = 0; - -#if IS_ENABLED(CONFIG_FCOE) - if (dev->features & NETIF_F_FCOE_MTU) - pf_max_frame = max_t(int, pf_max_frame, - IXGBE_FCOE_JUMBO_FRAME_SIZE); -#endif /* CONFIG_FCOE */ - - switch (adapter->vfinfo[vf].vf_api) { - case ixgbe_mbox_api_11: - case ixgbe_mbox_api_12: - case ixgbe_mbox_api_13: - /* Version 1.1 supports jumbo frames on VFs if PF has - * jumbo frames enabled which means legacy VFs are - * disabled - */ - if (pf_max_frame > ETH_FRAME_LEN) - break; - /* fall through */ - default: - /* If the PF or VF are running w/ jumbo frames enabled - * we need to shut down the VF Rx path as we cannot - * support jumbo frames on legacy VFs - */ - if ((pf_max_frame > ETH_FRAME_LEN) || - (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) - err = -EINVAL; - break; - } - - /* determine VF receive enable location */ - vf_shift = vf % 32; - reg_offset = vf / 32; - - /* enable or disable receive depending on error */ - vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); - if (err) - vfre &= ~(1 << vf_shift); - else - vfre |= 1 << vf_shift; - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre); - - if (err) { - e_err(drv, "VF max_frame %d out of range\n", max_frame); - return err; - } - } - - /* pull current max frame size from hardware */ - max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); - max_frs &= IXGBE_MHADD_MFS_MASK; - max_frs >>= IXGBE_MHADD_MFS_SHIFT; - - if (max_frs < max_frame) { - max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT; - IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs); - } - - e_info(hw, "VF requests change max MTU to %d\n", max_frame); - - return 0; -} - -void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) -{ - u32 
vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); - vmolr |= IXGBE_VMOLR_BAM; - if (aupe) - vmolr |= IXGBE_VMOLR_AUPE; - else - vmolr &= ~IXGBE_VMOLR_AUPE; - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); -} - -static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, - u16 vid, u16 qos, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT; - - IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir); -} - -static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - - IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); -} - -static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vlvfb_mask, pool_mask, i; - - /* create mask for VF and other pools */ - pool_mask = (u32)~BIT(VMDQ_P(0) % 32); - vlvfb_mask = BIT(vf % 32); - - /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */ - for (i = IXGBE_VLVF_ENTRIES; i--;) { - u32 bits[2], vlvfb, vid, vfta, vlvf; - u32 word = i * 2 + vf / 32; - u32 mask; - - vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); - - /* if our bit isn't set we can skip it */ - if (!(vlvfb & vlvfb_mask)) - continue; - - /* clear our bit from vlvfb */ - vlvfb ^= vlvfb_mask; - - /* create 64b mask to check to see if we should clear VLVF */ - bits[word % 2] = vlvfb; - bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1)); - - /* if other pools are present, just remove ourselves */ - if (bits[(VMDQ_P(0) / 32) ^ 1] || - (bits[VMDQ_P(0) / 32] & pool_mask)) - goto update_vlvfb; - - /* if PF is present, leave VFTA */ - if (bits[0] || bits[1]) - goto update_vlvf; - - /* if we cannot determine VLAN just remove ourselves */ - vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); - if (!vlvf) - goto update_vlvfb; - - vid = vlvf & VLAN_VID_MASK; - mask = BIT(vid % 32); - - /* clear bit from VFTA */ - vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32)); - if (vfta & mask) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ 
mask); -update_vlvf: - /* clear POOL selection enable */ - IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0); - - if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) - vlvfb = 0; -update_vlvfb: - /* clear pool bits */ - IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb); - } -} - -static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, - int vf, int index, unsigned char *mac_addr) -{ - struct list_head *pos; - struct vf_macvlans *entry; - s32 retval = 0; - - if (index <= 1) { - list_for_each(pos, &adapter->vf_mvs.l) { - entry = list_entry(pos, struct vf_macvlans, l); - if (entry->vf == vf) { - entry->vf = -1; - entry->free = true; - entry->is_macvlan = false; - ixgbe_del_mac_filter(adapter, - entry->vf_macvlan, vf); - } - } - } - - /* - * If index was zero then we were asked to clear the uc list - * for the VF. We're done. - */ - if (!index) - return 0; - - entry = NULL; - - list_for_each(pos, &adapter->vf_mvs.l) { - entry = list_entry(pos, struct vf_macvlans, l); - if (entry->free) - break; - } - - /* - * If we traversed the entire list and didn't find a free entry - * then we're out of space on the RAR table. Also entry may - * be NULL because the original memory allocation for the list - * failed, which is not fatal but does mean we can't support - * VF requests for MACVLAN because we couldn't allocate - * memory for the list management required. 
- */ - if (!entry || !entry->free) - return -ENOSPC; - - retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); - if (retval >= 0) { - entry->free = false; - entry->is_macvlan = true; - entry->vf = vf; - memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); - } - - return retval; -} - -static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; - u8 num_tcs = netdev_get_num_tc(adapter->netdev); - - /* remove VLAN filters belonging to this VF */ - ixgbe_clear_vf_vlans(adapter, vf); - - /* add back PF assigned VLAN or VLAN 0 */ - ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); - - /* reset offloads to defaults */ - ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); - - /* set outgoing tags for VFs */ - if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { - ixgbe_clear_vmvir(adapter, vf); - } else { - if (vfinfo->pf_qos || !num_tcs) - ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, - vfinfo->pf_qos, vf); - else - ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, - adapter->default_up, vf); - } - - /* reset multicast table array for vf */ - adapter->vfinfo[vf].num_vf_mc_hashes = 0; - - /* Flush and reset the mta with the new values */ - ixgbe_set_rx_mode(adapter->netdev); - - ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); - ixgbe_set_vf_macvlan(adapter, vf, 0, NULL); - - /* reset VF api back to unknown */ - adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; -} - -int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, - int vf, unsigned char *mac_addr) -{ - s32 retval = 0; - - ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); - retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); - if (retval >= 0) - memcpy(adapter->vfinfo[vf].vf_mac_addresses, - mac_addr, ETH_ALEN); - else - memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN); - - return retval; -} - -#ifdef CONFIG_PCI_IOV -int ixgbe_vf_configuration(struct pci_dev *pdev, 
unsigned int event_mask) -{ - unsigned char vf_mac_addr[6]; - struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - unsigned int vfn = (event_mask & 0x3f); - bool enable = ((event_mask & 0x10000000U) != 0); - - if (enable) { - memset(vf_mac_addr, 0, ETH_ALEN); - memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); - } - - return 0; -} -#endif /* CONFIG_PCI_IOV */ - -static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf, - u32 qde) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; - u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); - u32 reg; - int i; - - for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) { - /* flush previous write */ - IXGBE_WRITE_FLUSH(hw); - - /* drop enable should always be set in SRIOV mode*/ - reg = IXGBE_QDE_WRITE | qde; - reg |= i << IXGBE_QDE_IDX_SHIFT; - IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); - } - -} - -static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; - unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; - u32 reg, reg_offset, vf_shift; - u32 msgbuf[4] = {0, 0, 0, 0}; - u8 *addr = (u8 *)(&msgbuf[1]); - u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); - int i; - - e_info(probe, "VF Reset msg received from vf %d\n", vf); - - /* reset the filters for the device */ - ixgbe_vf_reset_event(adapter, vf); - - /* set vf mac address */ - if (!is_zero_ether_addr(vf_mac)) - ixgbe_set_vf_mac(adapter, vf, vf_mac); - - vf_shift = vf % 32; - reg_offset = vf / 32; - - /* enable transmit for vf */ - reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); - reg |= 1 << vf_shift; - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); - - /* force drop enable for all VF Rx queues */ - ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); - - /* enable receive for vf */ - reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); - reg |= 1 << 
vf_shift; - /* - * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. - * For more info take a look at ixgbe_set_vf_lpe - */ - if (adapter->hw.mac.type == ixgbe_mac_82599EB) { - struct net_device *dev = adapter->netdev; - int pf_max_frame = dev->mtu + ETH_HLEN; - -#if IS_ENABLED(CONFIG_FCOE) - if (dev->features & NETIF_F_FCOE_MTU) - pf_max_frame = max_t(int, pf_max_frame, - IXGBE_FCOE_JUMBO_FRAME_SIZE); -#endif /* CONFIG_FCOE */ - - if (pf_max_frame > ETH_FRAME_LEN) - reg &= ~(1 << vf_shift); - } - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); - - /* enable VF mailbox for further messages */ - adapter->vfinfo[vf].clear_to_send = true; - - reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); - reg |= (1 << vf_shift); - IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); - - /* - * Reset the VFs TDWBAL and TDWBAH registers - * which are not cleared by an FLR - */ - for (i = 0; i < q_per_pool; i++) { - IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0); - IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0); - } - - /* reply to reset with ack and vf mac address */ - msgbuf[0] = IXGBE_VF_RESET; - if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) { - msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; - memcpy(addr, vf_mac, ETH_ALEN); - } else { - msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; - dev_warn(pci_dev_to_dev(adapter->pdev), - "VF %d has no MAC address assigned, you may have to assign one manually\n", - vf); - } - - /* - * Piggyback the multicast filter type so VF can compute the - * correct vectors - */ - msgbuf[3] = hw->mac.mc_filter_type; - ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); - - return 0; -} - -static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, - u32 *msgbuf, u32 vf) -{ - u8 *new_mac = ((u8 *)(&msgbuf[1])); - - if (!is_valid_ether_addr(new_mac)) { - e_warn(drv, "VF %d attempted to set invalid mac\n", vf); - return -1; - } - - if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && - 
memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, - ETH_ALEN)) { - u8 *pm = adapter->vfinfo[vf].vf_mac_addresses; - e_warn(drv, - "VF %d attempted to set a new MAC address but it already has an administratively set MAC address %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", - vf, pm[0], pm[1], pm[2], pm[3], pm[4], pm[5]); - e_warn(drv, "Check the VF driver and if it is not using the correct MAC address you may need to reload the VF driver\n"); - return -1; - } - return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; -} - -static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, - u32 *msgbuf, u32 vf) -{ - u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; - u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); - u8 tcs = netdev_get_num_tc(adapter->netdev); - int err = 0; - - if (adapter->vfinfo[vf].pf_vlan || tcs) { - e_warn(drv, - "VF %d attempted to override administratively set VLAN configuration\n" - "Reload the VF driver to resume operations\n", - vf); - return -1; - } - - /* VLAN 0 is a special case, don't allow it to be removed */ - if (!vid && !add) - return 0; - - err = ixgbe_set_vf_vlan(adapter, add, vid, vf); - - if (err) - return err; - -#ifdef HAVE_VLAN_RX_REGISTER - /* in case of promiscuous mode any VLAN filter set for a VF must - * also have the PF pool added to it. - */ - if (add && adapter->netdev->flags & IFF_PROMISC) { - err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); - if (err) - return err; - } - -#ifdef CONFIG_PCI_IOV - /* Go through all the checks to see if the VLAN filter should - * be wiped completely. - */ - if (!add && adapter->netdev->flags & IFF_PROMISC) { - struct ixgbe_hw *hw = &adapter->hw; - u32 bits, vlvf; - s32 reg_ndx; - - reg_ndx = ixgbe_find_vlvf_entry(hw, vid); - if (reg_ndx < 0) - goto out; - vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx)); - /* See if any other pools are set for this VLAN filter - * entry other than the PF. 
- */ - if (VMDQ_P(0) < 32) { - bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); - bits &= ~(1 << VMDQ_P(0)); - bits |= IXGBE_READ_REG(hw, - IXGBE_VLVFB(reg_ndx * 2) + 1); - } else { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB(reg_ndx * 2) + 1); - bits &= ~(1 << (VMDQ_P(0) - 32)); - bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); - } - - /* If the filter was removed then ensure PF pool bit - * is cleared if the PF only added itself to the pool - * because the PF is in promiscuous mode. - */ - if ((vlvf & VLAN_VID_MASK) == vid && !bits) - err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); - } - -out: -#endif /* CONFIG_PCI_IOV */ -#else /* HAVE_VLAN_RX_REGISTER */ - return 0; -#endif /* HAVE_VLAN_RX_REGISTER */ - return err; -} - -static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, - u32 *msgbuf, u32 vf) -{ - u8 *new_mac = ((u8 *)(&msgbuf[1])); - int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> - IXGBE_VT_MSGINFO_SHIFT; - int err; - - if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && - index > 0) { - e_warn(drv, - "VF %d requested MACVLAN filter but is administratively denied\n", - vf); - return -1; - } - - /* An non-zero index indicates the VF is setting a filter */ - if (index) { - if (!is_valid_ether_addr(new_mac)) { - e_warn(drv, "VF %d attempted to set invalid mac\n", vf); - return -1; - } - - /* - * If the VF is allowed to set MAC filters then turn off - * anti-spoofing to avoid false positives. 
- */ - if (adapter->vfinfo[vf].spoofchk_enabled) { - struct ixgbe_hw *hw = &adapter->hw; - - hw->mac.ops.set_mac_anti_spoofing(hw, false, vf); - hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); - } - } - - err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac); - if (err == -ENOSPC) - e_warn(drv, - "VF %d has requested a MACVLAN filter but there is no space for it\n", - vf); - - return err < 0; -} - -static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, - u32 *msgbuf, u32 vf) -{ - int api = msgbuf[1]; - - switch (api) { - case ixgbe_mbox_api_10: - case ixgbe_mbox_api_11: - case ixgbe_mbox_api_12: - case ixgbe_mbox_api_13: - adapter->vfinfo[vf].vf_api = api; - return 0; - default: - break; - } - - e_info(drv, "VF %d requested invalid api version %u\n", vf, api); - - return -1; -} - -static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, - u32 *msgbuf, u32 vf) -{ - struct net_device *dev = adapter->netdev; - struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; - unsigned int default_tc = 0; - u8 num_tcs = netdev_get_num_tc(dev); - - /* verify the PF is supporting the correct APIs */ - switch (adapter->vfinfo[vf].vf_api) { - case ixgbe_mbox_api_20: - case ixgbe_mbox_api_11: - case ixgbe_mbox_api_12: - case ixgbe_mbox_api_13: - break; - default: - return -1; - } - - /* only allow 1 Tx queue for bandwidth limiting */ - msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); - msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); - - /* if TCs > 1 determine which TC belongs to default user priority */ - if (num_tcs > 1) - default_tc = netdev_get_prio_tc_map(dev, adapter->default_up); - - /* notify VF of need for VLAN tag stripping, and correct queue */ - if (num_tcs) - msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs; - else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos) - msgbuf[IXGBE_VF_TRANS_VLAN] = 1; - else - msgbuf[IXGBE_VF_TRANS_VLAN] = 0; - - /* notify VF of default queue */ - msgbuf[IXGBE_VF_DEF_QUEUE] = 
default_tc; - - return 0; -} - -#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN -static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) -{ - u32 i, j; - u32 *out_buf = &msgbuf[1]; - const u8 *reta = adapter->rss_indir_tbl; - u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter); - - /* Check if operation is permitted */ - if (!adapter->vfinfo[vf].rss_query_enabled) - return -EPERM; - - /* verify the PF is supporting the correct API */ - switch (adapter->vfinfo[vf].vf_api) { - case ixgbe_mbox_api_12: - case ixgbe_mbox_api_13: - break; - default: - return -EOPNOTSUPP; - } - - /* This mailbox command is supported (required) only for 82599 and x540 - * VFs which support up to 4 RSS queues. Therefore we will compress the - * RETA by saving only 2 bits from each entry. This way we will be able - * to transfer the whole RETA in a single mailbox operation. - */ - for (i = 0; i < reta_size / 16; i++) { - out_buf[i] = 0; - for (j = 0; j < 16; j++) - out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j); - } - - return 0; -} - -static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, - u32 *msgbuf, u32 vf) -{ - u32 *rss_key = &msgbuf[1]; - - /* Check if the operation is permitted */ - if (!adapter->vfinfo[vf].rss_query_enabled) - return -EPERM; - - /* verify the PF is supporting the correct API */ - switch (adapter->vfinfo[vf].vf_api) { - case ixgbe_mbox_api_12: - case ixgbe_mbox_api_13: - break; - default: - return -EOPNOTSUPP; - } - - memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE); - - return 0; -} -#endif /* HAVE_NDO_SET_VF_RSS_QUERY_EN */ - -static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, - u32 *msgbuf, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - int xcast_mode = msgbuf[1]; - u32 vmolr, fctrl, disable, enable; - - /* verify the PF is supporting the correct APIs */ - switch (adapter->vfinfo[vf].vf_api) { - case ixgbe_mbox_api_12: - /* promisc introduced in 1.3 version */ - if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) 
- return -EOPNOTSUPP; - /* Fall threw */ - case ixgbe_mbox_api_13: - break; - default: - return -EOPNOTSUPP; - } - - if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI && - !adapter->vfinfo[vf].trusted) { - xcast_mode = IXGBEVF_XCAST_MODE_MULTI; - } - - if (adapter->vfinfo[vf].xcast_mode == xcast_mode) - goto out; - - switch (xcast_mode) { - case IXGBEVF_XCAST_MODE_NONE: - disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | - IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; - enable = 0; - break; - case IXGBEVF_XCAST_MODE_MULTI: - disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; - enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE; - break; - case IXGBEVF_XCAST_MODE_ALLMULTI: - disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; - enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; - break; - case IXGBEVF_XCAST_MODE_PROMISC: - if (hw->mac.type <= ixgbe_mac_82599EB) - return -EOPNOTSUPP; - - fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - if (!(fctrl & IXGBE_FCTRL_UPE)) { - /* VF promisc requires PF in promisc */ - e_warn(drv, - "Enabling VF promisc requires PF in promisc\n"); - return -EPERM; - } - - disable = 0; - enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | - IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; - break; - default: - return -EOPNOTSUPP; - } - - vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); - vmolr &= ~disable; - vmolr |= enable; - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); - - adapter->vfinfo[vf].xcast_mode = xcast_mode; - -out: - msgbuf[1] = xcast_mode; - - return 0; -} - -static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) -{ - u32 mbx_size = IXGBE_VFMAILBOX_SIZE; - u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; - struct ixgbe_hw *hw = &adapter->hw; - s32 retval; - - retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); - - if (retval) { - pr_err("Error receiving message from VF\n"); - return retval; - } - - /* this is a message we already processed, do nothing */ - if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) - return 
retval; - - /* flush the ack before we write any messages back */ - IXGBE_WRITE_FLUSH(hw); - - if (msgbuf[0] == IXGBE_VF_RESET) - return ixgbe_vf_reset_msg(adapter, vf); - - /* - * until the vf completes a virtual function reset it should not be - * allowed to start any configuration. - */ - - if (!adapter->vfinfo[vf].clear_to_send) { - msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; - ixgbe_write_mbx(hw, msgbuf, 1, vf); - return retval; - } - - switch ((msgbuf[0] & 0xFFFF)) { - case IXGBE_VF_SET_MAC_ADDR: - retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf); - break; - case IXGBE_VF_SET_MULTICAST: - retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf); - break; - case IXGBE_VF_SET_VLAN: - retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf); - break; - case IXGBE_VF_SET_LPE: - if (msgbuf[1] > IXGBE_MAX_JUMBO_FRAME_SIZE) { - e_err(drv, "VF max_frame %d out of range\n", msgbuf[1]); - return -EINVAL; - } - retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf); - break; - case IXGBE_VF_SET_MACVLAN: - retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf); - break; - case IXGBE_VF_API_NEGOTIATE: - retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf); - break; - case IXGBE_VF_GET_QUEUES: - retval = ixgbe_get_vf_queues(adapter, msgbuf, vf); - break; -#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN - case IXGBE_VF_GET_RETA: - retval = ixgbe_get_vf_reta(adapter, msgbuf, vf); - break; - case IXGBE_VF_GET_RSS_KEY: - retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); - break; -#endif /* HAVE_NDO_SET_VF_RSS_QUERY_EN */ - case IXGBE_VF_UPDATE_XCAST_MODE: - retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); - break; - default: - e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); - retval = IXGBE_ERR_MBX; - break; - } - - /* notify the VF of the results of what it sent us */ - if (retval) - msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; - else - msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; - - msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; - - ixgbe_write_mbx(hw, msgbuf, mbx_size, vf); - - return retval; -} - -static void 
ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 msg = IXGBE_VT_MSGTYPE_NACK; - - /* if device isn't clear to send it shouldn't be reading either */ - if (!adapter->vfinfo[vf].clear_to_send) - ixgbe_write_mbx(hw, &msg, 1, vf); -} - -#define Q_BITMAP_DEPTH 2 -static void ixgbe_check_mdd_event(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vf_bitmap[Q_BITMAP_DEPTH] = { 0 }; - u32 j, i; - u32 ping; - - if (!hw->mac.ops.mdd_event) - return; - - /* Did we have a malicious event */ - hw->mac.ops.mdd_event(hw, vf_bitmap); - - /* Log any blocked queues and release lock */ - for (i = 0; i < Q_BITMAP_DEPTH; i++) { - for (j = 0; j < 32 && vf_bitmap[i]; j++) { - u32 vf; - - if (!(vf_bitmap[i] & (1 << j))) - continue; - - /* The VF that malicious event occurred on */ - vf = j + (i * 32); - - dev_warn(pci_dev_to_dev(adapter->pdev), - "Malicious event on VF %d tx:%x rx:%x\n", vf, - IXGBE_READ_REG(hw, IXGBE_LVMMC_TX), - IXGBE_READ_REG(hw, IXGBE_LVMMC_RX)); - - /* restart the vf */ - if (hw->mac.ops.restore_mdd_vf) { - hw->mac.ops.restore_mdd_vf(hw, vf); - - /* get the VF to rebuild its queues */ - adapter->vfinfo[vf].clear_to_send = 0; - ping = IXGBE_PF_CONTROL_MSG | - IXGBE_VT_MSGTYPE_CTS; - ixgbe_write_mbx(hw, &ping, 1, vf); - } - } - } -} - -void ixgbe_msg_task(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vf; - - if (adapter->flags & IXGBE_FLAG_MDD_ENABLED && adapter->vfinfo) - ixgbe_check_mdd_event(adapter); - - for (vf = 0; vf < adapter->num_vfs; vf++) { - /* process any reset requests */ - if (!ixgbe_check_for_rst(hw, vf)) - ixgbe_vf_reset_event(adapter, vf); - - /* process any messages pending */ - if (!ixgbe_check_for_msg(hw, vf)) - ixgbe_rcv_msg_from_vf(adapter, vf); - - /* process any acks */ - if (!ixgbe_check_for_ack(hw, vf)) - ixgbe_rcv_ack_from_vf(adapter, vf); - } -} - -void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw 
*hw = &adapter->hw; - - /* disable transmit and receive for all vfs */ - IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0); - - IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); -} - -#ifdef HAVE_NDO_SET_VF_TRUST -static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 ping; - - ping = IXGBE_PF_CONTROL_MSG; - if (adapter->vfinfo[vf].clear_to_send) - ping |= IXGBE_VT_MSGTYPE_CTS; - ixgbe_write_mbx(hw, &ping, 1, vf); -} - -#endif -void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 ping; - int i; - - for (i = 0 ; i < adapter->num_vfs; i++) { - ping = IXGBE_PF_CONTROL_MSG; - if (adapter->vfinfo[i].clear_to_send) - ping |= IXGBE_VT_MSGTYPE_CTS; - ixgbe_write_mbx(hw, &ping, 1, i); - } -} - -#ifdef HAVE_NDO_SET_VF_TRUST -int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - if (vf >= adapter->num_vfs) - return -EINVAL; - - /* nothing to do */ - if (adapter->vfinfo[vf].trusted == setting) - return 0; - - adapter->vfinfo[vf].trusted = setting; - - /* reset VF to reconfigure features */ - adapter->vfinfo[vf].clear_to_send = false; - ixgbe_ping_vf(adapter, vf); - - e_info(drv, "VF %u is %strusted\n", vf, setting ? 
"" : "not "); - - return 0; -} - -#endif -#ifdef IFLA_VF_MAX -int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - s32 retval = 0; - - if (vf >= adapter->num_vfs) - return -EINVAL; - - if (is_valid_ether_addr(mac)) { - dev_info(pci_dev_to_dev(adapter->pdev), "setting MAC %pM on VF %d\n", - mac, vf); - dev_info(pci_dev_to_dev(adapter->pdev), "Reload the VF driver to make this change effective.\n"); - - retval = ixgbe_set_vf_mac(adapter, vf, mac); - if (retval >= 0) { - /* pf_set_mac is used in ESX5.1 and base driver but not in ESX5.5 */ - adapter->vfinfo[vf].pf_set_mac = true; - - if (test_bit(__IXGBE_DOWN, &adapter->state)) { - dev_warn(pci_dev_to_dev(adapter->pdev), "The VF MAC address has been set, but the PF device is not up.\n"); - dev_warn(pci_dev_to_dev(adapter->pdev), "Bring the PF device up before attempting to use the VF device.\n"); - } - } else { - dev_warn(pci_dev_to_dev(adapter->pdev), "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n"); - } - } else if (is_zero_ether_addr(mac)) { - unsigned char *vf_mac_addr = - adapter->vfinfo[vf].vf_mac_addresses; - - /* nothing to do */ - if (is_zero_ether_addr(vf_mac_addr)) - return 0; - - dev_info(pci_dev_to_dev(adapter->pdev), "removing MAC on VF %d\n", - vf); - - retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf); - if (retval >= 0) { - adapter->vfinfo[vf].pf_set_mac = false; - memcpy(vf_mac_addr, mac, ETH_ALEN); - } else { - dev_warn(pci_dev_to_dev(adapter->pdev), "Could NOT remove the VF MAC address.\n"); - } - } else { - retval = -EINVAL; - } - return retval; -} - -static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, - int vf, u16 vlan, u8 qos) -{ - struct ixgbe_hw *hw = &adapter->hw; - int err; - - err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); - if (err) - goto out; - - /* Revoke tagless access via VLAN 0 */ - ixgbe_set_vf_vlan(adapter, false, 0, vf); - - ixgbe_set_vmvir(adapter, vlan, 
qos, vf); - ixgbe_set_vmolr(hw, vf, false); - - /* enable hide vlan on X550 */ - if (hw->mac.type >= ixgbe_mac_X550) - ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE | - IXGBE_QDE_HIDE_VLAN); - adapter->vfinfo[vf].pf_vlan = vlan; - adapter->vfinfo[vf].pf_qos = qos; - dev_info(pci_dev_to_dev(adapter->pdev), - "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); - if (test_bit(__IXGBE_DOWN, &adapter->state)) { - dev_warn(pci_dev_to_dev(adapter->pdev), "The VF VLAN has been set, but the PF device is not up.\n"); - dev_warn(pci_dev_to_dev(adapter->pdev), "Bring the PF device up before attempting to use the VF device.\n"); - } - -out: - return err; -} - -static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - int err; - - err = ixgbe_set_vf_vlan(adapter, false, - adapter->vfinfo[vf].pf_vlan, vf); - /* Restore tagless access via VLAN 0 */ - ixgbe_set_vf_vlan(adapter, true, 0, vf); - ixgbe_clear_vmvir(adapter, vf); - ixgbe_set_vmolr(hw, vf, true); - - /* disable hide VLAN on X550 */ - if (hw->mac.type >= ixgbe_mac_X550) - ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); - adapter->vfinfo[vf].pf_vlan = 0; - adapter->vfinfo[vf].pf_qos = 0; - - return err; -} - -#ifdef IFLA_VF_MAX -#ifdef IFLA_VF_VLAN_INFO_MAX -int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, - u8 qos, __be16 vlan_proto) -#else -int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) -#endif -{ - int err = 0; - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - /* VLAN IDs accepted range 0-4094 */ - if ((vf >= adapter->num_vfs) || (vlan > VLAN_VID_MASK-1) || (qos > 7)) - return -EINVAL; -#ifdef IFLA_VF_VLAN_INFO_MAX - if (vlan_proto != htons(ETH_P_8021Q)) - return -EPROTONOSUPPORT; -#endif - if (vlan || qos) { - /* - * Check if there is already a port VLAN set, if so - * we have to delete the old one first before we - * can set the new one. 
The usage model had - * previously assumed the user would delete the - * old port VLAN before setting a new one but this - * is not necessarily the case. - */ - if (adapter->vfinfo[vf].pf_vlan) - err = ixgbe_disable_port_vlan(adapter, vf); - if (err) - goto out; - err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos); - - } else { - err = ixgbe_disable_port_vlan(adapter, vf); - } -out: - return err; -} -#endif /* IFLA_VF_MAX */ - -static int ixgbe_link_mbps(struct ixgbe_adapter *adapter) -{ - switch (adapter->link_speed) { - case IXGBE_LINK_SPEED_100_FULL: - return 100; - case IXGBE_LINK_SPEED_1GB_FULL: - return 1000; - case IXGBE_LINK_SPEED_10GB_FULL: - return 10000; - default: - return 0; - } -} - -static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf) -{ - struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; - struct ixgbe_hw *hw = &adapter->hw; - u32 bcnrc_val = 0; - u16 queue, queues_per_pool; - u16 tx_rate = adapter->vfinfo[vf].tx_rate; - - if (tx_rate) { - /* start with base link speed value */ - bcnrc_val = adapter->vf_rate_link_speed; - - /* Calculate the rate factor values to set */ - bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; - bcnrc_val /= tx_rate; - - /* clear everything but the rate factor */ - bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | - IXGBE_RTTBCNRC_RF_DEC_MASK; - - /* enable the rate scheduler */ - bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; - } - - /* - * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM - * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported - * and 0x004 otherwise. 
- */ - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4); - break; - case ixgbe_mac_X540: - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14); - break; - default: - break; - } - - /* determine how many queues per pool based on VMDq mask */ - queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); - - /* write value for all Tx queues belonging to VF */ - for (queue = 0; queue < queues_per_pool; queue++) { - unsigned int reg_idx = (vf * queues_per_pool) + queue; - - IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx); - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); - } -} - -void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) -{ - int i; - - /* VF Tx rate limit was not set */ - if (!adapter->vf_rate_link_speed) - return; - - if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) { - adapter->vf_rate_link_speed = 0; - dev_info(pci_dev_to_dev(adapter->pdev), - "Link speed has been changed. VF Transmit rate is disabled\n"); - } - - for (i = 0; i < adapter->num_vfs; i++) { - if (!adapter->vf_rate_link_speed) - adapter->vfinfo[i].tx_rate = 0; - - ixgbe_set_vf_rate_limit(adapter, i); - } -} - -#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE -int ixgbe_ndo_set_vf_bw(struct net_device *netdev, - int vf, - int __always_unused min_tx_rate, - int max_tx_rate) -#else -int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate) -#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int link_speed; - - /* verify VF is active */ - if (vf >= adapter->num_vfs) - return -EINVAL; - - /* verify link is up */ - if (!adapter->link_up) - return -EINVAL; - - /* verify we are linked at 10Gbps */ - link_speed = ixgbe_link_mbps(adapter); - if (link_speed != 10000) - return -EINVAL; - - /* rate limit cannot be less than 10Mbs or greater than link speed */ - if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed))) - return -EINVAL; - - /* store values */ - adapter->vf_rate_link_speed = 
link_speed; - adapter->vfinfo[vf].tx_rate = max_tx_rate; - - /* update hardware configuration */ - ixgbe_set_vf_rate_limit(adapter, vf); - - return 0; -} - -int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - - if (vf >= adapter->num_vfs) - return -EINVAL; - - adapter->vfinfo[vf].spoofchk_enabled = setting; - - /* configure MAC spoofing */ - hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf); - - /* configure VLAN spoofing */ - hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf); - - /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be - * calling set_ethertype_anti_spoofing for each VF in loop below - */ - if (hw->mac.ops.set_ethertype_anti_spoofing) { - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), - (IXGBE_ETQF_FILTER_EN | - IXGBE_ETQF_TX_ANTISPOOF | - IXGBE_ETH_P_LLDP)); - - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), - (IXGBE_ETQF_FILTER_EN | - IXGBE_ETQF_TX_ANTISPOOF | - ETH_P_PAUSE)); - - hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf); - } - return 0; -} - -#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN -int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, - bool setting) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - /* This operation is currently supported only for 82599 and x540 - * devices. 
- */ - if (adapter->hw.mac.type < ixgbe_mac_82599EB || - adapter->hw.mac.type >= ixgbe_mac_X550) - return -EOPNOTSUPP; - - if (vf >= adapter->num_vfs) - return -EINVAL; - - adapter->vfinfo[vf].rss_query_enabled = setting; - - return 0; -} - -#endif -int ixgbe_ndo_get_vf_config(struct net_device *netdev, - int vf, struct ifla_vf_info *ivi) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - if (vf >= adapter->num_vfs) - return -EINVAL; - ivi->vf = vf; - memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); - -#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE - ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate; - ivi->min_tx_rate = 0; -#else - ivi->tx_rate = adapter->vfinfo[vf].tx_rate; -#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ - - ivi->vlan = adapter->vfinfo[vf].pf_vlan; - ivi->qos = adapter->vfinfo[vf].pf_qos; -#ifdef HAVE_VF_SPOOFCHK_CONFIGURE - ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; -#endif -#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN - ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; -#endif -#ifdef HAVE_NDO_SET_VF_TRUST - ivi->trusted = adapter->vfinfo[vf].trusted; -#endif - return 0; -} -#endif /* IFLA_VF_MAX */ - diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.h deleted file mode 100644 index 5d080ba60b02..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sriov.h +++ /dev/null @@ -1,92 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. 
- - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - - -#ifndef _IXGBE_SRIOV_H_ -#define _IXGBE_SRIOV_H_ - -/* ixgbe driver limit the max number of VFs could be enabled to - * 63 (IXGBE_MAX_VF_FUNCTIONS - 1) - */ -#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1) -#define IXGBE_MAX_VFS_1TC IXGBE_MAX_VFS_DRV_LIMIT -#define IXGBE_MAX_VFS_4TC 31 -#define IXGBE_MAX_VFS_8TC 15 - -void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); -int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf); -void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe); -void ixgbe_msg_task(struct ixgbe_adapter *adapter); -int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, - int vf, unsigned char *mac_addr); -void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); -void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); -#ifdef IFLA_VF_MAX -int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); -#ifdef IFLA_VF_VLAN_INFO_MAX -int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, - u8 qos, __be16 vlan_proto); -#else -int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, - u8 qos); -#endif -#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE -int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, - int max_tx_rate); -#else -int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); -#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ -#ifdef 
HAVE_NDO_SET_VF_RSS_QUERY_EN -int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, - bool setting); -#endif -#ifdef HAVE_NDO_SET_VF_TRUST -int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); -#endif -int ixgbe_ndo_get_vf_config(struct net_device *netdev, - int vf, struct ifla_vf_info *ivi); -#endif /* IFLA_VF_MAX */ -int ixgbe_disable_sriov(struct ixgbe_adapter *adapter); -#ifdef CONFIG_PCI_IOV -int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); -void ixgbe_enable_sriov(struct ixgbe_adapter *adapter); -int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); -#endif -int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs); -#ifdef IFLA_VF_MAX -void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); -#endif /* IFLA_VF_MAX */ -void ixgbe_dump_registers(struct ixgbe_adapter *adapter); - -/* - * These are defined in ixgbe_type.h on behalf of the VF driver - * but we need them here unwrapped for the PF driver. - */ -#define IXGBE_DEV_ID_82599_VF 0x10ED -#define IXGBE_DEV_ID_X540_VF 0x1515 -#define IXGBE_DEV_ID_X550_VF 0x1565 -#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 -#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 -#endif /* _IXGBE_SRIOV_H_ */ - diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sysfs.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sysfs.c deleted file mode 100644 index 5d30be5e891f..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_sysfs.c +++ /dev/null @@ -1,257 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. 
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe.h" -#include "ixgbe_common.h" -#include "ixgbe_type.h" - -#ifdef IXGBE_SYSFS - -#include -#include -#include -#include -#include -#include -#include -#ifdef IXGBE_HWMON -#include -#endif - -#ifdef IXGBE_HWMON -/* hwmon callback functions */ -static ssize_t ixgbe_hwmon_show_location(struct device __always_unused *dev, - struct device_attribute *attr, - char *buf) -{ - struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, - dev_attr); - return sprintf(buf, "loc%u\n", - ixgbe_attr->sensor->location); -} - -static ssize_t ixgbe_hwmon_show_temp(struct device __always_unused *dev, - struct device_attribute *attr, - char *buf) -{ - struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, - dev_attr); - unsigned int value; - - /* reset the temp field */ - ixgbe_attr->hw->mac.ops.get_thermal_sensor_data(ixgbe_attr->hw); - - value = ixgbe_attr->sensor->temp; - - /* display millidegree */ - value *= 1000; - - return sprintf(buf, "%u\n", value); -} - -static ssize_t ixgbe_hwmon_show_cautionthresh(struct device __always_unused *dev, - struct device_attribute *attr, - char *buf) -{ - struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, - dev_attr); - unsigned 
int value = ixgbe_attr->sensor->caution_thresh; - - /* display millidegree */ - value *= 1000; - - return sprintf(buf, "%u\n", value); -} - -static ssize_t ixgbe_hwmon_show_maxopthresh(struct device __always_unused *dev, - struct device_attribute *attr, - char *buf) -{ - struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, - dev_attr); - unsigned int value = ixgbe_attr->sensor->max_op_thresh; - - /* display millidegree */ - value *= 1000; - - return sprintf(buf, "%u\n", value); -} - -/** - * ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. - * @adapter: pointer to the adapter structure - * @offset: offset in the eeprom sensor data table - * @type: type of sensor data to display - * - * For each file we want in hwmon's sysfs interface we need a device_attribute - * This is included in our hwmon_attr struct that contains the references to - * the data structures we need to get the data to display. - */ -static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter, - unsigned int offset, int type) { - int rc; - unsigned int n_attr; - struct hwmon_attr *ixgbe_attr; - - n_attr = adapter->ixgbe_hwmon_buff.n_hwmon; - ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr]; - - switch (type) { - case IXGBE_HWMON_TYPE_LOC: - ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location; - snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), - "temp%u_label", offset); - break; - case IXGBE_HWMON_TYPE_TEMP: - ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp; - snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), - "temp%u_input", offset); - break; - case IXGBE_HWMON_TYPE_CAUTION: - ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh; - snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), - "temp%u_max", offset); - break; - case IXGBE_HWMON_TYPE_MAX: - ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh; - snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), - "temp%u_crit", offset); - break; - default: - rc = -EPERM; - return 
rc; - } - - /* These always the same regardless of type */ - ixgbe_attr->sensor = - &adapter->hw.mac.thermal_sensor_data.sensor[offset]; - ixgbe_attr->hw = &adapter->hw; - ixgbe_attr->dev_attr.store = NULL; - ixgbe_attr->dev_attr.attr.mode = S_IRUGO; - ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name; - - rc = device_create_file(pci_dev_to_dev(adapter->pdev), - &ixgbe_attr->dev_attr); - - if (rc == 0) - ++adapter->ixgbe_hwmon_buff.n_hwmon; - - return rc; -} -#endif /* IXGBE_HWMON */ - -static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter __maybe_unused *adapter) -{ -#ifdef IXGBE_HWMON - int i; - - if (adapter == NULL) - return; - - for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) { - device_remove_file(pci_dev_to_dev(adapter->pdev), - &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr); - } - - kfree(adapter->ixgbe_hwmon_buff.hwmon_list); - - if (adapter->ixgbe_hwmon_buff.device) - hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device); -#endif /* IXGBE_HWMON */ -} - -/* called from ixgbe_main.c */ -void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter) -{ - ixgbe_sysfs_del_adapter(adapter); -} - -/* called from ixgbe_main.c */ -int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) -{ - int rc = 0; -#ifdef IXGBE_HWMON - struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff; - unsigned int i; - int n_attrs; - -#endif /* IXGBE_HWMON */ - if (adapter == NULL) - goto err; - -#ifdef IXGBE_HWMON - /* If this method isn't defined we don't support thermals */ - if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) { - goto no_thermal; - } - - /* Don't create thermal hwmon interface if no sensors present */ - if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)) - goto no_thermal; - - /* - * Allocation space for max attributs - * max num sensors * values (loc, temp, max, caution) - */ - n_attrs = IXGBE_MAX_SENSORS * 4; - ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), - GFP_KERNEL); - if (!ixgbe_hwmon->hwmon_list) { - 
rc = -ENOMEM; - goto err; - } - - ixgbe_hwmon->device = hwmon_device_register(pci_dev_to_dev(adapter->pdev)); - if (IS_ERR(ixgbe_hwmon->device)) { - rc = PTR_ERR(ixgbe_hwmon->device); - goto err; - } - - for (i = 0; i < IXGBE_MAX_SENSORS; i++) { - /* - * Only create hwmon sysfs entries for sensors that have - * meaningful data for. - */ - if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0) - continue; - - /* Bail if any hwmon attr struct fails to initialize */ - rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION); - rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC); - rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP); - rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX); - if (rc) - goto err; - } - -no_thermal: -#endif /* IXGBE_HWMON */ - goto exit; - -err: - ixgbe_sysfs_del_adapter(adapter); -exit: - return rc; -} -#endif /* IXGBE_SYSFS */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_type.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_type.h deleted file mode 100644 index b5a5365d28c5..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_type.h +++ /dev/null @@ -1,4337 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
- - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_TYPE_H_ -#define _IXGBE_TYPE_H_ - -/* - * The following is a brief description of the error categories used by the - * ERROR_REPORT* macros. - * - * - IXGBE_ERROR_INVALID_STATE - * This category is for errors which represent a serious failure state that is - * unexpected, and could be potentially harmful to device operation. It should - * not be used for errors relating to issues that can be worked around or - * ignored. - * - * - IXGBE_ERROR_POLLING - * This category is for errors related to polling/timeout issues and should be - * used in any case where the timeout occured, or a failure to obtain a lock, or - * failure to receive data within the time limit. - * - * - IXGBE_ERROR_CAUTION - * This category should be used for reporting issues that may be the cause of - * other errors, such as temperature warnings. It should indicate an event which - * could be serious, but hasn't necessarily caused problems yet. - * - * - IXGBE_ERROR_SOFTWARE - * This category is intended for errors due to software state preventing - * something. The category is not intended for errors due to bad arguments, or - * due to unsupported features. It should be used when a state occurs which - * prevents action but is not a serious issue. - * - * - IXGBE_ERROR_ARGUMENT - * This category is for when a bad or invalid argument is passed. It should be - * used whenever a function is called and error checking has detected the - * argument is wrong or incorrect. - * - * - IXGBE_ERROR_UNSUPPORTED - * This category is for errors which are due to unsupported circumstances or - * configuration issues. 
It should not be used when the issue is due to an - * invalid argument, but for when something has occurred that is unsupported - * (Ex: Flow control autonegotiation or an unsupported SFP+ module.) - */ - -#include "ixgbe_osdep.h" - -/* Override this by setting IOMEM in your ixgbe_osdep.h header */ -#ifndef IOMEM -#define IOMEM -#endif - -/* Vendor ID */ -#define IXGBE_INTEL_VENDOR_ID 0x8086 - -/* Device IDs */ -#define IXGBE_DEV_ID_82598 0x10B6 -#define IXGBE_DEV_ID_82598_BX 0x1508 -#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 -#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 -#define IXGBE_DEV_ID_82598AT 0x10C8 -#define IXGBE_DEV_ID_82598AT2 0x150B -#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB -#define IXGBE_DEV_ID_82598EB_CX4 0x10DD -#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC -#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 -#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 -#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 -#define IXGBE_DEV_ID_82599_KX4 0x10F7 -#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 -#define IXGBE_DEV_ID_82599_KR 0x1517 -#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 -#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C -#define IXGBE_DEV_ID_82599_CX4 0x10F9 -#define IXGBE_DEV_ID_82599_SFP 0x10FB -#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 -#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071 -#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 -#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 -#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 -#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B -#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159 -#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D -#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008 -#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976 -#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE -#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A -#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 -#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 -#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D -#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A -#define 
IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 -#define IXGBE_DEV_ID_82599EN_SFP 0x1557 -#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001 -#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC -#define IXGBE_DEV_ID_82599_T3_LOM 0x151C -#define IXGBE_DEV_ID_82599_LS 0x154F -#define IXGBE_DEV_ID_X540T 0x1528 -#define IXGBE_DEV_ID_X540T1 0x1560 -#define IXGBE_DEV_ID_X550T 0x1563 -#define IXGBE_DEV_ID_X550T1 0x15D1 -/* Placeholder value, pending official value. */ -#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 -#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 -#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 -#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6 -#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 -#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 -#define IXGBE_DEV_ID_X550EM_A_QSFP 0x15CA -#define IXGBE_DEV_ID_X550EM_A_QSFP_N 0x15CC -#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE -#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 -#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 -#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA -#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB -#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC -#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD -#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE -#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0 - -#define IXGBE_CAT(r,m) IXGBE_##r##m - -#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, _IDX)]) - -/* General Registers */ -#define IXGBE_CTRL 0x00000 -#define IXGBE_STATUS 0x00008 -#define IXGBE_CTRL_EXT 0x00018 -#define IXGBE_ESDP 0x00020 -#define IXGBE_EODSDP 0x00028 -#define IXGBE_I2CCTL_82599 0x00028 -#define IXGBE_I2CCTL IXGBE_I2CCTL_82599 -#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_82599 -#define IXGBE_I2CCTL_X550 0x15F5C -#define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550 -#define IXGBE_I2CCTL_X550EM_a IXGBE_I2CCTL_X550 -#define IXGBE_I2CCTL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2CCTL) -#define IXGBE_PHY_GPIO 0x00028 -#define IXGBE_MAC_GPIO 0x00030 -#define IXGBE_PHYINT_STATUS0 0x00100 -#define IXGBE_PHYINT_STATUS1 0x00104 -#define IXGBE_PHYINT_STATUS2 0x00108 -#define IXGBE_LEDCTL 0x00200 -#define 
IXGBE_FRTIMER 0x00048 -#define IXGBE_TCPTIMER 0x0004C -#define IXGBE_CORESPARE 0x00600 -#define IXGBE_EXVET 0x05078 - -/* NVM Registers */ -#define IXGBE_EEC 0x10010 -#define IXGBE_EEC_X540 IXGBE_EEC -#define IXGBE_EEC_X550 IXGBE_EEC -#define IXGBE_EEC_X550EM_x IXGBE_EEC -#define IXGBE_EEC_X550EM_a 0x15FF8 -#define IXGBE_EEC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EEC) - -#define IXGBE_EERD 0x10014 -#define IXGBE_EEWR 0x10018 - -#define IXGBE_FLA 0x1001C -#define IXGBE_FLA_X540 IXGBE_FLA -#define IXGBE_FLA_X550 IXGBE_FLA -#define IXGBE_FLA_X550EM_x IXGBE_FLA -#define IXGBE_FLA_X550EM_a 0x15F68 -#define IXGBE_FLA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FLA) - -#define IXGBE_EEMNGCTL 0x10110 -#define IXGBE_EEMNGDATA 0x10114 -#define IXGBE_FLMNGCTL 0x10118 -#define IXGBE_FLMNGDATA 0x1011C -#define IXGBE_FLMNGCNT 0x10120 -#define IXGBE_FLOP 0x1013C - -#define IXGBE_GRC 0x10200 -#define IXGBE_GRC_X540 IXGBE_GRC -#define IXGBE_GRC_X550 IXGBE_GRC -#define IXGBE_GRC_X550EM_x IXGBE_GRC -#define IXGBE_GRC_X550EM_a 0x15F64 -#define IXGBE_GRC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), GRC) - -#define IXGBE_SRAMREL 0x10210 -#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL -#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL -#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL -#define IXGBE_SRAMREL_X550EM_a 0x15F6C -#define IXGBE_SRAMREL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SRAMREL) - -#define IXGBE_PHYDBG 0x10218 - -/* General Receive Control */ -#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ -#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ - -#define IXGBE_VPDDIAG0 0x10204 -#define IXGBE_VPDDIAG1 0x10208 - -/* I2CCTL Bit Masks */ -#define IXGBE_I2C_CLK_IN 0x00000001 -#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN -#define IXGBE_I2C_CLK_IN_X550 0x00004000 -#define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550 -#define IXGBE_I2C_CLK_IN_X550EM_a IXGBE_I2C_CLK_IN_X550 -#define IXGBE_I2C_CLK_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN) - -#define IXGBE_I2C_CLK_OUT 0x00000002 -#define IXGBE_I2C_CLK_OUT_X540 
IXGBE_I2C_CLK_OUT -#define IXGBE_I2C_CLK_OUT_X550 0x00000200 -#define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550 -#define IXGBE_I2C_CLK_OUT_X550EM_a IXGBE_I2C_CLK_OUT_X550 -#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT) - -#define IXGBE_I2C_DATA_IN 0x00000004 -#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN -#define IXGBE_I2C_DATA_IN_X550 0x00001000 -#define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550 -#define IXGBE_I2C_DATA_IN_X550EM_a IXGBE_I2C_DATA_IN_X550 -#define IXGBE_I2C_DATA_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN) - -#define IXGBE_I2C_DATA_OUT 0x00000008 -#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT -#define IXGBE_I2C_DATA_OUT_X550 0x00000400 -#define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550 -#define IXGBE_I2C_DATA_OUT_X550EM_a IXGBE_I2C_DATA_OUT_X550 -#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT) - -#define IXGBE_I2C_DATA_OE_N_EN 0 -#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN -#define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800 -#define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550 -#define IXGBE_I2C_DATA_OE_N_EN_X550EM_a IXGBE_I2C_DATA_OE_N_EN_X550 -#define IXGBE_I2C_DATA_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN) - -#define IXGBE_I2C_BB_EN 0 -#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN -#define IXGBE_I2C_BB_EN_X550 0x00000100 -#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550 -#define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550 - -#define IXGBE_I2C_BB_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN) - -#define IXGBE_I2C_CLK_OE_N_EN 0 -#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN -#define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000 -#define IXGBE_I2C_CLK_OE_N_EN_X550EM_x IXGBE_I2C_CLK_OE_N_EN_X550 -#define IXGBE_I2C_CLK_OE_N_EN_X550EM_a IXGBE_I2C_CLK_OE_N_EN_X550 -#define IXGBE_I2C_CLK_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN) -#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500 - -#define 
IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 -#define IXGBE_EMC_INTERNAL_DATA 0x00 -#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 -#define IXGBE_EMC_DIODE1_DATA 0x01 -#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19 -#define IXGBE_EMC_DIODE2_DATA 0x23 -#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A - -#define IXGBE_MAX_SENSORS 3 - -struct ixgbe_thermal_diode_data { - u8 location; - u8 temp; - u8 caution_thresh; - u8 max_op_thresh; -}; - -struct ixgbe_thermal_sensor_data { - struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS]; -}; - -#define NVM_OROM_OFFSET 0x17 -#define NVM_OROM_BLK_LOW 0x83 -#define NVM_OROM_BLK_HI 0x84 -#define NVM_OROM_PATCH_MASK 0xFF -#define NVM_OROM_SHIFT 8 - -#define NVM_VER_MASK 0x00FF /* version mask */ -#define NVM_VER_SHIFT 8 /* version bit shift */ -#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */ -#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */ -#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */ -#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */ -#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */ -#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */ -#define NVM_ETK_OFF_LOW 0x2D /* version low order word */ -#define NVM_ETK_OFF_HI 0x2E /* version high order word */ -#define NVM_ETK_SHIFT 16 /* high version word shift */ -#define NVM_VER_INVALID 0xFFFF -#define NVM_ETK_VALID 0x8000 -#define NVM_INVALID_PTR 0xFFFF -#define NVM_VER_SIZE 32 /* version sting size */ - -struct ixgbe_nvm_version { - u32 etk_id; - u8 nvm_major; - u16 nvm_minor; - u8 nvm_id; - - bool oem_valid; - u8 oem_major; - u8 oem_minor; - u16 oem_release; - - bool or_valid; - u8 or_major; - u16 or_build; - u8 or_patch; - -}; - -/* Interrupt Registers */ -#define IXGBE_EICR 0x00800 -#define IXGBE_EICS 0x00808 -#define IXGBE_EIMS 0x00880 -#define IXGBE_EIMC 0x00888 -#define IXGBE_EIAC 0x00810 -#define IXGBE_EIAM 0x00890 -#define IXGBE_EICS_EX(_i) 
(0x00A90 + (_i) * 4) -#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4) -#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4) -#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4) -/* 82599 EITR is only 12 bits, with the lower 3 always zero */ -/* - * 82598 EITR is 16 bits but set the limits based on the max - * supported by all ixgbe hardware - */ -#define IXGBE_MAX_INT_RATE 488281 -#define IXGBE_MIN_INT_RATE 956 -#define IXGBE_MAX_EITR 0x00000FF8 -#define IXGBE_MIN_EITR 8 -#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \ - (0x012300 + (((_i) - 24) * 4))) -#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 -#define IXGBE_EITR_LLI_MOD 0x00008000 -#define IXGBE_EITR_CNT_WDIS 0x80000000 -#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ -#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */ -#define IXGBE_EITRSEL 0x00894 -#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ -#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ -#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4))) -#define IXGBE_GPIE 0x00898 - -/* Flow Control Registers */ -#define IXGBE_FCADBUL 0x03210 -#define IXGBE_FCADBUH 0x03214 -#define IXGBE_FCAMACL 0x04328 -#define IXGBE_FCAMACH 0x0432C -#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_PFCTOP 0x03008 -#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ -#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ -#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ -#define IXGBE_FCRTV 0x032A0 -#define IXGBE_FCCFG 0x03D00 -#define IXGBE_TFCS 0x0CE00 - -/* Receive DMA Registers */ -#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ - (0x0D000 + (((_i) - 64) * 0x40))) -#define IXGBE_RDBAH(_i) (((_i) < 64) ? 
(0x01004 + ((_i) * 0x40)) : \ - (0x0D004 + (((_i) - 64) * 0x40))) -#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ - (0x0D008 + (((_i) - 64) * 0x40))) -#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ - (0x0D010 + (((_i) - 64) * 0x40))) -#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ - (0x0D018 + (((_i) - 64) * 0x40))) -#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ - (0x0D028 + (((_i) - 64) * 0x40))) -#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ - (0x0D02C + (((_i) - 64) * 0x40))) -#define IXGBE_RSCDBU 0x03028 -#define IXGBE_RDDCC 0x02F20 -#define IXGBE_RXMEMWRAP 0x03190 -#define IXGBE_STARCTRL 0x03024 -/* - * Split and Replication Receive Control Registers - * 00-15 : 0x02100 + n*4 - * 16-64 : 0x01014 + n*0x40 - * 64-127: 0x0D014 + (n-64)*0x40 - */ -#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ - (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ - (0x0D014 + (((_i) - 64) * 0x40)))) -/* - * Rx DCA Control Register: - * 00-15 : 0x02200 + n*4 - * 16-64 : 0x0100C + n*0x40 - * 64-127: 0x0D00C + (n-64)*0x40 - */ -#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ - (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ - (0x0D00C + (((_i) - 64) * 0x40)))) -#define IXGBE_RDRXCTL 0x02F00 -/* 8 of these 0x03C00 - 0x03C1C */ -#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) -#define IXGBE_RXCTRL 0x03000 -#define IXGBE_DROPEN 0x03D04 -#define IXGBE_RXPBSIZE_SHIFT 10 -#define IXGBE_RXPBSIZE_MASK 0x000FFC00 - -/* Receive Registers */ -#define IXGBE_RXCSUM 0x05000 -#define IXGBE_RFCTL 0x05008 -#define IXGBE_DRECCCTL 0x02F08 -#define IXGBE_DRECCCTL_DISABLE 0 -#define IXGBE_DRECCCTL2 0x02F8C - -/* Multicast Table Array - 128 entries */ -#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) -#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ - (0x0A200 + ((_i) * 8))) -#define IXGBE_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ - (0x0A204 + ((_i) * 8))) -#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) -#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) -/* Packet split receive type */ -#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \ - (0x0EA00 + ((_i) * 4))) -/* array of 4096 1-bit vlan filters */ -#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) -/*array of 4096 4-bit vlan vmdq indices */ -#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) -#define IXGBE_FCTRL 0x05080 -#define IXGBE_VLNCTRL 0x05088 -#define IXGBE_MCSTCTRL 0x05090 -#define IXGBE_MRQC 0x05818 -#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */ -#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */ -#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */ -#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */ -#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */ -#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */ -#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */ -#define IXGBE_RQTC 0x0EC70 -#define IXGBE_MTQC 0x08120 -#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ -#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ -#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ -#define IXGBE_PFFLPL 0x050B0 -#define IXGBE_PFFLPH 0x050B4 -#define IXGBE_VT_CTL 0x051B0 -#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ -/* 64 Mailboxes, 16 DW each */ -#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) -#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */ -#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */ -#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) -#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) -#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) -#define IXGBE_QDE 0x2F04 -#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 
total */ -#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ -#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) -#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) -#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) -#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) -#define IXGBE_LVMMC_RX 0x2FA8 -#define IXGBE_LVMMC_TX 0x8108 -#define IXGBE_LMVM_RX 0x2FA4 -#define IXGBE_LMVM_TX 0x8124 -#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */ -#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */ -#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ -#define IXGBE_RXFECCERR0 0x051B8 -#define IXGBE_LLITHRESH 0x0EC90 -#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_IMIRVP 0x05AC0 -#define IXGBE_VMD_CTL 0x0581C -#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ -#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */ -#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ - -/* Registers for setting up RSS on X550 with SRIOV - * _p - pool number (0..63) - * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA) - */ -#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4)) -#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40)) -#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40)) - -/* Flow Director registers */ -#define IXGBE_FDIRCTRL 0x0EE00 -#define IXGBE_FDIRHKEY 0x0EE68 -#define IXGBE_FDIRSKEY 0x0EE6C -#define IXGBE_FDIRDIP4M 0x0EE3C -#define IXGBE_FDIRSIP4M 0x0EE40 -#define IXGBE_FDIRTCPM 0x0EE44 -#define IXGBE_FDIRUDPM 0x0EE48 -#define IXGBE_FDIRSCTPM 0x0EE78 -#define IXGBE_FDIRIP6M 0x0EE74 -#define IXGBE_FDIRM 0x0EE70 - -/* Flow Director Stats registers */ -#define IXGBE_FDIRFREE 0x0EE38 -#define IXGBE_FDIRLEN 0x0EE4C -#define IXGBE_FDIRUSTAT 0x0EE50 -#define IXGBE_FDIRFSTAT 0x0EE54 -#define IXGBE_FDIRMATCH 0x0EE58 -#define IXGBE_FDIRMISS 
0x0EE5C - -/* Flow Director Programming registers */ -#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */ -#define IXGBE_FDIRIPSA 0x0EE18 -#define IXGBE_FDIRIPDA 0x0EE1C -#define IXGBE_FDIRPORT 0x0EE20 -#define IXGBE_FDIRVLAN 0x0EE24 -#define IXGBE_FDIRHASH 0x0EE28 -#define IXGBE_FDIRCMD 0x0EE2C - -/* Transmit DMA registers */ -#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of them (0-31)*/ -#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) -#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) -#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) -#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40)) -#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) -#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) -#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) -#define IXGBE_DTXCTL 0x07E00 - -#define IXGBE_DMATXCTL 0x04A80 -#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */ -#define IXGBE_PFDTXGSWC 0x08220 -#define IXGBE_DTXMXSZRQ 0x08100 -#define IXGBE_DTXTCPFLGL 0x04A88 -#define IXGBE_DTXTCPFLGH 0x04A8C -#define IXGBE_LBDRPEN 0x0CA00 -#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */ - -#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ -#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ -#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ -#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */ -#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */ -#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ - -#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ - -/* Anti-spoofing defines */ -#define IXGBE_SPOOF_MACAS_MASK 0xFF -#define IXGBE_SPOOF_VLANAS_MASK 0xFF00 -#define IXGBE_SPOOF_VLANAS_SHIFT 8 -#define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000 -#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16 -#define IXGBE_PFVFSPOOF_REG_COUNT 8 -/* 16 of these (0-15) */ -#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) -/* Tx DCA Control register : 128 of these (0-127) */ -#define IXGBE_DCA_TXCTRL_82599(_i) 
(0x0600C + ((_i) * 0x40)) -#define IXGBE_TIPG 0x0CB00 -#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ -#define IXGBE_MNGTXMAP 0x0CD10 -#define IXGBE_TIPG_FIBER_DEFAULT 3 -#define IXGBE_TXPBSIZE_SHIFT 10 - -/* Wake up registers */ -#define IXGBE_WUC 0x05800 -#define IXGBE_WUFC 0x05808 -#define IXGBE_WUS 0x05810 -#define IXGBE_IPAV 0x05838 -#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ -#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ - -#define IXGBE_WUPL 0x05900 -#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ -#define IXGBE_PROXYS 0x05F60 /* Proxying Status Register */ -#define IXGBE_PROXYFC 0x05F64 /* Proxying Filter Control Register */ -#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */ - -/* masks for accessing VXLAN and GENEVE UDP ports */ -#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */ -#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */ -#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */ - -#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16 - -#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */ -/* Ext Flexible Host Filter Table */ -#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) -#define IXGBE_FHFT_EXT_X550(_n) (0x09600 + ((_n) * 0x100)) - -/* Four Flexible Filters are supported */ -#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 - -/* Six Flexible Filters are supported */ -#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6 6 -/* Eight Flexible Filters are supported */ -#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_8 8 -#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 - -/* Each Flexible Filter is at most 128 (0x80) bytes in length */ -#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128 -#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ -#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ - -/* Definitions for power management and wakeup registers */ -/* Wake Up Control */ -#define 
IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ -#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ -#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */ - -/* Wake Up Filter Control */ -#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ -#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ -#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ -#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ -#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ -#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ -#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ -#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ -#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */ - -#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ -#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ -#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ -#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ -#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ -#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ -#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ -#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ -#define IXGBE_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flex filters */ -#define IXGBE_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flex filters */ -#define IXGBE_WUFC_FW_RST_WK 0x80000000 /* Ena wake on FW reset assertion */ -/* Mask for Ext. 
flex filters */ -#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 -#define IXGBE_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */ -#define IXGBE_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */ -#define IXGBE_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask all 8 flex filters */ -#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ - -/* Wake Up Status */ -#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC -#define IXGBE_WUS_MAG IXGBE_WUFC_MAG -#define IXGBE_WUS_EX IXGBE_WUFC_EX -#define IXGBE_WUS_MC IXGBE_WUFC_MC -#define IXGBE_WUS_BC IXGBE_WUFC_BC -#define IXGBE_WUS_ARP IXGBE_WUFC_ARP -#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4 -#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6 -#define IXGBE_WUS_MNG IXGBE_WUFC_MNG -#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0 -#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1 -#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2 -#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3 -#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4 -#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5 -#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS -#define IXGBE_WUS_FW_RST_WK IXGBE_WUFC_FW_RST_WK -/* Proxy Status */ -#define IXGBE_PROXYS_EX 0x00000004 /* Exact packet received */ -#define IXGBE_PROXYS_ARP_DIR 0x00000020 /* ARP w/filter match received */ -#define IXGBE_PROXYS_NS 0x00000200 /* IPV6 NS received */ -#define IXGBE_PROXYS_NS_DIR 0x00000400 /* IPV6 NS w/DA match received */ -#define IXGBE_PROXYS_ARP 0x00000800 /* ARP request packet received */ -#define IXGBE_PROXYS_MLD 0x00001000 /* IPv6 MLD packet received */ - -/* Proxying Filter Control */ -#define IXGBE_PROXYFC_ENABLE 0x00000001 /* Port Proxying Enable */ -#define IXGBE_PROXYFC_EX 0x00000004 /* Directed Exact Proxy Enable */ -#define IXGBE_PROXYFC_ARP_DIR 0x00000020 /* Directed ARP Proxy Enable */ -#define IXGBE_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ -#define IXGBE_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Enable */ -#define IXGBE_PROXYFC_MLD 0x00000800 /* IPv6 MLD Proxy Enable */ -#define IXGBE_PROXYFC_NO_TCO 0x00008000 /* 
Ignore TCO packets */ - -#define IXGBE_WUPL_LENGTH_MASK 0xFFFF - -/* DCB registers */ -#define IXGBE_DCB_MAX_TRAFFIC_CLASS 8 -#define IXGBE_RMCS 0x03D00 -#define IXGBE_DPMCS 0x07F40 -#define IXGBE_PDPMCS 0x0CD00 -#define IXGBE_RUPPBMR 0x050A0 -#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ -#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ -#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ - -/* Power Management */ -/* DMA Coalescing configuration */ -struct ixgbe_dmac_config { - u16 watchdog_timer; /* usec units */ - bool fcoe_en; - u32 link_speed; - u8 fcoe_tc; - u8 num_tcs; -}; - -/* - * DMA Coalescing threshold Rx PB TC[n] value in Kilobyte by link speed. - * DMACRXT = 10Gbps = 10,000 bits / usec = 1250 bytes / usec 70 * 1250 == - * 87500 bytes [85KB] - */ -#define IXGBE_DMACRXT_10G 0x55 -#define IXGBE_DMACRXT_1G 0x09 -#define IXGBE_DMACRXT_100M 0x01 - -/* DMA Coalescing registers */ -#define IXGBE_DMCMNGTH 0x15F20 /* Management Threshold */ -#define IXGBE_DMACR 0x02400 /* Control register */ -#define IXGBE_DMCTH(_i) (0x03300 + ((_i) * 4)) /* 8 of these */ -#define IXGBE_DMCTLX 0x02404 /* Time to Lx request */ -/* DMA Coalescing register fields */ -#define IXGBE_DMCMNGTH_DMCMNGTH_MASK 0x000FFFF0 /* Mng Threshold mask */ -#define IXGBE_DMCMNGTH_DMCMNGTH_SHIFT 4 /* Management Threshold shift */ -#define IXGBE_DMACR_DMACWT_MASK 0x0000FFFF /* Watchdog Timer mask */ -#define IXGBE_DMACR_HIGH_PRI_TC_MASK 0x00FF0000 -#define IXGBE_DMACR_HIGH_PRI_TC_SHIFT 16 -#define IXGBE_DMACR_EN_MNG_IND 0x10000000 /* Enable Mng Indications */ -#define IXGBE_DMACR_LX_COAL_IND 0x40000000 /* Lx Coalescing indicate */ -#define IXGBE_DMACR_DMAC_EN 0x80000000 /* DMA Coalescing Enable */ -#define 
IXGBE_DMCTH_DMACRXT_MASK 0x000001FF /* Receive Threshold mask */ -#define IXGBE_DMCTLX_TTLX_MASK 0x00000FFF /* Time to Lx request mask */ - -/* EEE registers */ -#define IXGBE_EEER 0x043A0 /* EEE register */ -#define IXGBE_EEE_STAT 0x04398 /* EEE Status */ -#define IXGBE_EEE_SU 0x04380 /* EEE Set up */ -#define IXGBE_EEE_SU_TEEE_DLY_SHIFT 26 -#define IXGBE_TLPIC 0x041F4 /* EEE Tx LPI count */ -#define IXGBE_RLPIC 0x041F8 /* EEE Rx LPI count */ - -/* EEE register fields */ -#define IXGBE_EEER_TX_LPI_EN 0x00010000 /* Enable EEE LPI TX path */ -#define IXGBE_EEER_RX_LPI_EN 0x00020000 /* Enable EEE LPI RX path */ -#define IXGBE_EEE_STAT_NEG 0x20000000 /* EEE support neg on link */ -#define IXGBE_EEE_RX_LPI_STATUS 0x40000000 /* RX Link in LPI status */ -#define IXGBE_EEE_TX_LPI_STATUS 0x80000000 /* TX Link in LPI status */ - -/* Security Control Registers */ -#define IXGBE_SECTXCTRL 0x08800 -#define IXGBE_SECTXSTAT 0x08804 -#define IXGBE_SECTXBUFFAF 0x08808 -#define IXGBE_SECTXMINIFG 0x08810 -#define IXGBE_SECRXCTRL 0x08D00 -#define IXGBE_SECRXSTAT 0x08D04 - -/* Security Bit Fields and Masks */ -#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001 -#define IXGBE_SECTXCTRL_TX_DIS 0x00000002 -#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 - -#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 -#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002 - -#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 -#define IXGBE_SECRXCTRL_RX_DIS 0x00000002 - -#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 -#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002 - -/* LinkSec (MacSec) Registers */ -#define IXGBE_LSECTXCAP 0x08A00 -#define IXGBE_LSECRXCAP 0x08F00 -#define IXGBE_LSECTXCTRL 0x08A04 -#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */ -#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */ -#define IXGBE_LSECTXSA 0x08A10 -#define IXGBE_LSECTXPN0 0x08A14 -#define IXGBE_LSECTXPN1 0x08A18 -#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */ -#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these 
(0-3) */ -#define IXGBE_LSECRXCTRL 0x08F04 -#define IXGBE_LSECRXSCL 0x08F08 -#define IXGBE_LSECRXSCH 0x08F0C -#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */ -#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */ -#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m)))) -#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */ -#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */ -#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */ -#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */ -#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */ -#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */ -#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */ -#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */ -#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */ -#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */ -#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */ -#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */ -#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */ -#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */ -#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */ -#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */ -#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */ -#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */ -#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */ - -/* LinkSec (MacSec) Bit Fields and Masks */ -#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000 -#define IXGBE_LSECTXCAP_SUM_SHIFT 16 -#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000 -#define IXGBE_LSECRXCAP_SUM_SHIFT 16 - -#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003 -#define IXGBE_LSECTXCTRL_DISABLE 0x0 -#define IXGBE_LSECTXCTRL_AUTH 0x1 -#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2 -#define IXGBE_LSECTXCTRL_AISCI 0x00000020 -#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 -#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8 - -#define 
IXGBE_LSECRXCTRL_EN_MASK 0x0000000C -#define IXGBE_LSECRXCTRL_EN_SHIFT 2 -#define IXGBE_LSECRXCTRL_DISABLE 0x0 -#define IXGBE_LSECRXCTRL_CHECK 0x1 -#define IXGBE_LSECRXCTRL_STRICT 0x2 -#define IXGBE_LSECRXCTRL_DROP 0x3 -#define IXGBE_LSECRXCTRL_PLSH 0x00000040 -#define IXGBE_LSECRXCTRL_RP 0x00000080 -#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33 - -/* IpSec Registers */ -#define IXGBE_IPSTXIDX 0x08900 -#define IXGBE_IPSTXSALT 0x08904 -#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */ -#define IXGBE_IPSRXIDX 0x08E00 -#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */ -#define IXGBE_IPSRXSPI 0x08E14 -#define IXGBE_IPSRXIPIDX 0x08E18 -#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */ -#define IXGBE_IPSRXSALT 0x08E2C -#define IXGBE_IPSRXMOD 0x08E30 - -#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 - -/* DCB registers */ -#define IXGBE_RTRPCS 0x02430 -#define IXGBE_RTTDCS 0x04900 -#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ -#define IXGBE_RTTPCS 0x0CD00 -#define IXGBE_RTRUP2TC 0x03020 -#define IXGBE_RTTUP2TC 0x0C800 -#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */ -#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RTTDQSEL 0x04904 -#define IXGBE_RTTDT1C 0x04908 -#define IXGBE_RTTDT1S 0x0490C -#define IXGBE_RTTDTECC 0x04990 -#define IXGBE_RTTDTECC_NO_BCN 0x00000100 - -#define IXGBE_RTTBCNRC 0x04984 -#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 -#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF -#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14 -#define IXGBE_RTTBCNRC_RF_INT_MASK \ - 
(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) -#define IXGBE_RTTBCNRM 0x04980 - -/* FCoE DMA Context Registers */ -/* FCoE Direct DMA Context */ -#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10)) -#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ -#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */ -#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ -#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ -#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */ -#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */ -#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */ -#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ -#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ -#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 -#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 -#define IXGBE_FCBUFF_OFFSET_SHIFT 16 -#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */ -#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */ -#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ -#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ -#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 -/* FCoE SOF/EOF */ -#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */ -#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ -#define IXGBE_REOFF 0x05158 /* Rx FC EOF */ -#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ -/* FCoE Filter Context Registers */ -#define IXGBE_FCD_ID 0x05114 /* FCoE D_ID */ -#define IXGBE_FCSMAC 0x0510C /* FCoE Source MAC */ -#define IXGBE_FCFLTRW_SMAC_HIGH_SHIFT 16 -/* FCoE Direct Filter Context */ -#define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10)) -#define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4)) -#define IXGBE_FCFLT 0x05108 /* FC FLT Context */ -#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ -#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ -#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */ -#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */ 
-#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ -#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ -#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */ -#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */ -#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */ -/* FCoE Receive Control */ -#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ -#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */ -#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */ -#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */ -#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */ -#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */ -#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */ -#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */ -#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */ -#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ -#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 -/* FCoE Redirection */ -#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */ -#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */ -#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */ -#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ -#define IXGBE_FCRETASEL_ENA 0x2 /* FCoE FCRETASEL bit */ -#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ -#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ -#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */ -/* Higher 7 bits for the queue index */ -#define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000 -#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16 - -/* Stats registers */ -#define IXGBE_CRCERRS 0x04000 -#define IXGBE_ILLERRC 0x04004 -#define IXGBE_ERRBC 0x04008 -#define IXGBE_MSPDC 0x04010 -#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ -#define IXGBE_MLFC 0x04034 -#define IXGBE_MRFC 0x04038 -#define IXGBE_RLEC 0x04040 -#define IXGBE_LXONTXC 
0x03F60 -#define IXGBE_LXONRXC 0x0CF60 -#define IXGBE_LXOFFTXC 0x03F68 -#define IXGBE_LXOFFRXC 0x0CF68 -#define IXGBE_LXONRXCNT 0x041A4 -#define IXGBE_LXOFFRXCNT 0x041A8 -#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */ -#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */ -#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */ -#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ -#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ -#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ -#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ -#define IXGBE_PRC64 0x0405C -#define IXGBE_PRC127 0x04060 -#define IXGBE_PRC255 0x04064 -#define IXGBE_PRC511 0x04068 -#define IXGBE_PRC1023 0x0406C -#define IXGBE_PRC1522 0x04070 -#define IXGBE_GPRC 0x04074 -#define IXGBE_BPRC 0x04078 -#define IXGBE_MPRC 0x0407C -#define IXGBE_GPTC 0x04080 -#define IXGBE_GORCL 0x04088 -#define IXGBE_GORCH 0x0408C -#define IXGBE_GOTCL 0x04090 -#define IXGBE_GOTCH 0x04094 -#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ -#define IXGBE_RUC 0x040A4 -#define IXGBE_RFC 0x040A8 -#define IXGBE_ROC 0x040AC -#define IXGBE_RJC 0x040B0 -#define IXGBE_MNGPRC 0x040B4 -#define IXGBE_MNGPDC 0x040B8 -#define IXGBE_MNGPTC 0x0CF90 -#define IXGBE_TORL 0x040C0 -#define IXGBE_TORH 0x040C4 -#define IXGBE_TPR 0x040D0 -#define IXGBE_TPT 0x040D4 -#define IXGBE_PTC64 0x040D8 -#define IXGBE_PTC127 0x040DC -#define IXGBE_PTC255 0x040E0 -#define IXGBE_PTC511 0x040E4 -#define IXGBE_PTC1023 0x040E8 -#define IXGBE_PTC1522 0x040EC -#define IXGBE_MPTC 0x040F0 -#define IXGBE_BPTC 0x040F4 -#define IXGBE_XEC 0x04120 -#define IXGBE_SSVPC 0x08780 - -#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) -#define IXGBE_TQSMR(_i) (((_i) <= 7) ? 
(0x07300 + ((_i) * 4)) : \ - (0x08600 + ((_i) * 4))) -#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) - -#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ -#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ -#define IXGBE_FCCRC 0x05118 /* Num of Good Eth CRC w/ Bad FC CRC */ -#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */ -#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */ -#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */ -#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ -#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ -#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ -#define IXGBE_FCCRC_CNT_MASK 0x0000FFFF /* CRC_CNT: bit 0 - 15 */ -#define IXGBE_FCLAST_CNT_MASK 0x0000FFFF /* Last_CNT: bit 0 - 15 */ -#define IXGBE_O2BGPTC 0x041C4 -#define IXGBE_O2BSPC 0x087B0 -#define IXGBE_B2OSPC 0x041C0 -#define IXGBE_B2OGPRC 0x02F90 -#define IXGBE_BUPRC 0x04180 -#define IXGBE_BMPRC 0x04184 -#define IXGBE_BBPRC 0x04188 -#define IXGBE_BUPTC 0x0418C -#define IXGBE_BMPTC 0x04190 -#define IXGBE_BBPTC 0x04194 -#define IXGBE_BCRCERRS 0x04198 -#define IXGBE_BXONRXC 0x0419C -#define IXGBE_BXOFFRXC 0x041E0 -#define IXGBE_BXONTXC 0x041E4 -#define IXGBE_BXOFFTXC 0x041E8 - -/* Management */ -#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_MANC 0x05820 -#define IXGBE_MFVAL 
0x05824 -#define IXGBE_MANC2H 0x05860 -#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_MIPAF 0x058B0 -#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ -#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ -#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ -#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ -#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_LSWFW 0x15F14 -#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */ -#define IXGBE_BMCIPVAL 0x05060 -#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001 -#define IXGBE_BMCIP_IPADDR_VALID 0x00000002 - -/* Management Bit Fields and Masks */ -#define IXGBE_MANC_MPROXYE 0x40000000 /* Management Proxy Enable */ -#define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */ -#define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Ena BMC2OS and OS2BMC traffic */ -#define IXGBE_MANC_EN_BMC2OS_SHIFT 28 - -/* Firmware Semaphore Register */ -#define IXGBE_FWSM_MODE_MASK 0xE -#define IXGBE_FWSM_TS_ENABLED 0x1 -#define IXGBE_FWSM_FW_MODE_PT 0x4 - -/* ARC Subsystem registers */ -#define IXGBE_HICR 0x15F00 -#define IXGBE_FWSTS 0x15F0C -#define IXGBE_HSMC0R 0x15F04 -#define IXGBE_HSMC1R 0x15F08 -#define IXGBE_SWSR 0x15F10 -#define IXGBE_HFDR 0x15FE8 -#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */ - -#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */ -/* Driver sets this bit when done to put command in RAM */ -#define IXGBE_HICR_C 0x02 -#define IXGBE_HICR_SV 0x04 /* Status Validity */ -#define IXGBE_HICR_FW_RESET_ENABLE 0x40 -#define IXGBE_HICR_FW_RESET 0x80 - -/* PCI-E registers */ -#define IXGBE_GCR 0x11000 -#define IXGBE_GTV 0x11004 -#define IXGBE_FUNCTAG 0x11008 -#define IXGBE_GLT 0x1100C -#define IXGBE_PCIEPIPEADR 0x11004 -#define IXGBE_PCIEPIPEDAT 0x11008 -#define IXGBE_GSCL_1 0x11010 -#define IXGBE_GSCL_2 0x11014 -#define IXGBE_GSCL_1_X540 IXGBE_GSCL_1 -#define IXGBE_GSCL_2_X540 IXGBE_GSCL_2 
-#define IXGBE_GSCL_3 0x11018 -#define IXGBE_GSCL_4 0x1101C -#define IXGBE_GSCN_0 0x11020 -#define IXGBE_GSCN_1 0x11024 -#define IXGBE_GSCN_2 0x11028 -#define IXGBE_GSCN_3 0x1102C -#define IXGBE_GSCN_0_X540 IXGBE_GSCN_0 -#define IXGBE_GSCN_1_X540 IXGBE_GSCN_1 -#define IXGBE_GSCN_2_X540 IXGBE_GSCN_2 -#define IXGBE_GSCN_3_X540 IXGBE_GSCN_3 -#define IXGBE_FACTPS 0x10150 -#define IXGBE_FACTPS_X540 IXGBE_FACTPS -#define IXGBE_GSCL_1_X550 0x11800 -#define IXGBE_GSCL_2_X550 0x11804 -#define IXGBE_GSCL_1_X550EM_x IXGBE_GSCL_1_X550 -#define IXGBE_GSCL_2_X550EM_x IXGBE_GSCL_2_X550 -#define IXGBE_GSCN_0_X550 0x11820 -#define IXGBE_GSCN_1_X550 0x11824 -#define IXGBE_GSCN_2_X550 0x11828 -#define IXGBE_GSCN_3_X550 0x1182C -#define IXGBE_GSCN_0_X550EM_x IXGBE_GSCN_0_X550 -#define IXGBE_GSCN_1_X550EM_x IXGBE_GSCN_1_X550 -#define IXGBE_GSCN_2_X550EM_x IXGBE_GSCN_2_X550 -#define IXGBE_GSCN_3_X550EM_x IXGBE_GSCN_3_X550 -#define IXGBE_FACTPS_X550 IXGBE_FACTPS -#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS -#define IXGBE_GSCL_1_X550EM_a IXGBE_GSCL_1_X550 -#define IXGBE_GSCL_2_X550EM_a IXGBE_GSCL_2_X550 -#define IXGBE_GSCN_0_X550EM_a IXGBE_GSCN_0_X550 -#define IXGBE_GSCN_1_X550EM_a IXGBE_GSCN_1_X550 -#define IXGBE_GSCN_2_X550EM_a IXGBE_GSCN_2_X550 -#define IXGBE_GSCN_3_X550EM_a IXGBE_GSCN_3_X550 -#define IXGBE_FACTPS_X550EM_a 0x15FEC -#define IXGBE_FACTPS_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FACTPS) - -#define IXGBE_PCIEANACTL 0x11040 -#define IXGBE_SWSM 0x10140 -#define IXGBE_SWSM_X540 IXGBE_SWSM -#define IXGBE_SWSM_X550 IXGBE_SWSM -#define IXGBE_SWSM_X550EM_x IXGBE_SWSM -#define IXGBE_SWSM_X550EM_a 0x15F70 -#define IXGBE_SWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWSM) - -#define IXGBE_FWSM 0x10148 -#define IXGBE_FWSM_X540 IXGBE_FWSM -#define IXGBE_FWSM_X550 IXGBE_FWSM -#define IXGBE_FWSM_X550EM_x IXGBE_FWSM -#define IXGBE_FWSM_X550EM_a 0x15F74 -#define IXGBE_FWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FWSM) - -#define IXGBE_SWFW_SYNC IXGBE_GSSR -#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC -#define 
IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC -#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC -#define IXGBE_SWFW_SYNC_X550EM_a 0x15F78 -#define IXGBE_SWFW_SYNC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC) - -#define IXGBE_GSSR 0x10160 -#define IXGBE_MREVID 0x11064 -#define IXGBE_DCA_ID 0x11070 -#define IXGBE_DCA_CTRL 0x11074 - -/* PCI-E registers 82599-Specific */ -#define IXGBE_GCR_EXT 0x11050 -#define IXGBE_GSCL_5_82599 0x11030 -#define IXGBE_GSCL_6_82599 0x11034 -#define IXGBE_GSCL_7_82599 0x11038 -#define IXGBE_GSCL_8_82599 0x1103C -#define IXGBE_GSCL_5_X540 IXGBE_GSCL_5_82599 -#define IXGBE_GSCL_6_X540 IXGBE_GSCL_6_82599 -#define IXGBE_GSCL_7_X540 IXGBE_GSCL_7_82599 -#define IXGBE_GSCL_8_X540 IXGBE_GSCL_8_82599 -#define IXGBE_PHYADR_82599 0x11040 -#define IXGBE_PHYDAT_82599 0x11044 -#define IXGBE_PHYCTL_82599 0x11048 -#define IXGBE_PBACLR_82599 0x11068 -#define IXGBE_CIAA 0x11088 -#define IXGBE_CIAD 0x1108C -#define IXGBE_CIAA_82599 IXGBE_CIAA -#define IXGBE_CIAD_82599 IXGBE_CIAD -#define IXGBE_CIAA_X540 IXGBE_CIAA -#define IXGBE_CIAD_X540 IXGBE_CIAD -#define IXGBE_GSCL_5_X550 0x11810 -#define IXGBE_GSCL_6_X550 0x11814 -#define IXGBE_GSCL_7_X550 0x11818 -#define IXGBE_GSCL_8_X550 0x1181C -#define IXGBE_GSCL_5_X550EM_x IXGBE_GSCL_5_X550 -#define IXGBE_GSCL_6_X550EM_x IXGBE_GSCL_6_X550 -#define IXGBE_GSCL_7_X550EM_x IXGBE_GSCL_7_X550 -#define IXGBE_GSCL_8_X550EM_x IXGBE_GSCL_8_X550 -#define IXGBE_CIAA_X550 0x11508 -#define IXGBE_CIAD_X550 0x11510 -#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550 -#define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550 -#define IXGBE_GSCL_5_X550EM_a IXGBE_GSCL_5_X550 -#define IXGBE_GSCL_6_X550EM_a IXGBE_GSCL_6_X550 -#define IXGBE_GSCL_7_X550EM_a IXGBE_GSCL_7_X550 -#define IXGBE_GSCL_8_X550EM_a IXGBE_GSCL_8_X550 -#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550 -#define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550 -#define IXGBE_CIAA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAA) -#define IXGBE_CIAD_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAD) -#define IXGBE_PICAUSE 0x110B0 -#define 
IXGBE_PIENA 0x110B8 -#define IXGBE_CDQ_MBR_82599 0x110B4 -#define IXGBE_PCIESPARE 0x110BC -#define IXGBE_MISC_REG_82599 0x110F0 -#define IXGBE_ECC_CTRL_0_82599 0x11100 -#define IXGBE_ECC_CTRL_1_82599 0x11104 -#define IXGBE_ECC_STATUS_82599 0x110E0 -#define IXGBE_BAR_CTRL_82599 0x110F4 - -/* PCI Express Control */ -#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 -#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 -#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 -#define IXGBE_GCR_CAP_VER2 0x00040000 - -#define IXGBE_GCR_EXT_MSIX_EN 0x80000000 -#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000 -#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001 -#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 -#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 -#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ - IXGBE_GCR_EXT_VT_MODE_64) -#define IXGBE_GCR_EXT_VT_MODE_MASK 0x00000003 -/* Time Sync Registers */ -#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ -#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ -#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */ -#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */ -#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */ -#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */ -#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */ -#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */ -#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ -#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ -#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ -#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */ -#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ -#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ -#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ -#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */ 
-#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */ -#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ -#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ -#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ -#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */ -#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */ -#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ -#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ -#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ -#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ -#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ -#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ -#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */ -#define IXGBE_TSICR 0x08C60 /* TimeSync Interrupt Cause Register - WO */ -#define IXGBE_TSSDP 0x0003C /* TimeSync SDP Configuration Register - RW */ - -/* Diagnostic Registers */ -#define IXGBE_RDSTATCTL 0x02C20 -#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ -#define IXGBE_RDHMPN 0x02F08 -#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) -#define IXGBE_RDPROBE 0x02F20 -#define IXGBE_RDMAM 0x02F30 -#define IXGBE_RDMAD 0x02F34 -#define IXGBE_TDHMPN 0x07F08 -#define IXGBE_TDHMPN2 0x082FC -#define IXGBE_TXDESCIC 0x082CC -#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) -#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) -#define IXGBE_TDPROBE 0x07F20 -#define IXGBE_TXBUFCTRL 0x0C600 -#define IXGBE_TXBUFDATA0 0x0C610 -#define IXGBE_TXBUFDATA1 0x0C614 -#define IXGBE_TXBUFDATA2 0x0C618 -#define IXGBE_TXBUFDATA3 0x0C61C -#define IXGBE_RXBUFCTRL 0x03600 -#define IXGBE_RXBUFDATA0 0x03610 -#define IXGBE_RXBUFDATA1 0x03614 -#define IXGBE_RXBUFDATA2 0x03618 -#define IXGBE_RXBUFDATA3 0x0361C -#define 
IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ -#define IXGBE_RFVAL 0x050A4 -#define IXGBE_MDFTC1 0x042B8 -#define IXGBE_MDFTC2 0x042C0 -#define IXGBE_MDFTFIFO1 0x042C4 -#define IXGBE_MDFTFIFO2 0x042C8 -#define IXGBE_MDFTS 0x042CC -#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ -#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ -#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ -#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/ -#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ -#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ -#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ -#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ -#define IXGBE_PCIEECCCTL 0x1106C -#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/ -#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/ -#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/ -#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/ -#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/ -#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/ -#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/ -#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/ -#define IXGBE_PCIEECCCTL0 0x11100 -#define IXGBE_PCIEECCCTL1 0x11104 -#define IXGBE_RXDBUECC 0x03F70 -#define IXGBE_TXDBUECC 0x0CF70 -#define IXGBE_RXDBUEST 0x03F74 -#define IXGBE_TXDBUEST 0x0CF74 -#define IXGBE_PBTXECC 0x0C300 -#define IXGBE_PBRXECC 0x03300 -#define IXGBE_GHECCR 0x110B0 - -/* MAC Registers */ -#define IXGBE_PCS1GCFIG 0x04200 -#define IXGBE_PCS1GLCTL 0x04208 -#define IXGBE_PCS1GLSTA 0x0420C -#define IXGBE_PCS1GDBG0 0x04210 -#define IXGBE_PCS1GDBG1 
0x04214 -#define IXGBE_PCS1GANA 0x04218 -#define IXGBE_PCS1GANLP 0x0421C -#define IXGBE_PCS1GANNP 0x04220 -#define IXGBE_PCS1GANLPNP 0x04224 -#define IXGBE_HLREG0 0x04240 -#define IXGBE_HLREG1 0x04244 -#define IXGBE_PAP 0x04248 -#define IXGBE_MACA 0x0424C -#define IXGBE_APAE 0x04250 -#define IXGBE_ARD 0x04254 -#define IXGBE_AIS 0x04258 -#define IXGBE_MSCA 0x0425C -#define IXGBE_MSRWD 0x04260 -#define IXGBE_MLADD 0x04264 -#define IXGBE_MHADD 0x04268 -#define IXGBE_MAXFRS 0x04268 -#define IXGBE_TREG 0x0426C -#define IXGBE_PCSS1 0x04288 -#define IXGBE_PCSS2 0x0428C -#define IXGBE_XPCSS 0x04290 -#define IXGBE_MFLCN 0x04294 -#define IXGBE_SERDESC 0x04298 -#define IXGBE_MAC_SGMII_BUSY 0x04298 -#define IXGBE_MACS 0x0429C -#define IXGBE_AUTOC 0x042A0 -#define IXGBE_LINKS 0x042A4 -#define IXGBE_LINKS2 0x04324 -#define IXGBE_AUTOC2 0x042A8 -#define IXGBE_AUTOC3 0x042AC -#define IXGBE_ANLP1 0x042B0 -#define IXGBE_ANLP2 0x042B4 -#define IXGBE_MACC 0x04330 -#define IXGBE_ATLASCTL 0x04800 -#define IXGBE_MMNGC 0x042D0 -#define IXGBE_ANLPNP1 0x042D4 -#define IXGBE_ANLPNP2 0x042D8 -#define IXGBE_KRPCSFC 0x042E0 -#define IXGBE_KRPCSS 0x042E4 -#define IXGBE_FECS1 0x042E8 -#define IXGBE_FECS2 0x042EC -#define IXGBE_SMADARCTL 0x14F10 -#define IXGBE_MPVC 0x04318 -#define IXGBE_SGMIIC 0x04314 - -/* Statistics Registers */ -#define IXGBE_RXNFGPC 0x041B0 -#define IXGBE_RXNFGBCL 0x041B4 -#define IXGBE_RXNFGBCH 0x041B8 -#define IXGBE_RXDGPC 0x02F50 -#define IXGBE_RXDGBCL 0x02F54 -#define IXGBE_RXDGBCH 0x02F58 -#define IXGBE_RXDDGPC 0x02F5C -#define IXGBE_RXDDGBCL 0x02F60 -#define IXGBE_RXDDGBCH 0x02F64 -#define IXGBE_RXLPBKGPC 0x02F68 -#define IXGBE_RXLPBKGBCL 0x02F6C -#define IXGBE_RXLPBKGBCH 0x02F70 -#define IXGBE_RXDLPBKGPC 0x02F74 -#define IXGBE_RXDLPBKGBCL 0x02F78 -#define IXGBE_RXDLPBKGBCH 0x02F7C -#define IXGBE_TXDGPC 0x087A0 -#define IXGBE_TXDGBCL 0x087A4 -#define IXGBE_TXDGBCH 0x087A8 - -#define IXGBE_RXDSTATCTRL 0x02F40 - -/* Copper Pond 2 link timeout */ -#define 
IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 - -/* Omer CORECTL */ -#define IXGBE_CORECTL 0x014F00 -/* BARCTRL */ -#define IXGBE_BARCTRL 0x110F4 -#define IXGBE_BARCTRL_FLSIZE 0x0700 -#define IXGBE_BARCTRL_FLSIZE_SHIFT 8 -#define IXGBE_BARCTRL_CSRSIZE 0x2000 - -/* RSCCTL Bit Masks */ -#define IXGBE_RSCCTL_RSCEN 0x01 -#define IXGBE_RSCCTL_MAXDESC_1 0x00 -#define IXGBE_RSCCTL_MAXDESC_4 0x04 -#define IXGBE_RSCCTL_MAXDESC_8 0x08 -#define IXGBE_RSCCTL_MAXDESC_16 0x0C -#define IXGBE_RSCCTL_TS_DIS 0x02 - -/* RSCDBU Bit Masks */ -#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F -#define IXGBE_RSCDBU_RSCACKDIS 0x00000080 - -/* RDRXCTL Bit Masks */ -#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min THLD Size */ -#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ -#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad Small Packet */ -#define IXGBE_RDRXCTL_MVMEN 0x00000020 -#define IXGBE_RDRXCTL_RSC_PUSH_DIS 0x00000020 -#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ -#define IXGBE_RDRXCTL_RSC_PUSH 0x00000080 -#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ -#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */ -#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI*/ -#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC ena */ -#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC ena */ -#define IXGBE_RDRXCTL_MBINTEN 0x10000000 -#define IXGBE_RDRXCTL_MDP_EN 0x20000000 - -/* RQTC Bit Masks and Shifts */ -#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) -#define IXGBE_RQTC_TC0_MASK (0x7 << 0) -#define IXGBE_RQTC_TC1_MASK (0x7 << 4) -#define IXGBE_RQTC_TC2_MASK (0x7 << 8) -#define IXGBE_RQTC_TC3_MASK (0x7 << 12) -#define IXGBE_RQTC_TC4_MASK (0x7 << 16) -#define IXGBE_RQTC_TC5_MASK (0x7 << 20) -#define IXGBE_RQTC_TC6_MASK (0x7 << 24) -#define IXGBE_RQTC_TC7_MASK (0x7 << 28) - -/* PSRTYPE.RQPL Bit masks and shift */ -#define IXGBE_PSRTYPE_RQPL_MASK 0x7 -#define IXGBE_PSRTYPE_RQPL_SHIFT 29 - -/* 
CTRL Bit Masks */ -#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ -#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */ -#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ -#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST) - -/* FACTPS */ -#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageblility Clock Gated */ -#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ - -/* MHADD Bit Masks */ -#define IXGBE_MHADD_MFS_MASK 0xFFFF0000 -#define IXGBE_MHADD_MFS_SHIFT 16 - -/* Extended Device Control */ -#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */ -#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */ -#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ -#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ - -/* Direct Cache Access (DCA) definitions */ -#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ -#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ - -#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ -#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ - -#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ -#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */ -#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */ -#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */ -#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */ -#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */ -#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */ -#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ -#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ - -#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ -#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ -#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 
/* Tx CPUID Shift */ -#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ -#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ -#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ -#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ -#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ - -/* MSCA Bit Masks */ -#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Addr (new prot) */ -#define IXGBE_MSCA_NP_ADDR_SHIFT 0 -#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Dev Type (new prot) */ -#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old prot */ -#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */ -#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/ -#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */ -#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ -#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ -#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (wr) */ -#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (rd) */ -#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (rd auto inc)*/ -#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ -#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ -#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new prot) */ -#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old prot) */ -#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */ -#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress ena */ - -/* MSRWD bit masks */ -#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF -#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 -#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 -#define IXGBE_MSRWD_READ_DATA_SHIFT 16 - -/* Atlas registers */ -#define IXGBE_ATLAS_PDN_LPBK 0x24 -#define IXGBE_ATLAS_PDN_10G 0xB -#define IXGBE_ATLAS_PDN_1G 0xC -#define IXGBE_ATLAS_PDN_AN 0xD - -/* Atlas bit masks */ -#define 
IXGBE_ATLASCTL_WRITE_CMD 0x00010000 -#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10 -#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0 -#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 -#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 - -/* Omer bit masks */ -#define IXGBE_CORECTL_WRITE_CMD 0x00010000 - -/* Device Type definitions for new protocol MDIO commands */ -#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0 -#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 -#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 -#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 -#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ -#define IXGBE_TWINAX_DEV 1 - -#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ - -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Ctrl Reg */ -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */ -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 - -#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ -#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */ -#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */ -#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ -#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */ -#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */ -#define IXGBE_AUTO_NEG_10GBASE_EEE_ADVT 0x8 /* AUTO NEG EEE 10GBaseT Advt */ -#define IXGBE_AUTO_NEG_1000BASE_EEE_ADVT 0x4 /* AUTO NEG EEE 1000BaseT Advt */ -#define IXGBE_AUTO_NEG_100BASE_EEE_ADVT 0x2 /* AUTO NEG EEE 100BaseT Advt */ 
-#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ -#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ -#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ -#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ -#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ -#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ -#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ -#define IXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */ -#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */ -#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */ -#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ -#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */ -#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */ -#define IXGBE_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG Rx LP Status Reg */ -#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT Cap */ -#define IXGBE_AUTO_NEG_LP_10GBASE_CAP 0x0800 /* AUTO NEG Rx LP 10GBaseT Cap */ -#define IXGBE_AUTO_NEG_10GBASET_STAT 0x0021 /* AUTO NEG 10G BaseT Stat */ - -#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */ -#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */ -#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */ -#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */ -#define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK 0xFF00 /* int std mask */ -#define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG 0xFC00 /* chip std int flag */ -#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */ -#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide mask */ -#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */ -#define IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT 0x0010 /* device fault */ -#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */ -#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* 
Global Fault Message */ -#define IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP 0x8007 /* high temp failure */ -#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */ -#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 /* autoneg vendor alarm int enable */ -#define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */ -#define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */ -#define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */ -#define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */ -#define IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN 0x0010 /* int dev fault enable */ -#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */ -#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ -#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ -#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ -#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */ -#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */ -#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Transmit Dis Reg */ -#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Transmit Dis */ - -#define IXGBE_PCRC8ECL 0x0E810 /* PCR CRC-8 Error Count Lo */ -#define IXGBE_PCRC8ECH 0x0E811 /* PCR CRC-8 Error Count Hi */ -#define IXGBE_PCRC8ECH_MASK 0x1F -#define IXGBE_LDPCECL 0x0E820 /* PCR Uncorrected Error Count Lo */ -#define IXGBE_LDPCECH 0x0E821 /* PCR Uncorrected Error Count Hi */ - -/* MII clause 22/28 definitions */ -#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 - -#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register*/ -#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */ - -#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */ - -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */ -#define 
IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s Half Duplex */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s Full Duplex */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */ -#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */ - -#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */ -#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ -#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ -#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ -#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/ -#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ -#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ -#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400 -#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800 -#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ -#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ -#define IXGBE_MII_RESTART 0x200 -#define IXGBE_MII_AUTONEG_COMPLETE 0x20 -#define IXGBE_MII_AUTONEG_LINK_UP 0x04 -#define IXGBE_MII_AUTONEG_REG 0x0 - -#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 -#define IXGBE_MAX_PHY_ADDR 32 - -/* PHY IDs*/ -#define TN1010_PHY_ID 0x00A19410 -#define TNX_FW_REV 0xB -#define X540_PHY_ID 0x01540200 -#define X550_PHY_ID2 0x01540223 -#define X550_PHY_ID3 0x01540221 -#define 
X557_PHY_ID 0x01540240 -#define X557_PHY_ID2 0x01540250 -#define AQ_FW_REV 0x20 -#define QT2022_PHY_ID 0x0043A400 -#define ATH_PHY_ID 0x03429050 - -/* PHY Types */ -#define IXGBE_M88E1500_E_PHY_ID 0x01410DD0 -#define IXGBE_M88E1543_E_PHY_ID 0x01410EA0 - -/* Special PHY Init Routine */ -#define IXGBE_PHY_INIT_OFFSET_NL 0x002B -#define IXGBE_PHY_INIT_END_NL 0xFFFF -#define IXGBE_CONTROL_MASK_NL 0xF000 -#define IXGBE_DATA_MASK_NL 0x0FFF -#define IXGBE_CONTROL_SHIFT_NL 12 -#define IXGBE_DELAY_NL 0 -#define IXGBE_DATA_NL 1 -#define IXGBE_CONTROL_NL 0x000F -#define IXGBE_CONTROL_EOL_NL 0x0FFF -#define IXGBE_CONTROL_SOL_NL 0x0000 - -/* General purpose Interrupt Enable */ -#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ -#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ -#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */ -#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */ -#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */ -#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */ -#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540 -#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540 -#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540 -#define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540 -#define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540 -#define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540 -#define IXGBE_SDP0_GPIEN_X550EM_a IXGBE_SDP0_GPIEN_X540 -#define IXGBE_SDP1_GPIEN_X550EM_a IXGBE_SDP1_GPIEN_X540 -#define IXGBE_SDP2_GPIEN_X550EM_a IXGBE_SDP2_GPIEN_X540 -#define IXGBE_SDP0_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN) -#define IXGBE_SDP1_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN) -#define IXGBE_SDP2_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN) - -#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ -#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ -#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ -#define IXGBE_GPIE_EIAME 0x40000000 -#define IXGBE_GPIE_PBA_SUPPORT 
0x80000000 -#define IXGBE_GPIE_RSC_DELAY_SHIFT 11 -#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ -#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ -#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ -#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */ - -/* Packet Buffer Initialization */ -#define IXGBE_MAX_PACKET_BUFFERS 8 - -#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */ -#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ -#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ -#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ -#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ -#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ -#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer */ -#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer */ - -#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ -#define IXGBE_MAX_PB 8 - -/* Packet buffer allocation strategies */ -enum { - PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ -#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL - PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ -#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED -}; - -/* Transmit Flow Control status */ -#define IXGBE_TFCS_TXOFF 0x00000001 -#define IXGBE_TFCS_TXOFF0 0x00000100 -#define IXGBE_TFCS_TXOFF1 0x00000200 -#define IXGBE_TFCS_TXOFF2 0x00000400 -#define IXGBE_TFCS_TXOFF3 0x00000800 -#define IXGBE_TFCS_TXOFF4 0x00001000 -#define IXGBE_TFCS_TXOFF5 0x00002000 -#define IXGBE_TFCS_TXOFF6 0x00004000 -#define IXGBE_TFCS_TXOFF7 0x00008000 - -/* TCP Timer */ -#define IXGBE_TCPTIMER_KS 0x00000100 -#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200 -#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400 -#define IXGBE_TCPTIMER_LOOP 0x00000800 -#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF - -/* HLREG0 Bit Masks */ -#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */ -#define 
IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */ -#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */ -#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */ -#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */ -#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */ -#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */ -#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */ -#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */ -#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */ -#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */ -#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */ -#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */ -#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */ -#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */ - -/* VMD_CTL bitmasks */ -#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001 -#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 - -/* VT_CTL bitmasks */ -#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */ -#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */ -#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */ -#define IXGBE_VT_CTL_POOL_SHIFT 7 -#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) - -/* VMOLR bitmasks */ -#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */ -#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */ -#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ -#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ -#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ -#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */ -#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */ - -/* VFRE bitmask */ -#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF - -#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ - -/* RDHMPN and TDHMPN bitmasks */ -#define IXGBE_RDHMPN_RDICADDR 0x007FF800 -#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 -#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 
-#define IXGBE_TDHMPN_TDICADDR 0x003FF800 -#define IXGBE_TDHMPN_TDICRDREQ 0x00800000 -#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 - -#define IXGBE_RDMAM_MEM_SEL_SHIFT 13 -#define IXGBE_RDMAM_DWORD_SHIFT 9 -#define IXGBE_RDMAM_DESC_COMP_FIFO 1 -#define IXGBE_RDMAM_DFC_CMD_FIFO 2 -#define IXGBE_RDMAM_RSC_HEADER_ADDR 3 -#define IXGBE_RDMAM_TCN_STATUS_RAM 4 -#define IXGBE_RDMAM_WB_COLL_FIFO 5 -#define IXGBE_RDMAM_QSC_CNT_RAM 6 -#define IXGBE_RDMAM_QSC_FCOE_RAM 7 -#define IXGBE_RDMAM_QSC_QUEUE_CNT 8 -#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA -#define IXGBE_RDMAM_QSC_RSC_RAM 0xB -#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135 -#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4 -#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48 -#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7 -#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE 32 -#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT 4 -#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256 -#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9 -#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8 -#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4 -#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64 -#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4 -#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE 512 -#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT 5 -#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32 -#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4 -#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128 -#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8 -#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE 32 -#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT 8 - -#define IXGBE_TXDESCIC_READY 0x80000000 - -/* Receive Checksum Control */ -#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ -#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ - -/* FCRTL Bit Masks */ -#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */ -#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */ - -/* PAP bit masks*/ -#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ - -/* RMCS Bit Masks */ -#define IXGBE_RMCS_RRM 0x00000002 /* Rx Recycle Mode enable */ -/* Receive 
Arbitration Control: 0 Round Robin, 1 DFP */ -#define IXGBE_RMCS_RAC 0x00000004 -/* Deficit Fixed Prio ena */ -#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC -#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */ -#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */ -#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ - -/* FCCFG Bit Masks */ -#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */ -#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */ - -/* Interrupt register bitmasks */ - -/* Extended Interrupt Cause Read */ -#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ -#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */ -#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */ -#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */ -#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ -#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ -#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ -#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ -#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ -#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */ -#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ -#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ -#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ -#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ -#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 /* Gen Purpose Interrupt on SDP0 */ -#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 /* Gen Purpose Interrupt on SDP1 */ -#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 /* Gen Purpose Interrupt on SDP2 */ -#define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540 -#define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540 -#define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540 -#define IXGBE_EICR_GPI_SDP0_X550EM_x IXGBE_EICR_GPI_SDP0_X540 
-#define IXGBE_EICR_GPI_SDP1_X550EM_x IXGBE_EICR_GPI_SDP1_X540 -#define IXGBE_EICR_GPI_SDP2_X550EM_x IXGBE_EICR_GPI_SDP2_X540 -#define IXGBE_EICR_GPI_SDP0_X550EM_a IXGBE_EICR_GPI_SDP0_X540 -#define IXGBE_EICR_GPI_SDP1_X550EM_a IXGBE_EICR_GPI_SDP1_X540 -#define IXGBE_EICR_GPI_SDP2_X550EM_a IXGBE_EICR_GPI_SDP2_X540 -#define IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0) -#define IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1) -#define IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2) - -#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ -#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ -#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ -#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ - -/* Extended Interrupt Cause Set */ -#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ -#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */ -#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ -#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ -#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ -#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ -#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ -#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ -#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ -#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ -#define IXGBE_EICS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) -#define IXGBE_EICS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) -#define IXGBE_EICS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) -#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ -#define IXGBE_EICS_DHER 
IXGBE_EICR_DHER /* Desc Handler Error */ -#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ -#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ - -/* Extended Interrupt Mask Set */ -#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ -#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ -#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ -#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ -#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ -#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */ -#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ -#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ -#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ -#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ -#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ -#define IXGBE_EIMS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) -#define IXGBE_EIMS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) -#define IXGBE_EIMS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) -#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ -#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ -#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ -#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ - -/* Extended Interrupt Mask Clear */ -#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ -#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ -#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ -#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ 
-#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ -#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ -#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ -#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ -#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ -#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ -#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ -#define IXGBE_EIMC_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) -#define IXGBE_EIMC_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) -#define IXGBE_EIMC_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) -#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ -#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ -#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ -#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ - -#define IXGBE_EIMS_ENABLE_MASK ( \ - IXGBE_EIMS_RTX_QUEUE | \ - IXGBE_EIMS_LSC | \ - IXGBE_EIMS_TCP_TIMER | \ - IXGBE_EIMS_OTHER) - -/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ -#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ -#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ -#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ -#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ -#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ -#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ -#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ -#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ -#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ -#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ -#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */ -#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */ -#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ -#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ -#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ -#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ -#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ -#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass chk of ctrl bits */ -#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */ -#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */ -#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ -#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */ -#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */ - -#define IXGBE_MAX_FTQF_FILTERS 128 -#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003 -#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000 -#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001 -#define IXGBE_FTQF_PROTOCOL_SCTP 2 -#define IXGBE_FTQF_PRIORITY_MASK 0x00000007 -#define IXGBE_FTQF_PRIORITY_SHIFT 2 -#define 
IXGBE_FTQF_POOL_MASK 0x0000003F -#define IXGBE_FTQF_POOL_SHIFT 8 -#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F -#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 -#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E -#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D -#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B -#define IXGBE_FTQF_DEST_PORT_MASK 0x17 -#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F -#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 -#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 - -/* Interrupt clear mask */ -#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF - -/* Interrupt Vector Allocation Registers */ -#define IXGBE_IVAR_REG_NUM 25 -#define IXGBE_IVAR_REG_NUM_82599 64 -#define IXGBE_IVAR_TXRX_ENTRY 96 -#define IXGBE_IVAR_RX_ENTRY 64 -#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) -#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) -#define IXGBE_IVAR_TX_ENTRY 32 - -#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ -#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ - -#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) - -#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ - -/* ETYPE Queue Filter/Select Bit Masks */ -#define IXGBE_MAX_ETQF_FILTERS 8 -#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ -#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ -#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */ -#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ -#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ -#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ -#define IXGBE_ETQF_POOL_SHIFT 20 - -#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ -#define IXGBE_ETQS_RX_QUEUE_SHIFT 16 -#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */ -#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */ - -/* - * ETQF filter list: one static filter per filter consumer. This is - * to avoid filter collisions later. Add new filters - * here!! 
- * - * Current filters: - * EAPOL 802.1x (0x888e): Filter 0 - * FCoE (0x8906): Filter 2 - * 1588 (0x88f7): Filter 3 - * FIP (0x8914): Filter 4 - * LLDP (0x88CC): Filter 5 - * LACP (0x8809): Filter 6 - * FC (0x8808): Filter 7 - */ -#define IXGBE_ETQF_FILTER_EAPOL 0 -#define IXGBE_ETQF_FILTER_FCOE 2 -#define IXGBE_ETQF_FILTER_1588 3 -#define IXGBE_ETQF_FILTER_FIP 4 -#define IXGBE_ETQF_FILTER_LLDP 5 -#define IXGBE_ETQF_FILTER_LACP 6 -#define IXGBE_ETQF_FILTER_FC 7 -/* VLAN Control Bit Masks */ -#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ -#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ -#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ -#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ -#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ - -/* VLAN pool filtering masks */ -#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ -#define IXGBE_VLVF_ENTRIES 64 -#define IXGBE_VLVF_VLANID_MASK 0x00000FFF -/* Per VF Port VLAN insertion rules */ -#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ -#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ - -#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ - -/* STATUS Bit Masks */ -#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ -#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/ -#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Ena Status */ - -#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ -#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ - -/* ESDP Bit Masks */ -#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */ -#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */ -#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */ -#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */ -#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ -#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ -#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ -#define IXGBE_ESDP_SDP7 0x00000080 /* SDP7 Data Value */ -#define 
IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ -#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */ -#define IXGBE_ESDP_SDP2_DIR 0x00000400 /* SDP1 IO direction */ -#define IXGBE_ESDP_SDP3_DIR 0x00000800 /* SDP3 IO direction */ -#define IXGBE_ESDP_SDP4_DIR 0x00001000 /* SDP4 IO direction */ -#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ -#define IXGBE_ESDP_SDP6_DIR 0x00004000 /* SDP6 IO direction */ -#define IXGBE_ESDP_SDP7_DIR 0x00008000 /* SDP7 IO direction */ -#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 IO mode */ -#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */ - - -/* LEDCTL Bit Masks */ -#define IXGBE_LED_IVRT_BASE 0x00000040 -#define IXGBE_LED_BLINK_BASE 0x00000080 -#define IXGBE_LED_MODE_MASK_BASE 0x0000000F -#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) -#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i)) -#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) -#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) -#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) -#define IXGBE_X557_LED_MANUAL_SET_MASK (1 << 8) -#define IXGBE_X557_MAX_LED_INDEX 3 -#define IXGBE_X557_LED_PROVISIONING 0xC430 - -/* LED modes */ -#define IXGBE_LED_LINK_UP 0x0 -#define IXGBE_LED_LINK_10G 0x1 -#define IXGBE_LED_MAC 0x2 -#define IXGBE_LED_FILTER 0x3 -#define IXGBE_LED_LINK_ACTIVE 0x4 -#define IXGBE_LED_LINK_1G 0x5 -#define IXGBE_LED_ON 0xE -#define IXGBE_LED_OFF 0xF - -/* AUTOC Bit Masks */ -#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000 -#define IXGBE_AUTOC_KX4_SUPP 0x80000000 -#define IXGBE_AUTOC_KX_SUPP 0x40000000 -#define IXGBE_AUTOC_PAUSE 0x30000000 -#define IXGBE_AUTOC_ASM_PAUSE 0x20000000 -#define IXGBE_AUTOC_SYM_PAUSE 0x10000000 -#define IXGBE_AUTOC_RF 0x08000000 -#define IXGBE_AUTOC_PD_TMR 0x06000000 -#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 -#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 -#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 -#define 
IXGBE_AUTOC_FECA 0x00040000 -#define IXGBE_AUTOC_FECR 0x00020000 -#define IXGBE_AUTOC_KR_SUPP 0x00010000 -#define IXGBE_AUTOC_AN_RESTART 0x00001000 -#define IXGBE_AUTOC_FLU 0x00000001 -#define IXGBE_AUTOC_LMS_SHIFT 13 -#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) - -#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200 -#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 -#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 -#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 -#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) - -#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 -#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 -#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 -#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) 
-#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000 -#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 - -#define IXGBE_MACC_FLU 0x00000001 -#define IXGBE_MACC_FSV_10G 0x00030000 -#define IXGBE_MACC_FS 0x00040000 -#define IXGBE_MAC_RX2TX_LPBK 0x00000002 - -/* Veto Bit definiton */ -#define IXGBE_MMNGC_MNG_VETO 0x00000001 - -/* LINKS Bit Masks */ -#define IXGBE_LINKS_KX_AN_COMP 0x80000000 -#define IXGBE_LINKS_UP 0x40000000 -#define IXGBE_LINKS_SPEED 0x20000000 -#define IXGBE_LINKS_MODE 0x18000000 -#define IXGBE_LINKS_RX_MODE 0x06000000 -#define IXGBE_LINKS_TX_MODE 0x01800000 -#define IXGBE_LINKS_XGXS_EN 0x00400000 -#define IXGBE_LINKS_SGMII_EN 0x02000000 -#define IXGBE_LINKS_PCS_1G_EN 0x00200000 -#define IXGBE_LINKS_1G_AN_EN 0x00100000 -#define IXGBE_LINKS_KX_AN_IDLE 0x00080000 -#define IXGBE_LINKS_1G_SYNC 0x00040000 -#define IXGBE_LINKS_10G_ALIGN 0x00020000 -#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 -#define IXGBE_LINKS_TL_FAULT 0x00001000 -#define IXGBE_LINKS_SIGNAL 0x00000F00 - -#define IXGBE_LINKS_SPEED_NON_STD 0x08000000 -#define IXGBE_LINKS_SPEED_82599 0x30000000 -#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 -#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 -#define IXGBE_LINKS_SPEED_100_82599 0x10000000 -#define IXGBE_LINKS_SPEED_10_X550EM_A 0x00000000 -#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ -#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ - -#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 - -/* PCS1GLSTA Bit Masks */ -#define IXGBE_PCS1GLSTA_LINK_OK 1 -#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 -#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000 -#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000 -#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000 -#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 -#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000 - -#define IXGBE_PCS1GANA_SYM_PAUSE 0x80 -#define IXGBE_PCS1GANA_ASM_PAUSE 0x100 - -/* PCS1GLCTL Bit Masks */ -#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 
0x00040000 /* PCS 1G autoneg to en */ -#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1 -#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20 -#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40 -#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 -#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 - -/* ANLP1 Bit Masks */ -#define IXGBE_ANLP1_PAUSE 0x0C00 -#define IXGBE_ANLP1_SYM_PAUSE 0x0400 -#define IXGBE_ANLP1_ASM_PAUSE 0x0800 -#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 - -/* SW Semaphore Register bitmasks */ -#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ -#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ -#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ -#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ - -/* SW_FW_SYNC/GSSR definitions */ -#define IXGBE_GSSR_EEP_SM 0x0001 -#define IXGBE_GSSR_PHY0_SM 0x0002 -#define IXGBE_GSSR_PHY1_SM 0x0004 -#define IXGBE_GSSR_MAC_CSR_SM 0x0008 -#define IXGBE_GSSR_FLASH_SM 0x0010 -#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200 -#define IXGBE_GSSR_SW_MNG_SM 0x0400 -#define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */ -#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys and both I2Cs */ -#define IXGBE_GSSR_I2C_MASK 0x1800 -#define IXGBE_GSSR_NVM_PHY_MASK 0xF - -/* FW Status register bitmask */ -#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ - -/* EEC Register */ -#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ -#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ -#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ -#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ -#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */ -#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ -#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ -#define IXGBE_EEC_FWE_SHIFT 4 -#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ -#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ -#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ -#define 
IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ -#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ -#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */ -#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ -/* EEPROM Addressing bits based on type (0-small, 1-large) */ -#define IXGBE_EEC_ADDR_SIZE 0x00000400 -#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ -#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. */ - -#define IXGBE_EEC_SIZE_SHIFT 11 -#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 -#define IXGBE_EEPROM_OPCODE_BITS 8 - -/* FLA Register */ -#define IXGBE_FLA_LOCKED 0x00000040 - -/* Part Number String Length */ -#define IXGBE_PBANUM_LENGTH 11 - -/* Checksum and EEPROM pointers */ -#define IXGBE_PBANUM_PTR_GUARD 0xFAFA -#define IXGBE_EEPROM_CHECKSUM 0x3F -#define IXGBE_EEPROM_SUM 0xBABA -#define IXGBE_EEPROM_CTRL_4 0x45 -#define IXGBE_EE_CTRL_4_INST_ID 0x10 -#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4 -#define IXGBE_PCIE_ANALOG_PTR 0x03 -#define IXGBE_ATLAS0_CONFIG_PTR 0x04 -#define IXGBE_PHY_PTR 0x04 -#define IXGBE_ATLAS1_CONFIG_PTR 0x05 -#define IXGBE_OPTION_ROM_PTR 0x05 -#define IXGBE_PCIE_GENERAL_PTR 0x06 -#define IXGBE_PCIE_CONFIG0_PTR 0x07 -#define IXGBE_PCIE_CONFIG1_PTR 0x08 -#define IXGBE_CORE0_PTR 0x09 -#define IXGBE_CORE1_PTR 0x0A -#define IXGBE_MAC0_PTR 0x0B -#define IXGBE_MAC1_PTR 0x0C -#define IXGBE_CSR0_CONFIG_PTR 0x0D -#define IXGBE_CSR1_CONFIG_PTR 0x0E -#define IXGBE_PCIE_ANALOG_PTR_X550 0x02 -#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000 -#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24 -#define IXGBE_PCIE_CONFIG_SIZE 0x08 -#define IXGBE_EEPROM_LAST_WORD 0x41 -#define IXGBE_FW_PTR 0x0F -#define IXGBE_PBANUM0_PTR 0x15 -#define IXGBE_PBANUM1_PTR 0x16 -#define IXGBE_ALT_MAC_ADDR_PTR 0x37 -#define IXGBE_FREE_SPACE_PTR 0X3E - -/* External Thermal Sensor Config */ -#define IXGBE_ETS_CFG 0x26 -#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0 -#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6 -#define IXGBE_ETS_TYPE_MASK 0x0038 
-#define IXGBE_ETS_TYPE_SHIFT 3 -#define IXGBE_ETS_TYPE_EMC 0x000 -#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007 -#define IXGBE_ETS_DATA_LOC_MASK 0x3C00 -#define IXGBE_ETS_DATA_LOC_SHIFT 10 -#define IXGBE_ETS_DATA_INDEX_MASK 0x0300 -#define IXGBE_ETS_DATA_INDEX_SHIFT 8 -#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF - -#define IXGBE_SAN_MAC_ADDR_PTR 0x28 -#define IXGBE_DEVICE_CAPS 0x2C -#define IXGBE_82599_SERIAL_NUMBER_MAC_ADDR 0x11 -#define IXGBE_X550_SERIAL_NUMBER_MAC_ADDR 0x04 - -#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 -#define IXGBE_MAX_MSIX_VECTORS_82599 0x40 -#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 -#define IXGBE_MAX_MSIX_VECTORS_82598 0x13 - -/* MSI-X capability fields masks */ -#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF - -/* Legacy EEPROM word offsets */ -#define IXGBE_ISCSI_BOOT_CAPS 0x0033 -#define IXGBE_ISCSI_SETUP_PORT_0 0x0030 -#define IXGBE_ISCSI_SETUP_PORT_1 0x0034 - -/* EEPROM Commands - SPI */ -#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ -#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 -#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ -#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ -#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ -#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ -/* EEPROM reset Write Enable latch */ -#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 -#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ -#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ -#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ -#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ -#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ - -/* EEPROM Read Register */ -#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ -#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ -#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation 
*/ -#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ -#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */ -#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */ - -#define NVM_INIT_CTRL_3 0x38 -#define NVM_INIT_CTRL_3_LPLU 0x8 -#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40 -#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100 - -#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 - -#define IXGBE_EEPROM_PAGE_SIZE_MAX 128 -#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */ -#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */ -#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ -#define IXGBE_EEPROM_CCD_BIT 2 - -#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS -#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */ -#endif - -#ifndef IXGBE_EERD_EEWR_ATTEMPTS -/* Number of 5 microseconds we wait for EERD read and - * EERW write to complete */ -#define IXGBE_EERD_EEWR_ATTEMPTS 100000 -#endif - -#ifndef IXGBE_FLUDONE_ATTEMPTS -/* # attempts we wait for flush update to complete */ -#define IXGBE_FLUDONE_ATTEMPTS 20000 -#endif - -#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ -#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ -#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ -#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ - -#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 -#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 -#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 -#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 -#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR (1 << 7) -#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 -#define IXGBE_FW_LESM_STATE_1 0x1 -#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ -#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 -#define IXGBE_FW_PATCH_VERSION_4 0x7 -#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ -#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ -#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 
/* iSCSI/FCOE block */ -#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ -#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ -#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */ -#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */ -#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */ -#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */ -#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */ -#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */ -#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */ -#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */ - -/* FW header offset */ -#define IXGBE_X540_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 -#define IXGBE_X540_FW_MODULE_MASK 0x7FFF -/* 4KB multiplier */ -#define IXGBE_X540_FW_MODULE_LENGTH 0x1000 -/* version word 2 (month & day) */ -#define IXGBE_X540_FW_PATCH_VERSION_2 0x5 -/* version word 3 (silicon compatibility & year) */ -#define IXGBE_X540_FW_PATCH_VERSION_3 0x6 -/* version word 4 (major & minor numbers) */ -#define IXGBE_X540_FW_PATCH_VERSION_4 0x7 - -#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ -#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ -#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ - -/* PCI Bus Info */ -#define IXGBE_PCI_DEVICE_STATUS 0xAA -#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 -#define IXGBE_PCI_LINK_STATUS 0xB2 -#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 -#define IXGBE_PCI_LINK_WIDTH 0x3F0 -#define IXGBE_PCI_LINK_WIDTH_1 0x10 -#define IXGBE_PCI_LINK_WIDTH_2 0x20 -#define IXGBE_PCI_LINK_WIDTH_4 0x40 -#define IXGBE_PCI_LINK_WIDTH_8 0x80 -#define IXGBE_PCI_LINK_SPEED 0xF -#define IXGBE_PCI_LINK_SPEED_2500 0x1 -#define IXGBE_PCI_LINK_SPEED_5000 0x2 -#define IXGBE_PCI_LINK_SPEED_8000 0x3 -#define 
IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E -#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 -#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 - -#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf -#define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0 -#define IXGBE_PCIDEVCTRL2_50_100us 0x1 -#define IXGBE_PCIDEVCTRL2_1_2ms 0x2 -#define IXGBE_PCIDEVCTRL2_16_32ms 0x5 -#define IXGBE_PCIDEVCTRL2_65_130ms 0x6 -#define IXGBE_PCIDEVCTRL2_260_520ms 0x9 -#define IXGBE_PCIDEVCTRL2_1_2s 0xa -#define IXGBE_PCIDEVCTRL2_4_8s 0xd -#define IXGBE_PCIDEVCTRL2_17_34s 0xe - -/* Number of 100 microseconds we wait for PCI Express master disable */ -#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 - -/* Check whether address is multicast. This is little-endian specific check.*/ -#define IXGBE_IS_MULTICAST(Address) \ - (bool)(((u8 *)(Address))[0] & ((u8)0x01)) - -/* Check whether an address is broadcast. */ -#define IXGBE_IS_BROADCAST(Address) \ - ((((u8 *)(Address))[0] == ((u8)0xff)) && \ - (((u8 *)(Address))[1] == ((u8)0xff))) - -/* RAH */ -#define IXGBE_RAH_VIND_MASK 0x003C0000 -#define IXGBE_RAH_VIND_SHIFT 18 -#define IXGBE_RAH_AV 0x80000000 -#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF - -/* Header split receive */ -#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 -#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E -#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 -#define IXGBE_RFCTL_RSC_DIS 0x00000020 -#define IXGBE_RFCTL_NFSW_DIS 0x00000040 -#define IXGBE_RFCTL_NFSR_DIS 0x00000080 -#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 -#define IXGBE_RFCTL_NFS_VER_SHIFT 8 -#define IXGBE_RFCTL_NFS_VER_2 0 -#define IXGBE_RFCTL_NFS_VER_3 1 -#define IXGBE_RFCTL_NFS_VER_4 2 -#define IXGBE_RFCTL_IPV6_DIS 0x00000400 -#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 -#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 -#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 -#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 - -/* Transmit Config masks */ -#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */ -#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. 
wr-bk flushing */ -#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ -/* Enable short packet padding to 64 bytes */ -#define IXGBE_TX_PAD_ENABLE 0x00000400 -#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ -/* This allows for 16K packets + 4k for vlan */ -#define IXGBE_MAX_FRAME_SZ 0x40040000 - -#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ -#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ - -/* Receive Config masks */ -#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ -#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Desc Monitor Bypass */ -#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Ena specific Rx Queue */ -#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc wr-bk flushing */ -#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* X540 supported only */ -#define IXGBE_RXDCTL_RLPML_EN 0x00008000 -#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ - -#define IXGBE_TSAUXC_EN_CLK 0x00000004 -#define IXGBE_TSAUXC_SYNCLK 0x00000008 -#define IXGBE_TSAUXC_SDP0_INT 0x00000040 -#define IXGBE_TSAUXC_EN_TT0 0x00000001 -#define IXGBE_TSAUXC_EN_TT1 0x00000002 -#define IXGBE_TSAUXC_ST0 0x00000010 -#define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000 - -#define IXGBE_TSSDP_TS_SDP0_SEL_MASK 0x000000C0 -#define IXGBE_TSSDP_TS_SDP0_CLK0 0x00000080 -#define IXGBE_TSSDP_TS_SDP0_EN 0x00000100 - -#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ -#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */ - -#define IXGBE_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ -#define IXGBE_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ -#define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00 -#define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02 -#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 -#define IXGBE_TSYNCRXCTL_TYPE_ALL 0x08 -#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A -#define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */ -#define IXGBE_TSYNCRXCTL_TSIP_UT_EN 0x00800000 /* Rx 
Timestamp in Packet */ -#define IXGBE_TSYNCRXCTL_TSIP_UP_MASK 0xFF000000 /* Rx Timestamp UP Mask */ - -#define IXGBE_TSIM_SYS_WRAP 0x00000001 -#define IXGBE_TSIM_TXTS 0x00000002 -#define IXGBE_TSIM_TADJ 0x00000080 - -#define IXGBE_TSICR_SYS_WRAP IXGBE_TSIM_SYS_WRAP -#define IXGBE_TSICR_TXTS IXGBE_TSIM_TXTS -#define IXGBE_TSICR_TADJ IXGBE_TSIM_TADJ - -#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF -#define IXGBE_RXMTRL_V1_SYNC_MSG 0x00 -#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG 0x01 -#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG 0x02 -#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03 -#define IXGBE_RXMTRL_V1_MGMT_MSG 0x04 - -#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00 -#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000 -#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100 -#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200 -#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300 -#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800 -#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900 -#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00 -#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00 -#define IXGBE_RXMTRL_V2_SIGNALLING_MSG 0x0C00 -#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00 - -#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ -#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ -#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ -#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ -#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ -#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ -/* Receive Priority Flow Control Enable */ -#define IXGBE_FCTRL_RPFCE 0x00004000 -#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ -#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */ -#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ -#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ -#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ -#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Rx Priority FC bitmap mask */ -#define 
IXGBE_MFLCN_RPFCE_SHIFT 4 /* Rx Priority FC bitmap shift */ - -/* Multiple Receive Queue Control */ -#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ -#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ -#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */ -#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */ -#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */ -#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */ -#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */ -#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */ -#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */ -#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */ -#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */ -#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 /* Enable L3/L4 Tx switch */ -#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 -#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 -#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 -#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 -#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 -#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 -#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 -#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 -#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 -#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 -#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000 -#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 - -/* Queue Drop Enable */ -#define IXGBE_QDE_ENABLE 0x00000001 -#define IXGBE_QDE_HIDE_VLAN 0x00000002 -#define IXGBE_QDE_IDX_MASK 0x00007F00 -#define IXGBE_QDE_IDX_SHIFT 8 -#define IXGBE_QDE_WRITE 0x00010000 -#define IXGBE_QDE_READ 0x00020000 - -#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ 
-#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ -#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ -#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ -#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ - -#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000 -#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 -#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 -#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000 -#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 -/* Multiple Transmit Queue Command Register */ -#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ -#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ -#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ -#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ -#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ -#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */ -#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ - -/* Receive Descriptor bit definitions */ -#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ -#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ -#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ -#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 -#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ -#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ -#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ -#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */ -#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ -#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ -#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ -#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low 
Latency Interrupt */ -#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */ -#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ -#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ -#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ -#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ -#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ -#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ -#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ -#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ -#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ -#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ -#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ -#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ -#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ -#define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */ -#define IXGBE_RXDADV_ERR_RXE 0x20000000 /* Any MAC Error */ -#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCEOFe/IPE */ -#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ -#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ -#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */ -#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */ -#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ -#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ -#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ -#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ -#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ -#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ -#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ -#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ -#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ -#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ -#define IXGBE_RXD_PRI_SHIFT 13 -#define 
IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ -#define IXGBE_RXD_CFI_SHIFT 12 - -#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ -#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ -#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ -#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ -#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ -#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ -#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ -#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ -#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ -#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE1588 Time Stamp */ -#define IXGBE_RXDADV_STAT_TSIP 0x00008000 /* Time Stamp in packet buffer */ - -/* PSRTYPE bit definitions */ -#define IXGBE_PSRTYPE_TCPHDR 0x00000010 -#define IXGBE_PSRTYPE_UDPHDR 0x00000020 -#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 -#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 -#define IXGBE_PSRTYPE_L2HDR 0x00001000 - -/* SRRCTL bit definitions */ -#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ -#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* 64byte resolution (>> 6) - * + at bit 8 offset (<< 8) - * = (<< 2) - */ -#define IXGBE_SRRCTL_RDMTS_SHIFT 22 -#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 -#define IXGBE_SRRCTL_DROP_EN 0x10000000 -#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 -#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 -#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 - -#define 
IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 -#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF - -#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F -#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 -#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 -#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 -#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 -#define IXGBE_RXDADV_RSCCNT_SHIFT 17 -#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 -#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 -#define IXGBE_RXDADV_SPH 0x8000 - -/* RSS Hash results */ -#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 -#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 -#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 -#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 -#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 -#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 -#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 -#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 -#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 -#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 - -/* RSS Packet Types as indicated in the receive descriptor. 
*/ -#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 -#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ -#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ -#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ -#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ -#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ -#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ -#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ -#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ -#define IXGBE_RXDADV_PKTTYPE_GENEVE 0x00000800 /* GENEVE hdr present */ -#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */ -#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */ -#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ -#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ -#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ -#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ -#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ -#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ - -/* Security Processing bit Indication */ -#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000 -#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 -#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 -#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 -#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 - -/* Masks to determine if packets should be dropped due to frame errors */ -#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ - IXGBE_RXD_ERR_CE | \ - IXGBE_RXD_ERR_LE | \ - IXGBE_RXD_ERR_PE | \ - IXGBE_RXD_ERR_OSE | \ - IXGBE_RXD_ERR_USE) - -#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ - IXGBE_RXDADV_ERR_CE | \ - IXGBE_RXDADV_ERR_LE | \ - IXGBE_RXDADV_ERR_PE | \ - IXGBE_RXDADV_ERR_OSE | \ - IXGBE_RXDADV_ERR_USE) - -#define 
IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599 IXGBE_RXDADV_ERR_RXE - -/* Multicast bit mask */ -#define IXGBE_MCSTCTRL_MFE 0x4 - -/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 - -/* Vlan-specific macros */ -#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ -#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ -#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ -#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT - -/* SR-IOV specific macros */ -#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) -#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4)) -#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600)) -#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) -/* Translated register #defines */ -#define IXGBE_PVFCTRL(P) (0x00300 + (4 * (P))) -#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * (P))) -#define IXGBE_PVFLINKS(P) (0x042A4 + (0 * (P))) -#define IXGBE_PVFRTIMER(P) (0x00048 + (0 * (P))) -#define IXGBE_PVFMAILBOX(P) (0x04C00 + (4 * (P))) -#define IXGBE_PVFRXMEMWRAP(P) (0x03190 + (0 * (P))) -#define IXGBE_PVTEICR(P) (0x00B00 + (4 * (P))) -#define IXGBE_PVTEICS(P) (0x00C00 + (4 * (P))) -#define IXGBE_PVTEIMS(P) (0x00D00 + (4 * (P))) -#define IXGBE_PVTEIMC(P) (0x00E00 + (4 * (P))) -#define IXGBE_PVTEIAC(P) (0x00F00 + (4 * (P))) -#define IXGBE_PVTEIAM(P) (0x04D00 + (4 * (P))) -#define IXGBE_PVTEITR(P) (((P) < 24) ? (0x00820 + ((P) * 4)) : \ - (0x012300 + (((P) - 24) * 4))) -#define IXGBE_PVTIVAR(P) (0x12500 + (4 * (P))) -#define IXGBE_PVTIVAR_MISC(P) (0x04E00 + (4 * (P))) -#define IXGBE_PVTRSCINT(P) (0x12000 + (4 * (P))) -#define IXGBE_VFPBACL(P) (0x110C8 + (4 * (P))) -#define IXGBE_PVFRDBAL(P) ((P < 64) ? (0x01000 + (0x40 * (P))) \ - : (0x0D000 + (0x40 * ((P) - 64)))) -#define IXGBE_PVFRDBAH(P) ((P < 64) ? 
(0x01004 + (0x40 * (P))) \ - : (0x0D004 + (0x40 * ((P) - 64)))) -#define IXGBE_PVFRDLEN(P) ((P < 64) ? (0x01008 + (0x40 * (P))) \ - : (0x0D008 + (0x40 * ((P) - 64)))) -#define IXGBE_PVFRDH(P) ((P < 64) ? (0x01010 + (0x40 * (P))) \ - : (0x0D010 + (0x40 * ((P) - 64)))) -#define IXGBE_PVFRDT(P) ((P < 64) ? (0x01018 + (0x40 * (P))) \ - : (0x0D018 + (0x40 * ((P) - 64)))) -#define IXGBE_PVFRXDCTL(P) ((P < 64) ? (0x01028 + (0x40 * (P))) \ - : (0x0D028 + (0x40 * ((P) - 64)))) -#define IXGBE_PVFSRRCTL(P) ((P < 64) ? (0x01014 + (0x40 * (P))) \ - : (0x0D014 + (0x40 * ((P) - 64)))) -#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P))) -#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P))) -#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P))) -#define IXGBE_PVFTDLEN(P) (0x06008 + (0x40 * (P))) -#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) -#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) -#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P))) -#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) -#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) -#define IXGBE_PVFDCA_RXCTRL(P) (((P) < 64) ? 
(0x0100C + (0x40 * (P))) \ - : (0x0D00C + (0x40 * ((P) - 64)))) -#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * (P))) -#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x))) -#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x))) -#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x))) -#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x))) -#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x))) -#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x))) -#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x))) - -#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \ - (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index))) -#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \ - (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index))) - -#define IXGBE_PVFTDHn(q_per_pool, vf_number, vf_q_index) \ - (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index))) -#define IXGBE_PVFTDTn(q_per_pool, vf_number, vf_q_index) \ - (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index))) - -/* Little Endian defines */ -#ifndef __le16 -#define __le16 u16 -#endif -#ifndef __le32 -#define __le32 u32 -#endif -#ifndef __le64 -#define __le64 u64 - -#endif -#ifndef __be16 -/* Big Endian defines */ -#define __be16 u16 -#define __be32 u32 -#define __be64 u64 - -#endif -enum ixgbe_fdir_pballoc_type { - IXGBE_FDIR_PBALLOC_NONE = 0, - IXGBE_FDIR_PBALLOC_64K = 1, - IXGBE_FDIR_PBALLOC_128K = 2, - IXGBE_FDIR_PBALLOC_256K = 3, -}; - -/* Flow Director register values */ -#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001 -#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002 -#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003 -#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008 -#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010 -#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020 -#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 -#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 -#define IXGBE_FDIRCTRL_DROP_Q_MASK 0x00007F00 -#define IXGBE_FDIRCTRL_FLEX_SHIFT 16 -#define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000 -#define 
IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21 -#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */ -#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */ -#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 -#define IXGBE_FDIRCTRL_FILTERMODE_MASK 0x00E00000 -#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 -#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 -#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28 - -#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16 -#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16 -#define IXGBE_FDIRIP6M_DIPM_SHIFT 16 -#define IXGBE_FDIRM_VLANID 0x00000001 -#define IXGBE_FDIRM_VLANP 0x00000002 -#define IXGBE_FDIRM_POOL 0x00000004 -#define IXGBE_FDIRM_L4P 0x00000008 -#define IXGBE_FDIRM_FLEX 0x00000010 -#define IXGBE_FDIRM_DIPv6 0x00000020 -#define IXGBE_FDIRM_L3P 0x00000040 - -#define IXGBE_FDIRIP6M_INNER_MAC 0x03F0 /* bit 9:4 */ -#define IXGBE_FDIRIP6M_TUNNEL_TYPE 0x0800 /* bit 11 */ -#define IXGBE_FDIRIP6M_TNI_VNI 0xF000 /* bit 15:12 */ -#define IXGBE_FDIRIP6M_TNI_VNI_24 0x1000 /* bit 12 */ -#define IXGBE_FDIRIP6M_ALWAYS_MASK 0x040F /* bit 10, 3:0 */ - -#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF -#define IXGBE_FDIRFREE_FREE_SHIFT 0 -#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000 -#define IXGBE_FDIRFREE_COLL_SHIFT 16 -#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F -#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0 -#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000 -#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16 -#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF -#define IXGBE_FDIRUSTAT_ADD_SHIFT 0 -#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000 -#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16 -#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF -#define IXGBE_FDIRFSTAT_FADD_SHIFT 0 -#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00 -#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8 -#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16 -#define IXGBE_FDIRVLAN_FLEX_SHIFT 16 -#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15 -#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16 - -#define IXGBE_FDIRCMD_CMD_MASK 0x00000003 -#define IXGBE_FDIRCMD_CMD_ADD_FLOW 
0x00000001 -#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002 -#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003 -#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004 -#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008 -#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010 -#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020 -#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040 -#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060 -#define IXGBE_FDIRCMD_IPV6 0x00000080 -#define IXGBE_FDIRCMD_CLEARHT 0x00000100 -#define IXGBE_FDIRCMD_DROP 0x00000200 -#define IXGBE_FDIRCMD_INT 0x00000400 -#define IXGBE_FDIRCMD_LAST 0x00000800 -#define IXGBE_FDIRCMD_COLLISION 0x00001000 -#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 -#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 -#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 -#define IXGBE_FDIRCMD_TUNNEL_FILTER_SHIFT 23 -#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 -#define IXGBE_FDIR_INIT_DONE_POLL 10 -#define IXGBE_FDIRCMD_CMD_POLL 10 -#define IXGBE_FDIRCMD_TUNNEL_FILTER 0x00800000 -#define IXGBE_FDIR_DROP_QUEUE 127 - -/* Manageablility Host Interface defines */ -#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ -#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ -#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ -#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */ -#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ -#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */ -#define IXGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */ - -/* CEM Support */ -#define FW_CEM_HDR_LEN 0x4 -#define FW_CEM_CMD_DRIVER_INFO 0xDD -#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 -#define FW_CEM_CMD_RESERVED 0X0 -#define FW_CEM_UNUSED_VER 0x0 -#define FW_CEM_MAX_RETRIES 3 -#define FW_CEM_RESP_STATUS_SUCCESS 0x1 -#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */ -#define FW_READ_SHADOW_RAM_CMD 0x31 -#define FW_READ_SHADOW_RAM_LEN 0x6 -#define 
FW_WRITE_SHADOW_RAM_CMD 0x33 -#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */ -#define FW_SHADOW_RAM_DUMP_CMD 0x36 -#define FW_SHADOW_RAM_DUMP_LEN 0 -#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ -#define FW_NVM_DATA_OFFSET 3 -#define FW_MAX_READ_BUFFER_SIZE 1024 -#define FW_DISABLE_RXEN_CMD 0xDE -#define FW_DISABLE_RXEN_LEN 0x1 -#define FW_PHY_MGMT_REQ_CMD 0x20 -#define FW_PHY_TOKEN_REQ_CMD 0xA -#define FW_PHY_TOKEN_REQ_LEN 2 -#define FW_PHY_TOKEN_REQ 0 -#define FW_PHY_TOKEN_REL 1 -#define FW_PHY_TOKEN_OK 1 -#define FW_PHY_TOKEN_RETRY 0x80 -#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */ -#define FW_PHY_TOKEN_WAIT 5 /* seconds */ -#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY) -#define FW_INT_PHY_REQ_CMD 0xB -#define FW_INT_PHY_REQ_LEN 10 -#define FW_INT_PHY_REQ_READ 0 -#define FW_INT_PHY_REQ_WRITE 1 -#define FW_PHY_ACT_REQ_CMD 5 -#define FW_PHY_ACT_DATA_COUNT 4 -#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT) -#define FW_PHY_ACT_INIT_PHY 1 -#define FW_PHY_ACT_SETUP_LINK 2 -#define FW_PHY_ACT_LINK_SPEED_10 (1u << 0) -#define FW_PHY_ACT_LINK_SPEED_100 (1u << 1) -#define FW_PHY_ACT_LINK_SPEED_1G (1u << 2) -#define FW_PHY_ACT_LINK_SPEED_2_5G (1u << 3) -#define FW_PHY_ACT_LINK_SPEED_5G (1u << 4) -#define FW_PHY_ACT_LINK_SPEED_10G (1u << 5) -#define FW_PHY_ACT_LINK_SPEED_20G (1u << 6) -#define FW_PHY_ACT_LINK_SPEED_25G (1u << 7) -#define FW_PHY_ACT_LINK_SPEED_40G (1u << 8) -#define FW_PHY_ACT_LINK_SPEED_50G (1u << 9) -#define FW_PHY_ACT_LINK_SPEED_100G (1u << 10) -#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16 -#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3u << \ - FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) -#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u -#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u -#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u -#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u -#define FW_PHY_ACT_SETUP_LINK_LP (1u << 18) -#define FW_PHY_ACT_SETUP_LINK_HP (1u << 19) -#define FW_PHY_ACT_SETUP_LINK_EEE (1u << 
20) -#define FW_PHY_ACT_SETUP_LINK_AN (1u << 22) -#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN (1u << 0) -#define FW_PHY_ACT_GET_LINK_INFO 3 -#define FW_PHY_ACT_GET_LINK_INFO_EEE (1u << 19) -#define FW_PHY_ACT_GET_LINK_INFO_FC_TX (1u << 20) -#define FW_PHY_ACT_GET_LINK_INFO_FC_RX (1u << 21) -#define FW_PHY_ACT_GET_LINK_INFO_POWER (1u << 22) -#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE (1u << 24) -#define FW_PHY_ACT_GET_LINK_INFO_TEMP (1u << 25) -#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX (1u << 28) -#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX (1u << 29) -#define FW_PHY_ACT_FORCE_LINK_DOWN 4 -#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF (1u << 0) -#define FW_PHY_ACT_PHY_SW_RESET 5 -#define FW_PHY_ACT_PHY_HW_RESET 6 -#define FW_PHY_ACT_GET_PHY_INFO 7 -#define FW_PHY_ACT_UD_2 0x1002 -#define FW_PHY_ACT_UD_2_10G_KR_EEE (1u << 6) -#define FW_PHY_ACT_UD_2_10G_KX4_EEE (1u << 5) -#define FW_PHY_ACT_UD_2_1G_KX_EEE (1u << 4) -#define FW_PHY_ACT_UD_2_10G_T_EEE (1u << 3) -#define FW_PHY_ACT_UD_2_1G_T_EEE (1u << 2) -#define FW_PHY_ACT_UD_2_100M_TX_EEE (1u << 1) -#define FW_PHY_ACT_RETRIES 50 -#define FW_PHY_INFO_SPEED_MASK 0xFFFu -#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u -#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu - -/* Host Interface Command Structures */ - -#pragma pack(push, 1) - -struct ixgbe_hic_hdr { - u8 cmd; - u8 buf_len; - union { - u8 cmd_resv; - u8 ret_status; - } cmd_or_resp; - u8 checksum; -}; - -struct ixgbe_hic_hdr2_req { - u8 cmd; - u8 buf_lenh; - u8 buf_lenl; - u8 checksum; -}; - -struct ixgbe_hic_hdr2_rsp { - u8 cmd; - u8 buf_lenl; - u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ - u8 checksum; -}; - -union ixgbe_hic_hdr2 { - struct ixgbe_hic_hdr2_req req; - struct ixgbe_hic_hdr2_rsp rsp; -}; - -struct ixgbe_hic_drv_info { - struct ixgbe_hic_hdr hdr; - u8 port_num; - u8 ver_sub; - u8 ver_build; - u8 ver_min; - u8 ver_maj; - u8 pad; /* end spacing to ensure length is mult. of dword */ - u16 pad2; /* end spacing to ensure length is mult. 
of dword2 */ -}; - -struct ixgbe_hic_drv_info2 { - struct ixgbe_hic_hdr hdr; - u8 port_num; - u8 ver_sub; - u8 ver_build; - u8 ver_min; - u8 ver_maj; - char driver_string[FW_CEM_DRIVER_VERSION_SIZE]; -}; - -/* These need to be dword aligned */ -struct ixgbe_hic_read_shadow_ram { - union ixgbe_hic_hdr2 hdr; - u32 address; - u16 length; - u16 pad2; - u16 data; - u16 pad3; -}; - -struct ixgbe_hic_write_shadow_ram { - union ixgbe_hic_hdr2 hdr; - u32 address; - u16 length; - u16 pad2; - u16 data; - u16 pad3; -}; - -struct ixgbe_hic_disable_rxen { - struct ixgbe_hic_hdr hdr; - u8 port_number; - u8 pad2; - u16 pad3; -}; - -struct ixgbe_hic_phy_token_req { - struct ixgbe_hic_hdr hdr; - u8 port_number; - u8 command_type; - u16 pad; -}; - -struct ixgbe_hic_internal_phy_req { - struct ixgbe_hic_hdr hdr; - u8 port_number; - u8 command_type; - __be16 address; - u16 rsv1; - __be32 write_data; - u16 pad; -}; - -struct ixgbe_hic_internal_phy_resp { - struct ixgbe_hic_hdr hdr; - __be32 read_data; -}; - -struct ixgbe_hic_phy_activity_req { - struct ixgbe_hic_hdr hdr; - u8 port_number; - u8 pad; - __le16 activity_id; - __be32 data[FW_PHY_ACT_DATA_COUNT]; -}; - -struct ixgbe_hic_phy_activity_resp { - struct ixgbe_hic_hdr hdr; - __be32 data[FW_PHY_ACT_DATA_COUNT]; -}; - -#pragma pack(pop) - -/* Transmit Descriptor - Legacy */ -struct ixgbe_legacy_tx_desc { - u64 buffer_addr; /* Address of the descriptor's data buffer */ - union { - __le32 data; - struct { - __le16 length; /* Data buffer length */ - u8 cso; /* Checksum offset */ - u8 cmd; /* Descriptor control */ - } flags; - } lower; - union { - __le32 data; - struct { - u8 status; /* Descriptor status */ - u8 css; /* Checksum start */ - __le16 vlan; - } fields; - } upper; -}; - -/* Transmit Descriptor - Advanced */ -union ixgbe_adv_tx_desc { - struct { - __le64 buffer_addr; /* Address of descriptor's data buf */ - __le32 cmd_type_len; - __le32 olinfo_status; - } read; - struct { - __le64 rsvd; /* Reserved */ - __le32 nxtseq_seed; - 
__le32 status; - } wb; -}; - -/* Receive Descriptor - Legacy */ -struct ixgbe_legacy_rx_desc { - __le64 buffer_addr; /* Address of the descriptor's data buffer */ - __le16 length; /* Length of data DMAed into data buffer */ - __le16 csum; /* Packet checksum */ - u8 status; /* Descriptor status */ - u8 errors; /* Descriptor Errors */ - __le16 vlan; -}; - -/* Receive Descriptor - Advanced */ -union ixgbe_adv_rx_desc { - struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - } read; - struct { - struct { - union { - __le32 data; - struct { - __le16 pkt_info; /* RSS, Pkt type */ - __le16 hdr_info; /* Splithdr, hdrlen */ - } hs_rss; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - struct { - __le16 ip_id; /* IP id */ - __le16 csum; /* Packet Checksum */ - } csum_ip; - } hi_dword; - } lower; - struct { - __le32 status_error; /* ext status/error */ - __le16 length; /* Packet length */ - __le16 vlan; /* VLAN tag */ - } upper; - } wb; /* writeback */ -}; - -/* Context descriptors */ -struct ixgbe_adv_tx_context_desc { - __le32 vlan_macip_lens; - __le32 seqnum_seed; - __le32 type_tucmd_mlhl; - __le32 mss_l4len_idx; -}; - -/* Adv Transmit Descriptor Config Masks */ -#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ -#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ -#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 time stamp */ -#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ -#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ -#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ -#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Adv Context Desc */ -#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Adv Data Descriptor */ -#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ -#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ -#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ -#define 
IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ -#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext 1=Adv */ -#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ -#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ -#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ -#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ -#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ -#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ -#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ -#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ - IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ - IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ -/* 1st&Last TSO-full iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 -#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ -#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ -#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ -#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ -#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ -#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ -#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */ -#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* req Markers and CRC */ -#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload 
request */ -#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ -#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ -#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ -#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ -#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ -#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ -#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */ -#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */ -#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ -#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ -#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ -#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ -#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ - -#define IXGBE_ADVTXD_OUTER_IPLEN 16 /* Adv ctxt OUTERIPLEN shift */ -#define IXGBE_ADVTXD_TUNNEL_LEN 24 /* Adv ctxt TUNNELLEN shift */ -#define IXGBE_ADVTXD_TUNNEL_TYPE_SHIFT 16 /* Adv Tx Desc Tunnel Type shift */ -#define IXGBE_ADVTXD_OUTERIPCS_SHIFT 17 /* Adv Tx Desc OUTERIPCS Shift */ -#define IXGBE_ADVTXD_TUNNEL_TYPE_NVGRE 1 /* Adv Tx Desc Tunnel Type NVGRE */ -/* Adv Tx Desc OUTERIPCS Shift for X550EM_a */ -#define IXGBE_ADVTXD_OUTERIPCS_SHIFT_X550EM_a 26 -/* Autonegotiation advertised speeds */ -typedef u32 ixgbe_autoneg_advertised; -/* Link speed */ -typedef u32 ixgbe_link_speed; -#define IXGBE_LINK_SPEED_UNKNOWN 0 -#define IXGBE_LINK_SPEED_10_FULL 0x0002 -#define IXGBE_LINK_SPEED_100_FULL 0x0008 -#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 -#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 -#define IXGBE_LINK_SPEED_5GB_FULL 0x0800 -#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 -#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ - IXGBE_LINK_SPEED_10GB_FULL) -#define IXGBE_LINK_SPEED_82599_AUTONEG 
(IXGBE_LINK_SPEED_100_FULL | \ - IXGBE_LINK_SPEED_1GB_FULL | \ - IXGBE_LINK_SPEED_10GB_FULL) - -/* Physical layer type */ -typedef u64 ixgbe_physical_layer; -#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 -#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001 -#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002 -#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004 -#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008 -#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010 -#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020 -#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040 -#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080 -#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100 -#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200 -#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400 -#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800 -#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000 -#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000 -#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000 -#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000 -#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000 - -/* Flow Control Data Sheet defined values - * Calculation and defines taken from 802.1bb Annex O - */ - -/* BitTimes (BT) conversion */ -#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) -#define IXGBE_B2BT(BT) (BT * 8) - -/* Calculate Delay to respond to PFC */ -#define IXGBE_PFC_D 672 - -/* Calculate Cable Delay */ -#define IXGBE_CABLE_DC 5556 /* Delay Copper */ -#define IXGBE_CABLE_DO 5000 /* Delay Optical */ - -/* Calculate Interface Delay X540 */ -#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */ -#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ -#define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */ - -#define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC) - -/* Calculate Interface Delay 82598, 82599 */ -#define IXGBE_PHY_D 12800 -#define IXGBE_MAC_D 4096 -#define IXGBE_XAUI_D (2 * 1024) - -#define IXGBE_ID (IXGBE_MAC_D + IXGBE_XAUI_D + IXGBE_PHY_D) - -/* Calculate Delay incurred from higher layer */ 
-#define IXGBE_HD 6144 - -/* Calculate PCI Bus delay for low thresholds */ -#define IXGBE_PCI_DELAY 10000 - -/* Calculate X540 delay value in bit times */ -#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \ - ((36 * \ - (IXGBE_B2BT(_max_frame_link) + \ - IXGBE_PFC_D + \ - (2 * IXGBE_CABLE_DC) + \ - (2 * IXGBE_ID_X540) + \ - IXGBE_HD) / 25 + 1) + \ - 2 * IXGBE_B2BT(_max_frame_tc)) - -/* Calculate 82599, 82598 delay value in bit times */ -#define IXGBE_DV(_max_frame_link, _max_frame_tc) \ - ((36 * \ - (IXGBE_B2BT(_max_frame_link) + \ - IXGBE_PFC_D + \ - (2 * IXGBE_CABLE_DC) + \ - (2 * IXGBE_ID) + \ - IXGBE_HD) / 25 + 1) + \ - 2 * IXGBE_B2BT(_max_frame_tc)) - -/* Calculate low threshold delay values */ -#define IXGBE_LOW_DV_X540(_max_frame_tc) \ - (2 * IXGBE_B2BT(_max_frame_tc) + \ - (36 * IXGBE_PCI_DELAY / 25) + 1) -#define IXGBE_LOW_DV(_max_frame_tc) \ - (2 * IXGBE_LOW_DV_X540(_max_frame_tc)) - -/* Software ATR hash keys */ -#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 -#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 - -/* Software ATR input stream values and masks */ -#define IXGBE_ATR_HASH_MASK 0x7fff -#define IXGBE_ATR_L4TYPE_MASK 0x3 -#define IXGBE_ATR_L4TYPE_UDP 0x1 -#define IXGBE_ATR_L4TYPE_TCP 0x2 -#define IXGBE_ATR_L4TYPE_SCTP 0x3 -#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 -#define IXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 -enum ixgbe_atr_flow_type { - IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, - IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, - IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, - IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, - IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, - IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, - IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, - IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, - IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, - IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, - IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, - IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, - IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, - IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, - IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, - 
IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, -}; - -/* Flow Director ATR input struct. */ -union ixgbe_atr_input { - /* - * Byte layout in order, all values with MSB first: - * - * vm_pool - 1 byte - * flow_type - 1 byte - * vlan_id - 2 bytes - * src_ip - 16 bytes - * inner_mac - 6 bytes - * cloud_mode - 2 bytes - * tni_vni - 4 bytes - * dst_ip - 16 bytes - * src_port - 2 bytes - * dst_port - 2 bytes - * flex_bytes - 2 bytes - * bkt_hash - 2 bytes - */ - struct { - u8 vm_pool; - u8 flow_type; - __be16 vlan_id; - __be32 dst_ip[4]; - __be32 src_ip[4]; - u8 inner_mac[6]; - __be16 tunnel_type; - __be32 tni_vni; - __be16 src_port; - __be16 dst_port; - __be16 flex_bytes; - __be16 bkt_hash; - } formatted; - __be32 dword_stream[14]; -}; - -/* Flow Director compressed ATR hash input struct */ -union ixgbe_atr_hash_dword { - struct { - u8 vm_pool; - u8 flow_type; - __be16 vlan_id; - } formatted; - __be32 ip; - struct { - __be16 src; - __be16 dst; - } port; - __be16 flex_bytes; - __be32 dword; -}; - -#define IXGBE_MVALS_INIT(m) \ - IXGBE_CAT(EEC, m), \ - IXGBE_CAT(FLA, m), \ - IXGBE_CAT(GRC, m), \ - IXGBE_CAT(SRAMREL, m), \ - IXGBE_CAT(FACTPS, m), \ - IXGBE_CAT(SWSM, m), \ - IXGBE_CAT(SWFW_SYNC, m), \ - IXGBE_CAT(FWSM, m), \ - IXGBE_CAT(SDP0_GPIEN, m), \ - IXGBE_CAT(SDP1_GPIEN, m), \ - IXGBE_CAT(SDP2_GPIEN, m), \ - IXGBE_CAT(EICR_GPI_SDP0, m), \ - IXGBE_CAT(EICR_GPI_SDP1, m), \ - IXGBE_CAT(EICR_GPI_SDP2, m), \ - IXGBE_CAT(CIAA, m), \ - IXGBE_CAT(CIAD, m), \ - IXGBE_CAT(I2C_CLK_IN, m), \ - IXGBE_CAT(I2C_CLK_OUT, m), \ - IXGBE_CAT(I2C_DATA_IN, m), \ - IXGBE_CAT(I2C_DATA_OUT, m), \ - IXGBE_CAT(I2C_DATA_OE_N_EN, m), \ - IXGBE_CAT(I2C_BB_EN, m), \ - IXGBE_CAT(I2C_CLK_OE_N_EN, m), \ - IXGBE_CAT(I2CCTL, m) - -enum ixgbe_mvals { - IXGBE_MVALS_INIT(_IDX), - IXGBE_MVALS_IDX_LIMIT -}; - -/* - * Unavailable: The FCoE Boot Option ROM is not present in the flash. - * Disabled: Present; boot order is not set for any targets on the port. 
- * Enabled: Present; boot order is set for at least one target on the port. - */ -enum ixgbe_fcoe_boot_status { - ixgbe_fcoe_bootstatus_disabled = 0, - ixgbe_fcoe_bootstatus_enabled = 1, - ixgbe_fcoe_bootstatus_unavailable = 0xFFFF -}; - -enum ixgbe_eeprom_type { - ixgbe_eeprom_uninitialized = 0, - ixgbe_eeprom_spi, - ixgbe_flash, - ixgbe_eeprom_none /* No NVM support */ -}; - -enum ixgbe_mac_type { - ixgbe_mac_unknown = 0, - ixgbe_mac_82598EB, - ixgbe_mac_82599EB, - ixgbe_mac_X540, - ixgbe_mac_X550, - ixgbe_mac_X550EM_x, - ixgbe_mac_X550EM_a, - ixgbe_num_macs -}; - -enum ixgbe_phy_type { - ixgbe_phy_unknown = 0, - ixgbe_phy_none, - ixgbe_phy_tn, - ixgbe_phy_aq, - ixgbe_phy_x550em_kr, - ixgbe_phy_x550em_kx4, - ixgbe_phy_x550em_xfi, - ixgbe_phy_x550em_ext_t, - ixgbe_phy_ext_1g_t, - ixgbe_phy_cu_unknown, - ixgbe_phy_qt, - ixgbe_phy_xaui, - ixgbe_phy_nl, - ixgbe_phy_sfp_passive_tyco, - ixgbe_phy_sfp_passive_unknown, - ixgbe_phy_sfp_active_unknown, - ixgbe_phy_sfp_avago, - ixgbe_phy_sfp_ftl, - ixgbe_phy_sfp_ftl_active, - ixgbe_phy_sfp_unknown, - ixgbe_phy_sfp_intel, - ixgbe_phy_qsfp_passive_unknown, - ixgbe_phy_qsfp_active_unknown, - ixgbe_phy_qsfp_intel, - ixgbe_phy_qsfp_unknown, - ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ - ixgbe_phy_sgmii, - ixgbe_phy_fw, - ixgbe_phy_generic -}; - -/* - * SFP+ module type IDs: - * - * ID Module Type - * ============= - * 0 SFP_DA_CU - * 1 SFP_SR - * 2 SFP_LR - * 3 SFP_DA_CU_CORE0 - 82599-specific - * 4 SFP_DA_CU_CORE1 - 82599-specific - * 5 SFP_SR/LR_CORE0 - 82599-specific - * 6 SFP_SR/LR_CORE1 - 82599-specific - */ -enum ixgbe_sfp_type { - ixgbe_sfp_type_da_cu = 0, - ixgbe_sfp_type_sr = 1, - ixgbe_sfp_type_lr = 2, - ixgbe_sfp_type_da_cu_core0 = 3, - ixgbe_sfp_type_da_cu_core1 = 4, - ixgbe_sfp_type_srlr_core0 = 5, - ixgbe_sfp_type_srlr_core1 = 6, - ixgbe_sfp_type_da_act_lmt_core0 = 7, - ixgbe_sfp_type_da_act_lmt_core1 = 8, - ixgbe_sfp_type_1g_cu_core0 = 9, - ixgbe_sfp_type_1g_cu_core1 = 10, - 
ixgbe_sfp_type_1g_sx_core0 = 11, - ixgbe_sfp_type_1g_sx_core1 = 12, - ixgbe_sfp_type_1g_lx_core0 = 13, - ixgbe_sfp_type_1g_lx_core1 = 14, - ixgbe_sfp_type_not_present = 0xFFFE, - ixgbe_sfp_type_unknown = 0xFFFF -}; - -enum ixgbe_media_type { - ixgbe_media_type_unknown = 0, - ixgbe_media_type_fiber, - ixgbe_media_type_fiber_qsfp, - ixgbe_media_type_fiber_lco, - ixgbe_media_type_copper, - ixgbe_media_type_backplane, - ixgbe_media_type_cx4, - ixgbe_media_type_virtual -}; - -/* Flow Control Settings */ -enum ixgbe_fc_mode { - ixgbe_fc_none = 0, - ixgbe_fc_rx_pause, - ixgbe_fc_tx_pause, - ixgbe_fc_full, - ixgbe_fc_default -}; - -/* Smart Speed Settings */ -#define IXGBE_SMARTSPEED_MAX_RETRIES 3 -enum ixgbe_smart_speed { - ixgbe_smart_speed_auto = 0, - ixgbe_smart_speed_on, - ixgbe_smart_speed_off -}; - -/* PCI bus types */ -enum ixgbe_bus_type { - ixgbe_bus_type_unknown = 0, - ixgbe_bus_type_pci, - ixgbe_bus_type_pcix, - ixgbe_bus_type_pci_express, - ixgbe_bus_type_internal, - ixgbe_bus_type_reserved -}; - -/* PCI bus speeds */ -enum ixgbe_bus_speed { - ixgbe_bus_speed_unknown = 0, - ixgbe_bus_speed_33 = 33, - ixgbe_bus_speed_66 = 66, - ixgbe_bus_speed_100 = 100, - ixgbe_bus_speed_120 = 120, - ixgbe_bus_speed_133 = 133, - ixgbe_bus_speed_2500 = 2500, - ixgbe_bus_speed_5000 = 5000, - ixgbe_bus_speed_8000 = 8000, - ixgbe_bus_speed_reserved -}; - -/* PCI bus widths */ -enum ixgbe_bus_width { - ixgbe_bus_width_unknown = 0, - ixgbe_bus_width_pcie_x1 = 1, - ixgbe_bus_width_pcie_x2 = 2, - ixgbe_bus_width_pcie_x4 = 4, - ixgbe_bus_width_pcie_x8 = 8, - ixgbe_bus_width_32 = 32, - ixgbe_bus_width_64 = 64, - ixgbe_bus_width_reserved -}; - -struct ixgbe_addr_filter_info { - u32 num_mc_addrs; - u32 rar_used_count; - u32 mta_in_use; - u32 overflow_promisc; - bool user_set_promisc; -}; - -/* Bus parameters */ -struct ixgbe_bus_info { - enum ixgbe_bus_speed speed; - enum ixgbe_bus_width width; - enum ixgbe_bus_type type; - - u16 func; - u8 lan_id; - u16 instance_id; -}; - -/* Flow 
control parameters */ -struct ixgbe_fc_info { - u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */ - u32 low_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */ - u16 pause_time; /* Flow Control Pause timer */ - bool send_xon; /* Flow control send XON */ - bool strict_ieee; /* Strict IEEE mode */ - bool disable_fc_autoneg; /* Do not autonegotiate FC */ - bool fc_was_autonegged; /* Is current_mode the result of autonegging? */ - enum ixgbe_fc_mode current_mode; /* FC mode in effect */ - enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */ -}; - -/* Statistics counters collected by the MAC */ -struct ixgbe_hw_stats { - u64 crcerrs; - u64 illerrc; - u64 errbc; - u64 mspdc; - u64 mpctotal; - u64 mpc[8]; - u64 mlfc; - u64 mrfc; - u64 rlec; - u64 lxontxc; - u64 lxonrxc; - u64 lxofftxc; - u64 lxoffrxc; - u64 pxontxc[8]; - u64 pxonrxc[8]; - u64 pxofftxc[8]; - u64 pxoffrxc[8]; - u64 prc64; - u64 prc127; - u64 prc255; - u64 prc511; - u64 prc1023; - u64 prc1522; - u64 gprc; - u64 bprc; - u64 mprc; - u64 gptc; - u64 gorc; - u64 gotc; - u64 rnbc[8]; - u64 ruc; - u64 rfc; - u64 roc; - u64 rjc; - u64 mngprc; - u64 mngpdc; - u64 mngptc; - u64 tor; - u64 tpr; - u64 tpt; - u64 ptc64; - u64 ptc127; - u64 ptc255; - u64 ptc511; - u64 ptc1023; - u64 ptc1522; - u64 mptc; - u64 bptc; - u64 xec; - u64 qprc[16]; - u64 qptc[16]; - u64 qbrc[16]; - u64 qbtc[16]; - u64 qprdc[16]; - u64 pxon2offc[8]; - u64 fdirustat_add; - u64 fdirustat_remove; - u64 fdirfstat_fadd; - u64 fdirfstat_fremove; - u64 fdirmatch; - u64 fdirmiss; - u64 fccrc; - u64 fclast; - u64 fcoerpdc; - u64 fcoeprc; - u64 fcoeptc; - u64 fcoedwrc; - u64 fcoedwtc; - u64 fcoe_noddp; - u64 fcoe_noddp_ext_buff; - u64 ldpcec; - u64 pcrc8ec; - u64 b2ospc; - u64 b2ogprc; - u64 o2bgptc; - u64 o2bspc; -}; - -/* forward declaration */ -struct ixgbe_hw; - -/* iterator type for walking multicast address lists */ -typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, - u32 *vmdq); 
- -/* Function pointer table */ -struct ixgbe_eeprom_operations { - s32 (*init_params)(struct ixgbe_hw *); - s32 (*read)(struct ixgbe_hw *, u16, u16 *); - s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *); - s32 (*write)(struct ixgbe_hw *, u16, u16); - s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); - s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); - s32 (*update_checksum)(struct ixgbe_hw *); - s32 (*calc_checksum)(struct ixgbe_hw *); -}; - -struct ixgbe_mac_operations { - s32 (*init_hw)(struct ixgbe_hw *); - s32 (*reset_hw)(struct ixgbe_hw *); - s32 (*start_hw)(struct ixgbe_hw *); - s32 (*clear_hw_cntrs)(struct ixgbe_hw *); - enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); - u64 (*get_supported_physical_layer)(struct ixgbe_hw *); - s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); - s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); - s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *); - s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); - s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *); - s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *); - s32 (*stop_adapter)(struct ixgbe_hw *); - s32 (*get_bus_info)(struct ixgbe_hw *); - void (*set_lan_id)(struct ixgbe_hw *); - s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); - s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); - s32 (*setup_sfp)(struct ixgbe_hw *); - s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); - s32 (*disable_sec_rx_path)(struct ixgbe_hw *); - s32 (*enable_sec_rx_path)(struct ixgbe_hw *); - s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); - void (*release_swfw_sync)(struct ixgbe_hw *, u32); - void (*init_swfw_sync)(struct ixgbe_hw *); - s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); - s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); - - /* Link */ - void (*disable_tx_laser)(struct ixgbe_hw *); - void (*enable_tx_laser)(struct ixgbe_hw *); - void (*flap_tx_laser)(struct ixgbe_hw *); - s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); - s32 
(*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); - s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); - s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, - bool *); - void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed); - - /* Packet Buffer manipulation */ - void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int); - - /* LED */ - s32 (*led_on)(struct ixgbe_hw *, u32); - s32 (*led_off)(struct ixgbe_hw *, u32); - s32 (*blink_led_start)(struct ixgbe_hw *, u32); - s32 (*blink_led_stop)(struct ixgbe_hw *, u32); - s32 (*init_led_link_act)(struct ixgbe_hw *); - - /* RAR, Multicast, VLAN */ - s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); - s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *); - s32 (*clear_rar)(struct ixgbe_hw *, u32); - s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32); - s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); - s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32); - s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); - s32 (*init_rx_addrs)(struct ixgbe_hw *); - s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32, - ixgbe_mc_addr_itr); - s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, - ixgbe_mc_addr_itr, bool clear); - s32 (*enable_mc)(struct ixgbe_hw *); - s32 (*disable_mc)(struct ixgbe_hw *); - s32 (*clear_vfta)(struct ixgbe_hw *); - s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool); - s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, u32 *, u32, - bool); - s32 (*init_uta_tables)(struct ixgbe_hw *); - void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); - void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); - - /* Flow Control */ - s32 (*fc_enable)(struct ixgbe_hw *); - s32 (*setup_fc)(struct ixgbe_hw *); - void (*fc_autoneg)(struct ixgbe_hw *); - - /* Manageability interface */ - s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16, - const char *); - s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); - s32 
(*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); - void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map); - void (*disable_rx)(struct ixgbe_hw *hw); - void (*enable_rx)(struct ixgbe_hw *hw); - void (*set_source_address_pruning)(struct ixgbe_hw *, bool, - unsigned int); - void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int); - s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); - s32 (*dmac_config_tcs)(struct ixgbe_hw *hw); - s32 (*dmac_config)(struct ixgbe_hw *hw); - s32 (*setup_eee)(struct ixgbe_hw *hw, bool enable_eee); - s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *); - s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32); - void (*disable_mdd)(struct ixgbe_hw *hw); - void (*enable_mdd)(struct ixgbe_hw *hw); - void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap); - void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf); -}; - -struct ixgbe_phy_operations { - s32 (*identify)(struct ixgbe_hw *); - s32 (*identify_sfp)(struct ixgbe_hw *); - s32 (*init)(struct ixgbe_hw *); - s32 (*reset)(struct ixgbe_hw *); - s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); - s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); - s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *); - s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16); - s32 (*setup_link)(struct ixgbe_hw *); - s32 (*setup_internal_link)(struct ixgbe_hw *); - s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); - s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); - s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); - s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); - s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); - s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); - s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); - s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); - void (*i2c_bus_clear)(struct ixgbe_hw *); - s32 (*check_overtemp)(struct ixgbe_hw *); - s32 (*set_phy_power)(struct ixgbe_hw *, bool on); - s32 
(*enter_lplu)(struct ixgbe_hw *); - s32 (*handle_lasi)(struct ixgbe_hw *hw); - s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, - u8 *value); - s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, - u8 value); -}; - -struct ixgbe_link_operations { - s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); - s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, - u16 *val); - s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); - s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, - u16 val); -}; - -struct ixgbe_link_info { - struct ixgbe_link_operations ops; - u8 addr; -}; - -struct ixgbe_eeprom_info { - struct ixgbe_eeprom_operations ops; - enum ixgbe_eeprom_type type; - u32 semaphore_delay; - u16 word_size; - u16 address_bits; - u16 word_page_size; - u16 ctrl_word_3; -}; - -#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 -struct ixgbe_mac_info { - struct ixgbe_mac_operations ops; - enum ixgbe_mac_type type; - u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; - u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; - u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; - /* prefix for World Wide Node Name (WWNN) */ - u16 wwnn_prefix; - /* prefix for World Wide Port Name (WWPN) */ - u16 wwpn_prefix; -#define IXGBE_MAX_MTA 128 - u32 mta_shadow[IXGBE_MAX_MTA]; - s32 mc_filter_type; - u32 mcft_size; - u32 vft_size; - u32 num_rar_entries; - u32 rar_highwater; - u32 rx_pb_size; - u32 max_tx_queues; - u32 max_rx_queues; - u32 orig_autoc; - u8 san_mac_rar_index; - bool get_link_status; - u32 orig_autoc2; - u16 max_msix_vectors; - bool arc_subsystem_valid; - bool orig_link_settings_stored; - bool autotry_restart; - u8 flags; - struct ixgbe_thermal_sensor_data thermal_sensor_data; - bool thermal_sensor_enabled; - struct ixgbe_dmac_config dmac_config; - bool set_lben; - u32 max_link_up_time; - u8 led_link_act; -}; - -struct ixgbe_phy_info { - struct ixgbe_phy_operations ops; - enum ixgbe_phy_type type; - u32 addr; - u32 id; - enum 
ixgbe_sfp_type sfp_type; - bool sfp_setup_needed; - u32 revision; - enum ixgbe_media_type media_type; - u32 phy_semaphore_mask; - bool reset_disable; - ixgbe_autoneg_advertised autoneg_advertised; - ixgbe_link_speed speeds_supported; - ixgbe_link_speed eee_speeds_supported; - ixgbe_link_speed eee_speeds_advertised; - enum ixgbe_smart_speed smart_speed; - bool smart_speed_active; - bool multispeed_fiber; - bool reset_if_overtemp; - bool qsfp_shared_i2c_bus; - u32 nw_mng_if_sel; -}; - -#include "ixgbe_mbx.h" - -struct ixgbe_mbx_operations { - void (*init_params)(struct ixgbe_hw *hw); - s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*check_for_msg)(struct ixgbe_hw *, u16); - s32 (*check_for_ack)(struct ixgbe_hw *, u16); - s32 (*check_for_rst)(struct ixgbe_hw *, u16); -}; - -struct ixgbe_mbx_stats { - u32 msgs_tx; - u32 msgs_rx; - - u32 acks; - u32 reqs; - u32 rsts; -}; - -struct ixgbe_mbx_info { - struct ixgbe_mbx_operations ops; - struct ixgbe_mbx_stats stats; - u32 timeout; - u32 usec_delay; - u32 v2p_mailbox; - u16 size; -}; - -struct ixgbe_hw { - u8 IOMEM *hw_addr; - void *back; - struct ixgbe_mac_info mac; - struct ixgbe_addr_filter_info addr_ctrl; - struct ixgbe_fc_info fc; - struct ixgbe_phy_info phy; - struct ixgbe_link_info link; - struct ixgbe_eeprom_info eeprom; - struct ixgbe_bus_info bus; - struct ixgbe_mbx_info mbx; - const u32 *mvals; - u16 device_id; - u16 vendor_id; - u16 subsystem_device_id; - u16 subsystem_vendor_id; - u8 revision_id; - bool adapter_stopped; - int api_version; - bool force_full_reset; - bool allow_unsupported_sfp; - bool wol_enabled; - bool need_crosstalk_fix; -}; - -#define ixgbe_call_func(hw, func, params, error) \ - (func != NULL) ? 
func params : error - -/* Error Codes */ -#define IXGBE_SUCCESS 0 -#define IXGBE_ERR_EEPROM -1 -#define IXGBE_ERR_EEPROM_CHECKSUM -2 -#define IXGBE_ERR_PHY -3 -#define IXGBE_ERR_CONFIG -4 -#define IXGBE_ERR_PARAM -5 -#define IXGBE_ERR_MAC_TYPE -6 -#define IXGBE_ERR_UNKNOWN_PHY -7 -#define IXGBE_ERR_LINK_SETUP -8 -#define IXGBE_ERR_ADAPTER_STOPPED -9 -#define IXGBE_ERR_INVALID_MAC_ADDR -10 -#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 -#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 -#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 -#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 -#define IXGBE_ERR_RESET_FAILED -15 -#define IXGBE_ERR_SWFW_SYNC -16 -#define IXGBE_ERR_PHY_ADDR_INVALID -17 -#define IXGBE_ERR_I2C -18 -#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 -#define IXGBE_ERR_SFP_NOT_PRESENT -20 -#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 -#define IXGBE_ERR_NO_SAN_ADDR_PTR -22 -#define IXGBE_ERR_FDIR_REINIT_FAILED -23 -#define IXGBE_ERR_EEPROM_VERSION -24 -#define IXGBE_ERR_NO_SPACE -25 -#define IXGBE_ERR_OVERTEMP -26 -#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 -#define IXGBE_ERR_FC_NOT_SUPPORTED -28 -#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 -#define IXGBE_ERR_PBA_SECTION -31 -#define IXGBE_ERR_INVALID_ARGUMENT -32 -#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 -#define IXGBE_ERR_OUT_OF_MEM -34 -#define IXGBE_ERR_FEATURE_NOT_SUPPORTED -36 -#define IXGBE_ERR_EEPROM_PROTECTED_REGION -37 -#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38 -#define IXGBE_ERR_FW_RESP_INVALID -39 -#define IXGBE_ERR_TOKEN_RETRY -40 - -#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF - -#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) -#define IXGBE_FUSES0_300MHZ (1 << 5) -#define IXGBE_FUSES0_REV_MASK (3 << 6) - -#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) -#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200) -#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) -#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) -#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 
0x8238 : 0x4238) -#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248) -#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918) -#define IXGBE_KRM_PCS_KX_AN_LP(P) ((P) ? 0x991C : 0x591C) -#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0) -#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C) -#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) -#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) -#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00) -#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00) -#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054) -#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) -#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00) - -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR (1u << 20) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR (0x2 << 20) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN (1u << 25) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN (1u << 26) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN (1u << 27) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10M ~(0x7 << 28) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M (1u << 28) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G (0x2 << 28) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G (0x3 << 28) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN (0x4 << 28) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G (0x7 << 28) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK (0x7 << 28) -#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART (1u << 31) - -#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) -#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) - -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN (1 << 12) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN (1 << 13) 
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18) -#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24) -#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26) -#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE (1 << 28) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) - -#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28) -#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29) -#define IXGBE_KRM_PCS_KX_AN_SYM_PAUSE (1 << 1) -#define IXGBE_KRM_PCS_KX_AN_ASM_PAUSE (1 << 2) -#define IXGBE_KRM_PCS_KX_AN_LP_SYM_PAUSE (1 << 2) -#define IXGBE_KRM_PCS_KX_AN_LP_ASM_PAUSE (1 << 3) -#define IXGBE_KRM_AN_CNTL_4_ECSR_AN37_OVER_73 (1 << 29) -#define IXGBE_KRM_AN_CNTL_8_LINEAR (1 << 0) -#define IXGBE_KRM_AN_CNTL_8_LIMITING (1 << 1) - -#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE (1 << 10) -#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE (1 << 11) - -#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D (1 << 12) -#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D (1 << 19) - -#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) -#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) -#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) - -#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4) -#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2) - -#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16) - -#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1) -#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2) -#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3) -#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31) - -#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 -#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 - -#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0 -#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF -#define 
IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18 -#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \ - (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT) -#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20 -#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \ - (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT) -#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28 -#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7 -#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31 -#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) -#define IXGBE_SB_IOSF_TARGET_KR_PHY 0 - -#define IXGBE_NW_MNG_IF_SEL 0x00011178 -#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT (1u << 1) -#define IXGBE_NW_MNG_IF_SEL_MDIO_IF_MODE (1u << 2) -#define IXGBE_NW_MNG_IF_SEL_EN_SHARED_MDIO (1u << 13) -#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M (1u << 17) -#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M (1u << 18) -#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G (1u << 19) -#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G (1u << 20) -#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G (1u << 21) -#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE (1u << 25) -#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24) /* X552 reg field only */ -#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 -#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ - (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) - -#include "ixgbe_osdep2.h" - -#endif /* _IXGBE_TYPE_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.c deleted file mode 100644 index 2a62f443c4cd..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.c +++ /dev/null @@ -1,1048 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. 
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe_x540.h" -#include "ixgbe_type.h" -#include "ixgbe_api.h" -#include "ixgbe_common.h" -#include "ixgbe_phy.h" - -#define IXGBE_X540_MAX_TX_QUEUES 128 -#define IXGBE_X540_MAX_RX_QUEUES 128 -#define IXGBE_X540_RAR_ENTRIES 128 -#define IXGBE_X540_MC_TBL_SIZE 128 -#define IXGBE_X540_VFT_TBL_SIZE 128 -#define IXGBE_X540_RX_PB_SIZE 384 - -STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); -STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); -STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); - -/** - * ixgbe_init_ops_X540 - Inits func ptrs and MAC type - * @hw: pointer to hardware structure - * - * Initialize the function pointers and assign the MAC type for X540. - * Does not touch the hardware. 
- **/ -s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_phy_info *phy = &hw->phy; - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - s32 ret_val; - - DEBUGFUNC("ixgbe_init_ops_X540"); - - ret_val = ixgbe_init_phy_ops_generic(hw); - ret_val = ixgbe_init_ops_generic(hw); - - /* EEPROM */ - eeprom->ops.init_params = ixgbe_init_eeprom_params_X540; - eeprom->ops.read = ixgbe_read_eerd_X540; - eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540; - eeprom->ops.write = ixgbe_write_eewr_X540; - eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540; - eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540; - eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540; - eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540; - - /* PHY */ - phy->ops.init = ixgbe_init_phy_ops_generic; - phy->ops.reset = NULL; - phy->ops.set_phy_power = ixgbe_set_copper_phy_power; - - /* MAC */ - mac->ops.reset_hw = ixgbe_reset_hw_X540; - mac->ops.get_media_type = ixgbe_get_media_type_X540; - mac->ops.get_supported_physical_layer = - ixgbe_get_supported_physical_layer_X540; - mac->ops.read_analog_reg8 = NULL; - mac->ops.write_analog_reg8 = NULL; - mac->ops.start_hw = ixgbe_start_hw_X540; - mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; - mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; - mac->ops.get_device_caps = ixgbe_get_device_caps_generic; - mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; - mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; - mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540; - mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540; - mac->ops.init_swfw_sync = ixgbe_init_swfw_sync_X540; - mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; - mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; - - /* RAR, Multicast, VLAN */ - mac->ops.set_vmdq = ixgbe_set_vmdq_generic; - mac->ops.set_vmdq_san_mac = 
ixgbe_set_vmdq_san_mac_generic; - mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; - mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; - mac->rar_highwater = 1; - mac->ops.set_vfta = ixgbe_set_vfta_generic; - mac->ops.set_vlvf = ixgbe_set_vlvf_generic; - mac->ops.clear_vfta = ixgbe_clear_vfta_generic; - mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; - mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; - mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; - - /* Link */ - mac->ops.get_link_capabilities = - ixgbe_get_copper_link_capabilities_generic; - mac->ops.setup_link = ixgbe_setup_mac_link_X540; - mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; - mac->ops.check_link = ixgbe_check_mac_link_generic; - - mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; - mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; - mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; - mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE; - mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; - mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; - mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); - - /* - * FWSM register - * ARC supported; valid only if manageability features are - * enabled. - */ - mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)) - & IXGBE_FWSM_MODE_MASK); - - hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; - - /* LEDs */ - mac->ops.blink_led_start = ixgbe_blink_led_start_X540; - mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540; - - /* Manageability interface */ - mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; - - mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; - - return ret_val; -} - -/** - * ixgbe_get_link_capabilities_X540 - Determines link capabilities - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: true when autoneg or autotry is enabled - * - * Determines the link capabilities by reading the AUTOC register. 
- **/ -s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) -{ - ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_get_media_type_X540 - Get media type - * @hw: pointer to hardware structure - * - * Returns the media type (fiber, copper, backplane) - **/ -enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) -{ - UNREFERENCED_1PARAMETER(hw); - return ixgbe_media_type_copper; -} - -/** - * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true when waiting for completion is needed - **/ -s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) -{ - DEBUGFUNC("ixgbe_setup_mac_link_X540"); - return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); -} - -/** - * ixgbe_reset_hw_X540 - Perform hardware reset - * @hw: pointer to hardware structure - * - * Resets the hardware by resetting the transmit and receive units, masks - * and clears all interrupts, and perform a reset. 
- **/ -s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) -{ - s32 status; - u32 ctrl, i; - u32 swfw_mask = hw->phy.phy_semaphore_mask; - - DEBUGFUNC("ixgbe_reset_hw_X540"); - - /* Call adapter stop to disable tx/rx and clear interrupts */ - status = hw->mac.ops.stop_adapter(hw); - if (status != IXGBE_SUCCESS) - goto reset_hw_out; - - /* flush pending Tx transactions */ - ixgbe_clear_tx_pending(hw); - -mac_reset_top: - status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); - if (status != IXGBE_SUCCESS) { - ERROR_REPORT2(IXGBE_ERROR_CAUTION, - "semaphore failed with %d", status); - return IXGBE_ERR_SWFW_SYNC; - } - ctrl = IXGBE_CTRL_RST; - ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); - IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); - IXGBE_WRITE_FLUSH(hw); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - - /* Poll for reset bit to self-clear indicating reset is complete */ - for (i = 0; i < 10; i++) { - usec_delay(1); - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - if (!(ctrl & IXGBE_CTRL_RST_MASK)) - break; - } - - if (ctrl & IXGBE_CTRL_RST_MASK) { - status = IXGBE_ERR_RESET_FAILED; - ERROR_REPORT1(IXGBE_ERROR_POLLING, - "Reset polling failed to complete.\n"); - } - msec_delay(100); - - /* - * Double resets are required for recovery from certain error - * conditions. Between resets, it is necessary to stall to allow time - * for any pending HW events to complete. - */ - if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { - hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; - goto mac_reset_top; - } - - /* Set the Rx packet buffer size. */ - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT); - - /* Store the permanent mac address */ - hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); - - /* - * Store MAC address from RAR0, clear receive address registers, and - * clear the multicast table. Also reset num_rar_entries to 128, - * since we modify this value when programming the SAN MAC address. 
- */ - hw->mac.num_rar_entries = 128; - hw->mac.ops.init_rx_addrs(hw); - - /* Store the permanent SAN mac address */ - hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); - - /* Add the SAN MAC address to the RAR only if it's a valid address */ - if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { - /* Save the SAN MAC RAR index */ - hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; - - hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, - hw->mac.san_addr, 0, IXGBE_RAH_AV); - - /* clear VMDq pool/queue selection for this RAR */ - hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, - IXGBE_CLEAR_VMDQ_ALL); - - /* Reserve the last RAR for the SAN MAC address */ - hw->mac.num_rar_entries--; - } - - /* Store the alternative WWNN/WWPN prefix */ - hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, - &hw->mac.wwpn_prefix); - -reset_hw_out: - return status; -} - -/** - * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx - * @hw: pointer to hardware structure - * - * Starts the hardware using the generic start_hw function - * and the generation start_hw function. - * Then performs revision-specific operations, if any. - **/ -s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_start_hw_X540"); - - ret_val = ixgbe_start_hw_generic(hw); - if (ret_val != IXGBE_SUCCESS) - goto out; - - ret_val = ixgbe_start_hw_gen2(hw); - -out: - return ret_val; -} - -/** - * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type - * @hw: pointer to hardware structure - * - * Determines physical layer capabilities of the current configuration. 
- **/ -u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) -{ - u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; - u16 ext_ability = 0; - - DEBUGFUNC("ixgbe_get_supported_physical_layer_X540"); - - hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); - if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) - physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; - if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) - physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; - if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) - physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; - - return physical_layer; -} - -/** - * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params - * @hw: pointer to hardware structure - * - * Initializes the EEPROM parameters ixgbe_eeprom_info within the - * ixgbe_hw struct in order to set up EEPROM access. - **/ -s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) -{ - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - u32 eec; - u16 eeprom_size; - - DEBUGFUNC("ixgbe_init_eeprom_params_X540"); - - if (eeprom->type == ixgbe_eeprom_uninitialized) { - eeprom->semaphore_delay = 10; - eeprom->type = ixgbe_flash; - - eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); - eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> - IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = 1 << (eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); - - DEBUGOUT2("Eeprom params: type = %d, size = %d\n", - eeprom->type, eeprom->word_size); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_read_eerd_X540- Read EEPROM word using EERD - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM using the EERD register. 
- **/ -s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) -{ - s32 status = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_read_eerd_X540"); - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == - IXGBE_SUCCESS) { - status = ixgbe_read_eerd_generic(hw, offset, data); - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - } else { - status = IXGBE_ERR_SWFW_SYNC; - } - - return status; -} - -/** - * ixgbe_read_eerd_buffer_X540- Read EEPROM word(s) using EERD - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @words: number of words - * @data: word(s) read from the EEPROM - * - * Reads a 16 bit word(s) from the EEPROM using the EERD register. - **/ -s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, - u16 offset, u16 words, u16 *data) -{ - s32 status = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_read_eerd_buffer_X540"); - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == - IXGBE_SUCCESS) { - status = ixgbe_read_eerd_buffer_generic(hw, offset, - words, data); - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - } else { - status = IXGBE_ERR_SWFW_SYNC; - } - - return status; -} - -/** - * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @data: word write to the EEPROM - * - * Write a 16 bit word to the EEPROM using the EEWR register. 
- **/ -s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) -{ - s32 status = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_write_eewr_X540"); - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == - IXGBE_SUCCESS) { - status = ixgbe_write_eewr_generic(hw, offset, data); - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - } else { - status = IXGBE_ERR_SWFW_SYNC; - } - - return status; -} - -/** - * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @words: number of words - * @data: word(s) write to the EEPROM - * - * Write a 16 bit word(s) to the EEPROM using the EEWR register. - **/ -s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, - u16 offset, u16 words, u16 *data) -{ - s32 status = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_write_eewr_buffer_X540"); - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == - IXGBE_SUCCESS) { - status = ixgbe_write_eewr_buffer_generic(hw, offset, - words, data); - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - } else { - status = IXGBE_ERR_SWFW_SYNC; - } - - return status; -} - -/** - * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum - * - * This function does not use synchronization for EERD and EEWR. It can - * be used internally by function which utilize ixgbe_acquire_swfw_sync_X540. - * - * @hw: pointer to hardware structure - * - * Returns a negative error code on error, or the 16-bit checksum - **/ -s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) -{ - u16 i, j; - u16 checksum = 0; - u16 length = 0; - u16 pointer = 0; - u16 word = 0; - u16 ptr_start = IXGBE_PCIE_ANALOG_PTR; - - /* Do not use hw->eeprom.ops.read because we do not want to take - * the synchronization semaphores here. 
Instead use - * ixgbe_read_eerd_generic - */ - - DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540"); - - /* Include 0x0 up to IXGBE_EEPROM_CHECKSUM; do not include the - * checksum itself - */ - for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { - if (ixgbe_read_eerd_generic(hw, i, &word)) { - DEBUGOUT("EEPROM read failed\n"); - return IXGBE_ERR_EEPROM; - } - checksum += word; - } - - /* Include all data from pointers 0x3, 0x6-0xE. This excludes the - * FW, PHY module, and PCIe Expansion/Option ROM pointers. - */ - for (i = ptr_start; i < IXGBE_FW_PTR; i++) { - if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) - continue; - - if (ixgbe_read_eerd_generic(hw, i, &pointer)) { - DEBUGOUT("EEPROM read failed\n"); - return IXGBE_ERR_EEPROM; - } - - /* Skip pointer section if the pointer is invalid. */ - if (pointer == 0xFFFF || pointer == 0 || - pointer >= hw->eeprom.word_size) - continue; - - if (ixgbe_read_eerd_generic(hw, pointer, &length)) { - DEBUGOUT("EEPROM read failed\n"); - return IXGBE_ERR_EEPROM; - } - - /* Skip pointer section if length is invalid. */ - if (length == 0xFFFF || length == 0 || - (pointer + length) >= hw->eeprom.word_size) - continue; - - for (j = pointer + 1; j <= pointer + length; j++) { - if (ixgbe_read_eerd_generic(hw, j, &word)) { - DEBUGOUT("EEPROM read failed\n"); - return IXGBE_ERR_EEPROM; - } - checksum += word; - } - } - - checksum = (u16)IXGBE_EEPROM_SUM - checksum; - - return (s32)checksum; -} - -/** - * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum - * @hw: pointer to hardware structure - * @checksum_val: calculated checksum - * - * Performs checksum calculation and validates the EEPROM checksum. If the - * caller does not need checksum_val, the value can be NULL. - **/ -s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, - u16 *checksum_val) -{ - s32 status; - u16 checksum; - u16 read_checksum = 0; - - DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540"); - - /* Read the first word from the EEPROM. 
If this times out or fails, do - * not continue or we could be in for a very long wait while every - * EEPROM read fails - */ - status = hw->eeprom.ops.read(hw, 0, &checksum); - if (status) { - DEBUGOUT("EEPROM read failed\n"); - return status; - } - - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; - - status = hw->eeprom.ops.calc_checksum(hw); - if (status < 0) - goto out; - - checksum = (u16)(status & 0xffff); - - /* Do not use hw->eeprom.ops.read because we do not want to take - * the synchronization semaphores twice here. - */ - status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, - &read_checksum); - if (status) - goto out; - - /* Verify read checksum from EEPROM is the same as - * calculated checksum - */ - if (read_checksum != checksum) { - ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, - "Invalid EEPROM checksum"); - status = IXGBE_ERR_EEPROM_CHECKSUM; - } - - /* If the user cares, return the calculated checksum */ - if (checksum_val) - *checksum_val = checksum; - -out: - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - - return status; -} - -/** - * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash - * @hw: pointer to hardware structure - * - * After writing EEPROM to shadow RAM using EEWR register, software calculates - * checksum and updates the EEPROM and instructs the hardware to update - * the flash. - **/ -s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) -{ - s32 status; - u16 checksum; - - DEBUGFUNC("ixgbe_update_eeprom_checksum_X540"); - - /* Read the first word from the EEPROM. 
If this times out or fails, do - * not continue or we could be in for a very long wait while every - * EEPROM read fails - */ - status = hw->eeprom.ops.read(hw, 0, &checksum); - if (status) { - DEBUGOUT("EEPROM read failed\n"); - return status; - } - - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; - - status = hw->eeprom.ops.calc_checksum(hw); - if (status < 0) - goto out; - - checksum = (u16)(status & 0xffff); - - /* Do not use hw->eeprom.ops.write because we do not want to - * take the synchronization semaphores twice here. - */ - status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum); - if (status) - goto out; - - status = ixgbe_update_flash_X540(hw); - -out: - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - - return status; -} - -/** - * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device - * @hw: pointer to hardware structure - * - * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy - * EEPROM from shadow RAM to the flash device. 
- **/ -s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) -{ - u32 flup; - s32 status; - - DEBUGFUNC("ixgbe_update_flash_X540"); - - status = ixgbe_poll_flash_update_done_X540(hw); - if (status == IXGBE_ERR_EEPROM) { - DEBUGOUT("Flash update time out\n"); - goto out; - } - - flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)) | IXGBE_EEC_FLUP; - IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup); - - status = ixgbe_poll_flash_update_done_X540(hw); - if (status == IXGBE_SUCCESS) - DEBUGOUT("Flash update complete\n"); - else - DEBUGOUT("Flash update time out\n"); - - if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) { - flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); - - if (flup & IXGBE_EEC_SEC1VAL) { - flup |= IXGBE_EEC_FLUP; - IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup); - } - - status = ixgbe_poll_flash_update_done_X540(hw); - if (status == IXGBE_SUCCESS) - DEBUGOUT("Flash update complete\n"); - else - DEBUGOUT("Flash update time out\n"); - } -out: - return status; -} - -/** - * ixgbe_poll_flash_update_done_X540 - Poll flash update status - * @hw: pointer to hardware structure - * - * Polls the FLUDONE (bit 26) of the EEC Register to determine when the - * flash update is done. 
- **/ -STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) -{ - u32 i; - u32 reg; - s32 status = IXGBE_ERR_EEPROM; - - DEBUGFUNC("ixgbe_poll_flash_update_done_X540"); - - for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { - reg = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); - if (reg & IXGBE_EEC_FLUDONE) { - status = IXGBE_SUCCESS; - break; - } - msec_delay(5); - } - - if (i == IXGBE_FLUDONE_ATTEMPTS) - ERROR_REPORT1(IXGBE_ERROR_POLLING, - "Flash update status polling timed out"); - - return status; -} - -/** - * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to acquire - * - * Acquires the SWFW semaphore thought the SW_FW_SYNC register for - * the specified function (CSR, PHY0, PHY1, NVM, Flash) - **/ -s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) -{ - u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK; - u32 fwmask = swmask << 5; - u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK; - u32 timeout = 200; - u32 hwmask = 0; - u32 swfw_sync; - u32 i; - - DEBUGFUNC("ixgbe_acquire_swfw_sync_X540"); - - if (swmask & IXGBE_GSSR_EEP_SM) - hwmask |= IXGBE_GSSR_FLASH_SM; - - /* SW only mask doesn't have FW bit pair */ - if (mask & IXGBE_GSSR_SW_MNG_SM) - swmask |= IXGBE_GSSR_SW_MNG_SM; - - swmask |= swi2c_mask; - fwmask |= swi2c_mask << 2; - for (i = 0; i < timeout; i++) { - /* SW NVM semaphore bit is used for access to all - * SW_FW_SYNC bits (not just NVM) - */ - if (ixgbe_get_swfw_sync_semaphore(hw)) { - DEBUGOUT("Failed to get NVM access and register semaphore, returning IXGBE_ERR_SWFW_SYNC\n"); - return IXGBE_ERR_SWFW_SYNC; - } - - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); - if (!(swfw_sync & (fwmask | swmask | hwmask))) { - swfw_sync |= swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), - swfw_sync); - ixgbe_release_swfw_sync_semaphore(hw); - return IXGBE_SUCCESS; - } - /* Firmware currently using resource (fwmask), hardware - * currently using 
resource (hwmask), or other software - * thread currently using resource (swmask) - */ - ixgbe_release_swfw_sync_semaphore(hw); - msec_delay(5); - } - - /* If the resource is not released by the FW/HW the SW can assume that - * the FW/HW malfunctions. In that case the SW should set the SW bit(s) - * of the requested resource(s) while ignoring the corresponding FW/HW - * bits in the SW_FW_SYNC register. - */ - if (ixgbe_get_swfw_sync_semaphore(hw)) { - DEBUGOUT("Failed to get NVM sempahore and register semaphore while forcefully ignoring FW sempahore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n"); - return IXGBE_ERR_SWFW_SYNC; - } - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); - if (swfw_sync & (fwmask | hwmask)) { - swfw_sync |= swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync); - ixgbe_release_swfw_sync_semaphore(hw); - msec_delay(5); - return IXGBE_SUCCESS; - } - /* If the resource is not released by other SW the SW can assume that - * the other SW malfunctions. In that case the SW should clear all SW - * flags that it does not own and then repeat the whole process once - * again. 
- */ - if (swfw_sync & swmask) { - u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | - IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | - IXGBE_GSSR_SW_MNG_SM; - - if (swi2c_mask) - rmask |= IXGBE_GSSR_I2C_MASK; - ixgbe_release_swfw_sync_X540(hw, rmask); - ixgbe_release_swfw_sync_semaphore(hw); - DEBUGOUT("Resource not released by other SW, returning IXGBE_ERR_SWFW_SYNC\n"); - return IXGBE_ERR_SWFW_SYNC; - } - ixgbe_release_swfw_sync_semaphore(hw); - DEBUGOUT("Returning error IXGBE_ERR_SWFW_SYNC\n"); - - return IXGBE_ERR_SWFW_SYNC; -} - -/** - * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to release - * - * Releases the SWFW semaphore through the SW_FW_SYNC register - * for the specified function (CSR, PHY0, PHY1, EVM, Flash) - **/ -void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) -{ - u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM); - u32 swfw_sync; - - DEBUGFUNC("ixgbe_release_swfw_sync_X540"); - - if (mask & IXGBE_GSSR_I2C_MASK) - swmask |= mask & IXGBE_GSSR_I2C_MASK; - ixgbe_get_swfw_sync_semaphore(hw); - - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); - swfw_sync &= ~swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync); - - ixgbe_release_swfw_sync_semaphore(hw); - msec_delay(2); -} - -/** - * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore - * @hw: pointer to hardware structure - * - * Sets the hardware semaphores so SW/FW can gain control of shared resources - **/ -STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_ERR_EEPROM; - u32 timeout = 2000; - u32 i; - u32 swsm; - - DEBUGFUNC("ixgbe_get_swfw_sync_semaphore"); - - /* Get SMBI software semaphore between device drivers first */ - for (i = 0; i < timeout; i++) { - /* - * If the SMBI bit is 0 when we read it, then the bit will be - * set and we have the semaphore - */ - swsm = IXGBE_READ_REG(hw, 
IXGBE_SWSM_BY_MAC(hw)); - if (!(swsm & IXGBE_SWSM_SMBI)) { - status = IXGBE_SUCCESS; - break; - } - usec_delay(50); - } - - /* Now get the semaphore between SW/FW through the REGSMP bit */ - if (status == IXGBE_SUCCESS) { - for (i = 0; i < timeout; i++) { - swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); - if (!(swsm & IXGBE_SWFW_REGSMP)) - break; - - usec_delay(50); - } - - /* - * Release semaphores and return error if SW NVM semaphore - * was not granted because we don't have access to the EEPROM - */ - if (i >= timeout) { - ERROR_REPORT1(IXGBE_ERROR_POLLING, - "REGSMP Software NVM semaphore not granted.\n"); - ixgbe_release_swfw_sync_semaphore(hw); - status = IXGBE_ERR_EEPROM; - } - } else { - ERROR_REPORT1(IXGBE_ERROR_POLLING, - "Software semaphore SMBI between device drivers " - "not granted.\n"); - } - - return status; -} - -/** - * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore - * @hw: pointer to hardware structure - * - * This function clears hardware semaphore bits. - **/ -STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) -{ - u32 swsm; - - DEBUGFUNC("ixgbe_release_swfw_sync_semaphore"); - - /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ - - swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); - swsm &= ~IXGBE_SWFW_REGSMP; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swsm); - - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); - swsm &= ~IXGBE_SWSM_SMBI; - IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); - - IXGBE_WRITE_FLUSH(hw); -} - -/** - * ixgbe_init_swfw_sync_X540 - Release hardware semaphore - * @hw: pointer to hardware structure - * - * This function reset hardware semaphore bits for a semaphore that may - * have be left locked due to a catastrophic failure. 
- **/ -void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) -{ - u32 rmask; - - /* First try to grab the semaphore but we don't need to bother - * looking to see whether we got the lock or not since we do - * the same thing regardless of whether we got the lock or not. - * We got the lock - we release it. - * We timeout trying to get the lock - we force its release. - */ - ixgbe_get_swfw_sync_semaphore(hw); - ixgbe_release_swfw_sync_semaphore(hw); - - /* Acquire and release all software resources. */ - rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | - IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | - IXGBE_GSSR_SW_MNG_SM; - - rmask |= IXGBE_GSSR_I2C_MASK; - ixgbe_acquire_swfw_sync_X540(hw, rmask); - ixgbe_release_swfw_sync_X540(hw, rmask); -} - -/** - * ixgbe_blink_led_start_X540 - Blink LED based on index. - * @hw: pointer to hardware structure - * @index: led number to blink - * - * Devices that implement the version 2 interface: - * X540 - **/ -s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) -{ - u32 macc_reg; - u32 ledctl_reg; - ixgbe_link_speed speed; - bool link_up; - - DEBUGFUNC("ixgbe_blink_led_start_X540"); - - if (index > 3) - return IXGBE_ERR_PARAM; - - /* - * Link should be up in order for the blink bit in the LED control - * register to work. Force link and speed in the MAC if link is down. - * This will be reversed when we stop the blinking. - */ - hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (link_up == false) { - macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); - macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; - IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); - } - /* Set the LED to LINK_UP + BLINK. */ - ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); - ledctl_reg |= IXGBE_LED_BLINK(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); - IXGBE_WRITE_FLUSH(hw); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index. 
- * @hw: pointer to hardware structure - * @index: led number to stop blinking - * - * Devices that implement the version 2 interface: - * X540 - **/ -s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) -{ - u32 macc_reg; - u32 ledctl_reg; - - if (index > 3) - return IXGBE_ERR_PARAM; - - DEBUGFUNC("ixgbe_blink_led_stop_X540"); - - /* Restore the LED to its default value. */ - ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); - ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); - ledctl_reg &= ~IXGBE_LED_BLINK(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); - - /* Unforce link and speed in the MAC. */ - macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); - macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS); - IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); - IXGBE_WRITE_FLUSH(hw); - - return IXGBE_SUCCESS; -} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.h deleted file mode 100644 index 4cace8523980..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x540.h +++ /dev/null @@ -1,58 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
- - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_X540_H_ -#define _IXGBE_X540_H_ - -#include "ixgbe_type.h" - -s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, bool *autoneg); -enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw); -s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool link_up_wait_to_complete); -s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw); -s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw); -u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw); - -s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); -s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data); -s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, - u16 *data); -s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data); -s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, - u16 *data); -s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw); -s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val); -s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw); -s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); - -s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); -void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); -void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw); - -s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); -#endif /* _IXGBE_X540_H_ */ - diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.c 
b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.c deleted file mode 100644 index 2cdaa276f8e3..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.c +++ /dev/null @@ -1,4711 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe_x550.h" -#include "ixgbe_x540.h" -#include "ixgbe_type.h" -#include "ixgbe_api.h" -#include "ixgbe_common.h" -#include "ixgbe_phy.h" - -STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed); -STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); -STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); -STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw); - -/** - * ixgbe_init_ops_X550 - Inits func ptrs and MAC type - * @hw: pointer to hardware structure - * - * Initialize the function pointers and assign the MAC type for X550. - * Does not touch the hardware. 
- **/ -s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - s32 ret_val; - - DEBUGFUNC("ixgbe_init_ops_X550"); - - ret_val = ixgbe_init_ops_X540(hw); - mac->ops.dmac_config = ixgbe_dmac_config_X550; - mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550; - mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550; - mac->ops.setup_eee = NULL; - mac->ops.set_source_address_pruning = - ixgbe_set_source_address_pruning_X550; - mac->ops.set_ethertype_anti_spoofing = - ixgbe_set_ethertype_anti_spoofing_X550; - - mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; - eeprom->ops.init_params = ixgbe_init_eeprom_params_X550; - eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550; - eeprom->ops.read = ixgbe_read_ee_hostif_X550; - eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550; - eeprom->ops.write = ixgbe_write_ee_hostif_X550; - eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550; - eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550; - eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550; - - mac->ops.disable_mdd = ixgbe_disable_mdd_X550; - mac->ops.enable_mdd = ixgbe_enable_mdd_X550; - mac->ops.mdd_event = ixgbe_mdd_event_X550; - mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550; - mac->ops.disable_rx = ixgbe_disable_rx_x550; - /* Manageability interface */ - mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550; - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_X_1G_T: - hw->mac.ops.led_on = NULL; - hw->mac.ops.led_off = NULL; - break; - case IXGBE_DEV_ID_X550EM_X_10G_T: - case IXGBE_DEV_ID_X550EM_A_10G_T: - hw->mac.ops.led_on = ixgbe_led_on_t_X550em; - hw->mac.ops.led_off = ixgbe_led_off_t_X550em; - break; - default: - break; - } - return ret_val; -} - -/** - * ixgbe_read_cs4227 - Read CS4227 register - * @hw: pointer to hardware structure - * @reg: register number to write - * @value: pointer to receive value read - * - * 
Returns status code - **/ -STATIC s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) -{ - return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value); -} - -/** - * ixgbe_write_cs4227 - Write CS4227 register - * @hw: pointer to hardware structure - * @reg: register number to write - * @value: value to write to register - * - * Returns status code - **/ -STATIC s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) -{ - return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value); -} - -/** - * ixgbe_read_pe - Read register from port expander - * @hw: pointer to hardware structure - * @reg: register number to read - * @value: pointer to receive read value - * - * Returns status code - **/ -STATIC s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) -{ - s32 status; - - status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value); - if (status != IXGBE_SUCCESS) - ERROR_REPORT2(IXGBE_ERROR_CAUTION, - "port expander access failed with %d\n", status); - return status; -} - -/** - * ixgbe_write_pe - Write register to port expander - * @hw: pointer to hardware structure - * @reg: register number to write - * @value: value to write - * - * Returns status code - **/ -STATIC s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) -{ - s32 status; - - status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value); - if (status != IXGBE_SUCCESS) - ERROR_REPORT2(IXGBE_ERROR_CAUTION, - "port expander access failed with %d\n", status); - return status; -} - -/** - * ixgbe_reset_cs4227 - Reset CS4227 using port expander - * @hw: pointer to hardware structure - * - * This function assumes that the caller has acquired the proper semaphore. - * Returns error code - **/ -STATIC s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) -{ - s32 status; - u32 retry; - u16 value; - u8 reg; - - /* Trigger hard reset. 
*/ - status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); - if (status != IXGBE_SUCCESS) - return status; - reg |= IXGBE_PE_BIT1; - status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); - if (status != IXGBE_SUCCESS) - return status; - - status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, ®); - if (status != IXGBE_SUCCESS) - return status; - reg &= ~IXGBE_PE_BIT1; - status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg); - if (status != IXGBE_SUCCESS) - return status; - - status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); - if (status != IXGBE_SUCCESS) - return status; - reg &= ~IXGBE_PE_BIT1; - status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); - if (status != IXGBE_SUCCESS) - return status; - - usec_delay(IXGBE_CS4227_RESET_HOLD); - - status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); - if (status != IXGBE_SUCCESS) - return status; - reg |= IXGBE_PE_BIT1; - status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); - if (status != IXGBE_SUCCESS) - return status; - - /* Wait for the reset to complete. */ - msec_delay(IXGBE_CS4227_RESET_DELAY); - for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { - status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS, - &value); - if (status == IXGBE_SUCCESS && - value == IXGBE_CS4227_EEPROM_LOAD_OK) - break; - msec_delay(IXGBE_CS4227_CHECK_DELAY); - } - if (retry == IXGBE_CS4227_RETRIES) { - ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, - "CS4227 reset did not complete."); - return IXGBE_ERR_PHY; - } - - status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value); - if (status != IXGBE_SUCCESS || - !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) { - ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, - "CS4227 EEPROM did not load successfully."); - return IXGBE_ERR_PHY; - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_check_cs4227 - Check CS4227 and reset as needed - * @hw: pointer to hardware structure - **/ -STATIC void ixgbe_check_cs4227(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_SUCCESS; - u32 swfw_mask = hw->phy.phy_semaphore_mask; - u16 value = 0; - u8 retry; - - 
for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { - status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); - if (status != IXGBE_SUCCESS) { - ERROR_REPORT2(IXGBE_ERROR_CAUTION, - "semaphore failed with %d", status); - msec_delay(IXGBE_CS4227_CHECK_DELAY); - continue; - } - - /* Get status of reset flow. */ - status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); - - if (status == IXGBE_SUCCESS && - value == IXGBE_CS4227_RESET_COMPLETE) - goto out; - - if (status != IXGBE_SUCCESS || - value != IXGBE_CS4227_RESET_PENDING) - break; - - /* Reset is pending. Wait and check again. */ - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - msec_delay(IXGBE_CS4227_CHECK_DELAY); - } - - /* If still pending, assume other instance failed. */ - if (retry == IXGBE_CS4227_RETRIES) { - status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); - if (status != IXGBE_SUCCESS) { - ERROR_REPORT2(IXGBE_ERROR_CAUTION, - "semaphore failed with %d", status); - return; - } - } - - /* Reset the CS4227. */ - status = ixgbe_reset_cs4227(hw); - if (status != IXGBE_SUCCESS) { - ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, - "CS4227 reset failed: %d", status); - goto out; - } - - /* Reset takes so long, temporarily release semaphore in case the - * other driver instance is waiting for the reset indication. - */ - ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, - IXGBE_CS4227_RESET_PENDING); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - msec_delay(10); - status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); - if (status != IXGBE_SUCCESS) { - ERROR_REPORT2(IXGBE_ERROR_CAUTION, - "semaphore failed with %d", status); - return; - } - - /* Record completion for next time. 
*/ - status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, - IXGBE_CS4227_RESET_COMPLETE); - -out: - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - msec_delay(hw->eeprom.semaphore_delay); -} - -/** - * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control - * @hw: pointer to hardware structure - **/ -STATIC void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) -{ - u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - - if (hw->bus.lan_id) { - esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); - esdp |= IXGBE_ESDP_SDP1_DIR; - } - esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_FLUSH(hw); -} - -/** - * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock - * @hw: pointer to hardware structure - * @reg_addr: 32 bit address of PHY register to read - * @dev_type: always unused - * @phy_data: Pointer to read data from PHY register - */ -STATIC s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr, - u32 dev_type, u16 *phy_data) -{ - u32 i, data, command; - UNREFERENCED_1PARAMETER(dev_type); - - /* Setup and write the read command */ - command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | - IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC | - IXGBE_MSCA_MDI_COMMAND; - - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - - /* Check every 10 usec to see if the access completed. - * The MDI Command bit will clear when the operation is - * complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - usec_delay(10); - - command = IXGBE_READ_REG(hw, IXGBE_MSCA); - if (!(command & IXGBE_MSCA_MDI_COMMAND)) - break; - } - - if (command & IXGBE_MSCA_MDI_COMMAND) { - ERROR_REPORT1(IXGBE_ERROR_POLLING, - "PHY read command did not complete.\n"); - return IXGBE_ERR_PHY; - } - - /* Read operation is complete. 
Get the data from MSRWD */ - data = IXGBE_READ_REG(hw, IXGBE_MSRWD); - data >>= IXGBE_MSRWD_READ_DATA_SHIFT; - *phy_data = (u16)data; - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @dev_type: always unused - * @phy_data: Data to write to the PHY register - */ -STATIC s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr, - u32 dev_type, u16 phy_data) -{ - u32 i, command; - UNREFERENCED_1PARAMETER(dev_type); - - /* Put the data in the MDI single read and write data register*/ - IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); - - /* Setup and write the write command */ - command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | - IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | - IXGBE_MSCA_MDI_COMMAND; - - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - - /* Check every 10 usec to see if the access completed. 
- * The MDI Command bit will clear when the operation is - * complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - usec_delay(10); - - command = IXGBE_READ_REG(hw, IXGBE_MSCA); - if (!(command & IXGBE_MSCA_MDI_COMMAND)) - break; - } - - if (command & IXGBE_MSCA_MDI_COMMAND) { - ERROR_REPORT1(IXGBE_ERROR_POLLING, - "PHY write cmd didn't complete\n"); - return IXGBE_ERR_PHY; - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_identify_phy_x550em - Get PHY type based on device id - * @hw: pointer to hardware structure - * - * Returns error code - */ -STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) -{ - hw->mac.ops.set_lan_id(hw); - - ixgbe_read_mng_if_sel_x550em(hw); - - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_A_SFP: - return ixgbe_identify_module_generic(hw); - case IXGBE_DEV_ID_X550EM_X_SFP: - /* set up for CS4227 usage */ - ixgbe_setup_mux_ctl(hw); - ixgbe_check_cs4227(hw); - /* Fallthrough */ - - case IXGBE_DEV_ID_X550EM_A_SFP_N: - return ixgbe_identify_module_generic(hw); - break; - case IXGBE_DEV_ID_X550EM_X_KX4: - hw->phy.type = ixgbe_phy_x550em_kx4; - break; - case IXGBE_DEV_ID_X550EM_X_XFI: - hw->phy.type = ixgbe_phy_x550em_xfi; - break; - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_A_KR: - case IXGBE_DEV_ID_X550EM_A_KR_L: - hw->phy.type = ixgbe_phy_x550em_kr; - break; - case IXGBE_DEV_ID_X550EM_A_10G_T: - case IXGBE_DEV_ID_X550EM_X_10G_T: - return ixgbe_identify_phy_generic(hw); - case IXGBE_DEV_ID_X550EM_X_1G_T: - hw->phy.type = ixgbe_phy_ext_1g_t; - hw->phy.ops.read_reg = NULL; - hw->phy.ops.write_reg = NULL; - break; - case IXGBE_DEV_ID_X550EM_A_1G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T_L: - hw->phy.type = ixgbe_phy_fw; - hw->phy.ops.read_reg = NULL; - hw->phy.ops.write_reg = NULL; - if (hw->bus.lan_id) - hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; - else - hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; - break; - default: - break; - } - return IXGBE_SUCCESS; -} - -/** - * ixgbe_fw_phy_activity - 
Perform an activity on a PHY - * @hw: pointer to hardware structure - * @activity: activity to perform - * @data: Pointer to 4 32-bit words of data - */ -s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, - u32 (*data)[FW_PHY_ACT_DATA_COUNT]) -{ - union { - struct ixgbe_hic_phy_activity_req cmd; - struct ixgbe_hic_phy_activity_resp rsp; - } hic; - u16 retries = FW_PHY_ACT_RETRIES; - s32 rc; - u16 i; - - do { - memset(&hic, 0, sizeof(hic)); - hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD; - hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN; - hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; - hic.cmd.port_number = hw->bus.lan_id; - hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity); - for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) - hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]); - - rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd, - sizeof(hic.cmd), - IXGBE_HI_COMMAND_TIMEOUT, - true); - if (rc != IXGBE_SUCCESS) - return rc; - if (hic.rsp.hdr.cmd_or_resp.ret_status == - FW_CEM_RESP_STATUS_SUCCESS) { - for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) - (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]); - return IXGBE_SUCCESS; - } - usec_delay(20); - --retries; - } while (retries > 0); - - return IXGBE_ERR_HOST_INTERFACE_COMMAND; -} - -static const struct { - u16 fw_speed; - ixgbe_link_speed phy_speed; -} ixgbe_fw_map[] = { - { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL }, - { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL }, - { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL }, - { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL }, - { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL }, - { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL }, -}; - -/** - * ixgbe_get_phy_id_fw - Get the phy ID via firmware command - * @hw: pointer to hardware structure - * - * Returns error code - */ -static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) -{ - u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; - u16 phy_speeds; - u16 phy_id_lo; - s32 rc; - u16 i; - - rc = 
ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info); - if (rc) - return rc; - - hw->phy.speeds_supported = 0; - phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK; - for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) { - if (phy_speeds & ixgbe_fw_map[i].fw_speed) - hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed; - } - if (!hw->phy.autoneg_advertised) - hw->phy.autoneg_advertised = hw->phy.speeds_supported; - - hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK; - phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK; - hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK; - hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; - if (hw->phy.id == IXGBE_PHY_REVISION_MASK) - return IXGBE_ERR_PHY_ADDR_INVALID; - return IXGBE_SUCCESS; -} - -/** - * ixgbe_identify_phy_fw - Get PHY type based on firmware command - * @hw: pointer to hardware structure - * - * Returns error code - */ -static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) -{ - if (hw->bus.lan_id) - hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; - else - hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; - - hw->phy.type = ixgbe_phy_fw; - /*hw->phy.ops.read_reg = NULL; - hw->phy.ops.write_reg = NULL;*/ - return ixgbe_get_phy_id_fw(hw); -} - -/** - * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY - * @hw: pointer to hardware structure - * - * Returns error code - */ -s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) -{ - u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; - - setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF; - return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup); -} - -STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data) -{ - UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data); - return IXGBE_NOT_IMPLEMENTED; -} - -STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data) -{ - UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data); - return 
IXGBE_NOT_IMPLEMENTED; -} - -/** - * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to read from - * @reg: I2C device register to read from - * @val: pointer to location to receive read value - * - * Returns an error code on error. - **/ -STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, - u16 reg, u16 *val) -{ - return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); -} - -/** - * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to read from - * @reg: I2C device register to read from - * @val: pointer to location to receive read value - * - * Returns an error code on error. - **/ -STATIC s32 -ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, - u16 reg, u16 *val) -{ - return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); -} - -/** - * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to write to - * @reg: I2C device register to write to - * @val: value to write - * - * Returns an error code on error. - **/ -STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, - u8 addr, u16 reg, u16 val) -{ - return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); -} - -/** - * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation - * @hw: pointer to the hardware structure - * @addr: I2C bus address to write to - * @reg: I2C device register to write to - * @val: value to write - * - * Returns an error code on error. 
- **/ -STATIC s32 -ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, - u8 addr, u16 reg, u16 val) -{ - return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); -} - -/** -* ixgbe_init_ops_X550EM - Inits func ptrs and MAC type -* @hw: pointer to hardware structure -* -* Initialize the function pointers and for MAC type X550EM. -* Does not touch the hardware. -**/ -s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - struct ixgbe_phy_info *phy = &hw->phy; - s32 ret_val; - - DEBUGFUNC("ixgbe_init_ops_X550EM"); - - /* Similar to X550 so start there. */ - ret_val = ixgbe_init_ops_X550(hw); - - /* Since this function eventually calls - * ixgbe_init_ops_540 by design, we are setting - * the pointers to NULL explicitly here to overwrite - * the values being set in the x540 function. - */ - /* Thermal sensor not supported in x550EM */ - mac->ops.get_thermal_sensor_data = NULL; - mac->ops.init_thermal_sensor_thresh = NULL; - mac->thermal_sensor_enabled = false; - - /* FCOE not supported in x550EM */ - mac->ops.get_san_mac_addr = NULL; - mac->ops.set_san_mac_addr = NULL; - mac->ops.get_wwn_prefix = NULL; - mac->ops.get_fcoe_boot_status = NULL; - - /* IPsec not supported in x550EM */ - mac->ops.disable_sec_rx_path = NULL; - mac->ops.enable_sec_rx_path = NULL; - - /* AUTOC register is not present in x550EM. 
*/ - mac->ops.prot_autoc_read = NULL; - mac->ops.prot_autoc_write = NULL; - - /* X550EM bus type is internal*/ - hw->bus.type = ixgbe_bus_type_internal; - mac->ops.get_bus_info = ixgbe_get_bus_info_X550em; - - - mac->ops.get_media_type = ixgbe_get_media_type_X550em; - mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em; - mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em; - mac->ops.reset_hw = ixgbe_reset_hw_X550em; - mac->ops.get_supported_physical_layer = - ixgbe_get_supported_physical_layer_X550em; - - if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) - mac->ops.setup_fc = ixgbe_setup_fc_generic; - else - mac->ops.setup_fc = ixgbe_setup_fc_X550em; - - /* PHY */ - phy->ops.init = ixgbe_init_phy_ops_X550em; - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_A_1G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T_L: - mac->ops.setup_fc = NULL; - phy->ops.identify = ixgbe_identify_phy_fw; - phy->ops.set_phy_power = NULL; - phy->ops.get_firmware_version = NULL; - break; - case IXGBE_DEV_ID_X550EM_X_1G_T: - mac->ops.setup_fc = NULL; - phy->ops.identify = ixgbe_identify_phy_x550em; - phy->ops.set_phy_power = NULL; - break; - default: - phy->ops.identify = ixgbe_identify_phy_x550em; - } - - if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) - phy->ops.set_phy_power = NULL; - - /* EEPROM */ - eeprom->ops.init_params = ixgbe_init_eeprom_params_X540; - eeprom->ops.read = ixgbe_read_ee_hostif_X550; - eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550; - eeprom->ops.write = ixgbe_write_ee_hostif_X550; - eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550; - eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550; - eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550; - eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550; - - return ret_val; -} - -/** - * ixgbe_setup_fw_link - Setup firmware-controlled PHYs - * @hw: pointer to hardware structure - */ -static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) 
-{ - u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; - s32 rc; - u16 i; - - if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) - return 0; - - if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { - ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, - "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; - } - - switch (hw->fc.requested_mode) { - case ixgbe_fc_full: - setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX << - FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; - break; - case ixgbe_fc_rx_pause: - setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX << - FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; - break; - case ixgbe_fc_tx_pause: - setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX << - FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; - break; - default: - break; - } - - for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) { - if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed) - setup[0] |= ixgbe_fw_map[i].fw_speed; - } - setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN; - - if (hw->phy.eee_speeds_advertised) - setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE; - - rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup); - if (rc) - return rc; - if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN) - return IXGBE_ERR_OVERTEMP; - return IXGBE_SUCCESS; -} - -/** - * ixgbe_fc_autoneg_fw _ Set up flow control for FW-controlled PHYs - * @hw: pointer to hardware structure - * - * Called at init time to set up flow control. - */ -static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) -{ - if (hw->fc.requested_mode == ixgbe_fc_default) - hw->fc.requested_mode = ixgbe_fc_full; - - return ixgbe_setup_fw_link(hw); -} - -/** - * ixgbe_setup_eee_fw - Enable/disable EEE support - * @hw: pointer to the HW structure - * @enable_eee: boolean flag to enable EEE - * - * Enable/disable EEE based on enable_eee flag. - * This function controls EEE for firmware-based PHY implementations. 
- */ -static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee) -{ - if (!!hw->phy.eee_speeds_advertised == enable_eee) - return IXGBE_SUCCESS; - if (enable_eee) - hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; - else - hw->phy.eee_speeds_advertised = 0; - return hw->phy.ops.setup_link(hw); -} - -/** -* ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type -* @hw: pointer to hardware structure -* -* Initialize the function pointers and for MAC type X550EM_a. -* Does not touch the hardware. -**/ -s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - s32 ret_val; - - DEBUGFUNC("ixgbe_init_ops_X550EM_a"); - - /* Start with generic X550EM init */ - ret_val = ixgbe_init_ops_X550EM(hw); - - if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || - hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) { - mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550; - mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550; - } else { - mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a; - mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a; - } - mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a; - mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a; - - switch (mac->ops.get_media_type(hw)) { - case ixgbe_media_type_fiber: - mac->ops.setup_fc = NULL; - mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; - break; - case ixgbe_media_type_backplane: - mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; - mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; - break; - default: - break; - } - - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_A_1G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T_L: - mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; - mac->ops.setup_fc = ixgbe_fc_autoneg_fw; - mac->ops.setup_eee = ixgbe_setup_eee_fw; - hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; - 
break; - default: - break; - } - - return ret_val; -} - -/** -* ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type -* @hw: pointer to hardware structure -* -* Initialize the function pointers and for MAC type X550EM_x. -* Does not touch the hardware. -**/ -s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_link_info *link = &hw->link; - s32 ret_val; - - DEBUGFUNC("ixgbe_init_ops_X550EM_x"); - - /* Start with generic X550EM init */ - ret_val = ixgbe_init_ops_X550EM(hw); - - mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550; - mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550; - mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em; - mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em; - link->ops.read_link = ixgbe_read_i2c_combined_generic; - link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked; - link->ops.write_link = ixgbe_write_i2c_combined_generic; - link->ops.write_link_unlocked = - ixgbe_write_i2c_combined_generic_unlocked; - link->addr = IXGBE_CS4227; - - if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) { - mac->ops.setup_fc = NULL; - mac->ops.setup_eee = NULL; - mac->ops.init_led_link_act = NULL; - } - - return ret_val; -} - -/** - * ixgbe_dmac_config_X550 - * @hw: pointer to hardware structure - * - * Configure DMA coalescing. If enabling dmac, dmac is activated. - * When disabling dmac, dmac enable dmac bit is cleared. 
- **/ -s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw) -{ - u32 reg, high_pri_tc; - - DEBUGFUNC("ixgbe_dmac_config_X550"); - - /* Disable DMA coalescing before configuring */ - reg = IXGBE_READ_REG(hw, IXGBE_DMACR); - reg &= ~IXGBE_DMACR_DMAC_EN; - IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); - - /* Disable DMA Coalescing if the watchdog timer is 0 */ - if (!hw->mac.dmac_config.watchdog_timer) - goto out; - - ixgbe_dmac_config_tcs_X550(hw); - - /* Configure DMA Coalescing Control Register */ - reg = IXGBE_READ_REG(hw, IXGBE_DMACR); - - /* Set the watchdog timer in units of 40.96 usec */ - reg &= ~IXGBE_DMACR_DMACWT_MASK; - reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096; - - reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK; - /* If fcoe is enabled, set high priority traffic class */ - if (hw->mac.dmac_config.fcoe_en) { - high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc; - reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) & - IXGBE_DMACR_HIGH_PRI_TC_MASK); - } - reg |= IXGBE_DMACR_EN_MNG_IND; - - /* Enable DMA coalescing after configuration */ - reg |= IXGBE_DMACR_DMAC_EN; - IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); - -out: - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dmac_config_tcs_X550 - * @hw: pointer to hardware structure - * - * Configure DMA coalescing threshold per TC. The dmac enable bit must - * be cleared before configuring. 
- **/ -s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw) -{ - u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb; - - DEBUGFUNC("ixgbe_dmac_config_tcs_X550"); - - /* Configure DMA coalescing enabled */ - switch (hw->mac.dmac_config.link_speed) { - case IXGBE_LINK_SPEED_10_FULL: - case IXGBE_LINK_SPEED_100_FULL: - pb_headroom = IXGBE_DMACRXT_100M; - break; - case IXGBE_LINK_SPEED_1GB_FULL: - pb_headroom = IXGBE_DMACRXT_1G; - break; - default: - pb_headroom = IXGBE_DMACRXT_10G; - break; - } - - maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >> - IXGBE_MHADD_MFS_SHIFT) / 1024); - - /* Set the per Rx packet buffer receive threshold */ - for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) { - reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc)); - reg &= ~IXGBE_DMCTH_DMACRXT_MASK; - - if (tc < hw->mac.dmac_config.num_tcs) { - /* Get Rx PB size */ - rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc)); - rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >> - IXGBE_RXPBSIZE_SHIFT; - - /* Calculate receive buffer threshold in kilobytes */ - if (rx_pb_size > pb_headroom) - rx_pb_size = rx_pb_size - pb_headroom; - else - rx_pb_size = 0; - - /* Minimum of MFS shall be set for DMCTH */ - reg |= (rx_pb_size > maxframe_size_kb) ? - rx_pb_size : maxframe_size_kb; - } - IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg); - } - return IXGBE_SUCCESS; -} - -/** - * ixgbe_dmac_update_tcs_X550 - * @hw: pointer to hardware structure - * - * Disables dmac, updates per TC settings, and then enables dmac. 
- **/ -s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw) -{ - u32 reg; - - DEBUGFUNC("ixgbe_dmac_update_tcs_X550"); - - /* Disable DMA coalescing before configuring */ - reg = IXGBE_READ_REG(hw, IXGBE_DMACR); - reg &= ~IXGBE_DMACR_DMAC_EN; - IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); - - ixgbe_dmac_config_tcs_X550(hw); - - /* Enable DMA coalescing after configuration */ - reg = IXGBE_READ_REG(hw, IXGBE_DMACR); - reg |= IXGBE_DMACR_DMAC_EN; - IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params - * @hw: pointer to hardware structure - * - * Initializes the EEPROM parameters ixgbe_eeprom_info within the - * ixgbe_hw struct in order to set up EEPROM access. - **/ -s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) -{ - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - u32 eec; - u16 eeprom_size; - - DEBUGFUNC("ixgbe_init_eeprom_params_X550"); - - if (eeprom->type == ixgbe_eeprom_uninitialized) { - eeprom->semaphore_delay = 10; - eeprom->type = ixgbe_flash; - - eec = IXGBE_READ_REG(hw, IXGBE_EEC); - eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> - IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = 1 << (eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); - - DEBUGOUT2("Eeprom params: type = %d, size = %d\n", - eeprom->type, eeprom->word_size); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_set_source_address_pruning_X550 - Enable/Disbale source address pruning - * @hw: pointer to hardware structure - * @enable: enable or disable source address pruning - * @pool: Rx pool to set source address pruning for - **/ -void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable, - unsigned int pool) -{ - u64 pfflp; - - /* max rx pool is 63 */ - if (pool > 63) - return; - - pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL); - pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32; - - if (enable) - pfflp |= (1ULL << pool); - else - pfflp &= ~(1ULL << pool); - - IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, 
(u32)pfflp); - IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32)); -} - -/** - * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing - * @hw: pointer to hardware structure - * @enable: enable or disable switch for Ethertype anti-spoofing - * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing - * - **/ -void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, - bool enable, int vf) -{ - int vf_target_reg = vf >> 3; - int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT; - u32 pfvfspoof; - - DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550"); - - pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); - if (enable) - pfvfspoof |= (1 << vf_target_shift); - else - pfvfspoof &= ~(1 << vf_target_shift); - - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); -} - -/** - * ixgbe_iosf_wait - Wait for IOSF command completion - * @hw: pointer to hardware structure - * @ctrl: pointer to location to receive final IOSF control value - * - * Returns failing status on timeout - * - * Note: ctrl can be NULL if the IOSF control register value is not needed - **/ -STATIC s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) -{ - u32 i, command = 0; - - /* Check every 10 usec to see if the address cycle completed. 
- * The SB IOSF BUSY bit will clear when the operation is - * complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); - if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) - break; - usec_delay(10); - } - if (ctrl) - *ctrl = command; - if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { - ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n"); - return IXGBE_ERR_PHY; - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register - * of the IOSF device - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 3 bit device type - * @data: Data to write to the register - **/ -s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u32 data) -{ - u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; - u32 command, error; - s32 ret; - - ret = ixgbe_acquire_swfw_semaphore(hw, gssr); - if (ret != IXGBE_SUCCESS) - return ret; - - ret = ixgbe_iosf_wait(hw, NULL); - if (ret != IXGBE_SUCCESS) - goto out; - - command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | - (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); - - /* Write IOSF control register */ - IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); - - /* Write IOSF data register */ - IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data); - - ret = ixgbe_iosf_wait(hw, &command); - - if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { - error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> - IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; - ERROR_REPORT2(IXGBE_ERROR_POLLING, - "Failed to write, error %x\n", error); - ret = IXGBE_ERR_PHY; - } - -out: - ixgbe_release_swfw_semaphore(hw, gssr); - return ret; -} - -/** - * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 3 bit device type - * @data: Pointer to read data 
from the register - **/ -s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u32 *data) -{ - u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; - u32 command, error; - s32 ret; - - ret = ixgbe_acquire_swfw_semaphore(hw, gssr); - if (ret != IXGBE_SUCCESS) - return ret; - - ret = ixgbe_iosf_wait(hw, NULL); - if (ret != IXGBE_SUCCESS) - goto out; - - command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | - (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); - - /* Write IOSF control register */ - IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); - - ret = ixgbe_iosf_wait(hw, &command); - - if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { - error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> - IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; - ERROR_REPORT2(IXGBE_ERROR_POLLING, - "Failed to read, error %x\n", error); - ret = IXGBE_ERR_PHY; - } - - if (ret == IXGBE_SUCCESS) - *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); - -out: - ixgbe_release_swfw_semaphore(hw, gssr); - return ret; -} - -/** - * ixgbe_get_phy_token - Get the token for shared phy access - * @hw: Pointer to hardware structure - */ - -s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) -{ - struct ixgbe_hic_phy_token_req token_cmd; - s32 status; - - token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; - token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; - token_cmd.hdr.cmd_or_resp.cmd_resv = 0; - token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; - token_cmd.port_number = hw->bus.lan_id; - token_cmd.command_type = FW_PHY_TOKEN_REQ; - token_cmd.pad = 0; - status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd, - sizeof(token_cmd), - IXGBE_HI_COMMAND_TIMEOUT, - true); - if (status) { - DEBUGOUT1("Issuing host interface command failed with Status = %d\n", - status); - return status; - } - if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) - return IXGBE_SUCCESS; - if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) { - DEBUGOUT1("Host interface 
command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n", - token_cmd.hdr.cmd_or_resp.ret_status); - return IXGBE_ERR_FW_RESP_INVALID; - } - - DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n"); - return IXGBE_ERR_TOKEN_RETRY; -} - -/** - * ixgbe_put_phy_token - Put the token for shared phy access - * @hw: Pointer to hardware structure - */ - -s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) -{ - struct ixgbe_hic_phy_token_req token_cmd; - s32 status; - - token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; - token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; - token_cmd.hdr.cmd_or_resp.cmd_resv = 0; - token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; - token_cmd.port_number = hw->bus.lan_id; - token_cmd.command_type = FW_PHY_TOKEN_REL; - token_cmd.pad = 0; - status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd, - sizeof(token_cmd), - IXGBE_HI_COMMAND_TIMEOUT, - true); - if (status) - return status; - if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) - return IXGBE_SUCCESS; - - DEBUGOUT("Put PHY Token host interface command failed"); - return IXGBE_ERR_FW_RESP_INVALID; -} - -/** - * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register - * of the IOSF device - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 3 bit device type - * @data: Data to write to the register - **/ -s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u32 data) -{ - struct ixgbe_hic_internal_phy_req write_cmd; - s32 status; - UNREFERENCED_1PARAMETER(device_type); - - memset(&write_cmd, 0, sizeof(write_cmd)); - write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; - write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; - write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; - write_cmd.port_number = hw->bus.lan_id; - write_cmd.command_type = FW_INT_PHY_REQ_WRITE; - write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr); - write_cmd.write_data = IXGBE_CPU_TO_BE32(data); - - status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd, - 
sizeof(write_cmd), - IXGBE_HI_COMMAND_TIMEOUT, false); - - return status; -} - -/** - * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 3 bit device type - * @data: Pointer to read data from the register - **/ -s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u32 *data) -{ - union { - struct ixgbe_hic_internal_phy_req cmd; - struct ixgbe_hic_internal_phy_resp rsp; - } hic; - s32 status; - UNREFERENCED_1PARAMETER(device_type); - - memset(&hic, 0, sizeof(hic)); - hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; - hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; - hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; - hic.cmd.port_number = hw->bus.lan_id; - hic.cmd.command_type = FW_INT_PHY_REQ_READ; - hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr); - - status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd, - sizeof(hic.cmd), - IXGBE_HI_COMMAND_TIMEOUT, true); - - /* Extract the register value from the response. 
*/ - *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data); - - return status; -} - -/** - * ixgbe_disable_mdd_X550 - * @hw: pointer to hardware structure - * - * Disable malicious driver detection - **/ -void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw) -{ - u32 reg; - - DEBUGFUNC("ixgbe_disable_mdd_X550"); - - /* Disable MDD for TX DMA and interrupt */ - reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); - reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN); - IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); - - /* Disable MDD for RX and interrupt */ - reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); - reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN); - IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); -} - -/** - * ixgbe_enable_mdd_X550 - * @hw: pointer to hardware structure - * - * Enable malicious driver detection - **/ -void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw) -{ - u32 reg; - - DEBUGFUNC("ixgbe_enable_mdd_X550"); - - /* Enable MDD for TX DMA and interrupt */ - reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); - reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN); - IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); - - /* Enable MDD for RX and interrupt */ - reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); - reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN); - IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); -} - -/** - * ixgbe_restore_mdd_vf_X550 - * @hw: pointer to hardware structure - * @vf: vf index - * - * Restore VF that was disabled during malicious driver detection event - **/ -void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf) -{ - u32 idx, reg, num_qs, start_q, bitmask; - - DEBUGFUNC("ixgbe_restore_mdd_vf_X550"); - - /* Map VF to queues */ - reg = IXGBE_READ_REG(hw, IXGBE_MRQC); - switch (reg & IXGBE_MRQC_MRQE_MASK) { - case IXGBE_MRQC_VMDQRT8TCEN: - num_qs = 8; /* 16 VFs / pools */ - bitmask = 0x000000FF; - break; - case IXGBE_MRQC_VMDQRSS32EN: - case IXGBE_MRQC_VMDQRT4TCEN: - num_qs = 4; /* 32 VFs / pools */ - bitmask = 0x0000000F; - break; - default: /* 64 VFs / pools */ - num_qs = 2; - 
bitmask = 0x00000003; - break; - } - start_q = vf * num_qs; - - /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */ - idx = start_q / 32; - reg = 0; - reg |= (bitmask << (start_q % 32)); - IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg); - IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg); -} - -/** - * ixgbe_mdd_event_X550 - * @hw: pointer to hardware structure - * @vf_bitmap: vf bitmap of malicious vfs - * - * Handle malicious driver detection event. - **/ -void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap) -{ - u32 wqbr; - u32 i, j, reg, q, shift, vf, idx; - - DEBUGFUNC("ixgbe_mdd_event_X550"); - - /* figure out pool size for mapping to vf's */ - reg = IXGBE_READ_REG(hw, IXGBE_MRQC); - switch (reg & IXGBE_MRQC_MRQE_MASK) { - case IXGBE_MRQC_VMDQRT8TCEN: - shift = 3; /* 16 VFs / pools */ - break; - case IXGBE_MRQC_VMDQRSS32EN: - case IXGBE_MRQC_VMDQRT4TCEN: - shift = 2; /* 32 VFs / pools */ - break; - default: - shift = 1; /* 64 VFs / pools */ - break; - } - - /* Read WQBR_TX and WQBR_RX and check for malicious queues */ - for (i = 0; i < 4; i++) { - wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i)); - wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i)); - - if (!wqbr) - continue; - - /* Get malicious queue */ - for (j = 0; j < 32 && wqbr; j++) { - - if (!(wqbr & (1 << j))) - continue; - - /* Get queue from bitmask */ - q = j + (i * 32); - - /* Map queue to vf */ - vf = (q >> shift); - - /* Set vf bit in vf_bitmap */ - idx = vf / 32; - vf_bitmap[idx] |= (1 << (vf % 32)); - wqbr &= ~(1 << j); - } - } -} - -/** - * ixgbe_get_media_type_X550em - Get media type - * @hw: pointer to hardware structure - * - * Returns the media type (fiber, copper, backplane) - */ -enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) -{ - enum ixgbe_media_type media_type; - - DEBUGFUNC("ixgbe_get_media_type_X550em"); - - /* Detect if there is a copper PHY attached. 
*/ - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_X_KX4: - case IXGBE_DEV_ID_X550EM_X_XFI: - case IXGBE_DEV_ID_X550EM_A_KR: - case IXGBE_DEV_ID_X550EM_A_KR_L: - media_type = ixgbe_media_type_backplane; - break; - case IXGBE_DEV_ID_X550EM_X_SFP: - case IXGBE_DEV_ID_X550EM_A_SFP: - case IXGBE_DEV_ID_X550EM_A_SFP_N: - case IXGBE_DEV_ID_X550EM_A_QSFP: - case IXGBE_DEV_ID_X550EM_A_QSFP_N: - media_type = ixgbe_media_type_fiber; - break; - case IXGBE_DEV_ID_X550EM_X_1G_T: - case IXGBE_DEV_ID_X550EM_X_10G_T: - case IXGBE_DEV_ID_X550EM_A_10G_T: - media_type = ixgbe_media_type_copper; - break; - case IXGBE_DEV_ID_X550EM_A_SGMII: - case IXGBE_DEV_ID_X550EM_A_SGMII_L: - media_type = ixgbe_media_type_backplane; - hw->phy.type = ixgbe_phy_sgmii; - break; - case IXGBE_DEV_ID_X550EM_A_1G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T_L: - media_type = ixgbe_media_type_copper; - break; - default: - media_type = ixgbe_media_type_unknown; - break; - } - return media_type; -} - -/** - * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported - * @hw: pointer to hardware structure - * @linear: true if SFP module is linear - */ -STATIC s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) -{ - DEBUGFUNC("ixgbe_supported_sfp_modules_X550em"); - - switch (hw->phy.sfp_type) { - case ixgbe_sfp_type_not_present: - return IXGBE_ERR_SFP_NOT_PRESENT; - case ixgbe_sfp_type_da_cu_core0: - case ixgbe_sfp_type_da_cu_core1: - *linear = true; - break; - case ixgbe_sfp_type_srlr_core0: - case ixgbe_sfp_type_srlr_core1: - case ixgbe_sfp_type_da_act_lmt_core0: - case ixgbe_sfp_type_da_act_lmt_core1: - case ixgbe_sfp_type_1g_sx_core0: - case ixgbe_sfp_type_1g_sx_core1: - case ixgbe_sfp_type_1g_lx_core0: - case ixgbe_sfp_type_1g_lx_core1: - *linear = false; - break; - case ixgbe_sfp_type_unknown: - case ixgbe_sfp_type_1g_cu_core0: - case ixgbe_sfp_type_1g_cu_core1: - default: - return IXGBE_ERR_SFP_NOT_SUPPORTED; - } - - return 
IXGBE_SUCCESS; -} - -/** - * ixgbe_identify_sfp_module_X550em - Identifies SFP modules - * @hw: pointer to hardware structure - * - * Searches for and identifies the SFP module and assigns appropriate PHY type. - **/ -s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw) -{ - s32 status; - bool linear; - - DEBUGFUNC("ixgbe_identify_sfp_module_X550em"); - - status = ixgbe_identify_module_generic(hw); - - if (status != IXGBE_SUCCESS) - return status; - - /* Check if SFP module is supported */ - status = ixgbe_supported_sfp_modules_X550em(hw, &linear); - - return status; -} - -/** - * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops - * @hw: pointer to hardware structure - */ -s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) -{ - s32 status; - bool linear; - - DEBUGFUNC("ixgbe_setup_sfp_modules_X550em"); - - /* Check if SFP module is supported */ - status = ixgbe_supported_sfp_modules_X550em(hw, &linear); - - if (status != IXGBE_SUCCESS) - return status; - - ixgbe_init_mac_link_ops_X550em(hw); - hw->phy.ops.reset = NULL; - - return IXGBE_SUCCESS; -} - -/** -* ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the -* internal PHY -* @hw: pointer to hardware structure -**/ -STATIC s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) -{ - s32 status; - u32 link_ctrl; - - /* Restart auto-negotiation. 
*/ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl); - - if (status) { - DEBUGOUT("Auto-negotiation did not complete\n"); - return status; - } - - link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; - status = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl); - - if (hw->mac.type == ixgbe_mac_X550EM_a) { - u32 flx_mask_st20; - - /* Indicate to FW that AN restart has been asserted */ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20); - - if (status) { - DEBUGOUT("Auto-negotiation did not complete\n"); - return status; - } - - flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART; - status = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20); - } - - return status; -} - -/** - * ixgbe_setup_sgmii - Set up link for sgmii - * @hw: pointer to hardware structure - */ -STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg_wait) -{ - struct ixgbe_mac_info *mac = &hw->mac; - u32 lval, sval, flx_val; - s32 rc; - - rc = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); - if (rc) - return rc; - - lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; - lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; - lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; - lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; - rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, lval); - if (rc) - return rc; - - rc = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); - if (rc) - return rc; - - sval |= 
IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; - sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; - rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, sval); - if (rc) - return rc; - - rc = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); - if (rc) - return rc; - - flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; - flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; - flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; - flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; - flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; - - rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); - if (rc) - return rc; - - rc = ixgbe_restart_an_internal_phy_x550em(hw); - if (rc) - return rc; - - return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); -} - -/** - * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs - * @hw: pointer to hardware structure - */ -STATIC s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg_wait) -{ - struct ixgbe_mac_info *mac = &hw->mac; - u32 lval, sval, flx_val; - s32 rc; - - rc = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); - if (rc) - return rc; - - lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; - lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; - lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; - lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; - rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, lval); - if (rc) - return rc; - - rc = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); - if (rc) - return rc; - - sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; - sval &= 
~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; - rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, sval); - if (rc) - return rc; - - rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, lval); - if (rc) - return rc; - - rc = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); - if (rc) - return rc; - - flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; - flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; - flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; - flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; - flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; - - rc = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); - if (rc) - return rc; - - rc = ixgbe_restart_an_internal_phy_x550em(hw); - - return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); -} - -/** - * ixgbe_init_mac_link_ops_X550em - init mac link function pointers - * @hw: pointer to hardware structure - */ -void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - - DEBUGFUNC("ixgbe_init_mac_link_ops_X550em"); - - switch (hw->mac.ops.get_media_type(hw)) { - case ixgbe_media_type_fiber: - /* CS4227 does not support autoneg, so disable the laser control - * functions for SFP+ fiber - */ - mac->ops.disable_tx_laser = NULL; - mac->ops.enable_tx_laser = NULL; - mac->ops.flap_tx_laser = NULL; - mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; - mac->ops.set_rate_select_speed = - ixgbe_set_soft_rate_select_speed; - - if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) || - (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP)) - mac->ops.setup_mac_link = - ixgbe_setup_mac_link_sfp_x550a; - else - mac->ops.setup_mac_link = - ixgbe_setup_mac_link_sfp_x550em; - break; - case ixgbe_media_type_copper: - if (hw->device_id == 
IXGBE_DEV_ID_X550EM_X_1G_T) - break; - if (hw->mac.type == ixgbe_mac_X550EM_a) { - if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || - hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { - mac->ops.setup_link = ixgbe_setup_sgmii_fw; - mac->ops.check_link = - ixgbe_check_mac_link_generic; - } else { - mac->ops.setup_link = - ixgbe_setup_mac_link_t_X550em; - } - } else { - mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; - mac->ops.check_link = ixgbe_check_link_t_X550em; - } - break; - case ixgbe_media_type_backplane: - if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || - hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) - mac->ops.setup_link = ixgbe_setup_sgmii; - break; - default: - break; - } -} - -/** - * ixgbe_get_link_capabilities_x550em - Determines link capabilities - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: true when autoneg or autotry is enabled - */ -s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) -{ - DEBUGFUNC("ixgbe_get_link_capabilities_X550em"); - - if (hw->phy.type == ixgbe_phy_fw) { - *autoneg = true; - *speed = hw->phy.speeds_supported; - return 0; - } - - /* SFP */ - if (hw->phy.media_type == ixgbe_media_type_fiber) { - - /* CS4227 SFP must not enable auto-negotiation */ - *autoneg = false; - - /* Check if 1G SFP module. 
*/ - if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 - || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) { - *speed = IXGBE_LINK_SPEED_1GB_FULL; - return IXGBE_SUCCESS; - } - - /* Link capabilities are based on SFP */ - if (hw->phy.multispeed_fiber) - *speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - else - *speed = IXGBE_LINK_SPEED_10GB_FULL; - } else { - switch (hw->phy.type) { - case ixgbe_phy_ext_1g_t: - case ixgbe_phy_sgmii: - *speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - case ixgbe_phy_x550em_kr: - if (hw->mac.type == ixgbe_mac_X550EM_a) { - /* check different backplane modes */ - if (hw->phy.nw_mng_if_sel & - IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { - *speed = IXGBE_LINK_SPEED_2_5GB_FULL; - break; - } else if (hw->device_id == - IXGBE_DEV_ID_X550EM_A_KR_L) { - *speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - } - } - /* fall through */ - default: - *speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - break; - } - *autoneg = true; - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_get_lasi_ext_t_x550em - Determime external Base T PHY interrupt cause - * @hw: pointer to hardware structure - * @lsc: pointer to boolean flag which indicates whether external Base T - * PHY interrupt is lsc - * - * Determime if external Base T PHY interrupt cause is high temperature - * failure alarm or link status change. - * - * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature - * failure alarm, else return PHY access status. 
- */ -STATIC s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) -{ - u32 status; - u16 reg; - - *lsc = false; - - /* Vendor alarm triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - ®); - - if (status != IXGBE_SUCCESS || - !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN)) - return status; - - /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - ®); - - if (status != IXGBE_SUCCESS || - !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | - IXGBE_MDIO_GLOBAL_ALARM_1_INT))) - return status; - - /* Global alarm triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - ®); - - if (status != IXGBE_SUCCESS) - return status; - - /* If high temperature failure, then return over temp error and exit */ - if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) { - /* power down the PHY in case the PHY FW didn't already */ - ixgbe_set_copper_phy_power(hw, false); - return IXGBE_ERR_OVERTEMP; - } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { - /* device fault alarm triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - ®); - - if (status != IXGBE_SUCCESS) - return status; - - /* if device fault was due to high temp alarm handle and exit */ - if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) { - /* power down the PHY in case the PHY FW didn't */ - ixgbe_set_copper_phy_power(hw, false); - return IXGBE_ERR_OVERTEMP; - } - } - - /* Vendor alarm 2 triggered */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); - - if (status != IXGBE_SUCCESS || - !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT)) - return status; - - /* link connect/disconnect event occurred */ - status = hw->phy.ops.read_reg(hw, 
IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); - - if (status != IXGBE_SUCCESS) - return status; - - /* Indicate LSC */ - if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC) - *lsc = true; - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts - * @hw: pointer to hardware structure - * - * Enable link status change and temperature failure alarm for the external - * Base T PHY - * - * Returns PHY access status - */ -STATIC s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) -{ - u32 status; - u16 reg; - bool lsc; - - /* Clear interrupt flags */ - status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); - - /* Enable link status change alarm */ - - /* Enable the LASI interrupts on X552 devices to receive notifications - * of the link configurations of the external PHY and correspondingly - * support the configuration of the internal iXFI link, since iXFI does - * not support auto-negotiation. This is not required for X553 devices - * having KR support, which performs auto-negotiations and which is used - * as the internal link to the external PHY. Hence adding a check here - * to avoid enabling LASI interrupts for X553 devices. 
- */ - if (hw->mac.type != ixgbe_mac_X550EM_a) { - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); - - if (status != IXGBE_SUCCESS) - return status; - - reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; - - status = hw->phy.ops.write_reg(hw, - IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg); - - if (status != IXGBE_SUCCESS) - return status; - } - - /* Enable high temperature failure and global fault alarms */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - ®); - - if (status != IXGBE_SUCCESS) - return status; - - reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN | - IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN); - - status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - reg); - - if (status != IXGBE_SUCCESS) - return status; - - /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - ®); - - if (status != IXGBE_SUCCESS) - return status; - - reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | - IXGBE_MDIO_GLOBAL_ALARM_1_INT); - - status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - reg); - - if (status != IXGBE_SUCCESS) - return status; - - /* Enable chip-wide vendor alarm */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - ®); - - if (status != IXGBE_SUCCESS) - return status; - - reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN; - - status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - reg); - - return status; -} - -/** - * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed. - * @hw: pointer to hardware structure - * @speed: link speed - * - * Configures the integrated KR PHY. 
- **/ -STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, - ixgbe_link_speed speed) -{ - s32 status; - u32 reg_val; - - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status) - return status; - - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | - IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); - - /* Advertise 10G support. */ - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR; - - /* Advertise 1G support. */ - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; - - status = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - - if (hw->mac.type == ixgbe_mac_X550EM_a) { - /* Set lane mode to KR auto negotiation */ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - - if (status) - return status; - - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; - reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; - reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; - - status = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - } - - return ixgbe_restart_an_internal_phy_x550em(hw); -} - -/** - * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs - * @hw: pointer to hardware structure - */ -static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) -{ - u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; - s32 rc; - - if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) - return IXGBE_SUCCESS; - - rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store); - if (rc) - return rc; - memset(store, 0, sizeof(store)); - - rc = ixgbe_fw_phy_activity(hw, 
FW_PHY_ACT_INIT_PHY, &store); - if (rc) - return rc; - - return ixgbe_setup_fw_link(hw); -} - -/** - * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp - * @hw: pointer to hardware structure - */ -static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) -{ - u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; - s32 rc; - - rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store); - if (rc) - return rc; - - if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) { - ixgbe_shutdown_fw_phy(hw); - return IXGBE_ERR_OVERTEMP; - } - return IXGBE_SUCCESS; -} - -/** - * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register - * @hw: pointer to hardware structure - * - * Read NW_MNG_IF_SEL register and save field values, and check for valid field - * values. - **/ -STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) -{ - /* Save NW management interface connected on board. This is used - * to determine internal PHY mode. - */ - hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); - - /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set - * PHY address. This register field was has only been used for X552. - */ - if (hw->mac.type == ixgbe_mac_X550EM_a && - hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) { - hw->phy.addr = (hw->phy.nw_mng_if_sel & - IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> - IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; - } - - if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { - hw->phy.addr = (hw->bus.lan_id == 0) ? (1) : (0); - } - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_init_phy_ops_X550em - PHY/SFP specific init - * @hw: pointer to hardware structure - * - * Initialize any function pointers that were not able to be - * set during init_shared_code because the PHY/SFP type was - * not known. Perform the SFP init if necessary. 
- */ -s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) -{ - struct ixgbe_phy_info *phy = &hw->phy; - s32 ret_val; - - DEBUGFUNC("ixgbe_init_phy_ops_X550em"); - - hw->mac.ops.set_lan_id(hw); - ixgbe_read_mng_if_sel_x550em(hw); - - if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { - phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; - ixgbe_setup_mux_ctl(hw); - phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em; - } - - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_A_1G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T_L: - phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22; - phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22; - hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a; - hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a; - phy->ops.check_overtemp = ixgbe_check_overtemp_fw; - if (hw->bus.lan_id) - hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; - else - hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; - - break; - case IXGBE_DEV_ID_X550EM_A_10G_T: - case IXGBE_DEV_ID_X550EM_A_SFP: - hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a; - hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a; - if (hw->bus.lan_id) - hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; - else - hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; - break; - case IXGBE_DEV_ID_X550EM_X_SFP: - /* set up for CS4227 usage */ - hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; - break; - default: - break; - } - - /* Identify the PHY or SFP module */ - ret_val = phy->ops.identify(hw); - if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED || - ret_val == IXGBE_ERR_PHY_ADDR_INVALID) - return ret_val; - - /* Setup function pointers based on detected hardware */ - ixgbe_init_mac_link_ops_X550em(hw); - if (phy->sfp_type != ixgbe_sfp_type_unknown) - phy->ops.reset = NULL; - - /* Set functions pointers based on phy type */ - switch (hw->phy.type) { - case ixgbe_phy_x550em_kx4: - phy->ops.setup_link = NULL; - phy->ops.read_reg = ixgbe_read_phy_reg_x550em; - phy->ops.write_reg = 
ixgbe_write_phy_reg_x550em; - break; - case ixgbe_phy_x550em_kr: - phy->ops.setup_link = ixgbe_setup_kr_x550em; - phy->ops.read_reg = ixgbe_read_phy_reg_x550em; - phy->ops.write_reg = ixgbe_write_phy_reg_x550em; - break; - case ixgbe_phy_ext_1g_t: - /* link is managed by FW */ - phy->ops.setup_link = NULL; - phy->ops.reset = NULL; - break; - case ixgbe_phy_x550em_xfi: - /* link is managed by HW */ - phy->ops.setup_link = NULL; - phy->ops.read_reg = ixgbe_read_phy_reg_x550em; - phy->ops.write_reg = ixgbe_write_phy_reg_x550em; - break; - case ixgbe_phy_x550em_ext_t: - /* If internal link mode is XFI, then setup iXFI internal link, - * else setup KR now. - */ - phy->ops.setup_internal_link = - ixgbe_setup_internal_phy_t_x550em; - - /* setup SW LPLU only for first revision of X550EM_x */ - if ((hw->mac.type == ixgbe_mac_X550EM_x) && - !(IXGBE_FUSES0_REV_MASK & - IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) - phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em; - - phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; - phy->ops.reset = ixgbe_reset_phy_t_X550em; - break; - case ixgbe_phy_sgmii: - phy->ops.setup_link = NULL; - break; - case ixgbe_phy_fw: - phy->ops.setup_link = ixgbe_setup_fw_link; - phy->ops.reset = ixgbe_reset_phy_fw; - break; - default: - break; - } - return ret_val; -} - -/** - * ixgbe_set_mdio_speed - Set MDIO clock speed - * @hw: pointer to hardware structure - */ -STATIC void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) -{ - u32 hlreg0; - - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_X_10G_T: - case IXGBE_DEV_ID_X550EM_A_SGMII: - case IXGBE_DEV_ID_X550EM_A_SGMII_L: - case IXGBE_DEV_ID_X550EM_A_10G_T: - case IXGBE_DEV_ID_X550EM_A_SFP: - case IXGBE_DEV_ID_X550EM_A_QSFP: - /* Config MDIO clock speed before the first MDIO PHY access */ - hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - hlreg0 &= ~IXGBE_HLREG0_MDCSPD; - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); - break; - case IXGBE_DEV_ID_X550EM_A_1G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T_L: - /* Select 
fast MDIO clock speed for these devices */ - hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - hlreg0 |= IXGBE_HLREG0_MDCSPD; - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); - break; - default: - break; - } -} - -/** - * ixgbe_reset_hw_X550em - Perform hardware reset - * @hw: pointer to hardware structure - * - * Resets the hardware by resetting the transmit and receive units, masks - * and clears all interrupts, perform a PHY reset, and perform a link (MAC) - * reset. - */ -s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) -{ - ixgbe_link_speed link_speed; - s32 status; - u32 ctrl = 0; - u32 i; - bool link_up = false; - u32 swfw_mask = hw->phy.phy_semaphore_mask; - - DEBUGFUNC("ixgbe_reset_hw_X550em"); - - /* Call adapter stop to disable Tx/Rx and clear interrupts */ - status = hw->mac.ops.stop_adapter(hw); - if (status != IXGBE_SUCCESS) { - DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status); - return status; - } - /* flush pending Tx transactions */ - ixgbe_clear_tx_pending(hw); - - ixgbe_set_mdio_speed(hw); - - /* PHY ops must be identified and initialized prior to reset */ - status = hw->phy.ops.init(hw); - - if (status) - DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n", - status); - - if (status == IXGBE_ERR_SFP_NOT_SUPPORTED || - status == IXGBE_ERR_PHY_ADDR_INVALID) { - DEBUGOUT("Returning from reset HW due to PHY init failure\n"); - return status; - } - - /* start the external PHY */ - if (hw->phy.type == ixgbe_phy_x550em_ext_t) { - status = ixgbe_init_ext_t_x550em(hw); - if (status) { - DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n", - status); - return status; - } - } - - /* Setup SFP module if there is one present. 
*/ - if (hw->phy.sfp_setup_needed) { - status = hw->mac.ops.setup_sfp(hw); - hw->phy.sfp_setup_needed = false; - } - - if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) - return status; - - /* Reset PHY */ - if (!hw->phy.reset_disable && hw->phy.ops.reset) { - if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP) - return IXGBE_ERR_OVERTEMP; - } - -mac_reset_top: - /* Issue global reset to the MAC. Needs to be SW reset if link is up. - * If link reset is used when link is up, it might reset the PHY when - * mng is using it. If link is down or the flag to force full link - * reset is set, then perform link reset. - */ - ctrl = IXGBE_CTRL_LNK_RST; - if (!hw->force_full_reset) { - hw->mac.ops.check_link(hw, &link_speed, &link_up, false); - if (link_up) - ctrl = IXGBE_CTRL_RST; - } - - status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); - if (status != IXGBE_SUCCESS) { - ERROR_REPORT2(IXGBE_ERROR_CAUTION, - "semaphore failed with %d", status); - return IXGBE_ERR_SWFW_SYNC; - } - ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); - IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); - IXGBE_WRITE_FLUSH(hw); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - - /* Poll for reset bit to self-clear meaning reset is complete */ - for (i = 0; i < 10; i++) { - usec_delay(1); - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - if (!(ctrl & IXGBE_CTRL_RST_MASK)) - break; - } - - if (ctrl & IXGBE_CTRL_RST_MASK) { - status = IXGBE_ERR_RESET_FAILED; - DEBUGOUT("Reset polling failed to complete.\n"); - } - - msec_delay(50); - - /* Double resets are required for recovery from certain error - * conditions. Between resets, it is necessary to stall to - * allow time for any pending HW events to complete. 
- */ - if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { - hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; - goto mac_reset_top; - } - - /* Store the permanent mac address */ - hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); - - /* Store MAC address from RAR0, clear receive address registers, and - * clear the multicast table. Also reset num_rar_entries to 128, - * since we modify this value when programming the SAN MAC address. - */ - hw->mac.num_rar_entries = 128; - hw->mac.ops.init_rx_addrs(hw); - - ixgbe_set_mdio_speed(hw); - - if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) - ixgbe_setup_mux_ctl(hw); - - if (status != IXGBE_SUCCESS) - DEBUGOUT1("Reset HW failed, STATUS = %d\n", status); - - return status; -} - -/** - * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. - * @hw: pointer to hardware structure - */ -s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) -{ - u32 status; - u16 reg; - - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_TX_VENDOR_ALARMS_3, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - ®); - - if (status != IXGBE_SUCCESS) - return status; - - /* If PHY FW reset completed bit is set then this is the first - * SW instance after a power on so the PHY FW must be un-stalled. - */ - if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_GLOBAL_RES_PR_10, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - ®); - - if (status != IXGBE_SUCCESS) - return status; - - reg &= ~IXGBE_MDIO_POWER_UP_STALL; - - status = hw->phy.ops.write_reg(hw, - IXGBE_MDIO_GLOBAL_RES_PR_10, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - reg); - - if (status != IXGBE_SUCCESS) - return status; - } - - return status; -} - -/** - * ixgbe_setup_kr_x550em - Configure the KR PHY. 
- * @hw: pointer to hardware structure - **/ -s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) -{ - /* leave link alone for 2.5G */ - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) - return IXGBE_SUCCESS; - - if (ixgbe_check_reset_blocked(hw)) - return 0; - - return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); -} - -/** - * ixgbe_setup_mac_link_sfp_x550em - Setup internal/external the PHY for SFP - * @hw: pointer to hardware structure - * - * Configure the external PHY and the integrated KR PHY for SFP support. - **/ -s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) -{ - s32 ret_val; - u16 reg_slice, reg_val; - bool setup_linear = false; - UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); - - /* Check if SFP module is supported and linear */ - ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); - - /* If no SFP module present, then return success. Return success since - * there is no reason to configure CS4227 and SFP not present error is - * not excepted in the setup MAC link flow. - */ - if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) - return IXGBE_SUCCESS; - - if (ret_val != IXGBE_SUCCESS) - return ret_val; - - /* Configure internal PHY for KR/KX. */ - ixgbe_setup_kr_speed_x550em(hw, speed); - - /* Configure CS4227 LINE side to proper mode. */ - reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + - (hw->bus.lan_id << 12); - if (setup_linear) - reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; - else - reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; - ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, - reg_val); - return ret_val; -} - -/** - * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode - * @hw: pointer to hardware structure - * @speed: the link speed to force - * - * Configures the integrated PHY for native SFI mode. Used to connect the - * internal PHY directly to an SFP cage, without autonegotiation. 
- **/ -STATIC s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) -{ - struct ixgbe_mac_info *mac = &hw->mac; - s32 status; - u32 reg_val; - - /* Disable all AN and force speed to 10G Serial. */ - status = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status != IXGBE_SUCCESS) - return status; - - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; - reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; - - /* Select forced link speed for internal PHY. */ - switch (*speed) { - case IXGBE_LINK_SPEED_10GB_FULL: - reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G; - break; - case IXGBE_LINK_SPEED_1GB_FULL: - reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; - break; - default: - /* Other link speeds are not supported by internal PHY. */ - return IXGBE_ERR_LINK_SETUP; - } - - status = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - - /* Toggle port SW reset by AN reset. */ - status = ixgbe_restart_an_internal_phy_x550em(hw); - - return status; -} - -/** - * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP - * @hw: pointer to hardware structure - * - * Configure the the integrated PHY for SFP support. - **/ -s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) -{ - s32 ret_val; - u16 reg_phy_ext; - bool setup_linear = false; - u32 reg_slice, reg_phy_int, slice_offset; - - UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); - - /* Check if SFP module is supported and linear */ - ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); - - /* If no SFP module present, then return success. Return success since - * SFP not present error is not excepted in the setup MAC link flow. 
- */ - if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) - return IXGBE_SUCCESS; - - if (ret_val != IXGBE_SUCCESS) - return ret_val; - - if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) { - /* Configure internal PHY for native SFI based on module type */ - ret_val = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_phy_int); - - if (ret_val != IXGBE_SUCCESS) - return ret_val; - - reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA; - if (!setup_linear) - reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR; - - ret_val = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int); - - if (ret_val != IXGBE_SUCCESS) - return ret_val; - - /* Setup SFI internal link. */ - ret_val = ixgbe_setup_sfi_x550a(hw, &speed); - } else { - /* Configure internal PHY for KR/KX. */ - ixgbe_setup_kr_speed_x550em(hw, speed); - - if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) { - /* Find Address */ - DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n"); - return IXGBE_ERR_PHY_ADDR_INVALID; - } - - /* Get external PHY SKU id */ - ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU, - IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); - - if (ret_val != IXGBE_SUCCESS) - return ret_val; - - /* When configuring quad port CS4223, the MAC instance is part - * of the slice offset. - */ - if (reg_phy_ext == IXGBE_CS4223_SKU_ID) - slice_offset = (hw->bus.lan_id + - (hw->bus.instance_id << 1)) << 12; - else - slice_offset = hw->bus.lan_id << 12; - - /* Configure CS4227/CS4223 LINE side to proper mode. 
*/ - reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; - - ret_val = hw->phy.ops.read_reg(hw, reg_slice, - IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); - - if (ret_val != IXGBE_SUCCESS) - return ret_val; - - reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) | - (IXGBE_CS4227_EDC_MODE_SR << 1)); - - if (setup_linear) - reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; - else - reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; - ret_val = hw->phy.ops.write_reg(hw, reg_slice, - IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext); - - /* Flush previous write with a read */ - ret_val = hw->phy.ops.read_reg(hw, reg_slice, - IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); - } - return ret_val; -} - -/** - * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration - * @hw: pointer to hardware structure - * - * iXfI configuration needed for ixgbe_mac_X550EM_x devices. - **/ -STATIC s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - s32 status; - u32 reg_val; - - /* Disable training protocol FSM. */ - status = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status != IXGBE_SUCCESS) - return status; - reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL; - status = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - if (status != IXGBE_SUCCESS) - return status; - - /* Disable Flex from training TXFFE. 
*/ - status = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status != IXGBE_SUCCESS) - return status; - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; - status = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - if (status != IXGBE_SUCCESS) - return status; - status = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status != IXGBE_SUCCESS) - return status; - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; - reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; - status = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - if (status != IXGBE_SUCCESS) - return status; - - /* Enable override for coefficients. */ - status = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status != IXGBE_SUCCESS) - return status; - reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN; - reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN; - reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN; - reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN; - status = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - return status; -} - -/** - * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. - * @hw: pointer to hardware structure - * @speed: the link speed to force - * - * Configures the integrated KR PHY to use iXFI mode. Used to connect an - * internal and external PHY at a specific speed, without autonegotiation. 
- **/ -STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) -{ - struct ixgbe_mac_info *mac = &hw->mac; - s32 status; - u32 reg_val; - - /* iXFI is only supported with X552 */ - if (mac->type != ixgbe_mac_X550EM_x) - return IXGBE_ERR_LINK_SETUP; - - /* Disable AN and force speed to 10G Serial. */ - status = mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status != IXGBE_SUCCESS) - return status; - - reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; - - /* Select forced link speed for internal PHY. */ - switch (*speed) { - case IXGBE_LINK_SPEED_10GB_FULL: - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; - break; - case IXGBE_LINK_SPEED_1GB_FULL: - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; - break; - default: - /* Other link speeds are not supported by internal KR PHY. */ - return IXGBE_ERR_LINK_SETUP; - } - - status = mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - if (status != IXGBE_SUCCESS) - return status; - - /* Additional configuration needed for x550em_x */ - if (hw->mac.type == ixgbe_mac_X550EM_x) { - status = ixgbe_setup_ixfi_x550em_x(hw); - if (status != IXGBE_SUCCESS) - return status; - } - - /* Toggle port SW reset by AN reset. */ - status = ixgbe_restart_an_internal_phy_x550em(hw); - - return status; -} - -/** - * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status - * @hw: address of hardware structure - * @link_up: address of boolean to indicate link status - * - * Returns error code if unable to get link status. 
- */ -STATIC s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) -{ - u32 ret; - u16 autoneg_status; - - *link_up = false; - - /* read this twice back to back to indicate current status */ - ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_status); - if (ret != IXGBE_SUCCESS) - return ret; - - ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_status); - if (ret != IXGBE_SUCCESS) - return ret; - - *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link - * @hw: point to hardware structure - * - * Configures the link between the integrated KR PHY and the external X557 PHY - * The driver will call this function when it gets a link status change - * interrupt from the X557 PHY. This function configures the link speed - * between the PHYs to match the link speed of the BASE-T link. - * - * A return of a non-zero value indicates an error, and the base driver should - * not report link up. 
- */ -s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) -{ - ixgbe_link_speed force_speed; - bool link_up; - u32 status; - u16 speed; - - if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) - return IXGBE_ERR_CONFIG; - - if (hw->mac.type == ixgbe_mac_X550EM_x && - !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { - /* If link is down, there is no setup necessary so return */ - status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); - if (status != IXGBE_SUCCESS) - return status; - - if (!link_up) - return IXGBE_SUCCESS; - - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &speed); - if (status != IXGBE_SUCCESS) - return status; - - /* If link is still down - no setup is required so return */ - status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); - if (status != IXGBE_SUCCESS) - return status; - if (!link_up) - return IXGBE_SUCCESS; - - /* clear everything but the speed and duplex bits */ - speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; - - switch (speed) { - case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL: - force_speed = IXGBE_LINK_SPEED_10GB_FULL; - break; - case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL: - force_speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - default: - /* Internal PHY does not support anything else */ - return IXGBE_ERR_INVALID_LINK_SETTINGS; - } - - return ixgbe_setup_ixfi_x550em(hw, &force_speed); - } else { - speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - return ixgbe_setup_kr_speed_x550em(hw, speed); - } -} - -/** - * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback. - * @hw: pointer to hardware structure - * - * Configures the integrated KR PHY to use internal loopback mode. - **/ -s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw) -{ - s32 status; - u32 reg_val; - - /* Disable AN and force speed to 10G Serial. 
*/ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status != IXGBE_SUCCESS) - return status; - reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; - status = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - if (status != IXGBE_SUCCESS) - return status; - - /* Set near-end loopback clocks. */ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status != IXGBE_SUCCESS) - return status; - reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B; - reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS; - status = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - if (status != IXGBE_SUCCESS) - return status; - - /* Set loopback enable. */ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status != IXGBE_SUCCESS) - return status; - reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK; - status = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - if (status != IXGBE_SUCCESS) - return status; - - /* Training bypass. 
*/ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (status != IXGBE_SUCCESS) - return status; - reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS; - status = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - - return status; -} - -/** - * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command - * assuming that the semaphore is already obtained. - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM using the hostif. - **/ -s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) -{ - const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; - struct ixgbe_hic_read_shadow_ram buffer; - s32 status; - - DEBUGFUNC("ixgbe_read_ee_hostif_X550"); - buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; - buffer.hdr.req.buf_lenh = 0; - buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; - - /* convert offset from words to bytes */ - buffer.address = IXGBE_CPU_TO_BE32(offset * 2); - /* one word */ - buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16)); - - status = hw->mac.ops.acquire_swfw_sync(hw, mask); - if (status) - return status; - - status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT); - if (!status) { - *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, - FW_NVM_DATA_OFFSET); - } - - hw->mac.ops.release_swfw_sync(hw, mask); - return status; -} - -/** - * ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @words: number of words - * @data: word(s) read from the EEPROM - * - * Reads a 16 bit word(s) from the EEPROM using the hostif. 
- **/ -s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, - u16 offset, u16 words, u16 *data) -{ - const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; - struct ixgbe_hic_read_shadow_ram buffer; - u32 current_word = 0; - u16 words_to_read; - s32 status; - u32 i; - - DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550"); - - /* Take semaphore for the entire operation. */ - status = hw->mac.ops.acquire_swfw_sync(hw, mask); - if (status) { - DEBUGOUT("EEPROM read buffer - semaphore failed\n"); - return status; - } - - while (words) { - if (words > FW_MAX_READ_BUFFER_SIZE / 2) - words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; - else - words_to_read = words; - - buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; - buffer.hdr.req.buf_lenh = 0; - buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; - - /* convert offset from words to bytes */ - buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2); - buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2); - - status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT); - - if (status) { - DEBUGOUT("Host interface command failed\n"); - goto out; - } - - for (i = 0; i < words_to_read; i++) { - u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) + - 2 * i; - u32 value = IXGBE_READ_REG(hw, reg); - - data[current_word] = (u16)(value & 0xffff); - current_word++; - i++; - if (i < words_to_read) { - value >>= 16; - data[current_word] = (u16)(value & 0xffff); - current_word++; - } - } - words -= words_to_read; - } - -out: - hw->mac.ops.release_swfw_sync(hw, mask); - return status; -} - -/** - * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @data: word write to the EEPROM - * - * Write a 16 bit word to the EEPROM using the hostif. 
- **/ -s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, - u16 data) -{ - s32 status; - struct ixgbe_hic_write_shadow_ram buffer; - - DEBUGFUNC("ixgbe_write_ee_hostif_data_X550"); - - buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; - buffer.hdr.req.buf_lenh = 0; - buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; - - /* one word */ - buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16)); - buffer.data = data; - buffer.address = IXGBE_CPU_TO_BE32(offset * 2); - - status = ixgbe_host_interface_command(hw, (u32 *)&buffer, - sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT, false); - - return status; -} - -/** - * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @data: word write to the EEPROM - * - * Write a 16 bit word to the EEPROM using the hostif. - **/ -s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, - u16 data) -{ - s32 status = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_write_ee_hostif_X550"); - - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == - IXGBE_SUCCESS) { - status = ixgbe_write_ee_hostif_data_X550(hw, offset, data); - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - } else { - DEBUGOUT("write ee hostif failed to get semaphore"); - status = IXGBE_ERR_SWFW_SYNC; - } - - return status; -} - -/** - * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @words: number of words - * @data: word(s) write to the EEPROM - * - * Write a 16 bit word(s) to the EEPROM using the hostif. - **/ -s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, - u16 offset, u16 words, u16 *data) -{ - s32 status = IXGBE_SUCCESS; - u32 i = 0; - - DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550"); - - /* Take semaphore for the entire operation. 
*/ - status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - if (status != IXGBE_SUCCESS) { - DEBUGOUT("EEPROM write buffer - semaphore failed\n"); - goto out; - } - - for (i = 0; i < words; i++) { - status = ixgbe_write_ee_hostif_data_X550(hw, offset + i, - data[i]); - - if (status != IXGBE_SUCCESS) { - DEBUGOUT("Eeprom buffered write failed\n"); - break; - } - } - - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); -out: - - return status; -} - -/** - * ixgbe_checksum_ptr_x550 - Checksum one pointer region - * @hw: pointer to hardware structure - * @ptr: pointer offset in eeprom - * @size: size of section pointed by ptr, if 0 first word will be used as size - * @csum: address of checksum to update - * - * Returns error status for any failure - */ -STATIC s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, - u16 size, u16 *csum, u16 *buffer, - u32 buffer_size) -{ - u16 buf[256]; - s32 status; - u16 length, bufsz, i, start; - u16 *local_buffer; - - bufsz = sizeof(buf) / sizeof(buf[0]); - - /* Read a chunk at the pointer location */ - if (!buffer) { - status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); - if (status) { - DEBUGOUT("Failed to read EEPROM image\n"); - return status; - } - local_buffer = buf; - } else { - if (buffer_size < ptr) - return IXGBE_ERR_PARAM; - local_buffer = &buffer[ptr]; - } - - if (size) { - start = 0; - length = size; - } else { - start = 1; - length = local_buffer[0]; - - /* Skip pointer section if length is invalid. 
*/ - if (length == 0xFFFF || length == 0 || - (ptr + length) >= hw->eeprom.word_size) - return IXGBE_SUCCESS; - } - - if (buffer && ((u32)start + (u32)length > buffer_size)) - return IXGBE_ERR_PARAM; - - for (i = start; length; i++, length--) { - if (i == bufsz && !buffer) { - ptr += bufsz; - i = 0; - if (length < bufsz) - bufsz = length; - - /* Read a chunk at the pointer location */ - status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, - bufsz, buf); - if (status) { - DEBUGOUT("Failed to read EEPROM image\n"); - return status; - } - } - *csum += local_buffer[i]; - } - return IXGBE_SUCCESS; -} - -/** - * ixgbe_calc_checksum_X550 - Calculates and returns the checksum - * @hw: pointer to hardware structure - * @buffer: pointer to buffer containing calculated checksum - * @buffer_size: size of buffer - * - * Returns a negative error code on error, or the 16-bit checksum - **/ -s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size) -{ - u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; - u16 *local_buffer; - s32 status; - u16 checksum = 0; - u16 pointer, i, size; - - DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550"); - - hw->eeprom.ops.init_params(hw); - - if (!buffer) { - /* Read pointer area */ - status = ixgbe_read_ee_hostif_buffer_X550(hw, 0, - IXGBE_EEPROM_LAST_WORD + 1, - eeprom_ptrs); - if (status) { - DEBUGOUT("Failed to read EEPROM image\n"); - return status; - } - local_buffer = eeprom_ptrs; - } else { - if (buffer_size < IXGBE_EEPROM_LAST_WORD) - return IXGBE_ERR_PARAM; - local_buffer = buffer; - } - - /* - * For X550 hardware include 0x0-0x41 in the checksum, skip the - * checksum word itself - */ - for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++) - if (i != IXGBE_EEPROM_CHECKSUM) - checksum += local_buffer[i]; - - /* - * Include all data from pointers 0x3, 0x6-0xE. This excludes the - * FW, PHY module, and PCIe Expansion/Option ROM pointers. 
- */ - for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) { - if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) - continue; - - pointer = local_buffer[i]; - - /* Skip pointer section if the pointer is invalid. */ - if (pointer == 0xFFFF || pointer == 0 || - pointer >= hw->eeprom.word_size) - continue; - - switch (i) { - case IXGBE_PCIE_GENERAL_PTR: - size = IXGBE_IXGBE_PCIE_GENERAL_SIZE; - break; - case IXGBE_PCIE_CONFIG0_PTR: - case IXGBE_PCIE_CONFIG1_PTR: - size = IXGBE_PCIE_CONFIG_SIZE; - break; - default: - size = 0; - break; - } - - status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum, - buffer, buffer_size); - if (status) - return status; - } - - checksum = (u16)IXGBE_EEPROM_SUM - checksum; - - return (s32)checksum; -} - -/** - * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum - * @hw: pointer to hardware structure - * - * Returns a negative error code on error, or the 16-bit checksum - **/ -s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) -{ - return ixgbe_calc_checksum_X550(hw, NULL, 0); -} - -/** - * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum - * @hw: pointer to hardware structure - * @checksum_val: calculated checksum - * - * Performs checksum calculation and validates the EEPROM checksum. If the - * caller does not need checksum_val, the value can be NULL. - **/ -s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val) -{ - s32 status; - u16 checksum; - u16 read_checksum = 0; - - DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550"); - - /* Read the first word from the EEPROM. 
If this times out or fails, do - * not continue or we could be in for a very long wait while every - * EEPROM read fails - */ - status = hw->eeprom.ops.read(hw, 0, &checksum); - if (status) { - DEBUGOUT("EEPROM read failed\n"); - return status; - } - - status = hw->eeprom.ops.calc_checksum(hw); - if (status < 0) - return status; - - checksum = (u16)(status & 0xffff); - - status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, - &read_checksum); - if (status) - return status; - - /* Verify read checksum from EEPROM is the same as - * calculated checksum - */ - if (read_checksum != checksum) { - status = IXGBE_ERR_EEPROM_CHECKSUM; - ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, - "Invalid EEPROM checksum"); - } - - /* If the user cares, return the calculated checksum */ - if (checksum_val) - *checksum_val = checksum; - - return status; -} - -/** - * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash - * @hw: pointer to hardware structure - * - * After writing EEPROM to shadow RAM using EEWR register, software calculates - * checksum and updates the EEPROM and instructs the hardware to update - * the flash. - **/ -s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) -{ - s32 status; - u16 checksum = 0; - - DEBUGFUNC("ixgbe_update_eeprom_checksum_X550"); - - /* Read the first word from the EEPROM. 
If this times out or fails, do - * not continue or we could be in for a very long wait while every - * EEPROM read fails - */ - status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum); - if (status) { - DEBUGOUT("EEPROM read failed\n"); - return status; - } - - status = ixgbe_calc_eeprom_checksum_X550(hw); - if (status < 0) - return status; - - checksum = (u16)(status & 0xffff); - - status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, - checksum); - if (status) - return status; - - status = ixgbe_update_flash_X550(hw); - - return status; -} - -/** - * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device - * @hw: pointer to hardware structure - * - * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. - **/ -s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_SUCCESS; - union ixgbe_hic_hdr2 buffer; - - DEBUGFUNC("ixgbe_update_flash_X550"); - - buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; - buffer.req.buf_lenh = 0; - buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; - buffer.req.checksum = FW_DEFAULT_CHECKSUM; - - status = ixgbe_host_interface_command(hw, (u32 *)&buffer, - sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT, false); - - return status; -} - -/** - * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type - * @hw: pointer to hardware structure - * - * Determines physical layer capabilities of the current configuration. 
- **/ -u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw) -{ - u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; - u16 ext_ability = 0; - - DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em"); - - hw->phy.ops.identify(hw); - - switch (hw->phy.type) { - case ixgbe_phy_x550em_kr: - if (hw->mac.type == ixgbe_mac_X550EM_a) { - if (hw->phy.nw_mng_if_sel & - IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { - physical_layer = - IXGBE_PHYSICAL_LAYER_2500BASE_KX; - break; - } else if (hw->device_id == - IXGBE_DEV_ID_X550EM_A_KR_L) { - physical_layer = - IXGBE_PHYSICAL_LAYER_1000BASE_KX; - break; - } - } - /* fall through */ - case ixgbe_phy_x550em_xfi: - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR | - IXGBE_PHYSICAL_LAYER_1000BASE_KX; - break; - case ixgbe_phy_x550em_kx4: - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | - IXGBE_PHYSICAL_LAYER_1000BASE_KX; - break; - case ixgbe_phy_x550em_ext_t: - hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - &ext_ability); - if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) - physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; - if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) - physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; - break; - case ixgbe_phy_fw: - if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) - physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; - if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL) - physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; - if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL) - physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T; - break; - case ixgbe_phy_sgmii: - physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; - break; - case ixgbe_phy_ext_1g_t: - physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; - break; - default: - break; - } - - if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) - physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw); - - return physical_layer; -} - -/** - * 
ixgbe_get_bus_info_x550em - Set PCI bus info - * @hw: pointer to hardware structure - * - * Sets bus link width and speed to unknown because X550em is - * not a PCI device. - **/ -s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) -{ - - DEBUGFUNC("ixgbe_get_bus_info_x550em"); - - hw->bus.width = ixgbe_bus_width_unknown; - hw->bus.speed = ixgbe_bus_speed_unknown; - - hw->mac.ops.set_lan_id(hw); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_disable_rx_x550 - Disable RX unit - * - * Enables the Rx DMA unit for x550 - **/ -void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) -{ - u32 rxctrl, pfdtxgswc; - s32 status; - struct ixgbe_hic_disable_rxen fw_cmd; - - DEBUGFUNC("ixgbe_enable_rx_dma_x550"); - - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - if (rxctrl & IXGBE_RXCTRL_RXEN) { - pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); - if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { - pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); - hw->mac.set_lben = true; - } else { - hw->mac.set_lben = false; - } - - fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; - fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; - fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; - fw_cmd.port_number = (u8)hw->bus.lan_id; - - status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, - sizeof(struct ixgbe_hic_disable_rxen), - IXGBE_HI_COMMAND_TIMEOUT, true); - - /* If we fail - disable RX using register write */ - if (status) { - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - if (rxctrl & IXGBE_RXCTRL_RXEN) { - rxctrl &= ~IXGBE_RXCTRL_RXEN; - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); - } - } - } -} - -/** - * ixgbe_enter_lplu_x550em - Transition to low power states - * @hw: pointer to hardware structure - * - * Configures Low Power Link Up on transition to low power states - * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the - * X557 PHY immediately prior to entering LPLU. 
- **/ -s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) -{ - u16 an_10g_cntl_reg, autoneg_reg, speed; - s32 status; - ixgbe_link_speed lcd_speed; - u32 save_autoneg; - bool link_up; - - /* SW LPLU not required on later HW revisions. */ - if ((hw->mac.type == ixgbe_mac_X550EM_x) && - (IXGBE_FUSES0_REV_MASK & - IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) - return IXGBE_SUCCESS; - - /* If blocked by MNG FW, then don't restart AN */ - if (ixgbe_check_reset_blocked(hw)) - return IXGBE_SUCCESS; - - status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); - if (status != IXGBE_SUCCESS) - return status; - - status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3); - - if (status != IXGBE_SUCCESS) - return status; - - /* If link is down, LPLU disabled in NVM, WoL disabled, or manageability - * disabled, then force link down by entering low power mode. - */ - if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) || - !(hw->wol_enabled || ixgbe_mng_present(hw))) - return ixgbe_set_copper_phy_power(hw, FALSE); - - /* Determine LCD */ - status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed); - - if (status != IXGBE_SUCCESS) - return status; - - /* If no valid LCD link speed, then force link down and exit. */ - if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN) - return ixgbe_set_copper_phy_power(hw, FALSE); - - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &speed); - - if (status != IXGBE_SUCCESS) - return status; - - /* If no link now, speed is invalid so take link down */ - status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); - if (status != IXGBE_SUCCESS) - return ixgbe_set_copper_phy_power(hw, false); - - /* clear everything but the speed bits */ - speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK; - - /* If current speed is already LCD, then exit. 
*/ - if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) && - (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) || - ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) && - (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL))) - return status; - - /* Clear AN completed indication */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_reg); - - if (status != IXGBE_SUCCESS) - return status; - - status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &an_10g_cntl_reg); - - if (status != IXGBE_SUCCESS) - return status; - - status = hw->phy.ops.read_reg(hw, - IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_reg); - - if (status != IXGBE_SUCCESS) - return status; - - save_autoneg = hw->phy.autoneg_advertised; - - /* Setup link at least common link speed */ - status = hw->mac.ops.setup_link(hw, lcd_speed, false); - - /* restore autoneg from before setting lplu speed */ - hw->phy.autoneg_advertised = save_autoneg; - - return status; -} - -/** - * ixgbe_get_lcd_x550em - Determine lowest common denominator - * @hw: pointer to hardware structure - * @lcd_speed: pointer to lowest common link speed - * - * Determine lowest common link speed with link partner. 
- **/ -s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed) -{ - u16 an_lp_status; - s32 status; - u16 word = hw->eeprom.ctrl_word_3; - - *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; - - status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &an_lp_status); - - if (status != IXGBE_SUCCESS) - return status; - - /* If link partner advertised 1G, return 1G */ - if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) { - *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL; - return status; - } - - /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */ - if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) || - (word & NVM_INIT_CTRL_3_D10GMP_PORT0)) - return status; - - /* Link partner not capable of lower speeds, return 10G */ - *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL; - return status; -} - -/** - * ixgbe_setup_fc_X550em - Set up flow control - * @hw: pointer to hardware structure - * - * Called at init time to set up flow control. - **/ -s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_SUCCESS; - u32 pause, asm_dir, reg_val; - - DEBUGFUNC("ixgbe_setup_fc_X550em"); - - /* Validate the requested mode */ - if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { - ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, - "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; - } - - /* 10gig parts do not have a word in the EEPROM to determine the - * default flow control setting, so we explicitly set it to full. - */ - if (hw->fc.requested_mode == ixgbe_fc_default) - hw->fc.requested_mode = ixgbe_fc_full; - - /* Determine PAUSE and ASM_DIR bits. */ - switch (hw->fc.requested_mode) { - case ixgbe_fc_none: - pause = 0; - asm_dir = 0; - break; - case ixgbe_fc_tx_pause: - pause = 0; - asm_dir = 1; - break; - case ixgbe_fc_rx_pause: - /* Rx Flow control is enabled and Tx Flow control is - * disabled by software override. 
Since there really - * isn't a way to advertise that we are capable of RX - * Pause ONLY, we will advertise that we support both - * symmetric and asymmetric Rx PAUSE, as such we fall - * through to the fc_full statement. Later, we will - * disable the adapter's ability to send PAUSE frames. - */ - case ixgbe_fc_full: - pause = 1; - asm_dir = 1; - break; - default: - ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, - "Flow control param set incorrectly\n"); - ret_val = IXGBE_ERR_CONFIG; - goto out; - } - - switch (hw->device_id) { - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_A_KR: - case IXGBE_DEV_ID_X550EM_A_KR_L: - ret_val = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - if (ret_val != IXGBE_SUCCESS) - goto out; - reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | - IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); - if (pause) - reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; - if (asm_dir) - reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; - ret_val = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - - /* This device does not fully support AN. */ - hw->fc.disable_fc_autoneg = true; - break; - case IXGBE_DEV_ID_X550EM_X_XFI: - hw->fc.disable_fc_autoneg = true; - break; - default: - break; - } - -out: - return ret_val; -} - -/** - * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37 - * @hw: pointer to hardware structure - * - * Enable flow control according to IEEE clause 37. - **/ -void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) -{ - u32 link_s1, lp_an_page_low, an_cntl_1; - s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; - ixgbe_link_speed speed; - bool link_up; - - /* AN should have completed when the cable was plugged in. - * Look for reasons to bail out. Bail out if: - * - FC autoneg is disabled, or if - * - link is not up. 
- */ - if (hw->fc.disable_fc_autoneg) { - ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, - "Flow control autoneg is disabled"); - goto out; - } - - hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (!link_up) { - ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); - goto out; - } - - /* Check at auto-negotiation has completed */ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_S1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1); - - if (status != IXGBE_SUCCESS || - (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) { - DEBUGOUT("Auto-Negotiation did not complete\n"); - status = IXGBE_ERR_FC_NOT_NEGOTIATED; - goto out; - } - - /* Read the 10g AN autoc and LP ability registers and resolve - * local flow control settings accordingly - */ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1); - - if (status != IXGBE_SUCCESS) { - DEBUGOUT("Auto-Negotiation did not complete\n"); - goto out; - } - - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low); - - if (status != IXGBE_SUCCESS) { - DEBUGOUT("Auto-Negotiation did not complete\n"); - goto out; - } - - status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low, - IXGBE_KRM_AN_CNTL_1_SYM_PAUSE, - IXGBE_KRM_AN_CNTL_1_ASM_PAUSE, - IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE, - IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE); - -out: - if (status == IXGBE_SUCCESS) { - hw->fc.fc_was_autonegged = true; - } else { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; - } -} - -/** - * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings - * @hw: pointer to hardware structure - * - **/ -void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw) -{ - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; -} - -/** - * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 - * @hw: pointer to 
hardware structure - * - * Enable flow control according to IEEE clause 37. - **/ -void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; - u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; - ixgbe_link_speed speed; - bool link_up; - - /* AN should have completed when the cable was plugged in. - * Look for reasons to bail out. Bail out if: - * - FC autoneg is disabled, or if - * - link is not up. - */ - if (hw->fc.disable_fc_autoneg) { - ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, - "Flow control autoneg is disabled"); - goto out; - } - - hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (!link_up) { - ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); - goto out; - } - - /* Check if auto-negotiation has completed */ - status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); - if (status != IXGBE_SUCCESS || - !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { - DEBUGOUT("Auto-Negotiation did not complete\n"); - status = IXGBE_ERR_FC_NOT_NEGOTIATED; - goto out; - } - - /* Negotiate the flow control */ - status = ixgbe_negotiate_fc(hw, info[0], info[0], - FW_PHY_ACT_GET_LINK_INFO_FC_RX, - FW_PHY_ACT_GET_LINK_INFO_FC_TX, - FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, - FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); - -out: - if (status == IXGBE_SUCCESS) { - hw->fc.fc_was_autonegged = true; - } else { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; - } -} - -/** - * ixgbe_setup_fc_backplane_x550em_a - Set up flow control - * @hw: pointer to hardware structure - * - * Called at init time to set up flow control. 
- **/ -s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_SUCCESS; - u32 an_cntl = 0; - - DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a"); - - /* Validate the requested mode */ - if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { - ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, - "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; - } - - if (hw->fc.requested_mode == ixgbe_fc_default) - hw->fc.requested_mode = ixgbe_fc_full; - - /* Set up the 1G and 10G flow control advertisement registers so the - * HW will be able to do FC autoneg once the cable is plugged in. If - * we link at 10G, the 1G advertisement is harmless and vice versa. - */ - status = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl); - - if (status != IXGBE_SUCCESS) { - DEBUGOUT("Auto-Negotiation did not complete\n"); - return status; - } - - /* The possible values of fc.requested_mode are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames, - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but - * we do not support receiving pause frames). - * 3: Both Rx and Tx flow control (symmetric) are enabled. - * other: Invalid. - */ - switch (hw->fc.requested_mode) { - case ixgbe_fc_none: - /* Flow control completely disabled by software override. */ - an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | - IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); - break; - case ixgbe_fc_tx_pause: - /* Tx Flow control is enabled, and Rx Flow control is - * disabled by software override. - */ - an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; - an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; - break; - case ixgbe_fc_rx_pause: - /* Rx Flow control is enabled and Tx Flow control is - * disabled by software override. 
Since there really - * isn't a way to advertise that we are capable of RX - * Pause ONLY, we will advertise that we support both - * symmetric and asymmetric Rx PAUSE, as such we fall - * through to the fc_full statement. Later, we will - * disable the adapter's ability to send PAUSE frames. - */ - case ixgbe_fc_full: - /* Flow control (both Rx and Tx) is enabled by SW override. */ - an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | - IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; - break; - default: - ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, - "Flow control param set incorrectly\n"); - return IXGBE_ERR_CONFIG; - } - - status = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl); - - /* Restart auto-negotiation. */ - status = ixgbe_restart_an_internal_phy_x550em(hw); - - return status; -} - -/** - * ixgbe_set_mux - Set mux for port 1 access with CS4227 - * @hw: pointer to hardware structure - * @state: set mux if 1, clear if 0 - */ -STATIC void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state) -{ - u32 esdp; - - if (!hw->bus.lan_id) - return; - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - if (state) - esdp |= IXGBE_ESDP_SDP1; - else - esdp &= ~IXGBE_ESDP_SDP1; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_FLUSH(hw); -} - -/** - * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to acquire - * - * Acquires the SWFW semaphore and sets the I2C MUX - **/ -s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) -{ - s32 status; - - DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em"); - - status = ixgbe_acquire_swfw_sync_X540(hw, mask); - if (status) - return status; - - if (mask & IXGBE_GSSR_I2C_MASK) - ixgbe_set_mux(hw, 1); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to release - * - * Releases the SWFW semaphore 
and sets the I2C MUX - **/ -void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) -{ - DEBUGFUNC("ixgbe_release_swfw_sync_X550em"); - - if (mask & IXGBE_GSSR_I2C_MASK) - ixgbe_set_mux(hw, 0); - - ixgbe_release_swfw_sync_X540(hw, mask); -} - -/** - * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to acquire - * - * Acquires the SWFW semaphore and get the shared phy token as needed - */ -STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) -{ - u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; - int retries = FW_PHY_TOKEN_RETRIES; - s32 status = IXGBE_SUCCESS; - - DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a"); - - while (--retries) { - status = IXGBE_SUCCESS; - if (hmask) - status = ixgbe_acquire_swfw_sync_X540(hw, hmask); - if (status) { - DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n", - status); - return status; - } - if (!(mask & IXGBE_GSSR_TOKEN_SM)) - return IXGBE_SUCCESS; - - status = ixgbe_get_phy_token(hw); - if (status == IXGBE_ERR_TOKEN_RETRY) - DEBUGOUT1("Could not acquire PHY token, Status = %d\n", - status); - - if (status == IXGBE_SUCCESS) - return IXGBE_SUCCESS; - - if (hmask) - ixgbe_release_swfw_sync_X540(hw, hmask); - - if (status != IXGBE_ERR_TOKEN_RETRY) { - DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n", - status); - return status; - } - } - - DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n", - hw->phy.id); - return status; -} - -/** - * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to release - * - * Releases the SWFW semaphore and puts the shared phy token as needed - */ -STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) -{ - u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; - - DEBUGFUNC("ixgbe_release_swfw_sync_X550a"); - - if (mask & IXGBE_GSSR_TOKEN_SM) - 
ixgbe_put_phy_token(hw); - - if (hmask) - ixgbe_release_swfw_sync_X540(hw, hmask); -} - -/** - * ixgbe_read_phy_reg_x550a - Reads specified PHY register - * @hw: pointer to hardware structure - * @reg_addr: 32 bit address of PHY register to read - * @phy_data: Pointer to read data from PHY register - * - * Reads a value from a specified PHY register using the SWFW lock and PHY - * Token. The PHY Token is needed since the MDIO is shared between to MAC - * instances. - **/ -s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data) -{ - s32 status; - u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; - - DEBUGFUNC("ixgbe_read_phy_reg_x550a"); - - if (hw->mac.ops.acquire_swfw_sync(hw, mask)) - return IXGBE_ERR_SWFW_SYNC; - - status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); - - hw->mac.ops.release_swfw_sync(hw, mask); - - return status; -} - -/** - * ixgbe_write_phy_reg_x550a - Writes specified PHY register - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 5 bit device type - * @phy_data: Data to write to the PHY register - * - * Writes a value to specified PHY register using the SWFW lock and PHY Token. - * The PHY Token is needed since the MDIO is shared between to MAC instances. - **/ -s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data) -{ - s32 status; - u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; - - DEBUGFUNC("ixgbe_write_phy_reg_x550a"); - - if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) { - status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, - phy_data); - hw->mac.ops.release_swfw_sync(hw, mask); - } else { - status = IXGBE_ERR_SWFW_SYNC; - } - - return status; -} - -/** - * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt - * @hw: pointer to hardware structure - * - * Handle external Base T PHY interrupt. 
If high temperature - * failure alarm then return error, else if link status change - * then setup internal/external PHY link - * - * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature - * failure alarm, else return PHY access status. - */ -s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) -{ - bool lsc; - u32 status; - - status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); - - if (status != IXGBE_SUCCESS) - return status; - - if (lsc) - return ixgbe_setup_internal_phy(hw); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: true when waiting for completion is needed - * - * Setup internal/external PHY link speed based on link speed, then set - * external PHY auto advertised link speed. - * - * Returns error status for any failure - **/ -s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) -{ - s32 status; - ixgbe_link_speed force_speed; - - DEBUGFUNC("ixgbe_setup_mac_link_t_X550em"); - - /* Setup internal/external PHY link speed to iXFI (10G), unless - * only 1G is auto advertised then setup KX link. - */ - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - force_speed = IXGBE_LINK_SPEED_10GB_FULL; - else - force_speed = IXGBE_LINK_SPEED_1GB_FULL; - - /* If X552 and internal link mode is XFI, then setup XFI internal link. 
- */ - if (hw->mac.type == ixgbe_mac_X550EM_x && - !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { - status = ixgbe_setup_ixfi_x550em(hw, &force_speed); - - if (status != IXGBE_SUCCESS) - return status; - } - - return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); -} - -/** - * ixgbe_check_link_t_X550em - Determine link and speed status - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @link_up: true when link is up - * @link_up_wait_to_complete: bool used to wait for link up or not - * - * Check that both the MAC and X557 external PHY have link. - **/ -s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up, bool link_up_wait_to_complete) -{ - u32 status; - u16 i, autoneg_status = 0; - - if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) - return IXGBE_ERR_CONFIG; - - status = ixgbe_check_mac_link_generic(hw, speed, link_up, - link_up_wait_to_complete); - - /* If check link fails or MAC link is not up, then return */ - if (status != IXGBE_SUCCESS || !(*link_up)) - return status; - - /* MAC link is up, so check external PHY link. - * X557 PHY. Link status is latching low, and can only be used to detect - * link drop, and not the current status of the link without performing - * back-to-back reads. 
- */ - for (i = 0; i < 2; i++) { - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_status); - - if (status != IXGBE_SUCCESS) - return status; - } - - /* If external PHY link is not up, then indicate link not up */ - if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) - *link_up = false; - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI - * @hw: pointer to hardware structure - **/ -s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) -{ - s32 status; - - status = ixgbe_reset_phy_generic(hw); - - if (status != IXGBE_SUCCESS) - return status; - - /* Configure Link Status Alarm and Temperature Threshold interrupts */ - return ixgbe_enable_lasi_ext_t_x550em(hw); -} - -/** - * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs. - * @hw: pointer to hardware structure - * @led_idx: led number to turn on - **/ -s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx) -{ - u16 phy_data; - - DEBUGFUNC("ixgbe_led_on_t_X550em"); - - if (led_idx >= IXGBE_X557_MAX_LED_INDEX) - return IXGBE_ERR_PARAM; - - /* To turn on the LED, set mode to ON. */ - ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); - phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK; - ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); - - /* Some designs have the LEDs wired to the MAC */ - return ixgbe_led_on_generic(hw, led_idx); -} - -/** - * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs. - * @hw: pointer to hardware structure - * @led_idx: led number to turn off - **/ -s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx) -{ - u16 phy_data; - - DEBUGFUNC("ixgbe_led_off_t_X550em"); - - if (led_idx >= IXGBE_X557_MAX_LED_INDEX) - return IXGBE_ERR_PARAM; - - /* To turn on the LED, set mode to ON. 
*/ - ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); - phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK; - ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); - - /* Some designs have the LEDs wired to the MAC */ - return ixgbe_led_off_generic(hw, led_idx); -} - -/** - * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware - * @hw: pointer to the HW structure - * @maj: driver version major number - * @min: driver version minor number - * @build: driver version build number - * @sub: driver version sub build number - * @len: length of driver_ver string - * @driver_ver: driver string - * - * Sends driver version number to firmware through the manageability - * block. On success return IXGBE_SUCCESS - * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring - * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. - **/ -s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 sub, u16 len, const char *driver_ver) -{ - struct ixgbe_hic_drv_info2 fw_cmd; - s32 ret_val = IXGBE_SUCCESS; - int i; - - DEBUGFUNC("ixgbe_set_fw_drv_ver_x550"); - - if ((len == 0) || (driver_ver == NULL) || - (len > sizeof(fw_cmd.driver_string))) - return IXGBE_ERR_INVALID_ARGUMENT; - - fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; - fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; - fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; - fw_cmd.port_num = (u8)hw->bus.func; - fw_cmd.ver_maj = maj; - fw_cmd.ver_min = min; - fw_cmd.ver_build = build; - fw_cmd.ver_sub = sub; - fw_cmd.hdr.checksum = 0; - memcpy(fw_cmd.driver_string, driver_ver, len); - fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, - (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); - - for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { - ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, - sizeof(fw_cmd), - IXGBE_HI_COMMAND_TIMEOUT, - true); 
- if (ret_val != IXGBE_SUCCESS) - continue; - - if (fw_cmd.hdr.cmd_or_resp.ret_status == - FW_CEM_RESP_STATUS_SUCCESS) - ret_val = IXGBE_SUCCESS; - else - ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; - - break; - } - - return ret_val; -} diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.h deleted file mode 100644 index ff2c4ea43eaa..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/ixgbe_x550.h +++ /dev/null @@ -1,115 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_X550_H_ -#define _IXGBE_X550_H_ - -#include "ixgbe_type.h" - -s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw); -s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw); -s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw); - -s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw); -s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw); -s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw); -s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw); -s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size); -s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val); -s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw); -s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, - u16 offset, u16 words, u16 *data); -s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, - u16 data); -s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, - u16 offset, u16 words, u16 *data); -s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, -u16 *data); -s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, - u16 data); -void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable, - unsigned int pool); -void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, - bool enable, int vf); -s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u32 data); -s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u32 *data); -s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 ver, u16 len, const char *str); -s32 ixgbe_get_phy_token(struct ixgbe_hw *); -s32 ixgbe_put_phy_token(struct ixgbe_hw *); -s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u32 data); -s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 
reg_addr, - u32 device_type, u32 *data); -void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw); -void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw); -void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap); -void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf); -enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw); -s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw); -s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, bool *autoneg); -void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw); -s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw); -s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw); -s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw); -s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw); -s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw); -s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw); -u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw); -void ixgbe_disable_rx_x550(struct ixgbe_hw *hw); -s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed); -s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw); -s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask); -void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask); -s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw); -s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data); -s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data); -s32 ixgbe_setup_fc_fiber_x550em_a(struct ixgbe_hw *hw); -s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw); -s32 ixgbe_setup_fc_sgmii_x550em_a(struct ixgbe_hw *hw); -void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw); -void 
ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw); -void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw); -s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw); -s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up, bool link_up_wait_to_complete); -s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw); -s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw); -s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx); -s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx); -#endif /* _IXGBE_X550_H_ */ diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat.c deleted file mode 100644 index 79593f83d9b3..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat.c +++ /dev/null @@ -1,2375 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe.h" -#include "kcompat.h" - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) || defined __VMKLNX__ -/* From lib/vsprintf.c */ -#include - -static int skip_atoi(const char **s) -{ - int i=0; - - while (isdigit(**s)) - i = i*10 + *((*s)++) - '0'; - return i; -} - -#define _kc_ZEROPAD 1 /* pad with zero */ -#define _kc_SIGN 2 /* unsigned/signed long */ -#define _kc_PLUS 4 /* show plus */ -#define _kc_SPACE 8 /* space if plus */ -#define _kc_LEFT 16 /* left justified */ -#define _kc_SPECIAL 32 /* 0x */ -#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ - -static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type) -{ - char c,sign,tmp[66]; - const char *digits; - const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; - const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - int i; - - digits = (type & _kc_LARGE) ? large_digits : small_digits; - if (type & _kc_LEFT) - type &= ~_kc_ZEROPAD; - if (base < 2 || base > 36) - return 0; - c = (type & _kc_ZEROPAD) ? 
'0' : ' '; - sign = 0; - if (type & _kc_SIGN) { - if (num < 0) { - sign = '-'; - num = -num; - size--; - } else if (type & _kc_PLUS) { - sign = '+'; - size--; - } else if (type & _kc_SPACE) { - sign = ' '; - size--; - } - } - if (type & _kc_SPECIAL) { - if (base == 16) - size -= 2; - else if (base == 8) - size--; - } - i = 0; - if (num == 0) - tmp[i++]='0'; - else while (num != 0) - tmp[i++] = digits[do_div(num,base)]; - if (i > precision) - precision = i; - size -= precision; - if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { - while(size-->0) { - if (buf <= end) - *buf = ' '; - ++buf; - } - } - if (sign) { - if (buf <= end) - *buf = sign; - ++buf; - } - if (type & _kc_SPECIAL) { - if (base==8) { - if (buf <= end) - *buf = '0'; - ++buf; - } else if (base==16) { - if (buf <= end) - *buf = '0'; - ++buf; - if (buf <= end) - *buf = digits[33]; - ++buf; - } - } - if (!(type & _kc_LEFT)) { - while (size-- > 0) { - if (buf <= end) - *buf = c; - ++buf; - } - } - while (i < precision--) { - if (buf <= end) - *buf = '0'; - ++buf; - } - while (i-- > 0) { - if (buf <= end) - *buf = tmp[i]; - ++buf; - } - while (size-- > 0) { - if (buf <= end) - *buf = ' '; - ++buf; - } - return buf; -} - -int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) -{ - int len; - unsigned long long num; - int i, base; - char *str, *end, c; - const char *s; - - int flags; /* flags to number() */ - - int field_width; /* width of output field */ - int precision; /* min. # of digits for integers; max - number of chars for from string */ - int qualifier; /* 'h', 'l', or 'L' for integer fields */ - /* 'z' support added 23/7/1999 S.H. 
*/ - /* 'z' changed to 'Z' --davidm 1/25/99 */ - - str = buf; - end = buf + size - 1; - - if (end < buf - 1) { - end = ((void *) -1); - size = end - buf + 1; - } - - for (; *fmt ; ++fmt) { - if (*fmt != '%') { - if (str <= end) - *str = *fmt; - ++str; - continue; - } - - /* process flags */ - flags = 0; - repeat: - ++fmt; /* this also skips first '%' */ - switch (*fmt) { - case '-': flags |= _kc_LEFT; goto repeat; - case '+': flags |= _kc_PLUS; goto repeat; - case ' ': flags |= _kc_SPACE; goto repeat; - case '#': flags |= _kc_SPECIAL; goto repeat; - case '0': flags |= _kc_ZEROPAD; goto repeat; - } - - /* get field width */ - field_width = -1; - if (isdigit(*fmt)) - field_width = skip_atoi(&fmt); - else if (*fmt == '*') { - ++fmt; - /* it's the next argument */ - field_width = va_arg(args, int); - if (field_width < 0) { - field_width = -field_width; - flags |= _kc_LEFT; - } - } - - /* get the precision */ - precision = -1; - if (*fmt == '.') { - ++fmt; - if (isdigit(*fmt)) - precision = skip_atoi(&fmt); - else if (*fmt == '*') { - ++fmt; - /* it's the next argument */ - precision = va_arg(args, int); - } - if (precision < 0) - precision = 0; - } - - /* get the conversion qualifier */ - qualifier = -1; - if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { - qualifier = *fmt; - ++fmt; - } - - /* default base */ - base = 10; - - switch (*fmt) { - case 'c': - if (!(flags & _kc_LEFT)) { - while (--field_width > 0) { - if (str <= end) - *str = ' '; - ++str; - } - } - c = (unsigned char) va_arg(args, int); - if (str <= end) - *str = c; - ++str; - while (--field_width > 0) { - if (str <= end) - *str = ' '; - ++str; - } - continue; - - case 's': - s = va_arg(args, char *); - if (!s) - s = ""; - - len = strnlen(s, precision); - - if (!(flags & _kc_LEFT)) { - while (len < field_width--) { - if (str <= end) - *str = ' '; - ++str; - } - } - for (i = 0; i < len; ++i) { - if (str <= end) - *str = *s; - ++str; ++s; - } - while (len < field_width--) { - if (str <= end) - 
*str = ' '; - ++str; - } - continue; - - case 'p': - if ('M' == *(fmt+1)) { - str = get_mac(str, end, va_arg(args, unsigned char *)); - fmt++; - } else { - if (field_width == -1) { - field_width = 2*sizeof(void *); - flags |= _kc_ZEROPAD; - } - str = number(str, end, - (unsigned long) va_arg(args, void *), - 16, field_width, precision, flags); - } - continue; - - case 'n': - /* FIXME: - * What does C99 say about the overflow case here? */ - if (qualifier == 'l') { - long * ip = va_arg(args, long *); - *ip = (str - buf); - } else if (qualifier == 'Z') { - size_t * ip = va_arg(args, size_t *); - *ip = (str - buf); - } else { - int * ip = va_arg(args, int *); - *ip = (str - buf); - } - continue; - - case '%': - if (str <= end) - *str = '%'; - ++str; - continue; - - /* integer number formats - set up the flags and "break" */ - case 'o': - base = 8; - break; - - case 'X': - flags |= _kc_LARGE; - case 'x': - base = 16; - break; - - case 'd': - case 'i': - flags |= _kc_SIGN; - case 'u': - break; - - default: - if (str <= end) - *str = '%'; - ++str; - if (*fmt) { - if (str <= end) - *str = *fmt; - ++str; - } else { - --fmt; - } - continue; - } - if (qualifier == 'L') - num = va_arg(args, long long); - else if (qualifier == 'l') { - num = va_arg(args, unsigned long); - if (flags & _kc_SIGN) - num = (signed long) num; - } else if (qualifier == 'Z') { - num = va_arg(args, size_t); - } else if (qualifier == 'h') { - num = (unsigned short) va_arg(args, int); - if (flags & _kc_SIGN) - num = (signed short) num; - } else { - num = va_arg(args, unsigned int); - if (flags & _kc_SIGN) - num = (signed int) num; - } - str = number(str, end, num, base, - field_width, precision, flags); - } - if (str <= end) - *str = '\0'; - else if (size > 0) - /* don't write out a null byte if the buf size is zero */ - *end = '\0'; - /* the trailing null byte doesn't count towards the total - * ++str; - */ - return str-buf; -} - -int _kc_snprintf(char * buf, size_t size, const char *fmt, ...) 
-{ - va_list args; - int i; - - va_start(args, fmt); - i = _kc_vsnprintf(buf,size,fmt,args); - va_end(args); - return i; -} -#endif /* < 2.4.8 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) - -/**************************************/ -/* PCI DMA MAPPING */ - -#if defined(CONFIG_HIGHMEM) - -#ifndef PCI_DRAM_OFFSET -#define PCI_DRAM_OFFSET 0 -#endif - -u64 -_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, - size_t size, int direction) -{ - return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + - PCI_DRAM_OFFSET); -} - -#else /* CONFIG_HIGHMEM */ - -u64 -_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, - size_t size, int direction) -{ - return pci_map_single(dev, (void *)page_address(page) + offset, size, - direction); -} - -#endif /* CONFIG_HIGHMEM */ - -void -_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, - int direction) -{ - return pci_unmap_single(dev, dma_addr, size, direction); -} - -#endif /* 2.4.13 => 2.4.3 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) - -/**************************************/ -/* PCI DRIVER API */ - -int -_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) -{ - if (!pci_dma_supported(dev, mask)) - return -EIO; - dev->dma_mask = mask; - return 0; -} - -int -_kc_pci_request_regions(struct pci_dev *dev, char *res_name) -{ - int i; - - for (i = 0; i < 6; i++) { - if (pci_resource_len(dev, i) == 0) - continue; - - if (pci_resource_flags(dev, i) & IORESOURCE_IO) { - if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { - pci_release_regions(dev); - return -EBUSY; - } - } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { - if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { - pci_release_regions(dev); 
- return -EBUSY; - } - } - } - return 0; -} - -void -_kc_pci_release_regions(struct pci_dev *dev) -{ - int i; - - for (i = 0; i < 6; i++) { - if (pci_resource_len(dev, i) == 0) - continue; - - if (pci_resource_flags(dev, i) & IORESOURCE_IO) - release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); - - else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) - release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); - } -} - -/**************************************/ -/* NETWORK DRIVER API */ - -struct net_device * -_kc_alloc_etherdev(int sizeof_priv) -{ - struct net_device *dev; - int alloc_size; - - alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; - dev = kzalloc(alloc_size, GFP_KERNEL); - if (!dev) - return NULL; - - if (sizeof_priv) - dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); - dev->name[0] = '\0'; - ether_setup(dev); - - return dev; -} - -int -_kc_is_valid_ether_addr(u8 *addr) -{ - const char zaddr[6] = { 0, }; - - return !(addr[0] & 1) && memcmp(addr, zaddr, 6); -} - -#endif /* 2.4.3 => 2.4.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) - -int -_kc_pci_set_power_state(struct pci_dev *dev, int state) -{ - return 0; -} - -int -_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) -{ - return 0; -} - -#endif /* 2.4.6 => 2.4.3 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) -void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, - int off, int size) -{ - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - frag->page = page; - frag->page_offset = off; - frag->size = size; - skb_shinfo(skb)->nr_frags = i + 1; -} - -/* - * Original Copyright: - * find_next_bit.c: fallback find next bit implementation - * - * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. 
- * Written by David Howells (dhowells@redhat.com) - */ - -/** - * find_next_bit - find the next set bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -unsigned long find_next_bit(const unsigned long *addr, unsigned long size, - unsigned long offset) -{ - const unsigned long *p = addr + BITOP_WORD(offset); - unsigned long result = offset & ~(BITS_PER_LONG-1); - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset %= BITS_PER_LONG; - if (offset) { - tmp = *(p++); - tmp &= (~0UL << offset); - if (size < BITS_PER_LONG) - goto found_first; - if (tmp) - goto found_middle; - size -= BITS_PER_LONG; - result += BITS_PER_LONG; - } - while (size & ~(BITS_PER_LONG-1)) { - if ((tmp = *(p++))) - goto found_middle; - result += BITS_PER_LONG; - size -= BITS_PER_LONG; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp &= (~0UL >> (BITS_PER_LONG - size)); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. */ -found_middle: - return result + ffs(tmp); -} - -size_t _kc_strlcpy(char *dest, const char *src, size_t size) -{ - size_t ret = strlen(src); - - if (size) { - size_t len = (ret >= size) ? 
size - 1 : ret; - memcpy(dest, src, len); - dest[len] = '\0'; - } - return ret; -} - -#ifndef do_div -#if BITS_PER_LONG == 32 -uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) -{ - uint64_t rem = *n; - uint64_t b = base; - uint64_t res, d = 1; - uint32_t high = rem >> 32; - - /* Reduce the thing a bit first */ - res = 0; - if (high >= base) { - high /= base; - res = (uint64_t) high << 32; - rem -= (uint64_t) (high*base) << 32; - } - - while ((int64_t)b > 0 && b < rem) { - b = b+b; - d = d+d; - } - - do { - if (rem >= b) { - rem -= b; - res += d; - } - b >>= 1; - d >>= 1; - } while (d); - - *n = res; - return rem; -} -#endif /* BITS_PER_LONG == 32 */ -#endif /* do_div */ -#endif /* 2.6.0 => 2.4.6 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) -int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...) -{ - va_list args; - int i; - - va_start(args, fmt); - i = vsnprintf(buf, size, fmt, args); - va_end(args); - return (i >= size) ? 
(size - 1) : i; -} -#endif /* < 2.6.4 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) -DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; -#endif /* < 2.6.10 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) -char *_kc_kstrdup(const char *s, unsigned int gfp) -{ - size_t len; - char *buf; - - if (!s) - return NULL; - - len = strlen(s) + 1; - buf = kmalloc(len, gfp); - if (buf) - memcpy(buf, s, len); - return buf; -} -#endif /* < 2.6.13 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) -void *_kc_kzalloc(size_t size, int flags) -{ - void *ret = kmalloc(size, flags); - if (ret) - memset(ret, 0, size); - return ret; -} -#endif /* <= 2.6.13 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) -int _kc_skb_pad(struct sk_buff *skb, int pad) -{ - int ntail; - - /* If the skbuff is non linear tailroom is always zero.. 
*/ - if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) { - memset(skb->data+skb->len, 0, pad); - return 0; - } - - ntail = skb->data_len + pad - (skb->end - skb->tail); - if (likely(skb_cloned(skb) || ntail > 0)) { - if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) - goto free_skb; - } - -#ifdef MAX_SKB_FRAGS - if (skb_is_nonlinear(skb) && - !__pskb_pull_tail(skb, skb->data_len)) - goto free_skb; - -#endif - memset(skb->data + skb->len, 0, pad); - return 0; - -free_skb: - kfree_skb(skb); - return -ENOMEM; -} - -#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) -int _kc_pci_save_state(struct pci_dev *pdev) -{ - struct adapter_struct *adapter = pci_get_drvdata(pdev); - int size = PCI_CONFIG_SPACE_LEN, i; - u16 pcie_cap_offset, pcie_link_status; - -#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) - /* no ->dev for 2.4 kernels */ - WARN_ON(pdev->dev.driver_data == NULL); -#endif - pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); - if (pcie_cap_offset) { - if (!pci_read_config_word(pdev, - pcie_cap_offset + PCIE_LINK_STATUS, - &pcie_link_status)) - size = PCIE_CONFIG_SPACE_LEN; - } - pci_config_space_ich8lan(); -#ifdef HAVE_PCI_ERS - if (adapter->config_space == NULL) -#else - WARN_ON(adapter->config_space != NULL); -#endif - adapter->config_space = kmalloc(size, GFP_KERNEL); - if (!adapter->config_space) { - printk(KERN_ERR "Out of memory in pci_save_state\n"); - return -ENOMEM; - } - for (i = 0; i < (size / 4); i++) - pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); - return 0; -} - -void _kc_pci_restore_state(struct pci_dev *pdev) -{ - struct adapter_struct *adapter = pci_get_drvdata(pdev); - int size = PCI_CONFIG_SPACE_LEN, i; - u16 pcie_cap_offset; - u16 pcie_link_status; - - if (adapter->config_space != NULL) { - pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); - if (pcie_cap_offset && - !pci_read_config_word(pdev, - pcie_cap_offset + PCIE_LINK_STATUS, - &pcie_link_status)) - size = 
PCIE_CONFIG_SPACE_LEN; - - pci_config_space_ich8lan(); - for (i = 0; i < (size / 4); i++) - pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); -#ifndef HAVE_PCI_ERS - kfree(adapter->config_space); - adapter->config_space = NULL; -#endif - } -} -#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ - -#ifdef HAVE_PCI_ERS -void _kc_free_netdev(struct net_device *netdev) -{ - struct adapter_struct *adapter = netdev_priv(netdev); - - kfree(adapter->config_space); -#ifdef CONFIG_SYSFS - if (netdev->reg_state == NETREG_UNINITIALIZED) { - kfree((char *)netdev - netdev->padded); - } else { - BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); - netdev->reg_state = NETREG_RELEASED; - class_device_put(&netdev->class_dev); - } -#else - kfree((char *)netdev - netdev->padded); -#endif -} -#endif - -void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) -{ - void *p; - - p = kzalloc(len, gfp); - if (p) - memcpy(p, src, len); - return p; -} -#endif /* <= 2.6.19 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) -struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) -{ - return ((struct adapter_struct *)netdev_priv(netdev))->pdev; -} -#endif /* < 2.6.21 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) -/* hexdump code taken from lib/hexdump.c */ -static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, - int groupsize, unsigned char *linebuf, - size_t linebuflen, bool ascii) -{ - const u8 *ptr = buf; - u8 ch; - int j, lx = 0; - int ascii_column; - - if (rowsize != 16 && rowsize != 32) - rowsize = 16; - - if (!len) - goto nil; - if (len > rowsize) /* limit to one line at a time */ - len = rowsize; - if ((len % groupsize) != 0) /* no mixed size output */ - groupsize = 1; - - switch (groupsize) { - case 8: { - const u64 *ptr8 = buf; - int ngroups = len / groupsize; - - for 
(j = 0; j < ngroups; j++) - lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, - "%s%16.16llx", j ? " " : "", - (unsigned long long)*(ptr8 + j)); - ascii_column = 17 * ngroups + 2; - break; - } - - case 4: { - const u32 *ptr4 = buf; - int ngroups = len / groupsize; - - for (j = 0; j < ngroups; j++) - lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, - "%s%8.8x", j ? " " : "", *(ptr4 + j)); - ascii_column = 9 * ngroups + 2; - break; - } - - case 2: { - const u16 *ptr2 = buf; - int ngroups = len / groupsize; - - for (j = 0; j < ngroups; j++) - lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, - "%s%4.4x", j ? " " : "", *(ptr2 + j)); - ascii_column = 5 * ngroups + 2; - break; - } - - default: - for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { - ch = ptr[j]; - linebuf[lx++] = hex_asc(ch >> 4); - linebuf[lx++] = hex_asc(ch & 0x0f); - linebuf[lx++] = ' '; - } - if (j) - lx--; - - ascii_column = 3 * rowsize + 2; - break; - } - if (!ascii) - goto nil; - - while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) - linebuf[lx++] = ' '; - for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) - linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? 
ptr[j] - : '.'; -nil: - linebuf[lx++] = '\0'; -} - -void _kc_print_hex_dump(const char *level, - const char *prefix_str, int prefix_type, - int rowsize, int groupsize, - const void *buf, size_t len, bool ascii) -{ - const u8 *ptr = buf; - int i, linelen, remaining = len; - unsigned char linebuf[200]; - - if (rowsize != 16 && rowsize != 32) - rowsize = 16; - - for (i = 0; i < len; i += rowsize) { - linelen = min(remaining, rowsize); - remaining -= rowsize; - _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, - linebuf, sizeof(linebuf), ascii); - - switch (prefix_type) { - case DUMP_PREFIX_ADDRESS: - printk("%s%s%*p: %s\n", level, prefix_str, - (int)(2 * sizeof(void *)), ptr + i, linebuf); - break; - case DUMP_PREFIX_OFFSET: - printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); - break; - default: - printk("%s%s%s\n", level, prefix_str, linebuf); - break; - } - } -} - -#endif /* < 2.6.22 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) -int ixgbe_dcb_netlink_register(void) -{ - return 0; -} - -int ixgbe_dcb_netlink_unregister(void) -{ - return 0; -} - -int ixgbe_copy_dcb_cfg(struct ixgbe_adapter __always_unused *adapter, int __always_unused tc_max) -{ - return 0; -} -#endif /* < 2.6.23 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) -#ifdef NAPI -struct net_device *napi_to_poll_dev(const struct napi_struct *napi) -{ - struct adapter_q_vector *q_vector = container_of(napi, - struct adapter_q_vector, - napi); - return &q_vector->poll_dev; -} - -int __kc_adapter_clean(struct net_device *netdev, int *budget) -{ - int work_done; - int work_to_do = min(*budget, netdev->quota); - /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ - struct napi_struct *napi = netdev->priv; - work_done = napi->poll(napi, work_to_do); - *budget -= work_done; - netdev->quota -= 
work_done; - return (work_done >= work_to_do) ? 1 : 0; -} -#endif /* NAPI */ -#endif /* <= 2.6.24 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) -void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) -{ - struct pci_dev *parent = pdev->bus->self; - u16 link_state; - int pos; - - if (!parent) - return; - - pos = pci_find_capability(parent, PCI_CAP_ID_EXP); - if (pos) { - pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); - link_state &= ~state; - pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); - } -} -#endif /* < 2.6.26 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) -#ifdef HAVE_TX_MQ -void _kc_netif_tx_stop_all_queues(struct net_device *netdev) -{ - struct adapter_struct *adapter = netdev_priv(netdev); - int i; - - netif_stop_queue(netdev); - if (netif_is_multiqueue(netdev)) - for (i = 0; i < adapter->num_tx_queues; i++) - netif_stop_subqueue(netdev, i); -} -void _kc_netif_tx_wake_all_queues(struct net_device *netdev) -{ - struct adapter_struct *adapter = netdev_priv(netdev); - int i; - - netif_wake_queue(netdev); - if (netif_is_multiqueue(netdev)) - for (i = 0; i < adapter->num_tx_queues; i++) - netif_wake_subqueue(netdev, i); -} -void _kc_netif_tx_start_all_queues(struct net_device *netdev) -{ - struct adapter_struct *adapter = netdev_priv(netdev); - int i; - - netif_start_queue(netdev); - if (netif_is_multiqueue(netdev)) - for (i = 0; i < adapter->num_tx_queues; i++) - netif_start_subqueue(netdev, i); -} -#endif /* HAVE_TX_MQ */ - -void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) 
-{ - va_list args; - - printk(KERN_WARNING "------------[ cut here ]------------\n"); - printk(KERN_WARNING "WARNING: at %s:%d \n", file, line); - va_start(args, fmt); - vprintk(fmt, args); - va_end(args); - - dump_stack(); -} -#endif /* __VMKLNX__ */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) - -int -_kc_pci_prepare_to_sleep(struct pci_dev *dev) -{ - pci_power_t target_state; - int error; - - target_state = pci_choose_state(dev, PMSG_SUSPEND); - - pci_enable_wake(dev, target_state, true); - - error = pci_set_power_state(dev, target_state); - - if (error) - pci_enable_wake(dev, target_state, false); - - return error; -} - -int -_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) -{ - int err; - - err = pci_enable_wake(dev, PCI_D3cold, enable); - if (err) - goto out; - - err = pci_enable_wake(dev, PCI_D3hot, enable); - -out: - return err; -} -#endif /* < 2.6.28 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) -static void __kc_pci_set_master(struct pci_dev *pdev, bool enable) -{ - u16 old_cmd, cmd; - - pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); - if (enable) - cmd = old_cmd | PCI_COMMAND_MASTER; - else - cmd = old_cmd & ~PCI_COMMAND_MASTER; - if (cmd != old_cmd) { - dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n", - enable ? 
"enabling" : "disabling"); - pci_write_config_word(pdev, PCI_COMMAND, cmd); - } -#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) ) - pdev->is_busmaster = enable; -#endif -} - -void _kc_pci_clear_master(struct pci_dev *dev) -{ - __kc_pci_set_master(dev, false); -} -#endif /* < 2.6.29 */ - -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) -#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) -int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev) -{ - int num_vf = 0; -#ifdef CONFIG_PCI_IOV - struct pci_dev *vfdev; - - /* loop through all ethernet devices starting at PF dev */ - vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); - while (vfdev) { - if (vfdev->is_virtfn && vfdev->physfn == dev) - num_vf++; - - vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); - } - -#endif - return num_vf; -} -#endif /* RHEL_RELEASE_CODE */ -#endif /* < 2.6.34 */ - -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) -#ifdef HAVE_TX_MQ -#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) -#ifndef CONFIG_NETDEVICES_MULTIQUEUE -int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) -{ - unsigned int real_num = dev->real_num_tx_queues; - struct Qdisc *qdisc; - int i; - - if (txq < 1 || txq > dev->num_tx_queues) - return -EINVAL; - - else if (txq > real_num) - dev->real_num_tx_queues = txq; - else if (txq < real_num) { - dev->real_num_tx_queues = txq; - for (i = txq; i < dev->num_tx_queues; i++) { - qdisc = netdev_get_tx_queue(dev, i)->qdisc; - if (qdisc) { - spin_lock_bh(qdisc_lock(qdisc)); - qdisc_reset(qdisc); - spin_unlock_bh(qdisc_lock(qdisc)); - } - } - } - - return 0; -} -#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ -#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ -#endif /* HAVE_TX_MQ */ - -ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, - const void __user *from, size_t count) -{ - loff_t pos = *ppos; - size_t res; - - if (pos < 0) - return -EINVAL; - if (pos >= 
available || !count) - return 0; - if (count > available - pos) - count = available - pos; - res = copy_from_user(to + pos, from, count); - if (res == count) - return -EFAULT; - count -= res; - *ppos = pos + count; - return count; -} - -#endif /* < 2.6.35 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) -static const u32 _kc_flags_dup_features = - (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); - -u32 _kc_ethtool_op_get_flags(struct net_device *dev) -{ - return dev->features & _kc_flags_dup_features; -} - -int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) -{ - if (data & ~supported) - return -EINVAL; - - dev->features = ((dev->features & ~_kc_flags_dup_features) | - (data & _kc_flags_dup_features)); - return 0; -} -#endif /* < 2.6.36 */ - -/******************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) -#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) -#ifdef HAVE_NETDEV_SELECT_QUEUE -#include -#include - -u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb, - u16 num_tx_queues) -{ - u32 hash; - u16 qoffset = 0; - u16 qcount = num_tx_queues; - - if (skb_rx_queue_recorded(skb)) { - hash = skb_get_rx_queue(skb); - while (unlikely(hash >= num_tx_queues)) - hash -= num_tx_queues; - return hash; - } - - if (netdev_get_num_tc(dev)) { - struct adapter_struct *kc_adapter = netdev_priv(dev); - - if (skb->priority == TC_PRIO_CONTROL) { - qoffset = kc_adapter->dcb_tc - 1; - } else { - qoffset = skb->vlan_tci; - qoffset &= IXGBE_TX_FLAGS_VLAN_PRIO_MASK; - qoffset >>= 13; - } - - qcount = kc_adapter->ring_feature[RING_F_RSS].indices; - qoffset *= qcount; - } - - if (skb->sk && skb->sk->sk_hash) - hash = skb->sk->sk_hash; - else -#ifdef NETIF_F_RXHASH - hash = (__force u16) skb->protocol ^ skb->rxhash; -#else - hash = skb->protocol; -#endif - - 
hash = jhash_1word(hash, _kc_hashrnd); - - return (u16) (((u64) hash * qcount) >> 32) + qoffset; -} -#endif /* HAVE_NETDEV_SELECT_QUEUE */ - -u8 _kc_netdev_get_num_tc(struct net_device *dev) -{ - struct adapter_struct *kc_adapter = netdev_priv(dev); - if (kc_adapter->flags & IXGBE_FLAG_DCB_ENABLED) - return kc_adapter->dcb_tc; - else - return 0; -} - -int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc) -{ - struct adapter_struct *kc_adapter = netdev_priv(dev); - - if (num_tc > IXGBE_DCB_MAX_TRAFFIC_CLASS) - return -EINVAL; - - kc_adapter->dcb_tc = num_tc; - - return 0; -} - -u8 _kc_netdev_get_prio_tc_map(struct net_device __maybe_unused *dev, u8 __maybe_unused up) -{ - struct adapter_struct *kc_adapter = netdev_priv(dev); - - return ixgbe_dcb_get_tc_from_up(&kc_adapter->dcb_cfg, 0, up); -} - -#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ -#endif /* < 2.6.39 */ - -/******************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) -void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, - int off, int size, unsigned int truesize) -{ - skb_fill_page_desc(skb, i, page, off, size); - skb->len += size; - skb->data_len += size; - skb->truesize += truesize; -} - -#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) -int _kc_simple_open(struct inode *inode, struct file *file) -{ - if (inode->i_private) - file->private_data = inode->i_private; - - return 0; -} -#endif /* SLE_VERSION < 11,3,0 */ - -#endif /* < 3.4.0 */ - -/******************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) -static inline int __kc_pcie_cap_version(struct pci_dev *dev) -{ - int pos; - u16 reg16; - - pos = pci_find_capability(dev, PCI_CAP_ID_EXP); - if (!pos) - return 0; - pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); - return reg16 & PCI_EXP_FLAGS_VERS; -} - -static inline bool 
__kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev) -{ - return true; -} - -static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) -{ - int type = pci_pcie_type(dev); - - return __kc_pcie_cap_version(dev) > 1 || - type == PCI_EXP_TYPE_ROOT_PORT || - type == PCI_EXP_TYPE_ENDPOINT || - type == PCI_EXP_TYPE_LEG_END; -} - -static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) -{ - int type = pci_pcie_type(dev); - int pos; - u16 pcie_flags_reg; - - pos = pci_find_capability(dev, PCI_CAP_ID_EXP); - if (!pos) - return false; - pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); - - return __kc_pcie_cap_version(dev) > 1 || - type == PCI_EXP_TYPE_ROOT_PORT || - (type == PCI_EXP_TYPE_DOWNSTREAM && - pcie_flags_reg & PCI_EXP_FLAGS_SLOT); -} - -static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) -{ - int type = pci_pcie_type(dev); - - return __kc_pcie_cap_version(dev) > 1 || - type == PCI_EXP_TYPE_ROOT_PORT || - type == PCI_EXP_TYPE_RC_EC; -} - -static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) -{ - if (!pci_is_pcie(dev)) - return false; - - switch (pos) { - case PCI_EXP_FLAGS_TYPE: - return true; - case PCI_EXP_DEVCAP: - case PCI_EXP_DEVCTL: - case PCI_EXP_DEVSTA: - return __kc_pcie_cap_has_devctl(dev); - case PCI_EXP_LNKCAP: - case PCI_EXP_LNKCTL: - case PCI_EXP_LNKSTA: - return __kc_pcie_cap_has_lnkctl(dev); - case PCI_EXP_SLTCAP: - case PCI_EXP_SLTCTL: - case PCI_EXP_SLTSTA: - return __kc_pcie_cap_has_sltctl(dev); - case PCI_EXP_RTCTL: - case PCI_EXP_RTCAP: - case PCI_EXP_RTSTA: - return __kc_pcie_cap_has_rtctl(dev); - case PCI_EXP_DEVCAP2: - case PCI_EXP_DEVCTL2: - case PCI_EXP_LNKCAP2: - case PCI_EXP_LNKCTL2: - case PCI_EXP_LNKSTA2: - return __kc_pcie_cap_version(dev) > 1; - default: - return false; - } -} - -/* - * Note that these accessor functions are only for the "PCI Express - * Capability" (see PCIe spec r3.0, sec 7.8). 
They do not apply to the - * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) - */ -int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) -{ - int ret; - - *val = 0; - if (pos & 1) - return -EINVAL; - - if (__kc_pcie_capability_reg_implemented(dev, pos)) { - ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); - /* - * Reset *val to 0 if pci_read_config_word() fails, it may - * have been written as 0xFFFF if hardware error happens - * during pci_read_config_word(). - */ - if (ret) - *val = 0; - return ret; - } - - /* - * For Functions that do not implement the Slot Capabilities, - * Slot Status, and Slot Control registers, these spaces must - * be hardwired to 0b, with the exception of the Presence Detect - * State bit in the Slot Status register of Downstream Ports, - * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) - */ - if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && - pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { - *val = PCI_EXP_SLTSTA_PDS; - } - - return 0; -} - -int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) -{ - if (pos & 1) - return -EINVAL; - - if (!__kc_pcie_capability_reg_implemented(dev, pos)) - return 0; - - return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); -} - -int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, - u16 clear, u16 set) -{ - int ret; - u16 val; - - ret = __kc_pcie_capability_read_word(dev, pos, &val); - if (!ret) { - val &= ~clear; - val |= set; - ret = __kc_pcie_capability_write_word(dev, pos, val); - } - - return ret; -} - -int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, - u16 clear) -{ - return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); -} -#endif /* < 3.7.0 */ - -/****************************************************************************** - * ripped from linux/net/ipv6/exthdrs_core.c, GPL2, no direct copyright, - * inferred copyright from kernel - */ -#if ( 
LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) -int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, - int target, unsigned short *fragoff, int *flags) -{ - unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); - u8 nexthdr = ipv6_hdr(skb)->nexthdr; - unsigned int len; - bool found; - -#define __KC_IP6_FH_F_FRAG BIT(0) -#define __KC_IP6_FH_F_AUTH BIT(1) -#define __KC_IP6_FH_F_SKIP_RH BIT(2) - - if (fragoff) - *fragoff = 0; - - if (*offset) { - struct ipv6hdr _ip6, *ip6; - - ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); - if (!ip6 || (ip6->version != 6)) { - printk(KERN_ERR "IPv6 header not found\n"); - return -EBADMSG; - } - start = *offset + sizeof(struct ipv6hdr); - nexthdr = ip6->nexthdr; - } - len = skb->len - start; - - do { - struct ipv6_opt_hdr _hdr, *hp; - unsigned int hdrlen; - found = (nexthdr == target); - - if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { - if (target < 0 || found) - break; - return -ENOENT; - } - - hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); - if (!hp) - return -EBADMSG; - - if (nexthdr == NEXTHDR_ROUTING) { - struct ipv6_rt_hdr _rh, *rh; - - rh = skb_header_pointer(skb, start, sizeof(_rh), - &_rh); - if (!rh) - return -EBADMSG; - - if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) && - rh->segments_left == 0) - found = false; - } - - if (nexthdr == NEXTHDR_FRAGMENT) { - unsigned short _frag_off; - __be16 *fp; - - if (flags) /* Indicate that this is a fragment */ - *flags |= __KC_IP6_FH_F_FRAG; - fp = skb_header_pointer(skb, - start+offsetof(struct frag_hdr, - frag_off), - sizeof(_frag_off), - &_frag_off); - if (!fp) - return -EBADMSG; - - _frag_off = ntohs(*fp) & ~0x7; - if (_frag_off) { - if (target < 0 && - ((!ipv6_ext_hdr(hp->nexthdr)) || - hp->nexthdr == NEXTHDR_NONE)) { - if (fragoff) - *fragoff = _frag_off; - return hp->nexthdr; - } - return -ENOENT; - } - hdrlen = 8; - } else if (nexthdr == NEXTHDR_AUTH) { - if (flags && (*flags & __KC_IP6_FH_F_AUTH) && 
(target < 0)) - break; - hdrlen = (hp->hdrlen + 2) << 2; - } else - hdrlen = ipv6_optlen(hp); - - if (!found) { - nexthdr = hp->nexthdr; - len -= hdrlen; - start += hdrlen; - } - } while (!found); - - *offset = start; - return nexthdr; -} -#endif /* < 3.8.0 */ - -/******************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) -#ifdef CONFIG_XPS -#if NR_CPUS < 64 -#define _KC_MAX_XPS_CPUS NR_CPUS -#else -#define _KC_MAX_XPS_CPUS 64 -#endif - -/* - * netdev_queue sysfs structures and functions. - */ -struct _kc_netdev_queue_attribute { - struct attribute attr; - ssize_t (*show)(struct netdev_queue *queue, - struct _kc_netdev_queue_attribute *attr, char *buf); - ssize_t (*store)(struct netdev_queue *queue, - struct _kc_netdev_queue_attribute *attr, const char *buf, size_t len); -}; - -#define to_kc_netdev_queue_attr(_attr) container_of(_attr, \ - struct _kc_netdev_queue_attribute, attr) - -int __kc_netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, - u16 index) -{ - struct netdev_queue *txq = netdev_get_tx_queue(dev, index); -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) - /* Redhat requires some odd extended netdev structures */ - struct netdev_tx_queue_extended *txq_ext = - netdev_extended(dev)->_tx_ext + index; - struct kobj_type *ktype = txq_ext->kobj.ktype; -#else - struct kobj_type *ktype = txq->kobj.ktype; -#endif - struct _kc_netdev_queue_attribute *xps_attr; - struct attribute *attr = NULL; - int i, len, err; -#define _KC_XPS_BUFLEN (DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9) - char buf[_KC_XPS_BUFLEN]; - - if (!ktype) - return -ENOMEM; - - /* attempt to locate the XPS attribute in the Tx queue */ - for (i = 0; (attr = ktype->default_attrs[i]); i++) { - if (!strcmp("xps_cpus", attr->name)) - break; - } - - /* if we did not find it return an error */ - if (!attr) - return -EINVAL; - - /* copy the mask into a string */ - len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN, - 
cpumask_bits(mask), _KC_MAX_XPS_CPUS); - if (!len) - return -ENOMEM; - - xps_attr = to_kc_netdev_queue_attr(attr); - - /* Store the XPS value using the SYSFS store call */ - err = xps_attr->store(txq, xps_attr, buf, len); - - /* we only had an error on err < 0 */ - return (err < 0) ? err : 0; -} -#endif /* CONFIG_XPS */ -#ifdef HAVE_NETDEV_SELECT_QUEUE -static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb) -{ -#ifdef CONFIG_XPS - struct xps_dev_maps *dev_maps; - struct xps_map *map; - int queue_index = -1; - - rcu_read_lock(); -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) - /* Redhat requires some odd extended netdev structures */ - dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps); -#else - dev_maps = rcu_dereference(dev->xps_maps); -#endif - if (dev_maps) { - map = rcu_dereference( - dev_maps->cpu_map[raw_smp_processor_id()]); - if (map) { - if (map->len == 1) - queue_index = map->queues[0]; - else { - u32 hash; - if (skb->sk && skb->sk->sk_hash) - hash = skb->sk->sk_hash; - else - hash = (__force u16) skb->protocol ^ - skb->rxhash; - hash = jhash_1word(hash, _kc_hashrnd); - queue_index = map->queues[ - ((u64)hash * map->len) >> 32]; - } - if (unlikely(queue_index >= dev->real_num_tx_queues)) - queue_index = -1; - } - } - rcu_read_unlock(); - - return queue_index; -#else - struct adapter_struct *kc_adapter = netdev_priv(dev); - int queue_index = -1; - - if (kc_adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - queue_index = skb_rx_queue_recorded(skb) ? 
- skb_get_rx_queue(skb) : - smp_processor_id(); - while (unlikely(queue_index >= dev->real_num_tx_queues)) - queue_index -= dev->real_num_tx_queues; - return queue_index; - } - - return -1; -#endif -} - -u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) -{ - struct sock *sk = skb->sk; - int queue_index = sk_tx_queue_get(sk); - int new_index; - - if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) { -#ifdef CONFIG_XPS - if (!skb->ooo_okay) -#endif - return queue_index; - } - - new_index = kc_get_xps_queue(dev, skb); - if (new_index < 0) - new_index = skb_tx_hash(dev, skb); - - if (queue_index != new_index && sk) { - struct dst_entry *dst = - rcu_dereference(sk->sk_dst_cache); - - if (dst && skb_dst(skb) == dst) - sk_tx_queue_set(sk, new_index); - - } - - return new_index; -} - -#endif /* HAVE_NETDEV_SELECT_QUEUE */ -#endif /* 3.9.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) -#ifdef HAVE_FDB_OPS -#ifdef USE_CONST_DEV_UC_CHAR -int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, const unsigned char *addr, - u16 flags) -#else -int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, - unsigned char *addr, u16 flags) -#endif -{ - int err = -EINVAL; - - /* If aging addresses are supported device will need to - * implement its own handler for this. 
- */ - if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { - pr_info("%s: FDB only supports static addresses\n", dev->name); - return err; - } - - if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) - err = dev_uc_add_excl(dev, addr); - else if (is_multicast_ether_addr(addr)) - err = dev_mc_add_excl(dev, addr); - - /* Only return duplicate errors if NLM_F_EXCL is set */ - if (err == -EEXIST && !(flags & NLM_F_EXCL)) - err = 0; - - return err; -} - -#ifdef USE_CONST_DEV_UC_CHAR -#ifdef HAVE_FDB_DEL_NLATTR -int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, const unsigned char *addr) -#else -int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, - const unsigned char *addr) -#endif -#else -int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, - unsigned char *addr) -#endif -{ - int err = -EINVAL; - - /* If aging addresses are supported device will need to - * implement its own handler for this. - */ - if (!(ndm->ndm_state & NUD_PERMANENT)) { - pr_info("%s: FDB only supports static addresses\n", dev->name); - return err; - } - - if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) - err = dev_uc_del(dev, addr); - else if (is_multicast_ether_addr(addr)) - err = dev_mc_del(dev, addr); - - return err; -} - -#endif /* HAVE_FDB_OPS */ -#ifdef CONFIG_PCI_IOV -int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev) -{ - unsigned int vfs_assigned = 0; -#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED - int pos; - struct pci_dev *vfdev; - unsigned short dev_id; - - /* only search if we are a PF */ - if (!dev->is_physfn) - return 0; - - /* find SR-IOV capability */ - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); - if (!pos) - return 0; - - /* - * determine the device ID for the VFs, the vendor ID will be the - * same as the PF so there is no need to check for that one - */ - pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); - - /* loop through all the VFs to 
see if we own any that are assigned */ - vfdev = pci_get_device(dev->vendor, dev_id, NULL); - while (vfdev) { - /* - * It is considered assigned if it is a virtual function with - * our dev as the physical function and the assigned bit is set - */ - if (vfdev->is_virtfn && (vfdev->physfn == dev) && - (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) - vfs_assigned++; - - vfdev = pci_get_device(dev->vendor, dev_id, vfdev); - } - -#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ - return vfs_assigned; -} - -#endif /* CONFIG_PCI_IOV */ -#endif /* 3.10.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) -const unsigned char pcie_link_speed[] = { - PCI_SPEED_UNKNOWN, /* 0 */ - PCIE_SPEED_2_5GT, /* 1 */ - PCIE_SPEED_5_0GT, /* 2 */ - PCIE_SPEED_8_0GT, /* 3 */ - PCI_SPEED_UNKNOWN, /* 4 */ - PCI_SPEED_UNKNOWN, /* 5 */ - PCI_SPEED_UNKNOWN, /* 6 */ - PCI_SPEED_UNKNOWN, /* 7 */ - PCI_SPEED_UNKNOWN, /* 8 */ - PCI_SPEED_UNKNOWN, /* 9 */ - PCI_SPEED_UNKNOWN, /* A */ - PCI_SPEED_UNKNOWN, /* B */ - PCI_SPEED_UNKNOWN, /* C */ - PCI_SPEED_UNKNOWN, /* D */ - PCI_SPEED_UNKNOWN, /* E */ - PCI_SPEED_UNKNOWN /* F */ -}; - -int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, - enum pcie_link_width *width) -{ - int ret; - - *speed = PCI_SPEED_UNKNOWN; - *width = PCIE_LNK_WIDTH_UNKNOWN; - - while (dev) { - u16 lnksta; - enum pci_bus_speed next_speed; - enum pcie_link_width next_width; - - ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); - if (ret) - return ret; - - next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; - next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> - PCI_EXP_LNKSTA_NLW_SHIFT; - - if (next_speed < *speed) - *speed = next_speed; - - if (next_width < *width) - *width = next_width; - - dev = dev->bus->self; - } - - return 0; -} - -#endif - -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) -int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask) -{ 
- int err = dma_set_mask(dev, mask); - - if (!err) - /* coherent mask for the same size will always succeed if - * dma_set_mask does. However we store the error anyways, due - * to some kernels which use gcc's warn_unused_result on their - * definition of dma_set_coherent_mask. - */ - err = dma_set_coherent_mask(dev, mask); - return err; -} - -void __kc_netdev_rss_key_fill(void *buffer, size_t len) -{ - /* Set of random keys generated using kernel random number generator */ - static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, - 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, - 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, - 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, - 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, - 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, - 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; - - BUG_ON(len > NETDEV_RSS_KEY_LEN); - memcpy(buffer, seed, len); -} -#endif /* 3.13.0 */ - -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) -int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, - int minvec, int maxvec) -{ - int nvec = maxvec; - int rc; - - if (maxvec < minvec) - return -ERANGE; - - do { - rc = pci_enable_msix(dev, entries, nvec); - if (rc < 0) { - return rc; - } else if (rc > 0) { - if (rc < minvec) - return -ENOSPC; - nvec = rc; - } - } while (rc); - - return nvec; -} -#endif /* 3.14.0 */ - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)) -char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) -{ - size_t size; - char *buf; - - if (!s) - return NULL; - - size = strlen(s) + 1; - buf = devm_kzalloc(dev, size, gfp); - if (buf) - memcpy(buf, s, size); - return buf; -} -#endif /* 3.15.0 */ - -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) -#ifdef HAVE_SET_RX_MODE -#ifdef NETDEV_HW_ADDR_T_UNICAST -int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, - struct net_device *dev, - int (*sync)(struct net_device *, const unsigned char *), - int (*unsync)(struct 
net_device *, const unsigned char *)) -{ - struct netdev_hw_addr *ha, *tmp; - int err; - - /* first go through and flush out any stale entries */ - list_for_each_entry_safe(ha, tmp, &list->list, list) { -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) - if (!ha->synced || ha->refcount != 1) -#else - if (!ha->sync_cnt || ha->refcount != 1) -#endif - continue; - - if (unsync && unsync(dev, ha->addr)) - continue; - - list_del_rcu(&ha->list); - kfree_rcu(ha, rcu_head); - list->count--; - } - - /* go through and sync new entries to the list */ - list_for_each_entry_safe(ha, tmp, &list->list, list) { -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) - if (ha->synced) -#else - if (ha->sync_cnt) -#endif - continue; - - err = sync(dev, ha->addr); - if (err) - return err; -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) - ha->synced = true; -#else - ha->sync_cnt++; -#endif - ha->refcount++; - } - - return 0; -} - -void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, - struct net_device *dev, - int (*unsync)(struct net_device *, const unsigned char *)) -{ - struct netdev_hw_addr *ha, *tmp; - - list_for_each_entry_safe(ha, tmp, &list->list, list) { -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) - if (!ha->synced) -#else - if (!ha->sync_cnt) -#endif - continue; - - if (unsync && unsync(dev, ha->addr)) - continue; - -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) - ha->synced = false; -#else - ha->sync_cnt--; -#endif - if (--ha->refcount) - continue; - - list_del_rcu(&ha->list); - kfree_rcu(ha, rcu_head); - list->count--; - } -} - -#endif /* NETDEV_HW_ADDR_T_UNICAST */ -#ifndef NETDEV_HW_ADDR_T_MULTICAST -int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, - struct net_device *dev, - int (*sync)(struct net_device *, const unsigned char *), - int (*unsync)(struct net_device *, const unsigned char *)) -{ - struct dev_addr_list *da, **next = list; - int err; - - /* first go through and flush out any stale entries */ - while ((da = 
*next) != NULL) { - if (da->da_synced && da->da_users == 1) { - if (!unsync || !unsync(dev, da->da_addr)) { - *next = da->next; - kfree(da); - (*count)--; - continue; - } - } - next = &da->next; - } - - /* go through and sync new entries to the list */ - for (da = *list; da != NULL; da = da->next) { - if (da->da_synced) - continue; - - err = sync(dev, da->da_addr); - if (err) - return err; - - da->da_synced++; - da->da_users++; - } - - return 0; -} - -void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, - struct net_device *dev, - int (*unsync)(struct net_device *, const unsigned char *)) -{ - struct dev_addr_list *da; - - while ((da = *list) != NULL) { - if (da->da_synced) { - if (!unsync || !unsync(dev, da->da_addr)) { - da->da_synced--; - if (--da->da_users == 0) { - *list = da->next; - kfree(da); - (*count)--; - continue; - } - } - } - list = &da->next; - } -} -#endif /* NETDEV_HW_ADDR_T_MULTICAST */ -#endif /* HAVE_SET_RX_MODE */ -void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, - unsigned int gfp) -{ - void *p; - - p = devm_kzalloc(dev, len, gfp); - if (p) - memcpy(p, src, len); - - return p; -} -#endif /* 3.16.0 */ - -/******************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) ) -#endif /* 3.17.0 */ - -/******************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) -#ifndef NO_PTP_SUPPORT -static void __kc_sock_efree(struct sk_buff *skb) -{ - sock_put(skb->sk); -} - -struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb) -{ - struct sock *sk = skb->sk; - struct sk_buff *clone; - - if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) - return NULL; - - clone = skb_clone(skb, GFP_ATOMIC); - if (!clone) { - sock_put(sk); - return NULL; - } - - clone->sk = sk; - clone->destructor = __kc_sock_efree; - - return clone; -} - -void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, - 
struct skb_shared_hwtstamps *hwtstamps) -{ - struct sock_exterr_skb *serr; - struct sock *sk = skb->sk; - int err; - - sock_hold(sk); - - *skb_hwtstamps(skb) = *hwtstamps; - - serr = SKB_EXT_ERR(skb); - memset(serr, 0, sizeof(*serr)); - serr->ee.ee_errno = ENOMSG; - serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; - - err = sock_queue_err_skb(sk, skb); - if (err) - kfree_skb(skb); - - sock_put(sk); -} -#endif - -/* include headers needed for get_headlen function */ -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) -#include -#endif -#ifdef HAVE_SCTP -#include -#endif - -unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len) -{ - union { - unsigned char *network; - /* l2 headers */ - struct ethhdr *eth; - struct vlan_hdr *vlan; - /* l3 headers */ - struct iphdr *ipv4; - struct ipv6hdr *ipv6; - } hdr; - __be16 proto; - u8 nexthdr = 0; /* default to not TCP */ - u8 hlen; - - /* this should never happen, but better safe than sorry */ - if (max_len < ETH_HLEN) - return max_len; - - /* initialize network frame pointer */ - hdr.network = data; - - /* set first protocol and move network header forward */ - proto = hdr.eth->h_proto; - hdr.network += ETH_HLEN; - -again: - switch (proto) { - /* handle any vlan tag if present */ - case __constant_htons(ETH_P_8021AD): - case __constant_htons(ETH_P_8021Q): - if ((hdr.network - data) > (max_len - VLAN_HLEN)) - return max_len; - - proto = hdr.vlan->h_vlan_encapsulated_proto; - hdr.network += VLAN_HLEN; - goto again; - /* handle L3 protocols */ - case __constant_htons(ETH_P_IP): - if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) - return max_len; - - /* access ihl as a u8 to avoid unaligned access on ia64 */ - hlen = (hdr.network[0] & 0x0F) << 2; - - /* verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct iphdr)) - return hdr.network - data; - - /* record next protocol if header is present */ - if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) - nexthdr = hdr.ipv4->protocol; 
- - hdr.network += hlen; - break; -#ifdef NETIF_F_TSO6 - case __constant_htons(ETH_P_IPV6): - if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) - return max_len; - - /* record next protocol */ - nexthdr = hdr.ipv6->nexthdr; - hdr.network += sizeof(struct ipv6hdr); - break; -#endif /* NETIF_F_TSO6 */ -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) - case __constant_htons(ETH_P_FCOE): - hdr.network += FCOE_HEADER_LEN; - break; -#endif - default: - return hdr.network - data; - } - - /* finally sort out L4 */ - switch (nexthdr) { - case IPPROTO_TCP: - if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) - return max_len; - - /* access doff as a u8 to avoid unaligned access on ia64 */ - hdr.network += max_t(u8, sizeof(struct tcphdr), - (hdr.network[12] & 0xF0) >> 2); - - break; - case IPPROTO_UDP: - case IPPROTO_UDPLITE: - hdr.network += sizeof(struct udphdr); - break; -#ifdef HAVE_SCTP - case IPPROTO_SCTP: - hdr.network += sizeof(struct sctphdr); - break; -#endif - } - - /* - * If everything has gone correctly hdr.network should be the - * data section of the packet and will be the end of the header. - * If not then it probably represents the end of the last recognized - * header. 
- */ - return min_t(unsigned int, hdr.network - data, max_len); -} - -#endif /* < 3.18.0 */ - -/******************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) -#ifdef HAVE_NET_GET_RANDOM_ONCE -static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; - -void __kc_netdev_rss_key_fill(void *buffer, size_t len) -{ - BUG_ON(len > sizeof(__kc_netdev_rss_key)); - net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); - memcpy(buffer, __kc_netdev_rss_key, len); -} -#endif -#endif - -/******************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) ) -#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) -#ifdef CONFIG_SPARC -#include -#include -#endif -int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, - u8 *mac_addr __maybe_unused) -{ -#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)) && defined(CONFIG_OF) && \ - !defined(HAVE_STRUCT_DEVICE_OF_NODE) || !defined(CONFIG_OF)) && \ - !defined(CONFIG_SPARC)) - return -ENODEV; -#else - const unsigned char *addr; - struct device_node *dp; - - if (dev_is_pci(dev)) - dp = pci_device_to_OF_node(to_pci_dev(dev)); - else -#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF) - dp = dev->of_node; -#else - dp = NULL; -#endif - - addr = NULL; - if (dp) - addr = of_get_mac_address(dp); -#ifdef CONFIG_SPARC - /* Kernel hasn't implemented arch_get_platform_mac_address, but we - * should handle the SPARC case here since it was supported - * originally. This is replaced by arch_get_platform_mac_address() - * upstream. 
- */ - if (!addr) - addr = idprom->id_ethaddr; -#endif - if (!addr) - return -ENODEV; - - ether_addr_copy(mac_addr, addr); - return 0; -#endif -} -#endif /* !(RHEL_RELEASE >= 7.3) */ -#endif diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat.h b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat.h deleted file mode 100644 index b936fcb9d10b..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat.h +++ /dev/null @@ -1,5610 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _KCOMPAT_H_ -#define _KCOMPAT_H_ - -#ifndef LINUX_VERSION_CODE -#include -#else -#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) -#endif -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef NSEC_PER_MSEC -#define NSEC_PER_MSEC 1000000L -#endif -#include -/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ -#ifndef UTS_RELEASE -/* utsrelease.h changed locations in 2.6.33 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) -#include -#else -#include -#endif -#endif - -/* NAPI enable/disable flags here */ -#define NAPI - -#define adapter_struct ixgbe_adapter -#define adapter_q_vector ixgbe_q_vector - -/* and finally set defines so that the code sees the changes */ -#ifdef NAPI -#else -#endif /* NAPI */ - -/* Dynamic LTR and deeper C-State support disable/enable */ - -/* packet split disable/enable */ -#ifdef DISABLE_PACKET_SPLIT -#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT -#define CONFIG_IXGBE_DISABLE_PACKET_SPLIT -#endif -#endif /* DISABLE_PACKET_SPLIT */ - -/* MSI compatibility code for all kernels and drivers */ -#ifdef DISABLE_PCI_MSI -#undef CONFIG_PCI_MSI -#endif -#ifndef CONFIG_PCI_MSI -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) -struct msix_entry { - u16 vector; /* kernel uses to write allocated vector */ - u16 entry; /* driver uses to specify entry, OS writes */ -}; -#endif -#undef pci_enable_msi -#define pci_enable_msi(a) -ENOTSUPP -#undef pci_disable_msi -#define pci_disable_msi(a) do {} while (0) -#undef pci_enable_msix -#define pci_enable_msix(a, b, c) -ENOTSUPP -#undef pci_disable_msix -#define pci_disable_msix(a) do {} while (0) -#define msi_remove_pci_irq_vectors(a) do {} while 
(0) -#endif /* CONFIG_PCI_MSI */ -#ifdef DISABLE_PM -#undef CONFIG_PM -#endif - -#ifdef DISABLE_NET_POLL_CONTROLLER -#undef CONFIG_NET_POLL_CONTROLLER -#endif - -#ifndef PMSG_SUSPEND -#define PMSG_SUSPEND 3 -#endif - -/* generic boolean compatibility */ -#undef TRUE -#undef FALSE -#define TRUE true -#define FALSE false -#ifdef GCC_VERSION -#if ( GCC_VERSION < 3000 ) -#define _Bool char -#endif -#else -#define _Bool char -#endif - -#undef __always_unused -#define __always_unused __attribute__((__unused__)) - -#undef __maybe_unused -#define __maybe_unused __attribute__((__unused__)) - -/* kernels less than 2.4.14 don't have this */ -#ifndef ETH_P_8021Q -#define ETH_P_8021Q 0x8100 -#endif - -#ifndef module_param -#define module_param(v,t,p) MODULE_PARM(v, "i"); -#endif - -#ifndef DMA_64BIT_MASK -#define DMA_64BIT_MASK 0xffffffffffffffffULL -#endif - -#ifndef DMA_32BIT_MASK -#define DMA_32BIT_MASK 0x00000000ffffffffULL -#endif - -#ifndef PCI_CAP_ID_EXP -#define PCI_CAP_ID_EXP 0x10 -#endif - -#ifndef uninitialized_var -#define uninitialized_var(x) x = x -#endif - -#ifndef PCIE_LINK_STATE_L0S -#define PCIE_LINK_STATE_L0S 1 -#endif -#ifndef PCIE_LINK_STATE_L1 -#define PCIE_LINK_STATE_L1 2 -#endif - -#ifndef mmiowb -#ifdef CONFIG_IA64 -#define mmiowb() asm volatile ("mf.a" ::: "memory") -#else -#define mmiowb() -#endif -#endif - -#ifndef SET_NETDEV_DEV -#define SET_NETDEV_DEV(net, pdev) -#endif - -#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) -#define free_netdev(x) kfree(x) -#endif - -#ifdef HAVE_POLL_CONTROLLER -#define CONFIG_NET_POLL_CONTROLLER -#endif - -#ifndef SKB_DATAREF_SHIFT -/* if we do not have the infrastructure to detect if skb_header is cloned - just return false in all cases */ -#define skb_header_cloned(x) 0 -#endif - -#ifndef NETIF_F_GSO -#define gso_size tso_size -#define gso_segs tso_segs -#endif - -#ifndef NETIF_F_GRO -#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ - vlan_hwaccel_receive_skb(_skb, _vlgrp, 
_vlan) -#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) -#endif - -#ifndef NETIF_F_SCTP_CSUM -#define NETIF_F_SCTP_CSUM 0 -#endif - -#ifndef NETIF_F_LRO -#define NETIF_F_LRO (1 << 15) -#endif - -#ifndef NETIF_F_NTUPLE -#define NETIF_F_NTUPLE (1 << 27) -#endif - -#ifndef NETIF_F_ALL_FCOE -#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ - NETIF_F_FSO) -#endif - -#ifndef IPPROTO_SCTP -#define IPPROTO_SCTP 132 -#endif - -#ifndef IPPROTO_UDPLITE -#define IPPROTO_UDPLITE 136 -#endif - -#ifndef CHECKSUM_PARTIAL -#define CHECKSUM_PARTIAL CHECKSUM_HW -#define CHECKSUM_COMPLETE CHECKSUM_HW -#endif - -#ifndef __read_mostly -#define __read_mostly -#endif - -#ifndef MII_RESV1 -#define MII_RESV1 0x17 /* Reserved... */ -#endif - -#ifndef unlikely -#define unlikely(_x) _x -#define likely(_x) _x -#endif - -#ifndef WARN_ON -#define WARN_ON(x) -#endif - -#ifndef PCI_DEVICE -#define PCI_DEVICE(vend,dev) \ - .vendor = (vend), .device = (dev), \ - .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID -#endif - -#ifndef node_online -#define node_online(node) ((node) == 0) -#endif - -#ifndef num_online_cpus -#define num_online_cpus() smp_num_cpus -#endif - -#ifndef cpu_online -#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) -#endif - -#ifndef _LINUX_RANDOM_H -#include -#endif - -#ifndef DECLARE_BITMAP -#ifndef BITS_TO_LONGS -#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) -#endif -#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] -#endif - -#ifndef VLAN_HLEN -#define VLAN_HLEN 4 -#endif - -#ifndef VLAN_ETH_HLEN -#define VLAN_ETH_HLEN 18 -#endif - -#ifndef VLAN_ETH_FRAME_LEN -#define VLAN_ETH_FRAME_LEN 1518 -#endif - -#ifndef DCA_GET_TAG_TWO_ARGS -#define dca3_get_tag(a,b) dca_get_tag(b) -#endif - -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -#if defined(__i386__) || defined(__x86_64__) -#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -#endif -#endif - -/* taken from 2.6.24 definition in linux/kernel.h */ 
-#ifndef IS_ALIGNED -#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) -#endif - -#ifdef IS_ENABLED -#undef IS_ENABLED -#undef __ARG_PLACEHOLDER_1 -#undef config_enabled -#undef _config_enabled -#undef __config_enabled -#undef ___config_enabled -#endif - -#define __ARG_PLACEHOLDER_1 0, -#define config_enabled(cfg) _config_enabled(cfg) -#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) -#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) -#define ___config_enabled(__ignored, val, ...) val - -#define IS_ENABLED(option) \ - (config_enabled(option) || config_enabled(option##_MODULE)) - -#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) -struct _kc_vlan_ethhdr { - unsigned char h_dest[ETH_ALEN]; - unsigned char h_source[ETH_ALEN]; - __be16 h_vlan_proto; - __be16 h_vlan_TCI; - __be16 h_vlan_encapsulated_proto; -}; -#define vlan_ethhdr _kc_vlan_ethhdr -struct _kc_vlan_hdr { - __be16 h_vlan_TCI; - __be16 h_vlan_encapsulated_proto; -}; -#define vlan_hdr _kc_vlan_hdr -#define vlan_tx_tag_present(_skb) 0 -#define vlan_tx_tag_get(_skb) 0 -#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ - -#ifndef VLAN_PRIO_SHIFT -#define VLAN_PRIO_SHIFT 13 -#endif - -#ifndef PCI_EXP_LNKSTA_CLS_2_5GB -#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 -#endif - -#ifndef PCI_EXP_LNKSTA_CLS_5_0GB -#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 -#endif - -#ifndef PCI_EXP_LNKSTA_CLS_8_0GB -#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 -#endif - -#ifndef PCI_EXP_LNKSTA_NLW_X1 -#define PCI_EXP_LNKSTA_NLW_X1 0x0010 -#endif - -#ifndef PCI_EXP_LNKSTA_NLW_X2 -#define PCI_EXP_LNKSTA_NLW_X2 0x0020 -#endif - -#ifndef PCI_EXP_LNKSTA_NLW_X4 -#define PCI_EXP_LNKSTA_NLW_X4 0x0040 -#endif - -#ifndef PCI_EXP_LNKSTA_NLW_X8 -#define PCI_EXP_LNKSTA_NLW_X8 0x0080 -#endif - -#ifndef __GFP_COLD -#define __GFP_COLD 0 -#endif - -#ifndef __GFP_COMP -#define __GFP_COMP 0 -#endif - -#ifndef IP_OFFSET -#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ -#endif 
- -/*****************************************************************************/ -/* Installations with ethtool version without eeprom, adapter id, or statistics - * support */ - -#ifndef ETH_GSTRING_LEN -#define ETH_GSTRING_LEN 32 -#endif - -#ifndef ETHTOOL_GSTATS -#define ETHTOOL_GSTATS 0x1d -#undef ethtool_drvinfo -#define ethtool_drvinfo k_ethtool_drvinfo -struct k_ethtool_drvinfo { - u32 cmd; - char driver[32]; - char version[32]; - char fw_version[32]; - char bus_info[32]; - char reserved1[32]; - char reserved2[16]; - u32 n_stats; - u32 testinfo_len; - u32 eedump_len; - u32 regdump_len; -}; - -struct ethtool_stats { - u32 cmd; - u32 n_stats; - u64 data[0]; -}; -#endif /* ETHTOOL_GSTATS */ - -#ifndef ETHTOOL_PHYS_ID -#define ETHTOOL_PHYS_ID 0x1c -#endif /* ETHTOOL_PHYS_ID */ - -#ifndef ETHTOOL_GSTRINGS -#define ETHTOOL_GSTRINGS 0x1b -enum ethtool_stringset { - ETH_SS_TEST = 0, - ETH_SS_STATS, -}; -struct ethtool_gstrings { - u32 cmd; /* ETHTOOL_GSTRINGS */ - u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ - u32 len; /* number of strings in the string set */ - u8 data[0]; -}; -#endif /* ETHTOOL_GSTRINGS */ - -#ifndef ETHTOOL_TEST -#define ETHTOOL_TEST 0x1a -enum ethtool_test_flags { - ETH_TEST_FL_OFFLINE = (1 << 0), - ETH_TEST_FL_FAILED = (1 << 1), -}; -struct ethtool_test { - u32 cmd; - u32 flags; - u32 reserved; - u32 len; - u64 data[0]; -}; -#endif /* ETHTOOL_TEST */ - -#ifndef ETHTOOL_GEEPROM -#define ETHTOOL_GEEPROM 0xb -#undef ETHTOOL_GREGS -struct ethtool_eeprom { - u32 cmd; - u32 magic; - u32 offset; - u32 len; - u8 data[0]; -}; - -struct ethtool_value { - u32 cmd; - u32 data; -}; -#endif /* ETHTOOL_GEEPROM */ - -#ifndef ETHTOOL_GLINK -#define ETHTOOL_GLINK 0xa -#endif /* ETHTOOL_GLINK */ - -#ifndef ETHTOOL_GWOL -#define ETHTOOL_GWOL 0x5 -#define ETHTOOL_SWOL 0x6 -#define SOPASS_MAX 6 -struct ethtool_wolinfo { - u32 cmd; - u32 supported; - u32 wolopts; - u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ -}; -#endif /* ETHTOOL_GWOL */ - -#ifndef ETHTOOL_GREGS -#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ -#define ethtool_regs _kc_ethtool_regs -/* for passing big chunks of data */ -struct _kc_ethtool_regs { - u32 cmd; - u32 version; /* driver-specific, indicates different chips/revs */ - u32 len; /* bytes */ - u8 data[0]; -}; -#endif /* ETHTOOL_GREGS */ - -#ifndef ETHTOOL_GMSGLVL -#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ -#endif -#ifndef ETHTOOL_SMSGLVL -#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. 
*/ -#endif -#ifndef ETHTOOL_NWAY_RST -#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ -#endif -#ifndef ETHTOOL_GLINK -#define ETHTOOL_GLINK 0x0000000a /* Get link status */ -#endif -#ifndef ETHTOOL_GEEPROM -#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ -#endif -#ifndef ETHTOOL_SEEPROM -#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ -#endif -#ifndef ETHTOOL_GCOALESCE -#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ -/* for configuring coalescing parameters of chip */ -#define ethtool_coalesce _kc_ethtool_coalesce -struct _kc_ethtool_coalesce { - u32 cmd; /* ETHTOOL_{G,S}COALESCE */ - - /* How many usecs to delay an RX interrupt after - * a packet arrives. If 0, only rx_max_coalesced_frames - * is used. - */ - u32 rx_coalesce_usecs; - - /* How many packets to delay an RX interrupt after - * a packet arrives. If 0, only rx_coalesce_usecs is - * used. It is illegal to set both usecs and max frames - * to zero as this would cause RX interrupts to never be - * generated. - */ - u32 rx_max_coalesced_frames; - - /* Same as above two parameters, except that these values - * apply while an IRQ is being serviced by the host. Not - * all cards support this feature and the values are ignored - * in that case. - */ - u32 rx_coalesce_usecs_irq; - u32 rx_max_coalesced_frames_irq; - - /* How many usecs to delay a TX interrupt after - * a packet is sent. If 0, only tx_max_coalesced_frames - * is used. - */ - u32 tx_coalesce_usecs; - - /* How many packets to delay a TX interrupt after - * a packet is sent. If 0, only tx_coalesce_usecs is - * used. It is illegal to set both usecs and max frames - * to zero as this would cause TX interrupts to never be - * generated. - */ - u32 tx_max_coalesced_frames; - - /* Same as above two parameters, except that these values - * apply while an IRQ is being serviced by the host. Not - * all cards support this feature and the values are ignored - * in that case. 
- */ - u32 tx_coalesce_usecs_irq; - u32 tx_max_coalesced_frames_irq; - - /* How many usecs to delay in-memory statistics - * block updates. Some drivers do not have an in-memory - * statistic block, and in such cases this value is ignored. - * This value must not be zero. - */ - u32 stats_block_coalesce_usecs; - - /* Adaptive RX/TX coalescing is an algorithm implemented by - * some drivers to improve latency under low packet rates and - * improve throughput under high packet rates. Some drivers - * only implement one of RX or TX adaptive coalescing. Anything - * not implemented by the driver causes these values to be - * silently ignored. - */ - u32 use_adaptive_rx_coalesce; - u32 use_adaptive_tx_coalesce; - - /* When the packet rate (measured in packets per second) - * is below pkt_rate_low, the {rx,tx}_*_low parameters are - * used. - */ - u32 pkt_rate_low; - u32 rx_coalesce_usecs_low; - u32 rx_max_coalesced_frames_low; - u32 tx_coalesce_usecs_low; - u32 tx_max_coalesced_frames_low; - - /* When the packet rate is below pkt_rate_high but above - * pkt_rate_low (both measured in packets per second) the - * normal {rx,tx}_* coalescing parameters are used. - */ - - /* When the packet rate is (measured in packets per second) - * is above pkt_rate_high, the {rx,tx}_*_high parameters are - * used. - */ - u32 pkt_rate_high; - u32 rx_coalesce_usecs_high; - u32 rx_max_coalesced_frames_high; - u32 tx_coalesce_usecs_high; - u32 tx_max_coalesced_frames_high; - - /* How often to do adaptive coalescing packet rate sampling, - * measured in seconds. Must not be zero. - */ - u32 rate_sample_interval; -}; -#endif /* ETHTOOL_GCOALESCE */ - -#ifndef ETHTOOL_SCOALESCE -#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. 
*/ -#endif -#ifndef ETHTOOL_GRINGPARAM -#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ -/* for configuring RX/TX ring parameters */ -#define ethtool_ringparam _kc_ethtool_ringparam -struct _kc_ethtool_ringparam { - u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ - - /* Read only attributes. These indicate the maximum number - * of pending RX/TX ring entries the driver will allow the - * user to set. - */ - u32 rx_max_pending; - u32 rx_mini_max_pending; - u32 rx_jumbo_max_pending; - u32 tx_max_pending; - - /* Values changeable by the user. The valid values are - * in the range 1 to the "*_max_pending" counterpart above. - */ - u32 rx_pending; - u32 rx_mini_pending; - u32 rx_jumbo_pending; - u32 tx_pending; -}; -#endif /* ETHTOOL_GRINGPARAM */ - -#ifndef ETHTOOL_SRINGPARAM -#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ -#endif -#ifndef ETHTOOL_GPAUSEPARAM -#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ -/* for configuring link flow control parameters */ -#define ethtool_pauseparam _kc_ethtool_pauseparam -struct _kc_ethtool_pauseparam { - u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ - - /* If the link is being auto-negotiated (via ethtool_cmd.autoneg - * being true) the user may set 'autoneg' here non-zero to have the - * pause parameters be auto-negotiated too. In such a case, the - * {rx,tx}_pause values below determine what capabilities are - * advertised. - * - * If 'autoneg' is zero or the link is not being auto-negotiated, - * then {rx,tx}_pause force the driver to use/not-use pause - * flow control. - */ - u32 autoneg; - u32 rx_pause; - u32 tx_pause; -}; -#endif /* ETHTOOL_GPAUSEPARAM */ - -#ifndef ETHTOOL_SPAUSEPARAM -#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. 
*/ -#endif -#ifndef ETHTOOL_GRXCSUM -#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ -#endif -#ifndef ETHTOOL_SRXCSUM -#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ -#endif -#ifndef ETHTOOL_GTXCSUM -#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ -#endif -#ifndef ETHTOOL_STXCSUM -#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ -#endif -#ifndef ETHTOOL_GSG -#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable - * (ethtool_value) */ -#endif -#ifndef ETHTOOL_SSG -#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable - * (ethtool_value). */ -#endif -#ifndef ETHTOOL_TEST -#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ -#endif -#ifndef ETHTOOL_GSTRINGS -#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ -#endif -#ifndef ETHTOOL_PHYS_ID -#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ -#endif -#ifndef ETHTOOL_GSTATS -#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ -#endif -#ifndef ETHTOOL_GTSO -#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ -#endif -#ifndef ETHTOOL_STSO -#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ -#endif - -#ifndef ETHTOOL_BUSINFO_LEN -#define ETHTOOL_BUSINFO_LEN 32 -#endif - -#ifndef SPEED_2500 -#define SPEED_2500 2500 -#endif -#ifndef SPEED_5000 -#define SPEED_5000 5000 -#endif -#ifndef SPEED_25000 -#define SPEED_25000 25000 -#endif -#ifndef SPEED_50000 -#define SPEED_50000 50000 -#endif -#ifndef SPEED_100000 -#define SPEED_100000 100000 -#endif - -#ifndef RHEL_RELEASE_VERSION -#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) -#endif -#ifndef AX_RELEASE_VERSION -#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) -#endif - -#ifndef AX_RELEASE_CODE -#define AX_RELEASE_CODE 0 -#endif - -#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0)) -#define RHEL_RELEASE_CODE 
RHEL_RELEASE_VERSION(5,0) -#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) -#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) -#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) -#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) -#endif - -#ifndef RHEL_RELEASE_CODE -/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ -#define RHEL_RELEASE_CODE 0 -#endif - -/* RHEL 7 didn't backport the parameter change in - * create_singlethread_workqueue. - * If/when RH corrects this we will want to tighten up the version check. - */ -#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) -#undef create_singlethread_workqueue -#define create_singlethread_workqueue(name) \ - alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) -#endif - -/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find - * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new - * enough versions of Ubuntu. Otherwise you can simply see it in the output of - * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in - * the linux-source package, but in the linux-headers package. It begins to - * appear in later releases of 14.04 and 14.10. - * - * Ex: - * - * $uname -r - * 3.13.0-45-generic - * ABI is 45 - * - * - * $uname -r - * 3.16.0-23-generic - * ABI is 23 - */ -#ifndef UTS_UBUNTU_RELEASE_ABI -#define UTS_UBUNTU_RELEASE_ABI 0 -#define UBUNTU_VERSION_CODE 0 -#else -/* Ubuntu does not provide actual release version macro, so we use the kernel - * version plus the ABI to generate a unique version code specific to Ubuntu. - * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to - * ignore differences in sublevel which are not important since we have the - * ABI value. Otherwise, it becomes impossible to correlate ABI to version for - * ordering checks. 
- */ -#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \ - UTS_UBUNTU_RELEASE_ABI) - -#if UTS_UBUNTU_RELEASE_ABI > 255 -#error UTS_UBUNTU_RELEASE_ABI is too large... -#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */ - -#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) ) -/* Our version code scheme does not make sense for non 3.x or newer kernels, - * and we have no support in kcompat for this scenario. Thus, treat this as a - * non-Ubuntu kernel. Possibly might be better to error here. - */ -#define UTS_UBUNTU_RELEASE_ABI 0 -#define UBUNTU_VERSION_CODE 0 -#endif - -#endif - -/* Note that the 3rd digit is always zero, and will be ignored. This is - * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux - * version codes are 3 digit, this 3rd digit is superseded by the ABI value. - */ -#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d)) - -/* SuSE version macros are the same as Linux kernel version macro */ -#ifndef SLE_VERSION -#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c) -#endif -#define SLE_LOCALVERSION(a,b,c) KERNEL_VERSION(a,b,c) -#ifdef CONFIG_SUSE_KERNEL -#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) -/* SLES11 GA is 2.6.27 based */ -#define SLE_VERSION_CODE SLE_VERSION(11,0,0) -#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) -/* SLES11 SP1 is 2.6.32 based */ -#define SLE_VERSION_CODE SLE_VERSION(11,1,0) -#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) ) -/* SLES11 SP2 GA is 3.0.13-0.27 */ -#define SLE_VERSION_CODE SLE_VERSION(11,2,0) -#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76))) -/* SLES11 SP3 GA is 3.0.76-0.11 */ -#define SLE_VERSION_CODE SLE_VERSION(11,3,0) -#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101)) - #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0)) - /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */ - #define SLE_VERSION_CODE SLE_VERSION(11,2,0) - #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0)) - /* most SLES11sp3 update kernels */ - #define 
SLE_VERSION_CODE SLE_VERSION(11,3,0) - #else - /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */ - #define SLE_VERSION_CODE SLE_VERSION(11,4,0) - #endif -#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28)) -/* SLES12 GA is 3.12.28-4 - * kernel updates 3.12.xx-<33 through 52>[.yy] */ -#define SLE_VERSION_CODE SLE_VERSION(12,0,0) -#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49)) -/* SLES12 SP1 GA is 3.12.49-11 - * updates 3.12.xx-60.yy where xx={51..} */ -#define SLE_VERSION_CODE SLE_VERSION(12,1,0) -#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,4,21)) -/* SLES12 SP2 GA is 4.4.21-69 */ -#define SLE_VERSION_CODE SLE_VERSION(12,2,0) -/* SLES12 SP3 Beta3 is 4.4.68-2 */ -#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,68)) -#define SLE_VERSION_CODE SLE_VERSION(12,3,0) -/* new SLES kernels must be added here with >= based on kernel - * the idea is to order from newest to oldest and just catch all - * of them using the >= - */ -#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ -#endif /* CONFIG_SUSE_KERNEL */ -#ifndef SLE_VERSION_CODE -#define SLE_VERSION_CODE 0 -#endif /* SLE_VERSION_CODE */ -#ifndef SLE_LOCALVERSION_CODE -#define SLE_LOCALVERSION_CODE 0 -#endif /* SLE_LOCALVERSION_CODE */ - -#ifdef __KLOCWORK__ -/* The following are not compiled into the binary driver; they are here - * only to tune Klocwork scans to workaround false-positive issues. 
- */ -#ifdef ARRAY_SIZE -#undef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - -#define memcpy(dest, src, len) memcpy_s(dest, len, src, len) - -static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old; - unsigned long flags = 0; - - _atomic_spin_lock_irqsave(p, flags); - old = *p; - *p = old & ~mask; - _atomic_spin_unlock_irqrestore(p, flags); - - return (old & mask) != 0; -} -#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr) - -static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old; - unsigned long flags = 0; - - _atomic_spin_lock_irqsave(p, flags); - old = *p; - *p = old | mask; - _atomic_spin_unlock_irqrestore(p, flags); - - return (old & mask) != 0; -} -#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr) - -#ifdef CONFIG_DYNAMIC_DEBUG -#undef dev_dbg -#define dev_dbg(dev, format, arg...) 
dev_printk(KERN_DEBUG, dev, format, ##arg) -#endif /* CONFIG_DYNAMIC_DEBUG */ - -#endif /* __KLOCWORK__ */ - -/*****************************************************************************/ -/* 2.4.3 => 2.4.0 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) - -/**************************************/ -/* PCI DRIVER API */ - -#ifndef pci_set_dma_mask -#define pci_set_dma_mask _kc_pci_set_dma_mask -extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask); -#endif - -#ifndef pci_request_regions -#define pci_request_regions _kc_pci_request_regions -extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name); -#endif - -#ifndef pci_release_regions -#define pci_release_regions _kc_pci_release_regions -extern void _kc_pci_release_regions(struct pci_dev *pdev); -#endif - -/**************************************/ -/* NETWORK DRIVER API */ - -#ifndef alloc_etherdev -#define alloc_etherdev _kc_alloc_etherdev -extern struct net_device * _kc_alloc_etherdev(int sizeof_priv); -#endif - -#ifndef is_valid_ether_addr -#define is_valid_ether_addr _kc_is_valid_ether_addr -extern int _kc_is_valid_ether_addr(u8 *addr); -#endif - -/**************************************/ -/* MISCELLANEOUS */ - -#ifndef INIT_TQUEUE -#define INIT_TQUEUE(_tq, _routine, _data) \ - do { \ - INIT_LIST_HEAD(&(_tq)->list); \ - (_tq)->sync = 0; \ - (_tq)->routine = _routine; \ - (_tq)->data = _data; \ - } while (0) -#endif - -#endif /* 2.4.3 => 2.4.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) ) -/* Generic MII registers. 
*/ -#define MII_BMCR 0x00 /* Basic mode control register */ -#define MII_BMSR 0x01 /* Basic mode status register */ -#define MII_PHYSID1 0x02 /* PHYS ID 1 */ -#define MII_PHYSID2 0x03 /* PHYS ID 2 */ -#define MII_ADVERTISE 0x04 /* Advertisement control reg */ -#define MII_LPA 0x05 /* Link partner ability reg */ -#define MII_EXPANSION 0x06 /* Expansion register */ -/* Basic mode control register. */ -#define BMCR_FULLDPLX 0x0100 /* Full duplex */ -#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ -/* Basic mode status register. */ -#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ -#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ -#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ -#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ -#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ -#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */ -/* Advertisement control register. */ -#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ -#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ -#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ -#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ -#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ -#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \ - ADVERTISE_100HALF | ADVERTISE_100FULL) -/* Expansion register for auto-negotiation. 
*/ -#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ -#endif - -/*****************************************************************************/ -/* 2.4.6 => 2.4.3 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) - -#ifndef pci_set_power_state -#define pci_set_power_state _kc_pci_set_power_state -extern int _kc_pci_set_power_state(struct pci_dev *dev, int state); -#endif - -#ifndef pci_enable_wake -#define pci_enable_wake _kc_pci_enable_wake -extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); -#endif - -#ifndef pci_disable_device -#define pci_disable_device _kc_pci_disable_device -extern void _kc_pci_disable_device(struct pci_dev *pdev); -#endif - -/* PCI PM entry point syntax changed, so don't support suspend/resume */ -#undef CONFIG_PM - -#endif /* 2.4.6 => 2.4.3 */ - -#ifndef HAVE_PCI_SET_MWI -#define pci_set_mwi(X) pci_write_config_word(X, \ - PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \ - PCI_COMMAND_INVALIDATE); -#define pci_clear_mwi(X) pci_write_config_word(X, \ - PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \ - ~PCI_COMMAND_INVALIDATE); -#endif - -/*****************************************************************************/ -/* 2.4.10 => 2.4.9 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) ) - -/**************************************/ -/* MODULE API */ - -#ifndef MODULE_LICENSE - #define MODULE_LICENSE(X) -#endif - -/**************************************/ -/* OTHER */ - -#undef min -#define min(x,y) ({ \ - const typeof(x) _x = (x); \ - const typeof(y) _y = (y); \ - (void) (&_x == &_y); \ - _x < _y ? _x : _y; }) - -#undef max -#define max(x,y) ({ \ - const typeof(x) _x = (x); \ - const typeof(y) _y = (y); \ - (void) (&_x == &_y); \ - _x > _y ? _x : _y; }) - -#define min_t(type,x,y) ({ \ - type _x = (x); \ - type _y = (y); \ - _x < _y ? _x : _y; }) - -#define max_t(type,x,y) ({ \ - type _x = (x); \ - type _y = (y); \ - _x > _y ? 
_x : _y; }) - -#ifndef list_for_each_safe -#define list_for_each_safe(pos, n, head) \ - for (pos = (head)->next, n = pos->next; pos != (head); \ - pos = n, n = pos->next) -#endif - -#ifndef ____cacheline_aligned_in_smp -#ifdef CONFIG_SMP -#define ____cacheline_aligned_in_smp ____cacheline_aligned -#else -#define ____cacheline_aligned_in_smp -#endif /* CONFIG_SMP */ -#endif - -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) -extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); -#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args) -extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); -#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) -#else /* 2.4.8 => 2.4.9 */ -extern int snprintf(char * buf, size_t size, const char *fmt, ...); -extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); -#endif -#endif /* 2.4.10 -> 2.4.6 */ - - -/*****************************************************************************/ -/* 2.4.12 => 2.4.10 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) ) -#ifndef HAVE_NETIF_MSG -#define HAVE_NETIF_MSG 1 -enum { - NETIF_MSG_DRV = 0x0001, - NETIF_MSG_PROBE = 0x0002, - NETIF_MSG_LINK = 0x0004, - NETIF_MSG_TIMER = 0x0008, - NETIF_MSG_IFDOWN = 0x0010, - NETIF_MSG_IFUP = 0x0020, - NETIF_MSG_RX_ERR = 0x0040, - NETIF_MSG_TX_ERR = 0x0080, - NETIF_MSG_TX_QUEUED = 0x0100, - NETIF_MSG_INTR = 0x0200, - NETIF_MSG_TX_DONE = 0x0400, - NETIF_MSG_RX_STATUS = 0x0800, - NETIF_MSG_PKTDATA = 0x1000, - NETIF_MSG_HW = 0x2000, - NETIF_MSG_WOL = 0x4000, -}; - -#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) -#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) -#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) -#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) -#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) -#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) -#define 
netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) -#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) -#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) -#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) -#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) -#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) -#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) -#endif /* !HAVE_NETIF_MSG */ -#endif /* 2.4.12 => 2.4.10 */ - -/*****************************************************************************/ -/* 2.4.13 => 2.4.12 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) - -/**************************************/ -/* PCI DMA MAPPING */ - -#ifndef virt_to_page - #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) -#endif - -#ifndef pci_map_page -#define pci_map_page _kc_pci_map_page -extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); -#endif - -#ifndef pci_unmap_page -#define pci_unmap_page _kc_pci_unmap_page -extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); -#endif - -/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ - -#undef DMA_32BIT_MASK -#define DMA_32BIT_MASK 0xffffffff -#undef DMA_64BIT_MASK -#define DMA_64BIT_MASK 0xffffffff - -/**************************************/ -/* OTHER */ - -#ifndef cpu_relax -#define cpu_relax() rep_nop() -#endif - -struct vlan_ethhdr { - unsigned char h_dest[ETH_ALEN]; - unsigned char h_source[ETH_ALEN]; - unsigned short h_vlan_proto; - unsigned short h_vlan_TCI; - unsigned short h_vlan_encapsulated_proto; -}; -#endif /* 2.4.13 => 2.4.12 */ - -/*****************************************************************************/ -/* 2.4.17 => 2.4.12 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) ) - -#ifndef __devexit_p - #define __devexit_p(x) &(x) -#endif - -#endif 
/* 2.4.17 => 2.4.13 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) ) -#define NETIF_MSG_HW 0x2000 -#define NETIF_MSG_WOL 0x4000 - -#ifndef netif_msg_hw -#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) -#endif -#ifndef netif_msg_wol -#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) -#endif -#endif /* 2.4.18 */ - -/*****************************************************************************/ - -/*****************************************************************************/ -/* 2.4.20 => 2.4.19 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) - -/* we won't support NAPI on less than 2.4.20 */ -#ifdef NAPI -#undef NAPI -#endif - -#endif /* 2.4.20 => 2.4.19 */ - -/*****************************************************************************/ -/* 2.4.22 => 2.4.17 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) -#define pci_name(x) ((x)->slot_name) - -#ifndef SUPPORTED_10000baseT_Full -#define SUPPORTED_10000baseT_Full (1 << 12) -#endif -#ifndef ADVERTISED_10000baseT_Full -#define ADVERTISED_10000baseT_Full (1 << 12) -#endif -#endif - -/*****************************************************************************/ -/* 2.4.22 => 2.4.17 */ - -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) -#endif - -/*****************************************************************************/ -/*****************************************************************************/ -/* 2.4.23 => 2.4.22 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) -/*****************************************************************************/ -#ifdef NAPI -#ifndef netif_poll_disable -#define netif_poll_disable(x) _kc_netif_poll_disable(x) -static inline void _kc_netif_poll_disable(struct net_device *netdev) -{ - while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { - /* No hurry */ - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); - } -} -#endif 
-#ifndef netif_poll_enable -#define netif_poll_enable(x) _kc_netif_poll_enable(x) -static inline void _kc_netif_poll_enable(struct net_device *netdev) -{ - clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); -} -#endif -#endif /* NAPI */ -#ifndef netif_tx_disable -#define netif_tx_disable(x) _kc_netif_tx_disable(x) -static inline void _kc_netif_tx_disable(struct net_device *dev) -{ - spin_lock_bh(&dev->xmit_lock); - netif_stop_queue(dev); - spin_unlock_bh(&dev->xmit_lock); -} -#endif -#else /* 2.4.23 => 2.4.22 */ -#define HAVE_SCTP -#endif /* 2.4.23 => 2.4.22 */ - -/*****************************************************************************/ -/* 2.6.4 => 2.6.0 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \ - ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ - LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ) -#define ETHTOOL_OPS_COMPAT -#endif /* 2.6.4 => 2.6.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) -#define __user -#endif /* < 2.4.27 */ - -/*****************************************************************************/ -/* 2.5.71 => 2.4.x */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) -#define sk_protocol protocol -#define pci_get_device pci_find_device -#endif /* 2.5.70 => 2.4.x */ - -/*****************************************************************************/ -/* < 2.4.27 or 2.6.0 <= 2.6.5 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \ - ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ - LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ) - -#ifndef netif_msg_init -#define netif_msg_init _kc_netif_msg_init -static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits) -{ - /* use default */ - if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) - return default_msg_enable_bits; - if (debug_value == 0) /* no output */ - return 0; - /* set low N bits */ - return (1 << debug_value) -1; -} -#endif - -#endif /* < 
2.4.27 or 2.6.0 <= 2.6.5 */ -/*****************************************************************************/ -#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \ - (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \ - ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ))) -#define netdev_priv(x) x->priv -#endif - -/*****************************************************************************/ -/* <= 2.5.0 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) ) -#include -#undef pci_register_driver -#define pci_register_driver pci_module_init - -/* - * Most of the dma compat code is copied/modifed from the 2.4.37 - * /include/linux/libata-compat.h header file - */ -/* These definitions mirror those in pci.h, so they can be used - * interchangeably with their PCI_ counterparts */ -enum dma_data_direction { - DMA_BIDIRECTIONAL = 0, - DMA_TO_DEVICE = 1, - DMA_FROM_DEVICE = 2, - DMA_NONE = 3, -}; - -struct device { - struct pci_dev pdev; -}; - -static inline struct pci_dev *to_pci_dev (struct device *dev) -{ - return (struct pci_dev *) dev; -} -static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) -{ - return (struct device *) pdev; -} -#define pdev_printk(lvl, pdev, fmt, args...) \ - printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) -#define dev_err(dev, fmt, args...) \ - pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) -#define dev_info(dev, fmt, args...) \ - pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) -#define dev_warn(dev, fmt, args...) \ - pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) -#define dev_notice(dev, fmt, args...) \ - pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args) -#define dev_dbg(dev, fmt, args...) \ - pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args) - -/* NOTE: dangerous! 
we ignore the 'gfp' argument */ -#define dma_alloc_coherent(dev,sz,dma,gfp) \ - pci_alloc_consistent(to_pci_dev(dev),(sz),(dma)) -#define dma_free_coherent(dev,sz,addr,dma_addr) \ - pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr)) - -#define dma_map_page(dev,a,b,c,d) \ - pci_map_page(to_pci_dev(dev),(a),(b),(c),(d)) -#define dma_unmap_page(dev,a,b,c) \ - pci_unmap_page(to_pci_dev(dev),(a),(b),(c)) - -#define dma_map_single(dev,a,b,c) \ - pci_map_single(to_pci_dev(dev),(a),(b),(c)) -#define dma_unmap_single(dev,a,b,c) \ - pci_unmap_single(to_pci_dev(dev),(a),(b),(c)) - -#define dma_map_sg(dev, sg, nents, dir) \ - pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir) -#define dma_unmap_sg(dev, sg, nents, dir) \ - pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir) - -#define dma_sync_single(dev,a,b,c) \ - pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c)) - -/* for range just sync everything, that's all the pci API can do */ -#define dma_sync_single_range(dev,addr,off,sz,dir) \ - pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir)) - -#define dma_set_mask(dev,mask) \ - pci_set_dma_mask(to_pci_dev(dev),(mask)) - -/* hlist_* code - double linked lists */ -struct hlist_head { - struct hlist_node *first; -}; - -struct hlist_node { - struct hlist_node *next, **pprev; -}; - -static inline void __hlist_del(struct hlist_node *n) -{ - struct hlist_node *next = n->next; - struct hlist_node **pprev = n->pprev; - *pprev = next; - if (next) - next->pprev = pprev; -} - -static inline void hlist_del(struct hlist_node *n) -{ - __hlist_del(n); - n->next = NULL; - n->pprev = NULL; -} - -static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) -{ - struct hlist_node *first = h->first; - n->next = first; - if (first) - first->pprev = &n->next; - h->first = n; - n->pprev = &h->first; -} - -static inline int hlist_empty(const struct hlist_head *h) -{ - return !h->first; -} -#define HLIST_HEAD_INIT { .first = NULL } -#define HLIST_HEAD(name) struct 
hlist_head name = { .first = NULL } -#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) -static inline void INIT_HLIST_NODE(struct hlist_node *h) -{ - h->next = NULL; - h->pprev = NULL; -} - -#ifndef might_sleep -#define might_sleep() -#endif -#else -static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) -{ - return &pdev->dev; -} -#endif /* <= 2.5.0 */ - -/*****************************************************************************/ -/* 2.5.28 => 2.4.23 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) - -#include -#define work_struct tq_struct -#undef INIT_WORK -#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a) -#undef container_of -#define container_of list_entry -#define schedule_work schedule_task -#define flush_scheduled_work flush_scheduled_tasks -#define cancel_work_sync(x) flush_scheduled_work() - -#endif /* 2.5.28 => 2.4.17 */ - -/*****************************************************************************/ -/* 2.6.0 => 2.5.28 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) -#ifndef read_barrier_depends -#define read_barrier_depends() rmb() -#endif - -#ifndef rcu_head -struct __kc_callback_head { - struct __kc_callback_head *next; - void (*func)(struct callback_head *head); -}; -#define rcu_head __kc_callback_head -#endif - -#undef get_cpu -#define get_cpu() smp_processor_id() -#undef put_cpu -#define put_cpu() do { } while(0) -#define MODULE_INFO(version, _version) -#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT -#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1 -#endif -#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT -#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1 -#endif -#ifndef CONFIG_IGC_DISABLE_PACKET_SPLIT -#define CONFIG_IGC_DISABLE_PACKET_SPLIT 1 -#endif - -#define dma_set_coherent_mask(dev,mask) 1 - -#undef dev_put -#define dev_put(dev) __dev_put(dev) - -#ifndef skb_fill_page_desc -#define skb_fill_page_desc _kc_skb_fill_page_desc -extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); 
-#endif - -#undef ALIGN -#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) - -#ifndef page_count -#define page_count(p) atomic_read(&(p)->count) -#endif - -#ifdef MAX_NUMNODES -#undef MAX_NUMNODES -#endif -#define MAX_NUMNODES 1 - -/* find_first_bit and find_next bit are not defined for most - * 2.4 kernels (except for the redhat 2.4.21 kernels - */ -#include -#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) -#undef find_next_bit -#define find_next_bit _kc_find_next_bit -extern unsigned long _kc_find_next_bit(const unsigned long *addr, - unsigned long size, - unsigned long offset); -#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) - -#ifndef netdev_name -static inline const char *_kc_netdev_name(const struct net_device *dev) -{ - if (strchr(dev->name, '%')) - return "(unregistered net_device)"; - return dev->name; -} -#define netdev_name(netdev) _kc_netdev_name(netdev) -#endif /* netdev_name */ - -#ifndef strlcpy -#define strlcpy _kc_strlcpy -extern size_t _kc_strlcpy(char *dest, const char *src, size_t size); -#endif /* strlcpy */ - -#ifndef do_div -#if BITS_PER_LONG == 64 -# define do_div(n,base) ({ \ - uint32_t __base = (base); \ - uint32_t __rem; \ - __rem = ((uint64_t)(n)) % __base; \ - (n) = ((uint64_t)(n)) / __base; \ - __rem; \ - }) -#elif BITS_PER_LONG == 32 -extern uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); -# define do_div(n,base) ({ \ - uint32_t __base = (base); \ - uint32_t __rem; \ - if (likely(((n) >> 32) == 0)) { \ - __rem = (uint32_t)(n) % __base; \ - (n) = (uint32_t)(n) / __base; \ - } else \ - __rem = _kc__div64_32(&(n), __base); \ - __rem; \ - }) -#else /* BITS_PER_LONG == ?? 
*/ -# error do_div() does not yet support the C64 -#endif /* BITS_PER_LONG */ -#endif /* do_div */ - -#ifndef NSEC_PER_SEC -#define NSEC_PER_SEC 1000000000L -#endif - -#undef HAVE_I2C_SUPPORT -#else /* 2.6.0 */ - -#endif /* 2.6.0 => 2.5.28 */ -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) -#define dma_pool pci_pool -#define dma_pool_destroy pci_pool_destroy -#define dma_pool_alloc pci_pool_alloc -#define dma_pool_free pci_pool_free - -#define dma_pool_create(name,dev,size,align,allocation) \ - pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) -#endif /* < 2.6.3 */ - -/*****************************************************************************/ -/* 2.6.4 => 2.6.0 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) -#define MODULE_VERSION(_version) MODULE_INFO(version, _version) -#endif /* 2.6.4 => 2.6.0 */ - -/*****************************************************************************/ -/* 2.6.5 => 2.6.0 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) -#define dma_sync_single_for_cpu dma_sync_single -#define dma_sync_single_for_device dma_sync_single -#define dma_sync_single_range_for_cpu dma_sync_single_range -#define dma_sync_single_range_for_device dma_sync_single_range -#ifndef pci_dma_mapping_error -#define pci_dma_mapping_error _kc_pci_dma_mapping_error -static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) -{ - return dma_addr == 0; -} -#endif -#endif /* 2.6.5 => 2.6.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) -extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); -#define scnprintf(buf, size, fmt, args...) 
_kc_scnprintf(buf, size, fmt, ##args) -#endif /* < 2.6.4 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) -/* taken from 2.6 include/linux/bitmap.h */ -#undef bitmap_zero -#define bitmap_zero _kc_bitmap_zero -static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) -{ - if (nbits <= BITS_PER_LONG) - *dst = 0UL; - else { - int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); - memset(dst, 0, len); - } -} -#define page_to_nid(x) 0 - -#endif /* < 2.6.6 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) ) -#undef if_mii -#define if_mii _kc_if_mii -static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) -{ - return (struct mii_ioctl_data *) &rq->ifr_ifru; -} - -#ifndef __force -#define __force -#endif -#endif /* < 2.6.7 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) -#ifndef PCI_EXP_DEVCTL -#define PCI_EXP_DEVCTL 8 -#endif -#ifndef PCI_EXP_DEVCTL_CERE -#define PCI_EXP_DEVCTL_CERE 0x0001 -#endif -#define PCI_EXP_FLAGS 2 /* Capabilities register */ -#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ -#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ -#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ -#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ -#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ -#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ -#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ -#define PCI_EXP_DEVCAP 4 /* Device capabilities */ -#define PCI_EXP_DEVSTA 10 /* Device Status */ -#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \ - schedule_timeout((x * HZ)/1000 + 2); \ - } while (0) - -#endif /* < 2.6.8 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(2,6,9)) -#include -#define __iomem - -#ifndef kcalloc -#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) -extern void *_kc_kzalloc(size_t size, int flags); -#endif -#define MSEC_PER_SEC 1000L -static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) -{ -#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) - return (MSEC_PER_SEC / HZ) * j; -#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) - return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); -#else - return (j * MSEC_PER_SEC) / HZ; -#endif -} -static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) -{ - if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) - return MAX_JIFFY_OFFSET; -#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) - return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); -#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) - return m * (HZ / MSEC_PER_SEC); -#else - return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; -#endif -} - -#define msleep_interruptible _kc_msleep_interruptible -static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) -{ - unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; - - while (timeout && !signal_pending(current)) { - __set_current_state(TASK_INTERRUPTIBLE); - timeout = schedule_timeout(timeout); - } - return _kc_jiffies_to_msecs(timeout); -} - -/* Basic mode control register. */ -#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ - -#ifndef __le16 -#define __le16 u16 -#endif -#ifndef __le32 -#define __le32 u32 -#endif -#ifndef __le64 -#define __le64 u64 -#endif -#ifndef __be16 -#define __be16 u16 -#endif -#ifndef __be32 -#define __be32 u32 -#endif -#ifndef __be64 -#define __be64 u64 -#endif - -static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) -{ - return (struct vlan_ethhdr *)skb->mac.raw; -} - -/* Wake-On-Lan options. 
*/ -#define WAKE_PHY (1 << 0) -#define WAKE_UCAST (1 << 1) -#define WAKE_MCAST (1 << 2) -#define WAKE_BCAST (1 << 3) -#define WAKE_ARP (1 << 4) -#define WAKE_MAGIC (1 << 5) -#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ - -#define skb_header_pointer _kc_skb_header_pointer -static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, - int offset, int len, void *buffer) -{ - int hlen = skb_headlen(skb); - - if (hlen - offset >= len) - return skb->data + offset; - -#ifdef MAX_SKB_FRAGS - if (skb_copy_bits(skb, offset, buffer, len) < 0) - return NULL; - - return buffer; -#else - return NULL; -#endif - -#ifndef NETDEV_TX_OK -#define NETDEV_TX_OK 0 -#endif -#ifndef NETDEV_TX_BUSY -#define NETDEV_TX_BUSY 1 -#endif -#ifndef NETDEV_TX_LOCKED -#define NETDEV_TX_LOCKED -1 -#endif -} - -#ifndef __bitwise -#define __bitwise -#endif -#endif /* < 2.6.9 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) -#ifdef module_param_array_named -#undef module_param_array_named -#define module_param_array_named(name, array, type, nump, perm) \ - static struct kparam_array __param_arr_##name \ - = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ - sizeof(array[0]), array }; \ - module_param_call(name, param_array_set, param_array_get, \ - &__param_arr_##name, perm) -#endif /* module_param_array_named */ -/* - * num_online is broken for all < 2.6.10 kernels. This is needed to support - * Node module parameter of ixgbe. 
- */ -#undef num_online_nodes -#define num_online_nodes(n) 1 -extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); -#undef node_online_map -#define node_online_map _kcompat_node_online_map -#define pci_get_class pci_find_class -#endif /* < 2.6.10 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) -#define PCI_D0 0 -#define PCI_D1 1 -#define PCI_D2 2 -#define PCI_D3hot 3 -#define PCI_D3cold 4 -typedef int pci_power_t; -#define pci_choose_state(pdev,state) state -#define PMSG_SUSPEND 3 -#define PCI_EXP_LNKCTL 16 - -#undef NETIF_F_LLTX - -#ifndef ARCH_HAS_PREFETCH -#define prefetch(X) -#endif - -#ifndef NET_IP_ALIGN -#define NET_IP_ALIGN 2 -#endif - -#define KC_USEC_PER_SEC 1000000L -#define usecs_to_jiffies _kc_usecs_to_jiffies -static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) -{ -#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) - return (KC_USEC_PER_SEC / HZ) * j; -#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) - return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); -#else - return (j * KC_USEC_PER_SEC) / HZ; -#endif -} -static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) -{ - if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) - return MAX_JIFFY_OFFSET; -#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) - return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); -#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) - return m * (HZ / KC_USEC_PER_SEC); -#else - return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; -#endif -} - -#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ -#define PCI_EXP_LNKSTA 18 /* Link Status */ -#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ -#define PCI_EXP_SLTCTL 24 /* Slot Control */ -#define PCI_EXP_SLTSTA 26 /* Slot Status */ -#define PCI_EXP_RTCTL 28 /* Root Control */ -#define PCI_EXP_RTCAP 30 /* Root Capabilities */ -#define PCI_EXP_RTSTA 32 /* Root Status */ 
-#endif /* < 2.6.11 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) -#include -#define USE_REBOOT_NOTIFIER - -/* Generic MII registers. */ -#define MII_CTRL1000 0x09 /* 1000BASE-T control */ -#define MII_STAT1000 0x0a /* 1000BASE-T status */ -/* Advertisement control register. */ -#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ -#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ -/* Link partner ability register. */ -#define LPA_PAUSE_CAP 0x0400 /* Can pause */ -#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ -/* 1000BASE-T Control register */ -#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ -#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ -/* 1000BASE-T Status register */ -#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ -#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ - -#ifndef is_zero_ether_addr -#define is_zero_ether_addr _kc_is_zero_ether_addr -static inline int _kc_is_zero_ether_addr(const u8 *addr) -{ - return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); -} -#endif /* is_zero_ether_addr */ -#ifndef is_multicast_ether_addr -#define is_multicast_ether_addr _kc_is_multicast_ether_addr -static inline int _kc_is_multicast_ether_addr(const u8 *addr) -{ - return addr[0] & 0x01; -} -#endif /* is_multicast_ether_addr */ -#endif /* < 2.6.12 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) -#ifndef kstrdup -#define kstrdup _kc_kstrdup -extern char *_kc_kstrdup(const char *s, unsigned int gfp); -#endif -#endif /* < 2.6.13 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) -#define pm_message_t u32 -#ifndef kzalloc -#define kzalloc _kc_kzalloc -extern void 
*_kc_kzalloc(size_t size, int flags); -#endif - -/* Generic MII registers. */ -#define MII_ESTATUS 0x0f /* Extended Status */ -/* Basic mode status register. */ -#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ -/* Extended status register. */ -#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ -#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ - -#define SUPPORTED_Pause (1 << 13) -#define SUPPORTED_Asym_Pause (1 << 14) -#define ADVERTISED_Pause (1 << 13) -#define ADVERTISED_Asym_Pause (1 << 14) - -#if (!(RHEL_RELEASE_CODE && \ - (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ - (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) -#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) -#define gfp_t unsigned -#else -typedef unsigned gfp_t; -#endif -#endif /* !RHEL4.3->RHEL5.0 */ - -#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) -#ifdef CONFIG_X86_64 -#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ - dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) -#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ - dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) -#endif -#endif -#endif /* < 2.6.14 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) -#ifndef kfree_rcu -/* this is placed here due to a lack of rcu_barrier in previous kernels */ -#define kfree_rcu(_ptr, _offset) kfree(_ptr) -#endif /* kfree_rcu */ -#ifndef vmalloc_node -#define vmalloc_node(a,b) vmalloc(a) -#endif /* vmalloc_node*/ - -#define setup_timer(_timer, _function, _data) \ -do { \ - (_timer)->function = _function; \ - (_timer)->data = _data; \ - init_timer(_timer); \ -} while (0) -#ifndef device_can_wakeup -#define device_can_wakeup(dev) (1) -#endif -#ifndef device_set_wakeup_enable -#define device_set_wakeup_enable(dev, val) do{}while(0) -#endif -#ifndef device_init_wakeup -#define device_init_wakeup(dev,val) do {} 
while (0) -#endif -static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) -{ - const u16 *a = (const u16 *) addr1; - const u16 *b = (const u16 *) addr2; - - return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; -} -#undef compare_ether_addr -#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) -#endif /* < 2.6.15 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) -#undef DEFINE_MUTEX -#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) -#define mutex_lock(x) down_interruptible(x) -#define mutex_unlock(x) up(x) - -#ifndef ____cacheline_internodealigned_in_smp -#ifdef CONFIG_SMP -#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp -#else -#define ____cacheline_internodealigned_in_smp -#endif /* CONFIG_SMP */ -#endif /* ____cacheline_internodealigned_in_smp */ -#undef HAVE_PCI_ERS -#else /* 2.6.16 and above */ -#undef HAVE_PCI_ERS -#define HAVE_PCI_ERS -#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) -#ifdef device_can_wakeup -#undef device_can_wakeup -#endif /* device_can_wakeup */ -#define device_can_wakeup(dev) 1 -#endif /* SLE_VERSION(10,4,0) */ -#endif /* < 2.6.16 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) -#ifndef dev_notice -#define dev_notice(dev, fmt, args...) 
\ - dev_printk(KERN_NOTICE, dev, fmt, ## args) -#endif - -#ifndef first_online_node -#define first_online_node 0 -#endif -#ifndef NET_SKB_PAD -#define NET_SKB_PAD 16 -#endif -#endif /* < 2.6.17 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) - -#ifndef IRQ_HANDLED -#define irqreturn_t void -#define IRQ_HANDLED -#define IRQ_NONE -#endif - -#ifndef IRQF_PROBE_SHARED -#ifdef SA_PROBEIRQ -#define IRQF_PROBE_SHARED SA_PROBEIRQ -#else -#define IRQF_PROBE_SHARED 0 -#endif -#endif - -#ifndef IRQF_SHARED -#define IRQF_SHARED SA_SHIRQ -#endif - -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - -#ifndef FIELD_SIZEOF -#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) -#endif - -#ifndef skb_is_gso -#ifdef NETIF_F_TSO -#define skb_is_gso _kc_skb_is_gso -static inline int _kc_skb_is_gso(const struct sk_buff *skb) -{ - return skb_shinfo(skb)->gso_size; -} -#else -#define skb_is_gso(a) 0 -#endif -#endif - -#ifndef resource_size_t -#define resource_size_t unsigned long -#endif - -#ifdef skb_pad -#undef skb_pad -#endif -#define skb_pad(x,y) _kc_skb_pad(x, y) -int _kc_skb_pad(struct sk_buff *skb, int pad); -#ifdef skb_padto -#undef skb_padto -#endif -#define skb_padto(x,y) _kc_skb_padto(x, y) -static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) -{ - unsigned int size = skb->len; - if(likely(size >= len)) - return 0; - return _kc_skb_pad(skb, len - size); -} - -#ifndef DECLARE_PCI_UNMAP_ADDR -#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ - dma_addr_t ADDR_NAME -#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ - u32 LEN_NAME -#define pci_unmap_addr(PTR, ADDR_NAME) \ - ((PTR)->ADDR_NAME) -#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ - (((PTR)->ADDR_NAME) = (VAL)) -#define pci_unmap_len(PTR, LEN_NAME) \ - ((PTR)->LEN_NAME) -#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ - (((PTR)->LEN_NAME) = (VAL)) -#endif /* DECLARE_PCI_UNMAP_ADDR */ -#endif /* < 
2.6.18 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) -enum pcie_link_width { - PCIE_LNK_WIDTH_RESRV = 0x00, - PCIE_LNK_X1 = 0x01, - PCIE_LNK_X2 = 0x02, - PCIE_LNK_X4 = 0x04, - PCIE_LNK_X8 = 0x08, - PCIE_LNK_X12 = 0x0C, - PCIE_LNK_X16 = 0x10, - PCIE_LNK_X32 = 0x20, - PCIE_LNK_WIDTH_UNKNOWN = 0xFF, -}; - -#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0))) -#define i_private u.generic_ip -#endif /* >= RHEL 5.0 */ - -#ifndef DIV_ROUND_UP -#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) -#endif -#ifndef __ALIGN_MASK -#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) -#endif -#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) -#if (!((RHEL_RELEASE_CODE && \ - ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ - RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ - (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))))) -typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); -#endif -#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) -#undef CONFIG_INET_LRO -#undef CONFIG_INET_LRO_MODULE -#undef CONFIG_FCOE -#undef CONFIG_FCOE_MODULE -#endif -typedef irqreturn_t (*new_handler_t)(int, void*); -static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) -#else /* 2.4.x */ -typedef void (*irq_handler_t)(int, void*, struct pt_regs *); -typedef void (*new_handler_t)(int, void*); -static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) -#endif /* >= 2.5.x */ -{ - irq_handler_t new_handler = (irq_handler_t) handler; - return request_irq(irq, new_handler, flags, devname, dev_id); -} - -#undef request_irq -#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) - -#define irq_handler_t new_handler_t -/* 
pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ -#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) -#define PCIE_CONFIG_SPACE_LEN 256 -#define PCI_CONFIG_SPACE_LEN 64 -#define PCIE_LINK_STATUS 0x12 -#define pci_config_space_ich8lan() do {} while(0) -#undef pci_save_state -extern int _kc_pci_save_state(struct pci_dev *); -#define pci_save_state(pdev) _kc_pci_save_state(pdev) -#undef pci_restore_state -extern void _kc_pci_restore_state(struct pci_dev *); -#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) -#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ - -#ifdef HAVE_PCI_ERS -#undef free_netdev -extern void _kc_free_netdev(struct net_device *); -#define free_netdev(netdev) _kc_free_netdev(netdev) -#endif -static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev) -{ - return 0; -} -#define pci_disable_pcie_error_reporting(dev) do {} while (0) -#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) - -extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); -#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) -#ifndef bool -#define bool _Bool -#define true 1 -#define false 0 -#endif -#else /* 2.6.19 */ -#include -#include - -#define NEW_SKB_CSUM_HELP -#endif /* < 2.6.19 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ) -#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) ) -#undef INIT_WORK -#define INIT_WORK(_work, _func) \ -do { \ - INIT_LIST_HEAD(&(_work)->entry); \ - (_work)->pending = 0; \ - (_work)->func = (void (*)(void *))_func; \ - (_work)->data = _work; \ - init_timer(&(_work)->timer); \ -} while (0) -#endif - -#ifndef PCI_VDEVICE -#define PCI_VDEVICE(ven, dev) \ - PCI_VENDOR_ID_##ven, (dev), \ - PCI_ANY_ID, PCI_ANY_ID, 0, 0 -#endif - -#ifndef PCI_VENDOR_ID_INTEL -#define PCI_VENDOR_ID_INTEL 0x8086 -#endif - -#ifndef round_jiffies -#define round_jiffies(x) x 
-#endif - -#define csum_offset csum - -#define HAVE_EARLY_VMALLOC_NODE -#define dev_to_node(dev) -1 -#undef set_dev_node -/* remove compiler warning with b=b, for unused variable */ -#define set_dev_node(a, b) do { (b) = (b); } while(0) - -#if (!(RHEL_RELEASE_CODE && \ - (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ - (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ - (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ - !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) -typedef __u16 __bitwise __sum16; -typedef __u32 __bitwise __wsum; -#endif - -#if (!(RHEL_RELEASE_CODE && \ - (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ - (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ - (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ - !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) -static inline __wsum csum_unfold(__sum16 n) -{ - return (__force __wsum)n; -} -#endif - -#else /* < 2.6.20 */ -#define HAVE_DEVICE_NUMA_NODE -#endif /* < 2.6.20 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) -#define to_net_dev(class) container_of(class, struct net_device, class_dev) -#define NETDEV_CLASS_DEV -#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) -#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) -#define vlan_group_set_device(vg, id, dev) \ - do { \ - if (vg) vg->vlan_devices[id] = dev; \ - } while (0) -#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ -#define pci_channel_offline(pdev) (pdev->error_state && \ - pdev->error_state != pci_channel_io_normal) -#define pci_request_selected_regions(pdev, bars, name) \ - pci_request_regions(pdev, name) -#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); - -#ifndef __aligned -#define __aligned(x) __attribute__((aligned(x))) -#endif - -extern struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); 
-#define netdev_to_dev(netdev) \ - pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) -#define devm_kzalloc(dev, size, flags) kzalloc(size, flags) -#define devm_kfree(dev, p) kfree(p) -#else /* 2.6.21 */ -static inline struct device *netdev_to_dev(struct net_device *netdev) -{ - return &netdev->dev; -} - -#endif /* < 2.6.21 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) -#define tcp_hdr(skb) (skb->h.th) -#define tcp_hdrlen(skb) (skb->h.th->doff << 2) -#define skb_transport_offset(skb) (skb->h.raw - skb->data) -#define skb_transport_header(skb) (skb->h.raw) -#define ipv6_hdr(skb) (skb->nh.ipv6h) -#define ip_hdr(skb) (skb->nh.iph) -#define skb_network_offset(skb) (skb->nh.raw - skb->data) -#define skb_network_header(skb) (skb->nh.raw) -#define skb_tail_pointer(skb) skb->tail -#define skb_reset_tail_pointer(skb) \ - do { \ - skb->tail = skb->data; \ - } while (0) -#define skb_set_tail_pointer(skb, offset) \ - do { \ - skb->tail = skb->data + offset; \ - } while (0) -#define skb_copy_to_linear_data(skb, from, len) \ - memcpy(skb->data, from, len) -#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ - memcpy(skb->data + offset, from, len) -#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) -#define pci_register_driver pci_module_init -#define skb_mac_header(skb) skb->mac.raw - -#ifdef NETIF_F_MULTI_QUEUE -#ifndef alloc_etherdev_mq -#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) -#endif -#endif /* NETIF_F_MULTI_QUEUE */ - -#ifndef ETH_FCS_LEN -#define ETH_FCS_LEN 4 -#endif -#define cancel_work_sync(x) flush_scheduled_work() -#ifndef udp_hdr -#define udp_hdr _udp_hdr -static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) -{ - return (struct udphdr *)skb_transport_header(skb); -} -#endif - -#ifdef cpu_to_be16 -#undef cpu_to_be16 -#endif -#define cpu_to_be16(x) __constant_htons(x) - -#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > 
RHEL_RELEASE_VERSION(5,1))) -enum { - DUMP_PREFIX_NONE, - DUMP_PREFIX_ADDRESS, - DUMP_PREFIX_OFFSET -}; -#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ -#ifndef hex_asc -#define hex_asc(x) "0123456789abcdef"[x] -#endif -#include -extern void _kc_print_hex_dump(const char *level, const char *prefix_str, - int prefix_type, int rowsize, int groupsize, - const void *buf, size_t len, bool ascii); -#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ - _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) -#ifndef ADVERTISED_2500baseX_Full -#define ADVERTISED_2500baseX_Full (1 << 15) -#endif -#ifndef SUPPORTED_2500baseX_Full -#define SUPPORTED_2500baseX_Full (1 << 15) -#endif - -#ifndef ETH_P_PAUSE -#define ETH_P_PAUSE 0x8808 -#endif - -static inline int compound_order(struct page *page) -{ - return 0; -} - -#ifndef SKB_WITH_OVERHEAD -#define SKB_WITH_OVERHEAD(X) \ - ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) -#endif -#else /* 2.6.22 */ -#define ETH_TYPE_TRANS_SETS_DEV -#define HAVE_NETDEV_STATS_IN_NETDEV -#endif /* < 2.6.22 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) -#undef SET_MODULE_OWNER -#define SET_MODULE_OWNER(dev) do { } while (0) -#endif /* > 2.6.22 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) -#define netif_subqueue_stopped(_a, _b) 0 -#ifndef PTR_ALIGN -#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) -#endif - -#ifndef CONFIG_PM_SLEEP -#define CONFIG_PM_SLEEP CONFIG_PM -#endif - -#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) ) -#define HAVE_ETHTOOL_GET_PERM_ADDR -#endif /* 2.6.14 through 2.6.22 */ - -static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom) -{ - int delta = 0; - - if (headroom > (skb->data - skb->head)) - delta = headroom - (skb->data - skb->head); - - if (delta || skb_header_cloned(skb)) - return 
pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, - GFP_ATOMIC); - return 0; -} -#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h)) -#endif /* < 2.6.23 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) -#ifndef ETH_FLAG_LRO -#define ETH_FLAG_LRO NETIF_F_LRO -#endif - -#ifndef ACCESS_ONCE -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) -#endif - -/* if GRO is supported then the napi struct must already exist */ -#ifndef NETIF_F_GRO -/* NAPI API changes in 2.6.24 break everything */ -struct napi_struct { - /* used to look up the real NAPI polling routine */ - int (*poll)(struct napi_struct *, int); - struct net_device *dev; - int weight; -}; -#endif - -#ifdef NAPI -extern int __kc_adapter_clean(struct net_device *, int *); -/* The following definitions are multi-queue aware, and thus we have a driver - * define list which determines which drivers support multiple queues, and - * thus need these stronger defines. If a driver does not support multi-queue - * functionality, you don't need to add it to this list. 
- */ -extern struct net_device *napi_to_poll_dev(const struct napi_struct *napi); - -static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi, - int (*poll)(struct napi_struct *, int), int weight) -{ - struct net_device *poll_dev = napi_to_poll_dev(napi); - poll_dev->poll = __kc_adapter_clean; - poll_dev->priv = napi; - poll_dev->weight = weight; - set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); - set_bit(__LINK_STATE_START, &poll_dev->state); - dev_hold(poll_dev); - napi->poll = poll; - napi->weight = weight; - napi->dev = dev; -} -#define netif_napi_add __kc_mq_netif_napi_add - -static inline void __kc_mq_netif_napi_del(struct napi_struct *napi) -{ - struct net_device *poll_dev = napi_to_poll_dev(napi); - WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); - dev_put(poll_dev); - memset(poll_dev, 0, sizeof(struct net_device)); -} - -#define netif_napi_del __kc_mq_netif_napi_del - -static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi) -{ - return netif_running(napi->dev) && - netif_rx_schedule_prep(napi_to_poll_dev(napi)); -} -#define napi_schedule_prep __kc_mq_napi_schedule_prep - -static inline void __kc_mq_napi_schedule(struct napi_struct *napi) -{ - if (napi_schedule_prep(napi)) - __netif_rx_schedule(napi_to_poll_dev(napi)); -} -#define napi_schedule __kc_mq_napi_schedule - -#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi)) -#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi)) -#ifdef CONFIG_SMP -static inline void napi_synchronize(const struct napi_struct *n) -{ - struct net_device *dev = napi_to_poll_dev(n); - - while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { - /* No hurry. 
*/ - msleep(1); - } -} -#else -#define napi_synchronize(n) barrier() -#endif /* CONFIG_SMP */ -#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi)) -static inline void _kc_napi_complete(struct napi_struct *napi) -{ -#ifdef NETIF_F_GRO - napi_gro_flush(napi); -#endif - netif_rx_complete(napi_to_poll_dev(napi)); -} -#define napi_complete _kc_napi_complete -#else /* NAPI */ - -/* The following definitions are only used if we don't support NAPI at all. */ - -static inline __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi, - int (*poll)(struct napi_struct *, int), int weight) -{ - dev->poll = poll; - dev->weight = weight; - napi->poll = poll; - napi->weight = weight; - napi->dev = dev; -} -#define netif_napi_del(_a) do {} while (0) -#endif /* NAPI */ - -#undef dev_get_by_name -#define dev_get_by_name(_a, _b) dev_get_by_name(_b) -#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b) -#ifndef DMA_BIT_MASK -#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1)) -#endif - -#ifdef NETIF_F_TSO6 -#define skb_is_gso_v6 _kc_skb_is_gso_v6 -static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) -{ - return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; -} -#endif /* NETIF_F_TSO6 */ - -#ifndef KERN_CONT -#define KERN_CONT "" -#endif -#ifndef pr_err -#define pr_err(fmt, arg...) \ - printk(KERN_ERR fmt, ##arg) -#endif - -#ifndef rounddown_pow_of_two -#define rounddown_pow_of_two(n) \ - __builtin_constant_p(n) ? ( \ - (n == 1) ? 
0 : \ - (1UL << ilog2(n))) : \ - (1UL << (fls_long(n) - 1)) -#endif - -#ifndef BIT -#define BIT(nr) (1UL << (nr)) -#endif - -#else /* < 2.6.24 */ -#define HAVE_ETHTOOL_GET_SSET_COUNT -#define HAVE_NETDEV_NAPI_LIST -#endif /* < 2.6.24 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) ) -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) -#define INCLUDE_PM_QOS_PARAMS_H -#include -#else /* >= 3.2.0 */ -#include -#endif /* else >= 3.2.0 */ -#endif /* > 2.6.24 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ) -#define PM_QOS_CPU_DMA_LATENCY 1 - -#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) ) -#include -#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY -#define pm_qos_add_requirement(pm_qos_class, name, value) \ - set_acceptable_latency(name, value) -#define pm_qos_remove_requirement(pm_qos_class, name) \ - remove_acceptable_latency(name) -#define pm_qos_update_requirement(pm_qos_class, name, value) \ - modify_acceptable_latency(name, value) -#else -#define PM_QOS_DEFAULT_VALUE -1 -#define pm_qos_add_requirement(pm_qos_class, name, value) -#define pm_qos_remove_requirement(pm_qos_class, name) -#define pm_qos_update_requirement(pm_qos_class, name, value) { \ - if (value != PM_QOS_DEFAULT_VALUE) { \ - printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \ - pci_name(adapter->pdev)); \ - } \ -} - -#endif /* > 2.6.18 */ - -#define pci_enable_device_mem(pdev) pci_enable_device(pdev) - -#ifndef DEFINE_PCI_DEVICE_TABLE -#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] -#endif /* DEFINE_PCI_DEVICE_TABLE */ - -#ifndef strict_strtol -#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r) -static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res) -{ - /* adapted from strict_strtoul() in 2.6.25 */ - char *tail; - long val; - size_t len; - - *res = 0; 
- len = strlen(buf); - if (!len) - return -EINVAL; - val = simple_strtol(buf, &tail, base); - if (tail == buf) - return -EINVAL; - if ((*tail == '\0') || - ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) { - *res = val; - return 0; - } - - return -EINVAL; -} -#endif - -#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) -#ifndef IXGBE_PROCFS -#define IXGBE_PROCFS -#endif /* IXGBE_PROCFS */ -#endif /* >= 2.6.0 */ - -#else /* < 2.6.25 */ - -#ifndef IXGBE_SYSFS -#define IXGBE_SYSFS -#endif /* IXGBE_SYSFS */ -#if IS_ENABLED(CONFIG_HWMON) -#ifndef IXGBE_HWMON -#define IXGBE_HWMON -#endif /* IXGBE_HWMON */ -#endif /* CONFIG_HWMON */ - -#endif /* < 2.6.25 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) -#ifndef clamp_t -#define clamp_t(type, val, min, max) ({ \ - type __val = (val); \ - type __min = (min); \ - type __max = (max); \ - __val = __val < __min ? __min : __val; \ - __val > __max ? __max : __val; }) -#endif /* clamp_t */ -#undef kzalloc_node -#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) - -extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state); -#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) -#else /* < 2.6.26 */ -#define NETDEV_CAN_SET_GSO_MAX_SIZE -#include -#define HAVE_NETDEV_VLAN_FEATURES -#ifndef PCI_EXP_LNKCAP_ASPMS -#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ -#endif /* PCI_EXP_LNKCAP_ASPMS */ -#endif /* < 2.6.26 */ -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) -static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, - __u32 speed) -{ - ep->speed = (__u16)speed; - /* ep->speed_hi = (__u16)(speed >> 16); */ -} -#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set - -static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) -{ - /* no speed_hi before 2.6.27, and probably no 
need for it yet */ - return (__u32)ep->speed; -} -#define ethtool_cmd_speed _kc_ethtool_cmd_speed - -#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) -#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) -#define ANCIENT_PM 1 -#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \ - (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \ - defined(CONFIG_PM_SLEEP)) -#define NEWER_PM 1 -#endif -#if defined(ANCIENT_PM) || defined(NEWER_PM) -#undef device_set_wakeup_enable -#define device_set_wakeup_enable(dev, val) \ - do { \ - u16 pmc = 0; \ - int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ - if (pm) { \ - pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ - &pmc); \ - } \ - (dev)->power.can_wakeup = !!(pmc >> 11); \ - (dev)->power.should_wakeup = (val && (pmc >> 11)); \ - } while (0) -#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ -#endif /* 2.6.15 through 2.6.27 */ -#ifndef netif_napi_del -#define netif_napi_del(_a) do {} while (0) -#ifdef NAPI -#ifdef CONFIG_NETPOLL -#undef netif_napi_del -#define netif_napi_del(_a) list_del(&(_a)->dev_list); -#endif -#endif -#endif /* netif_napi_del */ -#ifdef dma_mapping_error -#undef dma_mapping_error -#endif -#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) - -#ifdef CONFIG_NETDEVICES_MULTIQUEUE -#define HAVE_TX_MQ -#endif - -#ifndef DMA_ATTR_WEAK_ORDERING -#define DMA_ATTR_WEAK_ORDERING 0 -#endif - -#ifdef HAVE_TX_MQ -extern void _kc_netif_tx_stop_all_queues(struct net_device *); -extern void _kc_netif_tx_wake_all_queues(struct net_device *); -extern void _kc_netif_tx_start_all_queues(struct net_device *); -#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) -#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) -#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) -#undef netif_stop_subqueue -#define netif_stop_subqueue(_ndev,_qi) do { \ - if (netif_is_multiqueue((_ndev))) \ - 
netif_stop_subqueue((_ndev), (_qi)); \ - else \ - netif_stop_queue((_ndev)); \ - } while (0) -#undef netif_start_subqueue -#define netif_start_subqueue(_ndev,_qi) do { \ - if (netif_is_multiqueue((_ndev))) \ - netif_start_subqueue((_ndev), (_qi)); \ - else \ - netif_start_queue((_ndev)); \ - } while (0) -#else /* HAVE_TX_MQ */ -#define netif_tx_stop_all_queues(a) netif_stop_queue(a) -#define netif_tx_wake_all_queues(a) netif_wake_queue(a) -#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) ) -#define netif_tx_start_all_queues(a) netif_start_queue(a) -#else -#define netif_tx_start_all_queues(a) do {} while (0) -#endif -#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) -#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) -#endif /* HAVE_TX_MQ */ -#ifndef NETIF_F_MULTI_QUEUE -#define NETIF_F_MULTI_QUEUE 0 -#define netif_is_multiqueue(a) 0 -#define netif_wake_subqueue(a, b) -#endif /* NETIF_F_MULTI_QUEUE */ - -#ifndef __WARN_printf -extern void __kc_warn_slowpath(const char *file, const int line, - const char *fmt, ...) __attribute__((format(printf, 3, 4))); -#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) -#endif /* __WARN_printf */ - -#ifndef WARN -#define WARN(condition, format...) 
({ \ - int __ret_warn_on = !!(condition); \ - if (unlikely(__ret_warn_on)) \ - __WARN_printf(format); \ - unlikely(__ret_warn_on); \ -}) -#endif /* WARN */ -#undef HAVE_IXGBE_DEBUG_FS -#undef HAVE_IGB_DEBUG_FS -#else /* < 2.6.27 */ -#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set -static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, - __u32 speed) -{ - ep->speed = (__u16)(speed & 0xFFFF); - ep->speed_hi = (__u16)(speed >> 16); -} -#define HAVE_TX_MQ -#define HAVE_NETDEV_SELECT_QUEUE -#ifdef CONFIG_DEBUG_FS -#define HAVE_IXGBE_DEBUG_FS -#define HAVE_IGB_DEBUG_FS -#endif /* CONFIG_DEBUG_FS */ -#endif /* < 2.6.27 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) -#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ - pci_resource_len(pdev, bar)) -#define pci_wake_from_d3 _kc_pci_wake_from_d3 -#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep -extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); -extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev); -#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) -#ifndef __skb_queue_head_init -static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) -{ - list->prev = list->next = (struct sk_buff *)list; - list->qlen = 0; -} -#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) -#endif - -#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ -#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ - -#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ -#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ - -#endif /* < 2.6.28 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) -#ifndef swap -#define swap(a, b) \ - do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) -#endif -#define 
pci_request_selected_regions_exclusive(pdev, bars, name) \ - pci_request_selected_regions(pdev, bars, name) -#ifndef CONFIG_NR_CPUS -#define CONFIG_NR_CPUS 1 -#endif /* CONFIG_NR_CPUS */ -#ifndef pcie_aspm_enabled -#define pcie_aspm_enabled() (1) -#endif /* pcie_aspm_enabled */ - -#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ - -#ifndef PCI_EXP_LNKSTA_CLS -#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ -#endif -#ifndef PCI_EXP_LNKSTA_NLW -#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ -#endif - -#ifndef pci_clear_master -extern void _kc_pci_clear_master(struct pci_dev *dev); -#define pci_clear_master(dev) _kc_pci_clear_master(dev) -#endif - -#ifndef PCI_EXP_LNKCTL_ASPMC -#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ -#endif -#else /* < 2.6.29 */ -#ifndef HAVE_NET_DEVICE_OPS -#define HAVE_NET_DEVICE_OPS -#endif -#ifdef CONFIG_DCB -#define HAVE_PFC_MODE_ENABLE -#endif /* CONFIG_DCB */ -#endif /* < 2.6.29 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) -#define NO_PTP_SUPPORT -#define skb_rx_queue_recorded(a) false -#define skb_get_rx_queue(a) 0 -#define skb_record_rx_queue(a, b) do {} while (0) -#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) -#undef CONFIG_FCOE -#undef CONFIG_FCOE_MODULE -#ifndef CONFIG_PCI_IOV -#undef pci_enable_sriov -#define pci_enable_sriov(a, b) -ENOTSUPP -#undef pci_disable_sriov -#define pci_disable_sriov(a) do {} while (0) -#endif /* CONFIG_PCI_IOV */ -#ifndef pr_cont -#define pr_cont(fmt, ...) 
\ - printk(KERN_CONT fmt, ##__VA_ARGS__) -#endif /* pr_cont */ -static inline void _kc_synchronize_irq(unsigned int a) -{ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) - synchronize_irq(); -#else /* < 2.5.28 */ - synchronize_irq(a); -#endif /* < 2.5.28 */ -} -#undef synchronize_irq -#define synchronize_irq(a) _kc_synchronize_irq(a) - -#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ - -#ifdef nr_cpus_node -#undef nr_cpus_node -#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) -#endif - -#else /* < 2.6.30 */ -#define HAVE_ASPM_QUIRKS -#endif /* < 2.6.30 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) -#define ETH_P_1588 0x88F7 -#define ETH_P_FIP 0x8914 -#ifndef netdev_uc_count -#define netdev_uc_count(dev) ((dev)->uc_count) -#endif -#ifndef netdev_for_each_uc_addr -#define netdev_for_each_uc_addr(uclist, dev) \ - for (uclist = dev->uc_list; uclist; uclist = uclist->next) -#endif -#ifndef PORT_OTHER -#define PORT_OTHER 0xff -#endif -#ifndef MDIO_PHY_ID_PRTAD -#define MDIO_PHY_ID_PRTAD 0x03e0 -#endif -#ifndef MDIO_PHY_ID_DEVAD -#define MDIO_PHY_ID_DEVAD 0x001f -#endif -#ifndef skb_dst -#define skb_dst(s) ((s)->dst) -#endif - -#ifndef SUPPORTED_1000baseKX_Full -#define SUPPORTED_1000baseKX_Full (1 << 17) -#endif -#ifndef SUPPORTED_10000baseKX4_Full -#define SUPPORTED_10000baseKX4_Full (1 << 18) -#endif -#ifndef SUPPORTED_10000baseKR_Full -#define SUPPORTED_10000baseKR_Full (1 << 19) -#endif - -#ifndef ADVERTISED_1000baseKX_Full -#define ADVERTISED_1000baseKX_Full (1 << 17) -#endif -#ifndef ADVERTISED_10000baseKX4_Full -#define ADVERTISED_10000baseKX4_Full (1 << 18) -#endif -#ifndef ADVERTISED_10000baseKR_Full -#define ADVERTISED_10000baseKR_Full (1 << 19) -#endif - -static inline unsigned long dev_trans_start(struct net_device *dev) -{ - return dev->trans_start; -} -#else /* < 2.6.31 */ -#ifndef HAVE_NETDEV_STORAGE_ADDRESS -#define HAVE_NETDEV_STORAGE_ADDRESS 
-#endif -#ifndef HAVE_NETDEV_HW_ADDR -#define HAVE_NETDEV_HW_ADDR -#endif -#ifndef HAVE_TRANS_START_IN_QUEUE -#define HAVE_TRANS_START_IN_QUEUE -#endif -#ifndef HAVE_INCLUDE_LINUX_MDIO_H -#define HAVE_INCLUDE_LINUX_MDIO_H -#endif -#include -#endif /* < 2.6.31 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) ) -#undef netdev_tx_t -#define netdev_tx_t int -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) -#ifndef NETIF_F_FCOE_MTU -#define NETIF_F_FCOE_MTU (1 << 26) -#endif -#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ - -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) -static inline int _kc_pm_runtime_get_sync() -{ - return 1; -} -#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() -#else /* 2.6.0 => 2.6.32 */ -static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev) -{ - return 1; -} -#ifndef pm_runtime_get_sync -#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) -#endif -#endif /* 2.6.0 => 2.6.32 */ -#ifndef pm_runtime_put -#define pm_runtime_put(dev) do {} while (0) -#endif -#ifndef pm_runtime_put_sync -#define pm_runtime_put_sync(dev) do {} while (0) -#endif -#ifndef pm_runtime_resume -#define pm_runtime_resume(dev) do {} while (0) -#endif -#ifndef pm_schedule_suspend -#define pm_schedule_suspend(dev, t) do {} while (0) -#endif -#ifndef pm_runtime_set_suspended -#define pm_runtime_set_suspended(dev) do {} while (0) -#endif -#ifndef pm_runtime_disable -#define pm_runtime_disable(dev) do {} while (0) -#endif -#ifndef pm_runtime_put_noidle -#define pm_runtime_put_noidle(dev) do {} while (0) -#endif -#ifndef pm_runtime_set_active -#define pm_runtime_set_active(dev) do {} while (0) -#endif -#ifndef pm_runtime_enable -#define pm_runtime_enable(dev) do {} while (0) -#endif -#ifndef pm_runtime_get_noresume -#define pm_runtime_get_noresume(dev) do {} while (0) -#endif -#else /* < 2.6.32 */ -#if (RHEL_RELEASE_CODE && \ - 
(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \ - (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) -#define HAVE_RHEL6_NET_DEVICE_EXTENDED -#endif /* RHEL >= 6.2 && RHEL < 7.0 */ -#if (RHEL_RELEASE_CODE && \ - (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \ - (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) -#define HAVE_RHEL6_NET_DEVICE_OPS_EXT -#define HAVE_NDO_SET_FEATURES -#endif /* RHEL >= 6.6 && RHEL < 7.0 */ -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) -#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE -#define HAVE_NETDEV_OPS_FCOE_ENABLE -#endif -#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ -#ifdef CONFIG_DCB -#ifndef HAVE_DCBNL_OPS_GETAPP -#define HAVE_DCBNL_OPS_GETAPP -#endif -#endif /* CONFIG_DCB */ -#include -/* IOV bad DMA target work arounds require at least this kernel rev support */ -#define HAVE_PCIE_TYPE -#endif /* < 2.6.32 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) -#ifndef pci_pcie_cap -#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) -#endif -#ifndef IPV4_FLOW -#define IPV4_FLOW 0x10 -#endif /* IPV4_FLOW */ -#ifndef IPV6_FLOW -#define IPV6_FLOW 0x11 -#endif /* IPV6_FLOW */ -/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ -#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \ - (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) ) -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) -#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN -#define HAVE_NETDEV_OPS_FCOE_GETWWN -#endif -#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ -#endif /* RHEL6 or SLES11 SP1 */ -#ifndef __percpu -#define __percpu -#endif /* __percpu */ - -#ifndef PORT_DA -#define PORT_DA PORT_OTHER -#endif /* PORT_DA */ -#ifndef PORT_NONE -#define PORT_NONE PORT_OTHER -#endif - -#if ((RHEL_RELEASE_CODE && \ - (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \ - (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) 
-#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) -#undef DEFINE_DMA_UNMAP_ADDR -#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME -#undef DEFINE_DMA_UNMAP_LEN -#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME -#undef dma_unmap_addr -#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) -#undef dma_unmap_addr_set -#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) -#undef dma_unmap_len -#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) -#undef dma_unmap_len_set -#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) -#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ -#endif /* RHEL_RELEASE_CODE */ - -#if (!(RHEL_RELEASE_CODE && \ - (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \ - (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \ - ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \ - (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))))) -static inline bool pci_is_pcie(struct pci_dev *dev) -{ - return !!pci_pcie_cap(dev); -} -#endif /* RHEL_RELEASE_CODE */ - -#if (!(RHEL_RELEASE_CODE && \ - (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)))) -#define sk_tx_queue_get(_sk) (-1) -#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0) -#endif /* !(RHEL >= 6.2) */ - -#if (RHEL_RELEASE_CODE && \ - (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ - (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) -#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT -#define HAVE_ETHTOOL_GRXFHINDIR_SIZE -#define HAVE_ETHTOOL_SET_PHYS_ID -#define HAVE_ETHTOOL_GET_TS_INFO -#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5)) -#define HAVE_ETHTOOL_GSRSSH -#define HAVE_RHEL6_SRIOV_CONFIGURE -#define HAVE_RXFH_NONCONST -#endif /* RHEL > 6.5 */ -#endif /* RHEL >= 6.4 && RHEL < 7.0 */ - -#else /* < 2.6.33 */ -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) -#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN -#define HAVE_NETDEV_OPS_FCOE_GETWWN -#endif -#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ 
-#endif /* < 2.6.33 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) -#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) -#ifndef pci_num_vf -#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) -extern int _kc_pci_num_vf(struct pci_dev *dev); -#endif -#endif /* RHEL_RELEASE_CODE */ - -#ifndef dev_is_pci -#define dev_is_pci(d) ((d)->bus == &pci_bus_type) -#endif - -#ifndef ETH_FLAG_NTUPLE -#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE -#endif - -#ifndef netdev_mc_count -#define netdev_mc_count(dev) ((dev)->mc_count) -#endif -#ifndef netdev_mc_empty -#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) -#endif -#ifndef netdev_for_each_mc_addr -#define netdev_for_each_mc_addr(mclist, dev) \ - for (mclist = dev->mc_list; mclist; mclist = mclist->next) -#endif -#ifndef netdev_uc_count -#define netdev_uc_count(dev) ((dev)->uc.count) -#endif -#ifndef netdev_uc_empty -#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) -#endif -#ifndef netdev_for_each_uc_addr -#define netdev_for_each_uc_addr(ha, dev) \ - list_for_each_entry(ha, &dev->uc.list, list) -#endif -#ifndef dma_set_coherent_mask -#define dma_set_coherent_mask(dev,mask) \ - pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) -#endif -#ifndef pci_dev_run_wake -#define pci_dev_run_wake(pdev) (0) -#endif - -/* netdev logging taken from include/linux/netdevice.h */ -#ifndef netdev_name -static inline const char *_kc_netdev_name(const struct net_device *dev) -{ - if (dev->reg_state != NETREG_REGISTERED) - return "(unregistered net_device)"; - return dev->name; -} -#define netdev_name(netdev) _kc_netdev_name(netdev) -#endif /* netdev_name */ - -#undef netdev_printk -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) -#define netdev_printk(level, netdev, format, args...) 
\ -do { \ - struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ - printk(level "%s: " format, pci_name(pdev), ##args); \ -} while(0) -#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) -#define netdev_printk(level, netdev, format, args...) \ -do { \ - struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ - struct device *dev = pci_dev_to_dev(pdev); \ - dev_printk(level, dev, "%s: " format, \ - netdev_name(netdev), ##args); \ -} while(0) -#else /* 2.6.21 => 2.6.34 */ -#define netdev_printk(level, netdev, format, args...) \ - dev_printk(level, (netdev)->dev.parent, \ - "%s: " format, \ - netdev_name(netdev), ##args) -#endif /* <2.6.0 <2.6.21 <2.6.34 */ -#undef netdev_emerg -#define netdev_emerg(dev, format, args...) \ - netdev_printk(KERN_EMERG, dev, format, ##args) -#undef netdev_alert -#define netdev_alert(dev, format, args...) \ - netdev_printk(KERN_ALERT, dev, format, ##args) -#undef netdev_crit -#define netdev_crit(dev, format, args...) \ - netdev_printk(KERN_CRIT, dev, format, ##args) -#undef netdev_err -#define netdev_err(dev, format, args...) \ - netdev_printk(KERN_ERR, dev, format, ##args) -#undef netdev_warn -#define netdev_warn(dev, format, args...) \ - netdev_printk(KERN_WARNING, dev, format, ##args) -#undef netdev_notice -#define netdev_notice(dev, format, args...) \ - netdev_printk(KERN_NOTICE, dev, format, ##args) -#undef netdev_info -#define netdev_info(dev, format, args...) \ - netdev_printk(KERN_INFO, dev, format, ##args) -#undef netdev_dbg -#if defined(DEBUG) -#define netdev_dbg(__dev, format, args...) \ - netdev_printk(KERN_DEBUG, __dev, format, ##args) -#elif defined(CONFIG_DYNAMIC_DEBUG) -#define netdev_dbg(__dev, format, args...) \ -do { \ - dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ - netdev_name(__dev), ##args); \ -} while (0) -#else /* DEBUG */ -#define netdev_dbg(__dev, format, args...) 
\ -({ \ - if (0) \ - netdev_printk(KERN_DEBUG, __dev, format, ##args); \ - 0; \ -}) -#endif /* DEBUG */ - -#undef netif_printk -#define netif_printk(priv, type, level, dev, fmt, args...) \ -do { \ - if (netif_msg_##type(priv)) \ - netdev_printk(level, (dev), fmt, ##args); \ -} while (0) - -#undef netif_emerg -#define netif_emerg(priv, type, dev, fmt, args...) \ - netif_level(emerg, priv, type, dev, fmt, ##args) -#undef netif_alert -#define netif_alert(priv, type, dev, fmt, args...) \ - netif_level(alert, priv, type, dev, fmt, ##args) -#undef netif_crit -#define netif_crit(priv, type, dev, fmt, args...) \ - netif_level(crit, priv, type, dev, fmt, ##args) -#undef netif_err -#define netif_err(priv, type, dev, fmt, args...) \ - netif_level(err, priv, type, dev, fmt, ##args) -#undef netif_warn -#define netif_warn(priv, type, dev, fmt, args...) \ - netif_level(warn, priv, type, dev, fmt, ##args) -#undef netif_notice -#define netif_notice(priv, type, dev, fmt, args...) \ - netif_level(notice, priv, type, dev, fmt, ##args) -#undef netif_info -#define netif_info(priv, type, dev, fmt, args...) \ - netif_level(info, priv, type, dev, fmt, ##args) -#undef netif_dbg -#define netif_dbg(priv, type, dev, fmt, args...) 
\ - netif_level(dbg, priv, type, dev, fmt, ##args) - -#ifdef SET_SYSTEM_SLEEP_PM_OPS -#define HAVE_SYSTEM_SLEEP_PM_OPS -#endif - -#ifndef for_each_set_bit -#define for_each_set_bit(bit, addr, size) \ - for ((bit) = find_first_bit((addr), (size)); \ - (bit) < (size); \ - (bit) = find_next_bit((addr), (size), (bit) + 1)) -#endif /* for_each_set_bit */ - -#ifndef DEFINE_DMA_UNMAP_ADDR -#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR -#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN -#define dma_unmap_addr pci_unmap_addr -#define dma_unmap_addr_set pci_unmap_addr_set -#define dma_unmap_len pci_unmap_len -#define dma_unmap_len_set pci_unmap_len_set -#endif /* DEFINE_DMA_UNMAP_ADDR */ - -#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3)) -#ifdef IGB_HWMON -#ifdef CONFIG_DEBUG_LOCK_ALLOC -#define sysfs_attr_init(attr) \ - do { \ - static struct lock_class_key __key; \ - (attr)->key = &__key; \ - } while (0) -#else -#define sysfs_attr_init(attr) do {} while (0) -#endif /* CONFIG_DEBUG_LOCK_ALLOC */ -#endif /* IGB_HWMON */ -#endif /* RHEL_RELEASE_CODE */ - -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) -static inline bool _kc_pm_runtime_suspended() -{ - return false; -} -#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() -#else /* 2.6.0 => 2.6.34 */ -static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev) -{ - return false; -} -#ifndef pm_runtime_suspended -#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) -#endif -#endif /* 2.6.0 => 2.6.34 */ - -#ifndef pci_bus_speed -/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ -enum _kc_pci_bus_speed { - _KC_PCIE_SPEED_2_5GT = 0x14, - _KC_PCIE_SPEED_5_0GT = 0x15, - _KC_PCIE_SPEED_8_0GT = 0x16, - _KC_PCI_SPEED_UNKNOWN = 0xff, -}; -#define pci_bus_speed _kc_pci_bus_speed -#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT -#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT -#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT -#define PCI_SPEED_UNKNOWN 
_KC_PCI_SPEED_UNKNOWN -#endif /* pci_bus_speed */ - -#else /* < 2.6.34 */ -#define HAVE_SYSTEM_SLEEP_PM_OPS -#ifndef HAVE_SET_RX_MODE -#define HAVE_SET_RX_MODE -#endif - -#endif /* < 2.6.34 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) -ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, - const void __user *from, size_t count); -#define simple_write_to_buffer _kc_simple_write_to_buffer - -#ifndef PCI_EXP_LNKSTA_NLW_SHIFT -#define PCI_EXP_LNKSTA_NLW_SHIFT 4 -#endif - -#ifndef numa_node_id -#define numa_node_id() 0 -#endif -#ifndef numa_mem_id -#define numa_mem_id numa_node_id -#endif -#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) -#ifdef HAVE_TX_MQ -#include -#ifndef CONFIG_NETDEVICES_MULTIQUEUE -int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); -#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ -static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, - unsigned int txq) -{ - dev->egress_subqueue_count = txq; - return 0; -} -#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ -#else /* HAVE_TX_MQ */ -static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev, - unsigned int __always_unused txq) -{ - return 0; -} -#endif /* HAVE_TX_MQ */ -#define netif_set_real_num_tx_queues(dev, txq) \ - _kc_netif_set_real_num_tx_queues(dev, txq) -#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ -#ifndef ETH_FLAG_RXHASH -#define ETH_FLAG_RXHASH (1<<28) -#endif /* ETH_FLAG_RXHASH */ -#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) -#define HAVE_IRQ_AFFINITY_HINT -#endif -struct device_node; -#else /* < 2.6.35 */ -#define HAVE_STRUCT_DEVICE_OF_NODE -#define HAVE_PM_QOS_REQUEST_LIST -#define HAVE_IRQ_AFFINITY_HINT -#include -#endif /* < 2.6.35 */ - -/*****************************************************************************/ -#if ( 
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) -extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); -#define ethtool_op_set_flags _kc_ethtool_op_set_flags -extern u32 _kc_ethtool_op_get_flags(struct net_device *); -#define ethtool_op_get_flags _kc_ethtool_op_get_flags - -enum { - WQ_UNBOUND = 0, - WQ_RESCUER = 0, -}; - -#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -#ifdef NET_IP_ALIGN -#undef NET_IP_ALIGN -#endif -#define NET_IP_ALIGN 0 -#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ - -#ifdef NET_SKB_PAD -#undef NET_SKB_PAD -#endif - -#if (L1_CACHE_BYTES > 32) -#define NET_SKB_PAD L1_CACHE_BYTES -#else -#define NET_SKB_PAD 32 -#endif - -static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, - unsigned int length) -{ - struct sk_buff *skb; - - skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); - if (skb) { -#if (NET_IP_ALIGN + NET_SKB_PAD) - skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); -#endif - skb->dev = dev; - } - return skb; -} - -#ifdef netdev_alloc_skb_ip_align -#undef netdev_alloc_skb_ip_align -#endif -#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) - -#undef netif_level -#define netif_level(level, priv, type, dev, fmt, args...) 
\ -do { \ - if (netif_msg_##type(priv)) \ - netdev_##level(dev, fmt, ##args); \ -} while (0) - -#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))) -#undef usleep_range -#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) -#endif - -#define u64_stats_update_begin(a) do { } while(0) -#define u64_stats_update_end(a) do { } while(0) -#define u64_stats_fetch_begin(a) do { } while(0) -#define u64_stats_fetch_retry_bh(a,b) (0) -#define u64_stats_fetch_begin_bh(a) (0) - -#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) -#define HAVE_8021P_SUPPORT -#endif - -/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ -#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ - !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0))) -static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) -{ - return; -} -#endif - -#else /* < 2.6.36 */ - -#define msleep(x) do { if (x > 20) \ - msleep(x); \ - else \ - usleep_range(1000 * x, 2000 * x); \ - } while (0) - -#define HAVE_PM_QOS_REQUEST_ACTIVE -#define HAVE_8021P_SUPPORT -#define HAVE_NDO_GET_STATS64 -#endif /* < 2.6.36 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) -#define HAVE_NON_CONST_PCI_DRIVER_NAME -#ifndef netif_set_real_num_tx_queues -static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, - unsigned int txq) -{ - netif_set_real_num_tx_queues(dev, txq); - return 0; -} -#define netif_set_real_num_tx_queues(dev, txq) \ - _kc_netif_set_real_num_tx_queues(dev, txq) -#endif -#ifndef netif_set_real_num_rx_queues -static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, - unsigned int __always_unused rxq) -{ - return 0; -} -#define netif_set_real_num_rx_queues(dev, rxq) \ - __kc_netif_set_real_num_rx_queues((dev), (rxq)) -#endif -#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR -#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) -#endif 
-#ifndef VLAN_N_VID -#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN -#endif /* VLAN_N_VID */ -#ifndef ETH_FLAG_TXVLAN -#define ETH_FLAG_TXVLAN (1 << 7) -#endif /* ETH_FLAG_TXVLAN */ -#ifndef ETH_FLAG_RXVLAN -#define ETH_FLAG_RXVLAN (1 << 8) -#endif /* ETH_FLAG_RXVLAN */ - -#define WQ_MEM_RECLAIM WQ_RESCUER - -static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) -{ - WARN_ON(skb->ip_summed != CHECKSUM_NONE); -} -#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) - -static inline void *_kc_vzalloc_node(unsigned long size, int node) -{ - void *addr = vmalloc_node(size, node); - if (addr) - memset(addr, 0, size); - return addr; -} -#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) - -static inline void *_kc_vzalloc(unsigned long size) -{ - void *addr = vmalloc(size); - if (addr) - memset(addr, 0, size); - return addr; -} -#define vzalloc(_size) _kc_vzalloc(_size) - -#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \ - (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0))) -static inline __be16 vlan_get_protocol(const struct sk_buff *skb) -{ - if (vlan_tx_tag_present(skb) || - skb->protocol != cpu_to_be16(ETH_P_8021Q)) - return skb->protocol; - - if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) - return 0; - - return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto; -} -#endif /* !RHEL5.7+ || RHEL6.0 */ - -#ifdef HAVE_HW_TIME_STAMP -#define SKBTX_HW_TSTAMP (1 << 0) -#define SKBTX_IN_PROGRESS (1 << 2) -#define SKB_SHARED_TX_IS_UNION -#endif - -#ifndef device_wakeup_enable -#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) -#endif - -#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) ) -#ifndef HAVE_VLAN_RX_REGISTER -#define HAVE_VLAN_RX_REGISTER -#endif -#endif /* > 2.4.18 */ -#endif /* < 2.6.37 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) 
-#define skb_checksum_start_offset(skb) skb_transport_offset(skb) -#else /* 2.6.22 -> 2.6.37 */ -static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) -{ - return skb->csum_start - skb_headroom(skb); -} -#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) -#endif /* 2.6.22 -> 2.6.37 */ -#if IS_ENABLED(CONFIG_DCB) -#ifndef IEEE_8021QAZ_MAX_TCS -#define IEEE_8021QAZ_MAX_TCS 8 -#endif -#ifndef DCB_CAP_DCBX_HOST -#define DCB_CAP_DCBX_HOST 0x01 -#endif -#ifndef DCB_CAP_DCBX_LLD_MANAGED -#define DCB_CAP_DCBX_LLD_MANAGED 0x02 -#endif -#ifndef DCB_CAP_DCBX_VER_CEE -#define DCB_CAP_DCBX_VER_CEE 0x04 -#endif -#ifndef DCB_CAP_DCBX_VER_IEEE -#define DCB_CAP_DCBX_VER_IEEE 0x08 -#endif -#ifndef DCB_CAP_DCBX_STATIC -#define DCB_CAP_DCBX_STATIC 0x10 -#endif -#endif /* CONFIG_DCB */ -#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) -#define CONFIG_XPS -#endif /* RHEL_RELEASE_VERSION(6,2) */ -#endif /* < 2.6.38 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) -#ifndef TC_BITMASK -#define TC_BITMASK 15 -#endif -#ifndef NETIF_F_RXCSUM -#define NETIF_F_RXCSUM (1 << 29) -#endif -#ifndef skb_queue_reverse_walk_safe -#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ - for (skb = (queue)->prev, tmp = skb->prev; \ - skb != (struct sk_buff *)(queue); \ - skb = tmp, tmp = skb->prev) -#endif -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) -#ifndef FCOE_MTU -#define FCOE_MTU 2158 -#endif -#endif -#if IS_ENABLED(CONFIG_DCB) -#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE -#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 -#endif -#endif -#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) -#define kstrtoul(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) -#define kstrtouint(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) -#define kstrtou32(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) -#endif /* !(RHEL_RELEASE_CODE >= 
RHEL_RELEASE_VERSION(6,4)) */ -#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) -extern u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); -#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q)) -extern u8 _kc_netdev_get_num_tc(struct net_device *dev); -#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev) -extern int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc); -#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc)) -#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0) -#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0) -extern u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up); -#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up) -#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0) -#else /* RHEL6.1 or greater */ -#ifndef HAVE_MQPRIO -#define HAVE_MQPRIO -#endif /* HAVE_MQPRIO */ -#if IS_ENABLED(CONFIG_DCB) -#ifndef HAVE_DCBNL_IEEE -#define HAVE_DCBNL_IEEE -#ifndef IEEE_8021QAZ_TSA_STRICT -#define IEEE_8021QAZ_TSA_STRICT 0 -#endif -#ifndef IEEE_8021QAZ_TSA_ETS -#define IEEE_8021QAZ_TSA_ETS 2 -#endif -#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE -#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 -#endif -#endif -#endif /* CONFIG_DCB */ -#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ - -#ifndef udp_csum -#define udp_csum __kc_udp_csum -static inline __wsum __kc_udp_csum(struct sk_buff *skb) -{ - __wsum csum = csum_partial(skb_transport_header(skb), - sizeof(struct udphdr), skb->csum); - - for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { - csum = csum_add(csum, skb->csum); - } - return csum; -} -#endif /* udp_csum */ -#else /* < 2.6.39 */ -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) -#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET -#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET -#endif -#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ -#ifndef HAVE_MQPRIO -#define HAVE_MQPRIO -#endif -#ifndef HAVE_SETUP_TC 
-#define HAVE_SETUP_TC -#endif -#ifdef CONFIG_DCB -#ifndef HAVE_DCBNL_IEEE -#define HAVE_DCBNL_IEEE -#endif -#endif /* CONFIG_DCB */ -#ifndef HAVE_NDO_SET_FEATURES -#define HAVE_NDO_SET_FEATURES -#endif -#define HAVE_IRQ_AFFINITY_NOTIFY -#endif /* < 2.6.39 */ - -/*****************************************************************************/ -/* use < 2.6.40 because of a Fedora 15 kernel update where they - * updated the kernel version to 2.6.40.x and they back-ported 3.0 features - * like set_phys_id for ethtool. - */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) -#ifdef ETHTOOL_GRXRINGS -#ifndef FLOW_EXT -#define FLOW_EXT 0x80000000 -union _kc_ethtool_flow_union { - struct ethtool_tcpip4_spec tcp_ip4_spec; - struct ethtool_usrip4_spec usr_ip4_spec; - __u8 hdata[60]; -}; -struct _kc_ethtool_flow_ext { - __be16 vlan_etype; - __be16 vlan_tci; - __be32 data[2]; -}; -struct _kc_ethtool_rx_flow_spec { - __u32 flow_type; - union _kc_ethtool_flow_union h_u; - struct _kc_ethtool_flow_ext h_ext; - union _kc_ethtool_flow_union m_u; - struct _kc_ethtool_flow_ext m_ext; - __u64 ring_cookie; - __u32 location; -}; -#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec -#endif /* FLOW_EXT */ -#endif - -#define pci_disable_link_state_locked pci_disable_link_state - -#ifndef PCI_LTR_VALUE_MASK -#define PCI_LTR_VALUE_MASK 0x000003ff -#endif -#ifndef PCI_LTR_SCALE_MASK -#define PCI_LTR_SCALE_MASK 0x00001c00 -#endif -#ifndef PCI_LTR_SCALE_SHIFT -#define PCI_LTR_SCALE_SHIFT 10 -#endif - -#else /* < 2.6.40 */ -#define HAVE_ETHTOOL_SET_PHYS_ID -#endif /* < 2.6.40 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) -#define USE_LEGACY_PM_SUPPORT -#ifndef kfree_rcu -#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr) -#endif /* kfree_rcu */ -#ifndef kstrtol_from_user -#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) -static inline int _kc_kstrtol_from_user(const char __user *s, 
size_t count, - unsigned int base, long *res) -{ - /* sign, base 2 representation, newline, terminator */ - char buf[1 + sizeof(long) * 8 + 1 + 1]; - - count = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, s, count)) - return -EFAULT; - buf[count] = '\0'; - return strict_strtol(buf, base, res); -} -#endif - -/* 20000base_blah_full Supported and Advertised Registers */ -#define SUPPORTED_20000baseMLD2_Full (1 << 21) -#define SUPPORTED_20000baseKR2_Full (1 << 22) -#define ADVERTISED_20000baseMLD2_Full (1 << 21) -#define ADVERTISED_20000baseKR2_Full (1 << 22) -#endif /* < 3.0.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) -#ifndef __netdev_alloc_skb_ip_align -#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l) -#endif /* __netdev_alloc_skb_ip_align */ -#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) -#define dcb_ieee_delapp(dev, app) 0 -#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) - -/* 1000BASE-T Control register */ -#define CTL1000_AS_MASTER 0x0800 -#define CTL1000_ENABLE_MASTER 0x1000 - -/* kernels less than 3.0.0 don't have this */ -#ifndef ETH_P_8021AD -#define ETH_P_8021AD 0x88A8 -#endif - -/* Stub definition for !CONFIG_OF is introduced later */ -#ifdef CONFIG_OF -static inline struct device_node * -pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev) -{ -#ifdef HAVE_STRUCT_DEVICE_OF_NODE - return pdev ? 
pdev->dev.of_node : NULL; -#else - return NULL; -#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */ -} -#endif /* CONFIG_OF */ -#else /* < 3.1.0 */ -#ifndef HAVE_DCBNL_IEEE_DELAPP -#define HAVE_DCBNL_IEEE_DELAPP -#endif -#endif /* < 3.1.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) -#ifndef dma_zalloc_coherent -#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) -static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) -{ - void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); - if (ret) - memset(ret, 0, size); - return ret; -} -#endif -#ifdef ETHTOOL_GRXRINGS -#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS -#endif /* ETHTOOL_GRXRINGS */ - -#ifndef skb_frag_size -#define skb_frag_size(frag) _kc_skb_frag_size(frag) -static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) -{ - return frag->size; -} -#endif /* skb_frag_size */ - -#ifndef skb_frag_size_sub -#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) -static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) -{ - frag->size -= delta; -} -#endif /* skb_frag_size_sub */ - -#ifndef skb_frag_page -#define skb_frag_page(frag) _kc_skb_frag_page(frag) -static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) -{ - return frag->page; -} -#endif /* skb_frag_page */ - -#ifndef skb_frag_address -#define skb_frag_address(frag) _kc_skb_frag_address(frag) -static inline void *_kc_skb_frag_address(const skb_frag_t *frag) -{ - return page_address(skb_frag_page(frag)) + frag->page_offset; -} -#endif /* skb_frag_address */ - -#ifndef skb_frag_dma_map -#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) -#include -#endif -#define skb_frag_dma_map(dev,frag,offset,size,dir) \ - _kc_skb_frag_dma_map(dev,frag,offset,size,dir) -static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, - const 
skb_frag_t *frag, - size_t offset, size_t size, - enum dma_data_direction dir) -{ - return dma_map_page(dev, skb_frag_page(frag), - frag->page_offset + offset, size, dir); -} -#endif /* skb_frag_dma_map */ - -#ifndef __skb_frag_unref -#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) -static inline void __kc_skb_frag_unref(skb_frag_t *frag) -{ - put_page(skb_frag_page(frag)); -} -#endif /* __skb_frag_unref */ - -#ifndef SPEED_UNKNOWN -#define SPEED_UNKNOWN -1 -#endif -#ifndef DUPLEX_UNKNOWN -#define DUPLEX_UNKNOWN 0xff -#endif -#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\ - (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) -#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED -#define HAVE_PCI_DEV_FLAGS_ASSIGNED -#endif -#endif -#else /* < 3.2.0 */ -#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED -#define HAVE_PCI_DEV_FLAGS_ASSIGNED -#define HAVE_VF_SPOOFCHK_CONFIGURE -#endif -#ifndef HAVE_SKB_L4_RXHASH -#define HAVE_SKB_L4_RXHASH -#endif -#define HAVE_IOMMU_PRESENT -#define HAVE_PM_QOS_REQUEST_LIST_NEW -#endif /* < 3.2.0 */ - -#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2)) -#undef ixgbe_get_netdev_tc_txq -#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) -#endif -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) -/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than - * alloc_workqueue() to avoid compiler warning from -Wvarargs - */ -static inline struct workqueue_struct * __attribute__ ((format(printf, 3, 4))) -_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, - const char *fmt, ...) 
-{ - struct workqueue_struct *wq; - va_list args, temp; - unsigned int len; - char *p; - - va_start(args, fmt); - va_copy(temp, args); - len = vsnprintf(NULL, 0, fmt, temp); - va_end(temp); - - p = kmalloc(len + 1, GFP_KERNEL); - if (!p) { - va_end(args); - return NULL; - } - - vsnprintf(p, len + 1, fmt, args); - va_end(args); -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) - wq = create_workqueue(p); -#else - wq = alloc_workqueue(p, flags, max_active); -#endif - kfree(p); - - return wq; -} -#ifdef alloc_workqueue -#undef alloc_workqueue -#endif -#define alloc_workqueue(fmt, flags, max_active, args...) \ - _kc_alloc_workqueue(flags, max_active, fmt, ##args) - -#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) -typedef u32 netdev_features_t; -#endif -#undef PCI_EXP_TYPE_RC_EC -#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ -#ifndef CONFIG_BQL -#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) -#define netdev_completed_queue(_n, _p, _b) do {} while (0) -#define netdev_tx_sent_queue(_q, _b) do {} while (0) -#define netdev_sent_queue(_n, _b) do {} while (0) -#define netdev_tx_reset_queue(_q) do {} while (0) -#define netdev_reset_queue(_n) do {} while (0) -#endif -#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) -#define HAVE_ETHTOOL_GRXFHINDIR_SIZE -#endif /* SLE_VERSION(11,3,0) */ -#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) -#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) -static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, - u8 *nexthdrp, - __be16 __always_unused *frag_offp) -{ - return ipv6_skip_exthdr(skb, start, nexthdrp); -} -#undef ipv6_skip_exthdr -#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) -#endif /* !SLES11sp4 or greater */ - -#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ - !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) -static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) -{ - 
return index % n_rx_rings; -} -#endif - -#else /* ! < 3.3.0 */ -#define HAVE_ETHTOOL_GRXFHINDIR_SIZE -#define HAVE_INT_NDO_VLAN_RX_ADD_VID -#ifdef ETHTOOL_SRXNTUPLE -#undef ETHTOOL_SRXNTUPLE -#endif -#endif /* < 3.3.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) -#ifndef NETIF_F_RXFCS -#define NETIF_F_RXFCS 0 -#endif /* NETIF_F_RXFCS */ -#ifndef NETIF_F_RXALL -#define NETIF_F_RXALL 0 -#endif /* NETIF_F_RXALL */ - -#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) -#define NUMTCS_RETURNS_U8 - -int _kc_simple_open(struct inode *inode, struct file *file); -#define simple_open _kc_simple_open -#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ - -#ifndef skb_add_rx_frag -#define skb_add_rx_frag _kc_skb_add_rx_frag -extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, - int, int, unsigned int); -#endif -#ifdef NET_ADDR_RANDOM -#define eth_hw_addr_random(N) do { \ - eth_random_addr(N->dev_addr); \ - N->addr_assign_type |= NET_ADDR_RANDOM; \ - } while (0) -#else /* NET_ADDR_RANDOM */ -#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) -#endif /* NET_ADDR_RANDOM */ - -#ifndef for_each_set_bit_from -#define for_each_set_bit_from(bit, addr, size) \ - for ((bit) = find_next_bit((addr), (size), (bit)); \ - (bit) < (size); \ - (bit) = find_next_bit((addr), (size), (bit) + 1)) -#endif /* for_each_set_bit_from */ - -#else /* < 3.4.0 */ -#include -#endif /* >= 3.4.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \ - ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) -#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK) -#define HAVE_PTP_1588_CLOCK -#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ -#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ - 
-/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) - -#ifndef BITS_PER_LONG_LONG -#define BITS_PER_LONG_LONG 64 -#endif - -#ifndef ether_addr_equal -static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) -{ - return !compare_ether_addr(addr1, addr2); -} -#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2)) -#endif - -/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */ -#ifdef CONFIG_OF_NET -static inline int of_get_phy_mode(struct device_node __always_unused *np) -{ - return -ENODEV; -} - -static inline const void * -of_get_mac_address(struct device_node __always_unused *np) -{ - return NULL; -} -#endif -#else -#include -#define HAVE_FDB_OPS -#define HAVE_ETHTOOL_GET_TS_INFO -#endif /* < 3.5.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) ) -#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ - -#ifndef MDIO_EEE_100TX -#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ -#endif -#ifndef MDIO_EEE_1000T -#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ -#endif -#ifndef MDIO_EEE_10GT -#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ -#endif -#ifndef MDIO_EEE_1000KX -#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ -#endif -#ifndef MDIO_EEE_10GKX4 -#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ -#endif -#ifndef MDIO_EEE_10GKR -#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ -#endif - -#ifndef __GFP_MEMALLOC -#define __GFP_MEMALLOC 0 -#endif - -#ifndef eth_broadcast_addr -#define eth_broadcast_addr _kc_eth_broadcast_addr -static inline void _kc_eth_broadcast_addr(u8 *addr) -{ - memset(addr, 0xff, ETH_ALEN); -} -#endif - -#ifndef eth_random_addr -#define eth_random_addr _kc_eth_random_addr -static inline void _kc_eth_random_addr(u8 *addr) -{ - get_random_bytes(addr, ETH_ALEN); - addr[0] &= 0xfe; /* clear multicast */ - addr[0] |= 0x02; 
/* set local assignment */ -} -#endif /* eth_random_addr */ - -#ifndef DMA_ATTR_SKIP_CPU_SYNC -#define DMA_ATTR_SKIP_CPU_SYNC 0 -#endif -#else /* < 3.6.0 */ -#define HAVE_STRUCT_PAGE_PFMEMALLOC -#endif /* < 3.6.0 */ - -/******************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) -#ifndef ADVERTISED_40000baseKR4_Full -/* these defines were all added in one commit, so should be safe - * to trigger activiation on one define - */ -#define SUPPORTED_40000baseKR4_Full (1 << 23) -#define SUPPORTED_40000baseCR4_Full (1 << 24) -#define SUPPORTED_40000baseSR4_Full (1 << 25) -#define SUPPORTED_40000baseLR4_Full (1 << 26) -#define ADVERTISED_40000baseKR4_Full (1 << 23) -#define ADVERTISED_40000baseCR4_Full (1 << 24) -#define ADVERTISED_40000baseSR4_Full (1 << 25) -#define ADVERTISED_40000baseLR4_Full (1 << 26) -#endif - -#ifndef mmd_eee_cap_to_ethtool_sup_t -/** - * mmd_eee_cap_to_ethtool_sup_t - * @eee_cap: value of the MMD EEE Capability register - * - * A small helper function that translates MMD EEE Capability (3.20) bits - * to ethtool supported settings. 
- */ -static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap) -{ - u32 supported = 0; - - if (eee_cap & MDIO_EEE_100TX) - supported |= SUPPORTED_100baseT_Full; - if (eee_cap & MDIO_EEE_1000T) - supported |= SUPPORTED_1000baseT_Full; - if (eee_cap & MDIO_EEE_10GT) - supported |= SUPPORTED_10000baseT_Full; - if (eee_cap & MDIO_EEE_1000KX) - supported |= SUPPORTED_1000baseKX_Full; - if (eee_cap & MDIO_EEE_10GKX4) - supported |= SUPPORTED_10000baseKX4_Full; - if (eee_cap & MDIO_EEE_10GKR) - supported |= SUPPORTED_10000baseKR_Full; - - return supported; -} -#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \ - __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap) -#endif /* mmd_eee_cap_to_ethtool_sup_t */ - -#ifndef mmd_eee_adv_to_ethtool_adv_t -/** - * mmd_eee_adv_to_ethtool_adv_t - * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers - * - * A small helper function that translates the MMD EEE Advertisement (7.60) - * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement - * settings. 
- */ -static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) -{ - u32 adv = 0; - - if (eee_adv & MDIO_EEE_100TX) - adv |= ADVERTISED_100baseT_Full; - if (eee_adv & MDIO_EEE_1000T) - adv |= ADVERTISED_1000baseT_Full; - if (eee_adv & MDIO_EEE_10GT) - adv |= ADVERTISED_10000baseT_Full; - if (eee_adv & MDIO_EEE_1000KX) - adv |= ADVERTISED_1000baseKX_Full; - if (eee_adv & MDIO_EEE_10GKX4) - adv |= ADVERTISED_10000baseKX4_Full; - if (eee_adv & MDIO_EEE_10GKR) - adv |= ADVERTISED_10000baseKR_Full; - - return adv; -} - -#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ - __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) -#endif /* mmd_eee_adv_to_ethtool_adv_t */ - -#ifndef ethtool_adv_to_mmd_eee_adv_t -/** - * ethtool_adv_to_mmd_eee_adv_t - * @adv: the ethtool advertisement settings - * - * A small helper function that translates ethtool advertisement settings - * to EEE advertisements for the MMD EEE Advertisement (7.60) and - * MMD EEE Link Partner Ability (7.61) registers. - */ -static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) -{ - u16 reg = 0; - - if (adv & ADVERTISED_100baseT_Full) - reg |= MDIO_EEE_100TX; - if (adv & ADVERTISED_1000baseT_Full) - reg |= MDIO_EEE_1000T; - if (adv & ADVERTISED_10000baseT_Full) - reg |= MDIO_EEE_10GT; - if (adv & ADVERTISED_1000baseKX_Full) - reg |= MDIO_EEE_1000KX; - if (adv & ADVERTISED_10000baseKX4_Full) - reg |= MDIO_EEE_10GKX4; - if (adv & ADVERTISED_10000baseKR_Full) - reg |= MDIO_EEE_10GKR; - - return reg; -} -#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv) -#endif /* ethtool_adv_to_mmd_eee_adv_t */ - -#ifndef pci_pcie_type -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) -static inline u8 pci_pcie_type(struct pci_dev *pdev) -{ - int pos; - u16 reg16; - - pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); - BUG_ON(!pos); - pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); - return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; -} -#else /* < 2.6.24 */ -#define pci_pcie_type(x) (x)->pcie_type 
-#endif /* < 2.6.24 */ -#endif /* pci_pcie_type */ - -#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \ - ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \ - ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) -#define ptp_clock_register(caps, args...) ptp_clock_register(caps) -#endif - -#ifndef pcie_capability_read_word -int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); -#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v) -#endif /* pcie_capability_read_word */ - -#ifndef pcie_capability_write_word -int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); -#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v) -#endif /* pcie_capability_write_word */ - -#ifndef pcie_capability_clear_and_set_word -int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, - u16 clear, u16 set); -#define pcie_capability_clear_and_set_word(d,p,c,s) \ - __kc_pcie_capability_clear_and_set_word(d,p,c,s) -#endif /* pcie_capability_clear_and_set_word */ - -#ifndef pcie_capability_clear_word -int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, - u16 clear); -#define pcie_capability_clear_word(d, p, c) \ - __kc_pcie_capability_clear_word(d, p, c) -#endif /* pcie_capability_clear_word */ - -#ifndef PCI_EXP_LNKSTA2 -#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ -#endif - -#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) -#define USE_CONST_DEV_UC_CHAR -#endif - -#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8)) -#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) -#endif /* !RHEL6.8+ */ - -#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) -#include -#else - -#define DEFINE_HASHTABLE(name, bits) \ - struct hlist_head name[1 << (bits)] = \ - { [0 ... 
((1 << (bits)) - 1)] = HLIST_HEAD_INIT } - -#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ - struct hlist_head name[1 << (bits)] __read_mostly = \ - { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } - -#define DECLARE_HASHTABLE(name, bits) \ - struct hlist_head name[1 << (bits)] - -#define HASH_SIZE(name) (ARRAY_SIZE(name)) -#define HASH_BITS(name) ilog2(HASH_SIZE(name)) - -/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ -#define hash_min(val, bits) \ - (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) - -static inline void __hash_init(struct hlist_head *ht, unsigned int sz) -{ - unsigned int i; - - for (i = 0; i < sz; i++) - INIT_HLIST_HEAD(&ht[i]); -} - -#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) - -#define hash_add(hashtable, node, key) \ - hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) - -static inline bool hash_hashed(struct hlist_node *node) -{ - return !hlist_unhashed(node); -} - -static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) -{ - unsigned int i; - - for (i = 0; i < sz; i++) - if (!hlist_empty(&ht[i])) - return false; - - return true; -} - -#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) - -static inline void hash_del(struct hlist_node *node) -{ - hlist_del_init(node); -} -#endif /* RHEL >= 6.6 */ - -#else /* >= 3.7.0 */ -#include -#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS -#define USE_CONST_DEV_UC_CHAR -#endif /* >= 3.7.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) -#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \ - !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) -#ifndef pci_sriov_set_totalvfs -static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs) -{ - return 0; -} -#define pci_sriov_set_totalvfs(a, b) 
__kc_pci_sriov_set_totalvfs((a), (b)) -#endif -#endif /* !(RHEL_RELEASE_CODE >= 6.5 && SLE_VERSION_CODE >= 11.4) */ -#ifndef PCI_EXP_LNKCTL_ASPM_L0S -#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */ -#endif -#ifndef PCI_EXP_LNKCTL_ASPM_L1 -#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */ -#endif -#define HAVE_CONFIG_HOTPLUG -/* Reserved Ethernet Addresses per IEEE 802.1Q */ -static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { - 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; - -#ifndef is_link_local_ether_addr -static inline bool __kc_is_link_local_ether_addr(const u8 *addr) -{ - __be16 *a = (__be16 *)addr; - static const __be16 *b = (const __be16 *)eth_reserved_addr_base; - static const __be16 m = cpu_to_be16(0xfff0); - - return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; -} -#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr) -#endif /* is_link_local_ether_addr */ -int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, - int target, unsigned short *fragoff, int *flags); -#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e)) - -#ifndef FLOW_MAC_EXT -#define FLOW_MAC_EXT 0x40000000 -#endif /* FLOW_MAC_EXT */ - -#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) -#define HAVE_SRIOV_CONFIGURE -#endif - -#else /* >= 3.8.0 */ -#ifndef __devinit -#define __devinit -#endif - -#ifndef __devinitdata -#define __devinitdata -#endif - -#ifndef __devinitconst -#define __devinitconst -#endif - -#ifndef __devexit -#define __devexit -#endif - -#ifndef __devexit_p -#define __devexit_p -#endif - -#ifndef HAVE_ENCAP_CSUM_OFFLOAD -#define HAVE_ENCAP_CSUM_OFFLOAD -#endif - -#ifndef HAVE_GRE_ENCAP_OFFLOAD -#define HAVE_GRE_ENCAP_OFFLOAD -#endif - -#ifndef HAVE_SRIOV_CONFIGURE -#define HAVE_SRIOV_CONFIGURE -#endif - -#define HAVE_BRIDGE_ATTRIBS -#ifndef BRIDGE_MODE_VEB -#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ -#endif /* BRIDGE_MODE_VEB */ -#ifndef BRIDGE_MODE_VEPA 
-#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ -#endif /* BRIDGE_MODE_VEPA */ -#endif /* >= 3.8.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) - -#undef BUILD_BUG_ON -#ifdef __CHECKER__ -#define BUILD_BUG_ON(condition) (0) -#else /* __CHECKER__ */ -#ifndef __compiletime_warning -#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) -#define __compiletime_warning(message) __attribute__((warning(message))) -#else /* __GNUC__ */ -#define __compiletime_warning(message) -#endif /* __GNUC__ */ -#endif /* __compiletime_warning */ -#ifndef __compiletime_error -#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) -#define __compiletime_error(message) __attribute__((error(message))) -#define __compiletime_error_fallback(condition) do { } while (0) -#else /* __GNUC__ */ -#define __compiletime_error(message) -#define __compiletime_error_fallback(condition) \ - do { ((void)sizeof(char[1 - 2 * condition])); } while (0) -#endif /* __GNUC__ */ -#else /* __compiletime_error */ -#define __compiletime_error_fallback(condition) do { } while (0) -#endif /* __compiletime_error */ -#define __compiletime_assert(condition, msg, prefix, suffix) \ - do { \ - bool __cond = !(condition); \ - extern void prefix ## suffix(void) __compiletime_error(msg); \ - if (__cond) \ - prefix ## suffix(); \ - __compiletime_error_fallback(__cond); \ - } while (0) - -#define _compiletime_assert(condition, msg, prefix, suffix) \ - __compiletime_assert(condition, msg, prefix, suffix) -#define compiletime_assert(condition, msg) \ - _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) -#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) -#ifndef __OPTIMIZE__ -#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) -#else /* __OPTIMIZE__ */ -#define BUILD_BUG_ON(condition) \ - BUILD_BUG_ON_MSG(condition, 
"BUILD_BUG_ON failed: " #condition) -#endif /* __OPTIMIZE__ */ -#endif /* __CHECKER__ */ - -#undef hlist_entry -#define hlist_entry(ptr, type, member) container_of(ptr,type,member) - -#undef hlist_entry_safe -#define hlist_entry_safe(ptr, type, member) \ - ({ typeof(ptr) ____ptr = (ptr); \ - ____ptr ? hlist_entry(____ptr, type, member) : NULL; \ - }) - -#undef hlist_for_each_entry -#define hlist_for_each_entry(pos, head, member) \ - for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ - pos; \ - pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) - -#undef hlist_for_each_entry_safe -#define hlist_for_each_entry_safe(pos, n, head, member) \ - for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ - pos && ({ n = pos->member.next; 1; }); \ - pos = hlist_entry_safe(n, typeof(*pos), member)) - -#undef hlist_for_each_entry_continue -#define hlist_for_each_entry_continue(pos, member) \ - for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ - pos; \ - pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) - -#undef hlist_for_each_entry_from -#define hlist_for_each_entry_from(pos, member) \ - for (; pos; \ - pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) - -#undef hash_for_each -#define hash_for_each(name, bkt, obj, member) \ - for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ - (bkt)++)\ - hlist_for_each_entry(obj, &name[bkt], member) - -#undef hash_for_each_safe -#define hash_for_each_safe(name, bkt, tmp, obj, member) \ - for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ - (bkt)++)\ - hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) - -#undef hash_for_each_possible -#define hash_for_each_possible(name, obj, member, key) \ - hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) - -#undef hash_for_each_possible_safe -#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ - 
hlist_for_each_entry_safe(obj, tmp,\ - &name[hash_min(key, HASH_BITS(name))], member) - -#ifdef CONFIG_XPS -extern int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16); -#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) -#else /* CONFIG_XPS */ -#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) -#endif /* CONFIG_XPS */ - -#ifdef HAVE_NETDEV_SELECT_QUEUE -#define _kc_hashrnd 0xd631614b /* not so random hash salt */ -extern u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); -#define __netdev_pick_tx __kc_netdev_pick_tx -#endif /* HAVE_NETDEV_SELECT_QUEUE */ -#else -#define HAVE_BRIDGE_FILTER -#define HAVE_FDB_DEL_NLATTR -#endif /* < 3.9.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) -#ifndef NAPI_POLL_WEIGHT -#define NAPI_POLL_WEIGHT 64 -#endif -#ifdef CONFIG_PCI_IOV -extern int __kc_pci_vfs_assigned(struct pci_dev *dev); -#else -static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) -{ - return 0; -} -#endif -#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) - -#ifndef list_first_entry_or_null -#define list_first_entry_or_null(ptr, type, member) \ - (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) -#endif - -#ifndef VLAN_TX_COOKIE_MAGIC -static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, - u16 vlan_tci) -{ -#ifdef VLAN_TAG_PRESENT - vlan_tci |= VLAN_TAG_PRESENT; -#endif - skb->vlan_tci = vlan_tci; - return skb; -} -#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ - __kc__vlan_hwaccel_put_tag(skb, vlan_tci) -#endif - -#ifdef HAVE_FDB_OPS -#ifdef USE_CONST_DEV_UC_CHAR -extern int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, u16 flags); -#ifdef HAVE_FDB_DEL_NLATTR -extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr); -#else -extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, - const unsigned char *addr); -#endif -#else -extern int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, - unsigned char *addr, u16 flags); -extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, - unsigned char *addr); -#endif -#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add -#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del -#endif /* HAVE_FDB_OPS */ - -#ifndef PCI_DEVID -#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) -#endif - -/* The definitions for these functions when CONFIG_OF_NET is defined are - * pulled in from . For kernels older than 3.5 we already have - * backports for when CONFIG_OF_NET is true. These are separated and - * duplicated in order to cover all cases so that all kernels get either the - * real definitions (when CONFIG_OF_NET is defined) or the stub definitions - * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real - * definitions). 
- */ -#ifndef CONFIG_OF_NET -static inline int of_get_phy_mode(struct device_node __always_unused *np) -{ - return -ENODEV; -} - -static inline const void * -of_get_mac_address(struct device_node __always_unused *np) -{ - return NULL; -} -#endif - -#else /* >= 3.10.0 */ -#define HAVE_ENCAP_TSO_OFFLOAD -#define USE_DEFAULT_FDB_DEL_DUMP -#define HAVE_SKB_INNER_NETWORK_HEADER -#if (RHEL_RELEASE_CODE && \ - (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ - (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0))) -#define HAVE_RHEL7_PCI_DRIVER_RH -#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) -#define HAVE_RHEL7_PCI_RESET_NOTIFY -#endif /* RHEL >= 7.2 */ -#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) -#define HAVE_RHEL7_NET_DEVICE_OPS_EXT -#define HAVE_GENEVE_RX_OFFLOAD -#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) -#define HAVE_UDP_ENC_TUNNEL -#endif -#ifdef ETHTOOL_GLINKSETTINGS -#define HAVE_ETHTOOL_25G_BITS -#endif /* ETHTOOL_GLINKSETTINGS */ -#endif /* RHEL >= 7.3 */ - -/* new hooks added to net_device_ops_extended in RHEL7.4 */ -#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) -#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN -#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL -#define HAVE_UDP_ENC_RX_OFFLOAD -#endif /* RHEL >= 7.4 */ - -#endif /* RHEL >= 7.0 && RHEL < 8.0 */ -#endif /* >= 3.10.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) ) -#define netdev_notifier_info_to_dev(ptr) ptr -#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\ - (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) -#define HAVE_NDO_SET_VF_LINK_STATE -#endif -#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) -#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK -#endif -#else /* >= 3.11.0 */ -#define HAVE_NDO_SET_VF_LINK_STATE -#define HAVE_SKB_INNER_PROTOCOL -#define HAVE_MPLS_FEATURES -#endif /* >= 3.11.0 */ - 
-/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) -extern int __kc_pcie_get_minimum_link(struct pci_dev *dev, - enum pci_bus_speed *speed, - enum pcie_link_width *width); -#ifndef pcie_get_minimum_link -#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w) -#endif -#else /* >= 3.12.0 */ -#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) -#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK -#endif -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) -#define HAVE_VXLAN_RX_OFFLOAD -#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) -#define HAVE_UDP_ENC_TUNNEL -#endif -#endif /* < 4.8.0 */ -#define HAVE_NDO_GET_PHYS_PORT_ID -#endif /* >= 3.12.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) -#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m) -extern int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); -#ifndef u64_stats_init -#define u64_stats_init(a) do { } while(0) -#endif -#ifndef BIT_ULL -#define BIT_ULL(n) (1ULL << (n)) -#endif - -#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0)) -#undef HAVE_STRUCT_PAGE_PFMEMALLOC -#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT -#endif -#ifndef list_next_entry -#define list_next_entry(pos, member) \ - list_entry((pos)->member.next, typeof(*(pos)), member) -#endif -#ifndef list_prev_entry -#define list_prev_entry(pos, member) \ - list_entry((pos)->member.prev, typeof(*(pos)), member) -#endif - -#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20) ) -#define devm_kcalloc(dev, cnt, size, flags) \ - devm_kzalloc(dev, cnt * size, flags) -#endif /* > 2.6.20 */ - -#else /* >= 3.13.0 */ -#define HAVE_VXLAN_CHECKS -#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24)) -#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK -#else -#define 
HAVE_NDO_SELECT_QUEUE_ACCEL -#endif -#define HAVE_NET_GET_RANDOM_ONCE -#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS -#endif - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) - -#ifndef U16_MAX -#define U16_MAX ((u16)~0U) -#endif - -#ifndef U32_MAX -#define U32_MAX ((u32)~0U) -#endif - -#define dev_consume_skb_any(x) dev_kfree_skb_any(x) - -#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ - !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) - -/* it isn't expected that this would be a #define unless we made it so */ -#ifndef skb_set_hash - -#define PKT_HASH_TYPE_NONE 0 -#define PKT_HASH_TYPE_L2 1 -#define PKT_HASH_TYPE_L3 2 -#define PKT_HASH_TYPE_L4 3 - -enum _kc_pkt_hash_types { - _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE, - _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2, - _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3, - _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4, -}; -#define pkt_hash_types _kc_pkt_hash_types - -#define skb_set_hash __kc_skb_set_hash -static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, - u32 __maybe_unused hash, - int __maybe_unused type) -{ -#ifdef HAVE_SKB_L4_RXHASH - skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); -#endif -#ifdef NETIF_F_RXHASH - skb->rxhash = hash; -#endif -} -#endif /* !skb_set_hash */ - -#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ - -#ifndef HAVE_VXLAN_RX_OFFLOAD -#define HAVE_VXLAN_RX_OFFLOAD -#endif /* HAVE_VXLAN_RX_OFFLOAD */ - -#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) -#define HAVE_UDP_ENC_TUNNEL -#endif - -#ifndef HAVE_VXLAN_CHECKS -#define HAVE_VXLAN_CHECKS -#endif /* HAVE_VXLAN_CHECKS */ -#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ - -#ifndef pci_enable_msix_range -extern int __kc_pci_enable_msix_range(struct pci_dev *dev, - struct msix_entry *entries, - int minvec, int maxvec); -#define pci_enable_msix_range 
__kc_pci_enable_msix_range -#endif - -#ifndef ether_addr_copy -#define ether_addr_copy __kc_ether_addr_copy -static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) -{ -#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) - *(u32 *)dst = *(const u32 *)src; - *(u16 *)(dst + 4) = *(const u16 *)(src + 4); -#else - u16 *a = (u16 *)dst; - const u16 *b = (const u16 *)src; - - a[0] = b[0]; - a[1] = b[1]; - a[2] = b[2]; -#endif -} -#endif /* ether_addr_copy */ - -#else /* >= 3.14.0 */ - -/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */ -#ifndef HAVE_NDO_DFWD_OPS -#define HAVE_NDO_DFWD_OPS -#endif -#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK -#endif /* 3.14.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ) - -#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \ - !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30))) -#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh -#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh -#endif - -char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); -#define devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp) - -#else -#define HAVE_PTP_1588_CLOCK_PINS -#define HAVE_NETDEV_PORT -#endif /* 3.15.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) -#ifndef smp_mb__before_atomic -#define smp_mb__before_atomic() smp_mb() -#define smp_mb__after_atomic() smp_mb() -#endif -#ifndef __dev_uc_sync -#ifdef HAVE_SET_RX_MODE -#ifdef NETDEV_HW_ADDR_T_UNICAST -int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, - struct net_device *dev, - int (*sync)(struct net_device *, const unsigned char *), - int (*unsync)(struct net_device *, const unsigned char *)); -void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, - struct net_device *dev, - int (*unsync)(struct 
net_device *, const unsigned char *)); -#endif -#ifndef NETDEV_HW_ADDR_T_MULTICAST -int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, - struct net_device *dev, - int (*sync)(struct net_device *, const unsigned char *), - int (*unsync)(struct net_device *, const unsigned char *)); -void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, - struct net_device *dev, - int (*unsync)(struct net_device *, const unsigned char *)); -#endif -#endif /* HAVE_SET_RX_MODE */ - -static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev, - int __maybe_unused (*sync)(struct net_device *, const unsigned char *), - int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) -{ -#ifdef NETDEV_HW_ADDR_T_UNICAST - return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync); -#elif defined(HAVE_SET_RX_MODE) - return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, - dev, sync, unsync); -#else - return 0; -#endif -} -#define __dev_uc_sync __kc_dev_uc_sync - -static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev, - int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) -{ -#ifdef HAVE_SET_RX_MODE -#ifdef NETDEV_HW_ADDR_T_UNICAST - __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync); -#else /* NETDEV_HW_ADDR_T_MULTICAST */ - __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync); -#endif /* NETDEV_HW_ADDR_T_UNICAST */ -#endif /* HAVE_SET_RX_MODE */ -} -#define __dev_uc_unsync __kc_dev_uc_unsync - -static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev, - int __maybe_unused (*sync)(struct net_device *, const unsigned char *), - int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) -{ -#ifdef NETDEV_HW_ADDR_T_MULTICAST - return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync); -#elif defined(HAVE_SET_RX_MODE) - return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, - dev, sync, unsync); -#else - return 0; -#endif - -} -#define 
__dev_mc_sync __kc_dev_mc_sync - -static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev, - int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) -{ -#ifdef HAVE_SET_RX_MODE -#ifdef NETDEV_HW_ADDR_T_MULTICAST - __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync); -#else /* NETDEV_HW_ADDR_T_MULTICAST */ - __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync); -#endif /* NETDEV_HW_ADDR_T_MULTICAST */ -#endif /* HAVE_SET_RX_MODE */ -} -#define __dev_mc_unsync __kc_dev_mc_unsync -#endif /* __dev_uc_sync */ - -#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) -#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE -#endif - -#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM -/* if someone backports this, hopefully they backport as a #define. - * declare it as zero on older kernels so that if it get's or'd in - * it won't effect anything, therefore preventing core driver changes - */ -#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 -#define SKB_GSO_UDP_TUNNEL_CSUM 0 -#endif -extern void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, - unsigned int gfp); -#define devm_kmemdup __kc_devm_kmemdup - -#else -#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY -#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE -#endif /* 3.16.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) ) -#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ - RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ - !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) -#ifndef timespec64 -#define timespec64 timespec -static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) -{ - return ts; -} -static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) -{ - return ts64; -} -#define timespec64_equal timespec_equal -#define timespec64_compare timespec_compare -#define set_normalized_timespec64 set_normalized_timespec -#define 
timespec64_add_safe timespec_add_safe -#define timespec64_add timespec_add -#define timespec64_sub timespec_sub -#define timespec64_valid timespec_valid -#define timespec64_valid_strict timespec_valid_strict -#define timespec64_to_ns timespec_to_ns -#define ns_to_timespec64 ns_to_timespec -#define ktime_to_timespec64 ktime_to_timespec -#define timespec64_add_ns timespec_add_ns -#endif /* timespec64 */ -#endif /* !(RHEL6.8= RHEL_RELEASE_VERSION(7,4)) -#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a) -#endif - -#else -#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT -#include -#endif /* 3.17.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) -#ifndef NO_PTP_SUPPORT -#include -extern struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb); -extern void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, - struct skb_shared_hwtstamps *hwtstamps); -#define skb_clone_sk __kc_skb_clone_sk -#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp -#endif -extern unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len); -#define eth_get_headlen __kc_eth_get_headlen -#ifndef ETH_P_XDSA -#define ETH_P_XDSA 0x00F8 -#endif -/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */ -#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1)) -#define HAVE_SKBUFF_CSUM_LEVEL -#endif /* >= RH 7.1 */ - -#undef GENMASK -#define GENMASK(h, l) \ - (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) -#undef GENMASK_ULL -#define GENMASK_ULL(h, l) \ - (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) - -#else /* 3.18.0 */ -#define HAVE_SKBUFF_CSUM_LEVEL -#define HAVE_SKB_XMIT_MORE -#define HAVE_SKB_INNER_PROTOCOL_TYPE -#endif /* 3.18.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) ) -#else -#define HAVE_NDO_FEATURES_CHECK -#endif /* 3.18.4 */ 
- -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) -/* netdev_phys_port_id renamed to netdev_phys_item_id */ -#define netdev_phys_item_id netdev_phys_port_id - -static inline void _kc_napi_complete_done(struct napi_struct *napi, - int __always_unused work_done) { - napi_complete(napi); -} -#define napi_complete_done _kc_napi_complete_done - -#ifndef NETDEV_RSS_KEY_LEN -#define NETDEV_RSS_KEY_LEN (13 * 4) -#endif -#if ( !(RHEL_RELEASE_CODE && \ - (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && \ - (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) ) -#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) -#endif /* RHEL_RELEASE_CODE */ -extern void __kc_netdev_rss_key_fill(void *buffer, size_t len); -#define SPEED_20000 20000 -#define SPEED_40000 40000 -#ifndef dma_rmb -#define dma_rmb() rmb() -#endif -#ifndef dev_alloc_pages -#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order)) -#endif -#ifndef dev_alloc_page -#define dev_alloc_page() dev_alloc_pages(0) -#endif -#if !defined(eth_skb_pad) && !defined(skb_put_padto) -/** - * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size - * @skb: buffer to pad - * @len: minimal length - * - * Pads up a buffer to ensure the trailing bytes exist and are - * blanked. If the buffer already contains sufficient data it - * is untouched. Otherwise it is extended. Returns zero on - * success. The skb is freed on error. 
- */ -static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) -{ - unsigned int size = skb->len; - - if (unlikely(size < len)) { - len -= size; - if (skb_pad(skb, len)) - return -ENOMEM; - __skb_put(skb, len); - } - return 0; -} -#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) - -static inline int __kc_eth_skb_pad(struct sk_buff *skb) -{ - return __kc_skb_put_padto(skb, ETH_ZLEN); -} -#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) -#endif /* eth_skb_pad && skb_put_padto */ - -#ifndef SKB_ALLOC_NAPI -/* RHEL 7.2 backported napi_alloc_skb and friends */ -static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) -{ - return netdev_alloc_skb_ip_align(napi->dev, length); -} -#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len) -#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len) -#endif /* SKB_ALLOC_NAPI */ -#define HAVE_CONFIG_PM_RUNTIME -#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \ - (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) -#define HAVE_RXFH_HASHFUNC -#endif /* 6.7 < RHEL < 7.0 */ -#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) -#define HAVE_RXFH_HASHFUNC -#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS -#endif /* RHEL > 7.1 */ -#ifndef napi_schedule_irqoff -#define napi_schedule_irqoff napi_schedule -#endif -#ifndef READ_ONCE -#define READ_ONCE(_x) ACCESS_ONCE(_x) -#endif -#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) -#define HAVE_NDO_FDB_ADD_VID -#endif -#ifndef ETH_MODULE_SFF_8636 -#define ETH_MODULE_SFF_8636 0x3 -#endif -#ifndef ETH_MODULE_SFF_8636_LEN -#define ETH_MODULE_SFF_8636_LEN 256 -#endif -#ifndef ETH_MODULE_SFF_8436 -#define ETH_MODULE_SFF_8436 0x4 -#endif -#ifndef ETH_MODULE_SFF_8436_LEN -#define ETH_MODULE_SFF_8436_LEN 256 -#endif -#else /* 3.19.0 */ -#define HAVE_NDO_FDB_ADD_VID -#define HAVE_RXFH_HASHFUNC -#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS -#endif /* 
3.19.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) ) -/* vlan_tx_xx functions got renamed to skb_vlan */ -#ifndef skb_vlan_tag_get -#define skb_vlan_tag_get vlan_tx_tag_get -#endif -#ifndef skb_vlan_tag_present -#define skb_vlan_tag_present vlan_tx_tag_present -#endif -#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) -#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H -#endif -#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) -#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS -#endif -#else -#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H -#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS -#endif /* 3.20.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) ) -/* Definition for CONFIG_OF was introduced earlier */ -#if !defined(CONFIG_OF) && \ - !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) -static inline struct device_node * -pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; } -#endif /* !CONFIG_OF && RHEL < 7.3 */ -#endif /* < 4.0 */ - -/*****************************************************************************/ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) -#ifndef NO_PTP_SUPPORT -#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H -#include -#else -#include -#endif -static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta) -{ - tc->nsec += delta; -} - -static inline struct net_device * -of_find_net_device_by_node(struct device_node __always_unused *np) -{ - return NULL; -} - -#define timecounter_adjtime __kc_timecounter_adjtime -#endif -#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) || \ - (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))) -#define HAVE_NDO_SET_VF_RSS_QUERY_EN -#endif -#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) 
-#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS -#endif -#else /* >= 4,1,0 */ -#define HAVE_PTP_CLOCK_INFO_GETTIME64 -#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS -#define HAVE_PASSTHRU_FEATURES_CHECK -#define HAVE_NDO_SET_VF_RSS_QUERY_EN -#endif /* 4,1,0 */ - -/*****************************************************************************/ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9)) -#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ - !((SLE_VERSION_CODE == SLE_VERSION(11,3,0)) && \ - (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0,47,71))) && \ - !((SLE_VERSION_CODE == SLE_VERSION(11,4,0)) && \ - (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65,0,0))) && \ - !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) -static inline bool page_is_pfmemalloc(struct page __maybe_unused *page) -{ -#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC - return page->pfmemalloc; -#else - return false; -#endif -} -#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) & !SLES12sp1+ */ -#else -#undef HAVE_STRUCT_PAGE_PFMEMALLOC -#endif /* 4.1.9 */ - -/*****************************************************************************/ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) -#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) && \ - !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) -#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL -#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL -#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 -static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) -{ - return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; -}; - -static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) -{ - return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> - ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; -}; -#endif /* ! RHEL >= 7.2 && ! 
SLES >= 12.1 */ -#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) -#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT -#endif -#else -#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT -#endif /* 4.2.0 */ - -/*****************************************************************************/ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)) -#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) -#define HAVE_NDO_SET_VF_TRUST -#endif /* (RHEL_RELEASE >= 7.3) */ -#ifndef CONFIG_64BIT -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) -#include /* 32-bit readq/writeq */ -#else /* 3.3.0 => 4.3.x */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) -#include -#endif /* 2.6.26 => 3.3.0 */ -#ifndef readq -static inline __u64 readq(const volatile void __iomem *addr) -{ - const volatile u32 __iomem *p = addr; - u32 low, high; - - low = readl(p); - high = readl(p + 1); - - return low + ((u64)high << 32); -} -#define readq readq -#endif - -#ifndef writeq -static inline void writeq(__u64 val, volatile void __iomem *addr) -{ - writel(val, addr); - writel(val >> 32, addr + 4); -} -#define writeq writeq -#endif -#endif /* < 3.3.0 */ -#endif /* !CONFIG_64BIT */ -#else /* < 4.4.0 */ -#define HAVE_NDO_SET_VF_TRUST - -#ifndef CONFIG_64BIT -#include /* 32-bit readq/writeq */ -#endif /* !CONFIG_64BIT */ -#endif /* 4.4.0 */ - -/*****************************************************************************/ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)) -/* protect against a likely backport */ -#ifndef NETIF_F_CSUM_MASK -#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM -#endif /* NETIF_F_CSUM_MASK */ -#ifndef NETIF_F_SCTP_CRC -#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM -#endif /* NETIF_F_SCTP_CRC */ -#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) -#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address -extern int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, - u8 *mac_addr __maybe_unused); -#endif /* 
!(RHEL_RELEASE >= 7.3) */ -#else /* 4.5.0 */ -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) -#define HAVE_GENEVE_RX_OFFLOAD -#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) -#define HAVE_UDP_ENC_TUNNEL -#endif -#endif /* < 4.8.0 */ -#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD -#endif /* 4.5.0 */ - -/*****************************************************************************/ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) -#if !(UBUNTU_VERSION_CODE && \ - UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) && \ - !(RHEL_RELEASE_CODE && \ - (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ - !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) -static inline void napi_consume_skb(struct sk_buff *skb, - int __always_unused budget) -{ - dev_consume_skb_any(skb); -} - -#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */ -#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ - !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) -static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) -{ - * sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); -} -#endif - -#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) -static inline void page_ref_inc(struct page *page) -{ - get_page(page); -} -#else -#define HAVE_PAGE_COUNT_BULK_UPDATE -#endif - -#else /* 4.6.0 */ -#define HAVE_PAGE_COUNT_BULK_UPDATE -#endif /* 4.6.0 */ - -/*****************************************************************************/ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)) -#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) ||\ - (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) -#define HAVE_NETIF_TRANS_UPDATE -#endif -#else /* 4.7.0 */ -#define HAVE_NETIF_TRANS_UPDATE -#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE -#ifdef ETHTOOL_GLINKSETTINGS -#define HAVE_ETHTOOL_25G_BITS -#endif /* ETHTOOL_GLINKSETTINGS */ -#endif /* 4.7.0 */ - 
-/*****************************************************************************/ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) -#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) -enum udp_parsable_tunnel_type { - UDP_TUNNEL_TYPE_VXLAN, - UDP_TUNNEL_TYPE_GENEVE, -}; -struct udp_tunnel_info { - unsigned short type; - sa_family_t sa_family; - __be16 port; -}; -#endif - -#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) &&\ - !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) -static inline int -#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME -pci_request_io_regions(struct pci_dev *pdev, char *name) -#else -pci_request_io_regions(struct pci_dev *pdev, const char *name) -#endif -{ - return pci_request_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_IO), name); -} - -static inline void -pci_release_io_regions(struct pci_dev *pdev) -{ - return pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_IO)); -} - -static inline int -#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME -pci_request_mem_regions(struct pci_dev *pdev, char *name) -#else -pci_request_mem_regions(struct pci_dev *pdev, const char *name) -#endif -{ - return pci_request_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM), name); -} - -static inline void -pci_release_mem_regions(struct pci_dev *pdev) -{ - return pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); -} -#endif /* !SLE_VERSION(12,3,0) */ -#else -#define HAVE_UDP_ENC_RX_OFFLOAD -#endif /* 4.8.0 */ - -/*****************************************************************************/ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) -#else -#endif /* 4.9.0 */ - -/*****************************************************************************/ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) -#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) -#define HAVE_STRUCT_DMA_ATTRS -#define HAVE_NETDEVICE_MIN_MAX_MTU -#endif - -#if 
!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) -#ifndef dma_map_page_attrs -#define dma_map_page_attrs __kc_dma_map_page_attrs -static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev, - struct page *page, - size_t offset, size_t size, - enum dma_data_direction dir, - unsigned long __always_unused attrs) -{ - return dma_map_page(dev, page, offset, size, dir); -} -#endif - -#ifndef dma_unmap_page_attrs -#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs -static inline void __kc_dma_unmap_page_attrs(struct device *dev, - dma_addr_t addr, size_t size, - enum dma_data_direction dir, - unsigned long __always_unused attrs) -{ - dma_unmap_page(dev, addr, size, dir); -} -#endif - -static inline void __page_frag_cache_drain(struct page *page, - unsigned int count) -{ -#ifdef HAVE_PAGE_COUNT_BULK_UPDATE - if (!page_ref_sub_and_test(page, count)) - return; - - init_page_count(page); -#else - BUG_ON(count > 1); - if (!count) - return; -#endif - __free_pages(page, compound_order(page)); -} -#endif /* !SLE_VERSION(12,3,0) */ -#ifndef ETH_MIN_MTU -#define ETH_MIN_MTU 68 -#endif /* ETH_MIN_MTU */ -#else -#define HAVE_NETDEVICE_MIN_MAX_MTU -#define HAVE_SWIOTLB_SKIP_CPU_SYNC -#define HAVE_NETDEV_TC_RESETS_XPS -#endif /* 4.10.0 */ - -/*****************************************************************************/ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) -#ifdef CONFIG_NET_RX_BUSY_POLL -#define HAVE_NDO_BUSY_POLL -#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) -#define HAVE_VOID_NDO_GET_STATS64 -#endif -#endif -#else /* > 4.11 */ -#define HAVE_VOID_NDO_GET_STATS64 -#endif /* 4.11.0 */ - -/*****************************************************************************/ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) -#else /* > 4.13 */ -#define HAVE_HWTSTAMP_FILTER_NTP_ALL -#endif /* 4.13.0 */ - -#endif /* _KCOMPAT_H_ */ diff --git 
a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat_ethtool.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat_ethtool.c deleted file mode 100644 index 16fbd7475720..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/kcompat_ethtool.c +++ /dev/null @@ -1,1169 +0,0 @@ -/******************************************************************************* - - Intel(R) 10GbE PCI Express Linux Network Driver - Copyright(c) 1999 - 2017 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* - * net/core/ethtool.c - Ethtool ioctl handler - * Copyright (c) 2003 Matthew Wilcox - * - * This file is where we call all the ethtool_ops commands to get - * the information ethtool needs. We fall back to calling do_ioctl() - * for drivers which haven't been converted to ethtool_ops yet. - * - * It's GPL, stupid. - * - * Modification by sfeldma@pobox.com to work as backward compat - * solution for pre-ethtool_ops kernels. 
- * - copied struct ethtool_ops from ethtool.h - * - defined SET_ETHTOOL_OPS - * - put in some #ifndef NETIF_F_xxx wrappers - * - changes refs to dev->ethtool_ops to ethtool_ops - * - changed dev_ethtool to ethtool_ioctl - * - remove EXPORT_SYMBOL()s - * - added _kc_ prefix in built-in ethtool_op_xxx ops. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "kcompat.h" - -#undef SUPPORTED_10000baseT_Full -#define SUPPORTED_10000baseT_Full (1 << 12) -#undef ADVERTISED_10000baseT_Full -#define ADVERTISED_10000baseT_Full (1 << 12) -#undef SPEED_10000 -#define SPEED_10000 10000 - -#undef ethtool_ops -#define ethtool_ops _kc_ethtool_ops - -struct _kc_ethtool_ops { - int (*get_settings)(struct net_device *, struct ethtool_cmd *); - int (*set_settings)(struct net_device *, struct ethtool_cmd *); - void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); - int (*get_regs_len)(struct net_device *); - void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); - void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); - int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); - u32 (*get_msglevel)(struct net_device *); - void (*set_msglevel)(struct net_device *, u32); - int (*nway_reset)(struct net_device *); - u32 (*get_link)(struct net_device *); - int (*get_eeprom_len)(struct net_device *); - int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); - int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); - int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); - int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); - void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); - int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); - void (*get_pauseparam)(struct net_device *, - struct ethtool_pauseparam*); - int (*set_pauseparam)(struct net_device *, - struct ethtool_pauseparam*); - u32 (*get_rx_csum)(struct 
net_device *); - int (*set_rx_csum)(struct net_device *, u32); - u32 (*get_tx_csum)(struct net_device *); - int (*set_tx_csum)(struct net_device *, u32); - u32 (*get_sg)(struct net_device *); - int (*set_sg)(struct net_device *, u32); - u32 (*get_tso)(struct net_device *); - int (*set_tso)(struct net_device *, u32); - int (*self_test_count)(struct net_device *); - void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); - void (*get_strings)(struct net_device *, u32 stringset, u8 *); - int (*phys_id)(struct net_device *, u32); - int (*get_stats_count)(struct net_device *); - void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, - u64 *); -} *ethtool_ops = NULL; - -#undef SET_ETHTOOL_OPS -#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops)) - -/* - * Some useful ethtool_ops methods that are device independent. If we find that - * all drivers want to do the same thing here, we can turn these into dev_() - * function calls. - */ - -#undef ethtool_op_get_link -#define ethtool_op_get_link _kc_ethtool_op_get_link -u32 _kc_ethtool_op_get_link(struct net_device *dev) -{ - return netif_carrier_ok(dev) ? 
1 : 0; -} - -#undef ethtool_op_get_tx_csum -#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum -u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev) -{ -#ifdef NETIF_F_IP_CSUM - return (dev->features & NETIF_F_IP_CSUM) != 0; -#else - return 0; -#endif -} - -#undef ethtool_op_set_tx_csum -#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum -int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) -{ -#ifdef NETIF_F_IP_CSUM - if (data) -#ifdef NETIF_F_IPV6_CSUM - dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); - else - dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); -#else - dev->features |= NETIF_F_IP_CSUM; - else - dev->features &= ~NETIF_F_IP_CSUM; -#endif -#endif - - return 0; -} - -#undef ethtool_op_get_sg -#define ethtool_op_get_sg _kc_ethtool_op_get_sg -u32 _kc_ethtool_op_get_sg(struct net_device *dev) -{ -#ifdef NETIF_F_SG - return (dev->features & NETIF_F_SG) != 0; -#else - return 0; -#endif -} - -#undef ethtool_op_set_sg -#define ethtool_op_set_sg _kc_ethtool_op_set_sg -int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data) -{ -#ifdef NETIF_F_SG - if (data) - dev->features |= NETIF_F_SG; - else - dev->features &= ~NETIF_F_SG; -#endif - - return 0; -} - -#undef ethtool_op_get_tso -#define ethtool_op_get_tso _kc_ethtool_op_get_tso -u32 _kc_ethtool_op_get_tso(struct net_device *dev) -{ -#ifdef NETIF_F_TSO - return (dev->features & NETIF_F_TSO) != 0; -#else - return 0; -#endif -} - -#undef ethtool_op_set_tso -#define ethtool_op_set_tso _kc_ethtool_op_set_tso -int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data) -{ -#ifdef NETIF_F_TSO - if (data) - dev->features |= NETIF_F_TSO; - else - dev->features &= ~NETIF_F_TSO; -#endif - - return 0; -} - -/* Handlers for each ethtool command */ - -static int ethtool_get_settings(struct net_device *dev, void *useraddr) -{ - struct ethtool_cmd cmd = { ETHTOOL_GSET }; - int err; - - if (!ethtool_ops->get_settings) - return -EOPNOTSUPP; - - err = 
ethtool_ops->get_settings(dev, &cmd); - if (err < 0) - return err; - - if (copy_to_user(useraddr, &cmd, sizeof(cmd))) - return -EFAULT; - return 0; -} - -static int ethtool_set_settings(struct net_device *dev, void *useraddr) -{ - struct ethtool_cmd cmd; - - if (!ethtool_ops->set_settings) - return -EOPNOTSUPP; - - if (copy_from_user(&cmd, useraddr, sizeof(cmd))) - return -EFAULT; - - return ethtool_ops->set_settings(dev, &cmd); -} - -static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr) -{ - struct ethtool_drvinfo info; - struct ethtool_ops *ops = ethtool_ops; - - if (!ops->get_drvinfo) - return -EOPNOTSUPP; - - memset(&info, 0, sizeof(info)); - info.cmd = ETHTOOL_GDRVINFO; - ops->get_drvinfo(dev, &info); - - if (ops->self_test_count) - info.testinfo_len = ops->self_test_count(dev); - if (ops->get_stats_count) - info.n_stats = ops->get_stats_count(dev); - if (ops->get_regs_len) - info.regdump_len = ops->get_regs_len(dev); - if (ops->get_eeprom_len) - info.eedump_len = ops->get_eeprom_len(dev); - - if (copy_to_user(useraddr, &info, sizeof(info))) - return -EFAULT; - return 0; -} - -static int ethtool_get_regs(struct net_device *dev, char *useraddr) -{ - struct ethtool_regs regs; - struct ethtool_ops *ops = ethtool_ops; - void *regbuf; - int reglen, ret; - - if (!ops->get_regs || !ops->get_regs_len) - return -EOPNOTSUPP; - - if (copy_from_user(®s, useraddr, sizeof(regs))) - return -EFAULT; - - reglen = ops->get_regs_len(dev); - if (regs.len > reglen) - regs.len = reglen; - - regbuf = kmalloc(reglen, GFP_USER); - if (!regbuf) - return -ENOMEM; - - ops->get_regs(dev, ®s, regbuf); - - ret = -EFAULT; - if (copy_to_user(useraddr, ®s, sizeof(regs))) - goto out; - useraddr += offsetof(struct ethtool_regs, data); - if (copy_to_user(useraddr, regbuf, reglen)) - goto out; - ret = 0; - -out: - kfree(regbuf); - return ret; -} - -static int ethtool_get_wol(struct net_device *dev, char *useraddr) -{ - struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; - - if 
(!ethtool_ops->get_wol) - return -EOPNOTSUPP; - - ethtool_ops->get_wol(dev, &wol); - - if (copy_to_user(useraddr, &wol, sizeof(wol))) - return -EFAULT; - return 0; -} - -static int ethtool_set_wol(struct net_device *dev, char *useraddr) -{ - struct ethtool_wolinfo wol; - - if (!ethtool_ops->set_wol) - return -EOPNOTSUPP; - - if (copy_from_user(&wol, useraddr, sizeof(wol))) - return -EFAULT; - - return ethtool_ops->set_wol(dev, &wol); -} - -static int ethtool_get_msglevel(struct net_device *dev, char *useraddr) -{ - struct ethtool_value edata = { ETHTOOL_GMSGLVL }; - - if (!ethtool_ops->get_msglevel) - return -EOPNOTSUPP; - - edata.data = ethtool_ops->get_msglevel(dev); - - if (copy_to_user(useraddr, &edata, sizeof(edata))) - return -EFAULT; - return 0; -} - -static int ethtool_set_msglevel(struct net_device *dev, char *useraddr) -{ - struct ethtool_value edata; - - if (!ethtool_ops->set_msglevel) - return -EOPNOTSUPP; - - if (copy_from_user(&edata, useraddr, sizeof(edata))) - return -EFAULT; - - ethtool_ops->set_msglevel(dev, edata.data); - return 0; -} - -static int ethtool_nway_reset(struct net_device *dev) -{ - if (!ethtool_ops->nway_reset) - return -EOPNOTSUPP; - - return ethtool_ops->nway_reset(dev); -} - -static int ethtool_get_link(struct net_device *dev, void *useraddr) -{ - struct ethtool_value edata = { ETHTOOL_GLINK }; - - if (!ethtool_ops->get_link) - return -EOPNOTSUPP; - - edata.data = ethtool_ops->get_link(dev); - - if (copy_to_user(useraddr, &edata, sizeof(edata))) - return -EFAULT; - return 0; -} - -static int ethtool_get_eeprom(struct net_device *dev, void *useraddr) -{ - struct ethtool_eeprom eeprom; - struct ethtool_ops *ops = ethtool_ops; - u8 *data; - int ret; - - if (!ops->get_eeprom || !ops->get_eeprom_len) - return -EOPNOTSUPP; - - if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) - return -EFAULT; - - /* Check for wrap and zero */ - if (eeprom.offset + eeprom.len <= eeprom.offset) - return -EINVAL; - - /* Check for exceeding total 
eeprom len */ - if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) - return -EINVAL; - - data = kmalloc(eeprom.len, GFP_USER); - if (!data) - return -ENOMEM; - - ret = -EFAULT; - if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) - goto out; - - ret = ops->get_eeprom(dev, &eeprom, data); - if (ret) - goto out; - - ret = -EFAULT; - if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) - goto out; - if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) - goto out; - ret = 0; - -out: - kfree(data); - return ret; -} - -static int ethtool_set_eeprom(struct net_device *dev, void *useraddr) -{ - struct ethtool_eeprom eeprom; - struct ethtool_ops *ops = ethtool_ops; - u8 *data; - int ret; - - if (!ops->set_eeprom || !ops->get_eeprom_len) - return -EOPNOTSUPP; - - if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) - return -EFAULT; - - /* Check for wrap and zero */ - if (eeprom.offset + eeprom.len <= eeprom.offset) - return -EINVAL; - - /* Check for exceeding total eeprom len */ - if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) - return -EINVAL; - - data = kmalloc(eeprom.len, GFP_USER); - if (!data) - return -ENOMEM; - - ret = -EFAULT; - if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) - goto out; - - ret = ops->set_eeprom(dev, &eeprom, data); - if (ret) - goto out; - - if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) - ret = -EFAULT; - -out: - kfree(data); - return ret; -} - -static int ethtool_get_coalesce(struct net_device *dev, void *useraddr) -{ - struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; - - if (!ethtool_ops->get_coalesce) - return -EOPNOTSUPP; - - ethtool_ops->get_coalesce(dev, &coalesce); - - if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) - return -EFAULT; - return 0; -} - -static int ethtool_set_coalesce(struct net_device *dev, void *useraddr) -{ - struct ethtool_coalesce coalesce; - - if (!ethtool_ops->get_coalesce) - return -EOPNOTSUPP; - - if 
(copy_from_user(&coalesce, useraddr, sizeof(coalesce))) - return -EFAULT; - - return ethtool_ops->set_coalesce(dev, &coalesce); -} - -static int ethtool_get_ringparam(struct net_device *dev, void *useraddr) -{ - struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; - - if (!ethtool_ops->get_ringparam) - return -EOPNOTSUPP; - - ethtool_ops->get_ringparam(dev, &ringparam); - - if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) - return -EFAULT; - return 0; -} - -static int ethtool_set_ringparam(struct net_device *dev, void *useraddr) -{ - struct ethtool_ringparam ringparam; - - if (!ethtool_ops->get_ringparam) - return -EOPNOTSUPP; - - if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) - return -EFAULT; - - return ethtool_ops->set_ringparam(dev, &ringparam); -} - -static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr) -{ - struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; - - if (!ethtool_ops->get_pauseparam) - return -EOPNOTSUPP; - - ethtool_ops->get_pauseparam(dev, &pauseparam); - - if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) - return -EFAULT; - return 0; -} - -static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr) -{ - struct ethtool_pauseparam pauseparam; - - if (!ethtool_ops->get_pauseparam) - return -EOPNOTSUPP; - - if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) - return -EFAULT; - - return ethtool_ops->set_pauseparam(dev, &pauseparam); -} - -static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr) -{ - struct ethtool_value edata = { ETHTOOL_GRXCSUM }; - - if (!ethtool_ops->get_rx_csum) - return -EOPNOTSUPP; - - edata.data = ethtool_ops->get_rx_csum(dev); - - if (copy_to_user(useraddr, &edata, sizeof(edata))) - return -EFAULT; - return 0; -} - -static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr) -{ - struct ethtool_value edata; - - if (!ethtool_ops->set_rx_csum) - return -EOPNOTSUPP; - - if (copy_from_user(&edata, 
useraddr, sizeof(edata))) - return -EFAULT; - - ethtool_ops->set_rx_csum(dev, edata.data); - return 0; -} - -static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr) -{ - struct ethtool_value edata = { ETHTOOL_GTXCSUM }; - - if (!ethtool_ops->get_tx_csum) - return -EOPNOTSUPP; - - edata.data = ethtool_ops->get_tx_csum(dev); - - if (copy_to_user(useraddr, &edata, sizeof(edata))) - return -EFAULT; - return 0; -} - -static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr) -{ - struct ethtool_value edata; - - if (!ethtool_ops->set_tx_csum) - return -EOPNOTSUPP; - - if (copy_from_user(&edata, useraddr, sizeof(edata))) - return -EFAULT; - - return ethtool_ops->set_tx_csum(dev, edata.data); -} - -static int ethtool_get_sg(struct net_device *dev, char *useraddr) -{ - struct ethtool_value edata = { ETHTOOL_GSG }; - - if (!ethtool_ops->get_sg) - return -EOPNOTSUPP; - - edata.data = ethtool_ops->get_sg(dev); - - if (copy_to_user(useraddr, &edata, sizeof(edata))) - return -EFAULT; - return 0; -} - -static int ethtool_set_sg(struct net_device *dev, char *useraddr) -{ - struct ethtool_value edata; - - if (!ethtool_ops->set_sg) - return -EOPNOTSUPP; - - if (copy_from_user(&edata, useraddr, sizeof(edata))) - return -EFAULT; - - return ethtool_ops->set_sg(dev, edata.data); -} - -static int ethtool_get_tso(struct net_device *dev, char *useraddr) -{ - struct ethtool_value edata = { ETHTOOL_GTSO }; - - if (!ethtool_ops->get_tso) - return -EOPNOTSUPP; - - edata.data = ethtool_ops->get_tso(dev); - - if (copy_to_user(useraddr, &edata, sizeof(edata))) - return -EFAULT; - return 0; -} - -static int ethtool_set_tso(struct net_device *dev, char *useraddr) -{ - struct ethtool_value edata; - - if (!ethtool_ops->set_tso) - return -EOPNOTSUPP; - - if (copy_from_user(&edata, useraddr, sizeof(edata))) - return -EFAULT; - - return ethtool_ops->set_tso(dev, edata.data); -} - -static int ethtool_self_test(struct net_device *dev, char *useraddr) -{ - struct ethtool_test 
test; - struct ethtool_ops *ops = ethtool_ops; - u64 *data; - int ret; - - if (!ops->self_test || !ops->self_test_count) - return -EOPNOTSUPP; - - if (copy_from_user(&test, useraddr, sizeof(test))) - return -EFAULT; - - test.len = ops->self_test_count(dev); - data = kmalloc(test.len * sizeof(u64), GFP_USER); - if (!data) - return -ENOMEM; - - ops->self_test(dev, &test, data); - - ret = -EFAULT; - if (copy_to_user(useraddr, &test, sizeof(test))) - goto out; - useraddr += sizeof(test); - if (copy_to_user(useraddr, data, test.len * sizeof(u64))) - goto out; - ret = 0; - -out: - kfree(data); - return ret; -} - -static int ethtool_get_strings(struct net_device *dev, void *useraddr) -{ - struct ethtool_gstrings gstrings; - struct ethtool_ops *ops = ethtool_ops; - u8 *data; - int ret; - - if (!ops->get_strings) - return -EOPNOTSUPP; - - if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) - return -EFAULT; - - switch (gstrings.string_set) { - case ETH_SS_TEST: - if (!ops->self_test_count) - return -EOPNOTSUPP; - gstrings.len = ops->self_test_count(dev); - break; - case ETH_SS_STATS: - if (!ops->get_stats_count) - return -EOPNOTSUPP; - gstrings.len = ops->get_stats_count(dev); - break; - default: - return -EINVAL; - } - - data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); - if (!data) - return -ENOMEM; - - ops->get_strings(dev, gstrings.string_set, data); - - ret = -EFAULT; - if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) - goto out; - useraddr += sizeof(gstrings); - if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) - goto out; - ret = 0; - -out: - kfree(data); - return ret; -} - -static int ethtool_phys_id(struct net_device *dev, void *useraddr) -{ - struct ethtool_value id; - - if (!ethtool_ops->phys_id) - return -EOPNOTSUPP; - - if (copy_from_user(&id, useraddr, sizeof(id))) - return -EFAULT; - - return ethtool_ops->phys_id(dev, id.data); -} - -static int ethtool_get_stats(struct net_device *dev, void *useraddr) -{ - struct 
ethtool_stats stats; - struct ethtool_ops *ops = ethtool_ops; - u64 *data; - int ret; - - if (!ops->get_ethtool_stats || !ops->get_stats_count) - return -EOPNOTSUPP; - - if (copy_from_user(&stats, useraddr, sizeof(stats))) - return -EFAULT; - - stats.n_stats = ops->get_stats_count(dev); - data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER); - if (!data) - return -ENOMEM; - - ops->get_ethtool_stats(dev, &stats, data); - - ret = -EFAULT; - if (copy_to_user(useraddr, &stats, sizeof(stats))) - goto out; - useraddr += sizeof(stats); - if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) - goto out; - ret = 0; - -out: - kfree(data); - return ret; -} - -/* The main entry point in this file. Called from net/core/dev.c */ - -#define ETHTOOL_OPS_COMPAT -int ethtool_ioctl(struct ifreq *ifr) -{ - struct net_device *dev = __dev_get_by_name(ifr->ifr_name); - void *useraddr = (void *) ifr->ifr_data; - u32 ethcmd; - - /* - * XXX: This can be pushed down into the ethtool_* handlers that - * need it. Keep existing behavior for the moment. 
- */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (!dev || !netif_device_present(dev)) - return -ENODEV; - - if (copy_from_user(ðcmd, useraddr, sizeof (ethcmd))) - return -EFAULT; - - switch (ethcmd) { - case ETHTOOL_GSET: - return ethtool_get_settings(dev, useraddr); - case ETHTOOL_SSET: - return ethtool_set_settings(dev, useraddr); - case ETHTOOL_GDRVINFO: - return ethtool_get_drvinfo(dev, useraddr); - case ETHTOOL_GREGS: - return ethtool_get_regs(dev, useraddr); - case ETHTOOL_GWOL: - return ethtool_get_wol(dev, useraddr); - case ETHTOOL_SWOL: - return ethtool_set_wol(dev, useraddr); - case ETHTOOL_GMSGLVL: - return ethtool_get_msglevel(dev, useraddr); - case ETHTOOL_SMSGLVL: - return ethtool_set_msglevel(dev, useraddr); - case ETHTOOL_NWAY_RST: - return ethtool_nway_reset(dev); - case ETHTOOL_GLINK: - return ethtool_get_link(dev, useraddr); - case ETHTOOL_GEEPROM: - return ethtool_get_eeprom(dev, useraddr); - case ETHTOOL_SEEPROM: - return ethtool_set_eeprom(dev, useraddr); - case ETHTOOL_GCOALESCE: - return ethtool_get_coalesce(dev, useraddr); - case ETHTOOL_SCOALESCE: - return ethtool_set_coalesce(dev, useraddr); - case ETHTOOL_GRINGPARAM: - return ethtool_get_ringparam(dev, useraddr); - case ETHTOOL_SRINGPARAM: - return ethtool_set_ringparam(dev, useraddr); - case ETHTOOL_GPAUSEPARAM: - return ethtool_get_pauseparam(dev, useraddr); - case ETHTOOL_SPAUSEPARAM: - return ethtool_set_pauseparam(dev, useraddr); - case ETHTOOL_GRXCSUM: - return ethtool_get_rx_csum(dev, useraddr); - case ETHTOOL_SRXCSUM: - return ethtool_set_rx_csum(dev, useraddr); - case ETHTOOL_GTXCSUM: - return ethtool_get_tx_csum(dev, useraddr); - case ETHTOOL_STXCSUM: - return ethtool_set_tx_csum(dev, useraddr); - case ETHTOOL_GSG: - return ethtool_get_sg(dev, useraddr); - case ETHTOOL_SSG: - return ethtool_set_sg(dev, useraddr); - case ETHTOOL_GTSO: - return ethtool_get_tso(dev, useraddr); - case ETHTOOL_STSO: - return ethtool_set_tso(dev, useraddr); - case ETHTOOL_TEST: - return 
ethtool_self_test(dev, useraddr); - case ETHTOOL_GSTRINGS: - return ethtool_get_strings(dev, useraddr); - case ETHTOOL_PHYS_ID: - return ethtool_phys_id(dev, useraddr); - case ETHTOOL_GSTATS: - return ethtool_get_stats(dev, useraddr); - default: - return -EOPNOTSUPP; - } - - return -EOPNOTSUPP; -} - -#define mii_if_info _kc_mii_if_info -struct _kc_mii_if_info { - int phy_id; - int advertising; - int phy_id_mask; - int reg_num_mask; - - unsigned int full_duplex : 1; /* is full duplex? */ - unsigned int force_media : 1; /* is autoneg. disabled? */ - - struct net_device *dev; - int (*mdio_read) (struct net_device *dev, int phy_id, int location); - void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val); -}; - -struct ethtool_cmd; -struct mii_ioctl_data; - -#undef mii_link_ok -#define mii_link_ok _kc_mii_link_ok -#undef mii_nway_restart -#define mii_nway_restart _kc_mii_nway_restart -#undef mii_ethtool_gset -#define mii_ethtool_gset _kc_mii_ethtool_gset -#undef mii_ethtool_sset -#define mii_ethtool_sset _kc_mii_ethtool_sset -#undef mii_check_link -#define mii_check_link _kc_mii_check_link -extern int _kc_mii_link_ok (struct mii_if_info *mii); -extern int _kc_mii_nway_restart (struct mii_if_info *mii); -extern int _kc_mii_ethtool_gset(struct mii_if_info *mii, - struct ethtool_cmd *ecmd); -extern int _kc_mii_ethtool_sset(struct mii_if_info *mii, - struct ethtool_cmd *ecmd); -extern void _kc_mii_check_link (struct mii_if_info *mii); -#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) -#undef generic_mii_ioctl -#define generic_mii_ioctl _kc_generic_mii_ioctl -extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, - struct mii_ioctl_data *mii_data, int cmd, - unsigned int *duplex_changed); -#endif /* > 2.4.6 */ - - -struct _kc_pci_dev_ext { - struct pci_dev *dev; - void *pci_drvdata; - struct pci_driver *driver; -}; - -struct _kc_net_dev_ext { - struct net_device *dev; - unsigned int carrier; -}; - - -/**************************************/ -/* 
mii support */ - -int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) -{ - struct net_device *dev = mii->dev; - u32 advert, bmcr, lpa, nego; - - ecmd->supported = - (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | - SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); - - /* only supports twisted-pair */ - ecmd->port = PORT_MII; - - /* only supports internal transceiver */ - ecmd->transceiver = XCVR_INTERNAL; - - /* this isn't fully supported at higher layers */ - ecmd->phy_address = mii->phy_id; - - ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII; - advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); - if (advert & ADVERTISE_10HALF) - ecmd->advertising |= ADVERTISED_10baseT_Half; - if (advert & ADVERTISE_10FULL) - ecmd->advertising |= ADVERTISED_10baseT_Full; - if (advert & ADVERTISE_100HALF) - ecmd->advertising |= ADVERTISED_100baseT_Half; - if (advert & ADVERTISE_100FULL) - ecmd->advertising |= ADVERTISED_100baseT_Full; - - bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); - lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA); - if (bmcr & BMCR_ANENABLE) { - ecmd->advertising |= ADVERTISED_Autoneg; - ecmd->autoneg = AUTONEG_ENABLE; - - nego = mii_nway_result(advert & lpa); - if (nego == LPA_100FULL || nego == LPA_100HALF) - ecmd->speed = SPEED_100; - else - ecmd->speed = SPEED_10; - if (nego == LPA_100FULL || nego == LPA_10FULL) { - ecmd->duplex = DUPLEX_FULL; - mii->full_duplex = 1; - } else { - ecmd->duplex = DUPLEX_HALF; - mii->full_duplex = 0; - } - } else { - ecmd->autoneg = AUTONEG_DISABLE; - - ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; - ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; - } - - /* ignore maxtxpkt, maxrxpkt for now */ - - return 0; -} - -int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) -{ - struct net_device *dev = mii->dev; - - if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) - return -EINVAL; - if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) - return -EINVAL; - if (ecmd->port != PORT_MII) - return -EINVAL; - if (ecmd->transceiver != XCVR_INTERNAL) - return -EINVAL; - if (ecmd->phy_address != mii->phy_id) - return -EINVAL; - if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) - return -EINVAL; - - /* ignore supported, maxtxpkt, maxrxpkt */ - - if (ecmd->autoneg == AUTONEG_ENABLE) { - u32 bmcr, advert, tmp; - - if ((ecmd->advertising & (ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full)) == 0) - return -EINVAL; - - /* advertise only what has been requested */ - advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); - tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); - if (ADVERTISED_10baseT_Half) - tmp |= ADVERTISE_10HALF; - if (ADVERTISED_10baseT_Full) - tmp |= ADVERTISE_10FULL; - if (ADVERTISED_100baseT_Half) - tmp |= ADVERTISE_100HALF; - if (ADVERTISED_100baseT_Full) - tmp |= ADVERTISE_100FULL; - if (advert != tmp) { - mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); - mii->advertising = tmp; - } - - /* turn on autonegotiation, and force a renegotiate */ - bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); - bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); - mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); - - mii->force_media = 0; - } else { - u32 bmcr, tmp; - - /* turn off auto negotiation, set speed and duplexity */ - bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); - tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX); - if (ecmd->speed == SPEED_100) - tmp |= BMCR_SPEED100; - if (ecmd->duplex == DUPLEX_FULL) { - tmp |= BMCR_FULLDPLX; - 
mii->full_duplex = 1; - } else - mii->full_duplex = 0; - if (bmcr != tmp) - mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); - - mii->force_media = 1; - } - return 0; -} - -int _kc_mii_link_ok (struct mii_if_info *mii) -{ - /* first, a dummy read, needed to latch some MII phys */ - mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); - if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS) - return 1; - return 0; -} - -int _kc_mii_nway_restart (struct mii_if_info *mii) -{ - int bmcr; - int r = -EINVAL; - - /* if autoneg is off, it's an error */ - bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); - - if (bmcr & BMCR_ANENABLE) { - bmcr |= BMCR_ANRESTART; - mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr); - r = 0; - } - - return r; -} - -void _kc_mii_check_link (struct mii_if_info *mii) -{ - int cur_link = mii_link_ok(mii); - int prev_link = netif_carrier_ok(mii->dev); - - if (cur_link && !prev_link) - netif_carrier_on(mii->dev); - else if (prev_link && !cur_link) - netif_carrier_off(mii->dev); -} - -#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) -int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, - struct mii_ioctl_data *mii_data, int cmd, - unsigned int *duplex_chg_out) -{ - int rc = 0; - unsigned int duplex_changed = 0; - - if (duplex_chg_out) - *duplex_chg_out = 0; - - mii_data->phy_id &= mii_if->phy_id_mask; - mii_data->reg_num &= mii_if->reg_num_mask; - - switch(cmd) { - case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */ - case SIOCGMIIPHY: - mii_data->phy_id = mii_if->phy_id; - /* fall through */ - - case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */ - case SIOCGMIIREG: - mii_data->val_out = - mii_if->mdio_read(mii_if->dev, mii_data->phy_id, - mii_data->reg_num); - break; - - case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */ - case SIOCSMIIREG: { - u16 val = mii_data->val_in; - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (mii_data->phy_id == mii_if->phy_id) { - switch(mii_data->reg_num) { - case 
MII_BMCR: { - unsigned int new_duplex = 0; - if (val & (BMCR_RESET|BMCR_ANENABLE)) - mii_if->force_media = 0; - else - mii_if->force_media = 1; - if (mii_if->force_media && - (val & BMCR_FULLDPLX)) - new_duplex = 1; - if (mii_if->full_duplex != new_duplex) { - duplex_changed = 1; - mii_if->full_duplex = new_duplex; - } - break; - } - case MII_ADVERTISE: - mii_if->advertising = val; - break; - default: - /* do nothing */ - break; - } - } - - mii_if->mdio_write(mii_if->dev, mii_data->phy_id, - mii_data->reg_num, val); - break; - } - - default: - rc = -EOPNOTSUPP; - break; - } - - if ((rc == 0) && (duplex_chg_out) && (duplex_changed)) - *duplex_chg_out = 1; - - return rc; -} -#endif /* > 2.4.6 */ - diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/__init__.py b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/__init__.py new file mode 100755 index 000000000000..e69de29bb2d1 diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/common.py b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/common.py new file mode 100755 index 000000000000..58335b06eb15 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/common.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import sys, os, syslog +import getopt, commands, threading +import time, socket, Queue +import signal, select +from functools import partial + +RUN = False +PASS = 0 +FAIL = 1 + +SOCKET_PORT = 50000 +SOCKET_RECV_BUFFER = 4096 +SOCKET_TIME_OUT = 20 +FILE_LOCK = threading.Lock() +SOCKET_LOCK = threading.Lock() + +CMD_TYPE = ['global', 'device'] +RESPONSE_ERROR_PARAMETER = "Parameters error" +I2C_PREFIX = '/sys/bus/i2c/devices/' + +def doBash(cmd): + status, output = commands.getstatusoutput(cmd) + + return status, output + +def doSend(msg, port): + if SOCKET_LOCK.acquire(): + host = socket.gethostname() + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.settimeout(SOCKET_TIME_OUT) + + try: + s.connect((host, port)) + except: + sys.exit(0) + + s.sendall(msg) + result = s.recv(SOCKET_RECV_BUFFER) + s.close() + SOCKET_LOCK.release() + return result + +def readFile(path): + if FILE_LOCK.acquire(): + try: + file = open(path) + except IOError as e: + print "Error: unable to open file: %s" % str(e) + FILE_LOCK.release() + return 'Error' + + value = file.readline().rstrip() + file.close() + FILE_LOCK.release() + + return value + +def writeFile(path, value): + if FILE_LOCK.acquire(): + try: + file = open(path, "r+") + except IOError as e: + print "Error: unable to open file: %s" % str(e) + FILE_LOCK.release() + return 'Error' + + file.seek(0) + file.write(str(value)) + file.close() + FILE_LOCK.release() + + return "Success" diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/device.py b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/device.py new file mode 100755 index 000000000000..6b08dd1511b0 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/device.py @@ -0,0 +1,370 @@ +#!/usr/bin/env python +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software 
Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import common + +TOTAL_PORT_NUM = 54 +SFP_MAX_NUM = 48 +CPLDA_SFP_NUM = 24 +CPLDB_SFP_NUM = 12 +CPLDC_SFP_NUM = 18 +FAN_NUM = 5 + +DEVICE_BUS = {'cpld': ['6-0074', '7-0075', '8-0076'], + 'fan': ['5-0070'], + 'psu': ['2-0058', '3-0059'], + 'status_led': ['7-0075']} + +class DeviceThread(common.threading.Thread): + def __init__(self,threadname, q): + common.threading.Thread.__init__(self,name = threadname) + self.queue = q + + def run(self): + while common.RUN: + message = self.queue.get() + self.onMessage(message) + + def onMessage(self, message): + """ + Commands: + led : Led controls + locate : Blink locator LED + sensors : Read HW monitors + """ + + if len(message.command) < 1: + result = self.onMessage.__doc__ + else: + if message.command[0] == 'init': + result = deviceInit() + elif message.command[0] == 'led': + result = ledControls(message.command[1:]) + elif message.command[0] == 'locate': + locatethread = common.threading.Thread(target = locateDeviceLed) + locatethread.start() + result = 'Success' + elif message.command[0] == 'sensors': + result = getSensors() + else: + result = self.onMessage.__doc__ + + if (message.callback is not None): + message.callback(result) + +STATUS_ALERT = {'fan': ['wrongAirflow_alert', 'outerRPMOver_alert', 'outerRPMUnder_alert', 'outerRPMZero_alert', + 'innerRPMOver_alert', 'innerRPMUnder_alert', 'innerRPMZero_alert', 'notconnect_alert'], + 'psu': ['vout_over_voltage', 'iout_over_current_fault', 'iout_over_current_warning', + 'iput_over_current_warning', 'iput_insufficient', 
'temp_over_temp_fault', 'temp_over_temp_warning']} +class PlatformStatusThread(common.threading.Thread): + def __init__(self,threadname, timer): + self.running = True + common.threading.Thread.__init__(self,name = threadname) + self.timer = timer + self.fan_led_status = 'off' + self.psu_led_status = 'off' + + def run(self): + while common.RUN: + self.checkPlatformStatus() + common.time.sleep(self.timer) + + def checkPlatformStatus(self): + total_result = common.PASS + total_result += self.checkFanStatus() + total_result += self.checkPsuStatus() + + def checkFanStatus(self): + fan_result = common.PASS + fan_bus = DEVICE_BUS['fan'] + fan_alert = STATUS_ALERT['fan'] + fan_led = LED_COMMAND['fan_led'] + fan_normal = 'green' + fan_abnormal = 'blink_amber' + led_bus = DEVICE_BUS['status_led'] + led_path = common.I2C_PREFIX + led_bus[0] + '/' + LED_NODES[3] + + status, output = common.doBash("ls " + common.I2C_PREFIX) + if output.find(fan_bus[0]) != -1: + for num in range(0,FAN_NUM): + for alert_type in fan_alert: + path = common.I2C_PREFIX + fan_bus[0] + "/fan" + str(num+1) + "_" + alert_type + result = common.readFile(path) + if result != 'Error': + fan_result += int(result) + if fan_result != common.PASS: + if self.fan_led_status != fan_abnormal: + common.writeFile(led_path, fan_led[fan_abnormal]) + self.fan_led_status = fan_abnormal + common.syslog.syslog(common.syslog.LOG_ERR, 'FAN Status Error !!!') + return common.FAIL + + if self.fan_led_status != fan_normal: + common.writeFile(led_path, fan_led[fan_normal]) + self.fan_led_status = fan_normal + common.syslog.syslog(common.syslog.LOG_ERR, 'FAN Status Normal !!!') + return common.PASS + + def checkPsuStatus(self): + psu_result = common.PASS + psu_bus = DEVICE_BUS['psu'] + psu_alert = STATUS_ALERT['psu'] + psu_led = LED_COMMAND['pwr_led'] + psu_normal = 'green' + psu_abnormal = 'blink_amber' + led_bus = DEVICE_BUS['status_led'] + led_path = common.I2C_PREFIX + led_bus[0] + '/' + LED_NODES[1] + + status, output = 
common.doBash("ls " + common.I2C_PREFIX) + if output.find(psu_bus[0]) != -1 and output.find(psu_bus[1]) != -1: + for nodes in psu_bus: + for alert_type in psu_alert: + path = common.I2C_PREFIX + nodes + "/" + alert_type + result = common.readFile(path) + if result != 'Error': + psu_result += int(result) + if psu_result != common.PASS: + if self.psu_led_status != psu_abnormal: + common.writeFile(led_path, psu_led[psu_abnormal]) + self.psu_led_status = psu_abnormal + common.syslog.syslog(common.syslog.LOG_ERR, 'PSU Status Error !!!') + return common.FAIL + + if self.psu_led_status != psu_normal: + common.writeFile(led_path, psu_led[psu_normal]) + self.psu_led_status = psu_normal + common.syslog.syslog(common.syslog.LOG_ERR, 'PSU Status Normal !!!') + return common.PASS + +LED_COMMAND = {'sys_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'pwr_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'loc_led': {'on':'0', 'off':'1', 'blink':'2'}, + 'fan_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'cpld_allled_ctrl': {'off':'0', 'mix':'1', 'amber':'2', 'normal':'3'}, + 'serial_led_enable': {'disable':'0', 'enable':'1'}} +LED_NODES = ['sys_led', 'pwr_led', 'loc_led', 'fan_led', "cpld_allled_ctrl", "serial_led_enable"] +def ledControls(args): + """ + Commands: + set : Set led config + get : Get led status + """ + COMMAND_TYPE = ['set', 'get'] + if len(args) < 1 or args[0] not in COMMAND_TYPE: + return ledControls.__doc__ + + result = setGetLed(args[0:]) + + return result + +def setGetLed(args): + """ + Commands: + sys_led : System status led [green/amber/off/blink_green/blink_amber] + pwr_led : Power status led [green/amber/off/blink_green/blink_amber] + loc_led : Locator led [on/off/blink] + fan_led : Fan led [green/amber/off/blink_green/blink_amber] + """ + if len(args) < 3 or args[1] not in LED_COMMAND: + return setGetLed.__doc__ + + led_bus = 
DEVICE_BUS['status_led'] + for i in range(0,len(LED_NODES)): + if args[1] == LED_NODES[i]: + path = common.I2C_PREFIX + led_bus[0] + '/' + LED_NODES[i] + command = LED_COMMAND[args[1]] + + if args[0] == 'set': + if args[2] in command: + data = command[args[2]] + result = common.writeFile(path, data) + else: + result = setGetLed.__doc__ + else: + result = common.readFile(node) + if result != "Error": + result = list (command.keys()) [list (command.values()).index (result)] + + return result + +def locateDeviceLed(): + setGetLed(['set', 'loc_led', 'blink']) + common.time.sleep(20) + setGetLed(['set', 'loc_led', 'off']) + +SENSORS_PATH = common.I2C_PREFIX + '5-0070/' +SENSORS_NODES = {'fan_rpm': ['_inner_rpm', '_outer_rpm'], + 'fan_vol': ['ADC8_vol', 'ADC7_vol','ADC6_vol', 'ADC5_vol','ADC4_vol', 'ADC3_vol'], + 'temp':['lm75_48_temp', 'lm75_49_temp', 'lm75_4a_temp'], + 'fan_alert':['_status_alert', '_wrongAirflow_alert', '_outerRPMOver_alert', '_outerRPMUnder_alert', + '_outerRPMZero_alert', '_innerRPMOver_alert', '_innerRPMUnder_alert', '_innerRPMZero_alert', '_notconnect_alert'], + 'vol_alert':['_under_alert', '_over_alert'], + 'temp_alert':['lm75_48_temp_alert', 'lm75_49_temp_alert', 'lm75_4a_temp_alert', 'sa56004x_Ltemp_alert', 'sa56004x_Rtemp_alert']} +SENSORS_TYPE = {'fan_rpm': ['Inner RPM', 'Outer RPM'], + 'fan_vol': ['P0.2', 'P0.6','P0.1', 'P1.5','P0.7', 'P1.6'], + 'temp':['lm75_48_temp', 'lm75_49_temp', 'lm75_4a_temp']} +def getSensors(): + string = '' + # Firmware version + val = common.readFile(SENSORS_PATH + 'mb_fw_version') + string = '\n' + "MB-SW Version: " + val + + val = common.readFile(SENSORS_PATH + 'fb_fw_version') + string += '\n' + "FB-SW Version: " + val + + # Fan + string += getFan() + + # HW Monitor + string += '\n' + getHWMonitor() + + # Voltage + string += '\n' + getVoltage() + + return string + +def getFan(): + string = '' + for i in range(0,FAN_NUM): + # Status + result = getFanStatus(i) + string += '\n\n' + "FAN " + str(i+1) + ": " + 
result + + if result == 'Disconnect': + continue + + # Alert + result = getFanAlert(i) + string += '\n' + " Status: " + result + + # Inner RPM + result = getFanInnerRPM(i) + string += '\n' + " Inner RPM: " + result.rjust(10) + " RPM" + + # Outer RPM + result = getFanOuterRPM(i) + string += '\n' + " Outer RPM: " + result.rjust(10) + " RPM" + + return string + +def getFanStatus(num): + val = common.readFile(SENSORS_PATH + 'fan' + str(num+1) + '_present') + if val != 'Error': + if int(val, 16) == 0: + result = 'Connect' + else: + result = 'Disconnect' + else: + result = val + return result + +def getFanAlert(num): + alert = 0 + alert_types = SENSORS_NODES['fan_alert'] + for alert_type in alert_types: + val = common.readFile(SENSORS_PATH + 'fan' + str(num+1) + alert_type) + if val != 'Error': + alert += int(val, 16) + else: + return val + + if alert > 0: + result = 'Warning' + else: + result = 'Normal' + + return result + +def getFanInnerRPM(num): + return common.readFile(SENSORS_PATH + 'fan' + str(num+1) + '_inner_rpm') + +def getFanOuterRPM(num): + return common.readFile(SENSORS_PATH + 'fan' + str(num+1) + '_outer_rpm') + +def getHWMonitor(): + string = '' + temp_type = SENSORS_TYPE['temp'] + for types in temp_type: + val = common.readFile(SENSORS_PATH + types) + val_alert = common.readFile(SENSORS_PATH + types + '_alert') + if val_alert != 'Error': + if int(val_alert, 16) == 1: + alert = 'Warning' + else: + alert = 'Normal' + else: + alert = val_alert + string += '\n' + types + ": " + val + " C" + " ( " + alert + " )" + + return string + +def getVoltage(): + string = '' + nodes = SENSORS_NODES['fan_vol'] + types = SENSORS_TYPE['fan_vol'] + for i in range(0,len(nodes)): + val = common.readFile(SENSORS_PATH + nodes[i]) + alert = getVoltageAlert(i) + string += '\n' + types[i] + ": " + val + " V ( " + alert + " )" + + return string + +def getVoltageAlert(num): + alert = 0 + nodes = SENSORS_NODES['vol_alert'] + for node in nodes: + val = common.readFile(SENSORS_PATH + 
'ADC' + str(num+1) + node) + if val != 'Error': + alert += int(val, 16) + else: + return val + + if alert > 0: + result = 'Warning' + else: + result = 'Normal' + + return result + +DEVICE_INIT = {'led': [['set', 'sys_led', 'green'], ['set', 'pwr_led', 'green'], ['set', 'fan_led', 'green'], ['set', 'cpld_allled_ctrl', 'normal'], ['set', 'serial_led_enable', 'enable']]} +def deviceInit(): + # Set led + for i in range(0,len(DEVICE_INIT['led'])): + setGetLed(DEVICE_INIT['led'][i]) + + # Set tx disable + cpld_bus = DEVICE_BUS['cpld'] + for x in range(0, SFP_MAX_NUM): + if x < CPLDB_SFP_NUM: + bus = cpld_bus[1] + elif x < CPLDB_SFP_NUM + CPLDA_SFP_NUM: + bus = cpld_bus[0] + else: + bus = cpld_bus[2] + + path = common.I2C_PREFIX + bus + '/sfp' + str(x+1) + '_tx_disable' + result = common.writeFile(path, "0") + + # Set QSFP reset to normal + for x in range(SFP_MAX_NUM, TOTAL_PORT_NUM): + path = common.I2C_PREFIX + cpld_bus[2] + '/sfp' + str(x+1) + '_reset' + result = common.writeFile(path, "1") + + # Set QSFP I2C enable + for x in range(SFP_MAX_NUM, TOTAL_PORT_NUM): + path = common.I2C_PREFIX + cpld_bus[2] + '/sfp' + str(x+1) + '_modeseln' + result = common.writeFile(path, "0") + + return diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/main.py b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/main.py new file mode 100755 index 000000000000..2074e533e0d4 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/main.py @@ -0,0 +1,205 @@ +#!/usr/bin/env python +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import common, device + +HOST = '' +SOCKET_LIST = [] +SOCKET_MAX_CLIENT = 10 +QUEUES = [] +THREADS = [] +FUNCS = {} + +class GlobalThread(common.threading.Thread): + def __init__(self,threadname, q): + common.threading.Thread.__init__(self,name = threadname) + self.queue = q + + def run(self): + while common.RUN: + message = self.queue.get() + self.onMessage(message) + def onMessage(self, message): + """ + Commands: + uninstall : Uninstall platform drivers + """ + if len(message.command) < 1: + result = self.onMessage.__doc__ + else: + if message.command[0] == 'uninstall': + common.RUN = False + doUninstall() + result = 'Success' + else: + result = self.onMessage.__doc__ + if (message.callback is not None): + message.callback(result) + +class messageObject(object): + def __init__(self, command, callback): + super(messageObject, self).__init__() + self.command = command + self.callback = callback + +def callback(sock, result): + sock.sendall(result) + +def messageHandler(): + server_socket = common.socket.socket(common.socket.AF_INET, common.socket.SOCK_STREAM) + server_socket.setsockopt(common.socket.SOL_SOCKET, common.socket.SO_REUSEADDR, 1) + server_socket.bind((HOST, common.SOCKET_PORT)) + server_socket.listen(SOCKET_MAX_CLIENT) + SOCKET_LIST.append(server_socket) + + while(common.RUN): + ready_to_read,ready_to_write,in_error = common.select.select(SOCKET_LIST,[],[],0) + for sock in ready_to_read: + if sock == server_socket: + sockfd, addr = server_socket.accept() + SOCKET_LIST.append(sockfd) + else: + try: + data = sock.recv(common.SOCKET_RECV_BUFFER) + if data: + cb = common.partial(callback, sock) + 
cmdlist = data.split() + + if cmdlist[0] not in common.CMD_TYPE: + callback(sock, 'Fail') + continue + + msg = messageObject(cmdlist[1:], cb) + FUNCS[cmdlist[0]].put(msg) + continue + else: + if sock in SOCKET_LIST: + SOCKET_LIST.remove(sock) + except: + raise + continue + common.time.sleep(0.2) + + server_socket.close() + +# Platform initialize +KERNEL_MODULE = ['i2c_dev', 'i2c-mux-pca954x force_deselect_on_exit=1', 'at24', 'pegatron_fn_6254_dn_f_cpld', 'pegatron_hwmon_mcu', 'pegatron_fn_6254_dn_f_psu', 'pegatron_fn_6254_dn_f_sfp', 'pegatron_fn_6254_dn_f_ixgbe'] +MODULE_ID = ['pca9544', 'pca9544', 'fn_6254_dn_f_psu', 'fn_6254_dn_f_psu', '24c02', 'pega_hwmon_mcu', 'fn_6254_dn_f_cpld', 'fn_6254_dn_f_cpld', 'fn_6254_dn_f_cpld', 'fn_6254_dn_f_sfpA', 'fn_6254_dn_f_sfpB', 'fn_6254_dn_f_sfpC'] +I2C_CHECK_NODE = ['i2c-0', 'i2c-1'] +UNINSTALL_CHECK_NODE = ['-0072', '-0073'] +DEVICE_NODE= ['i2c-2', 'i2c-6', 'i2c-2', 'i2c-3', 'i2c-4', 'i2c-5', 'i2c-6', 'i2c-7', 'i2c-8', 'i2c-6', 'i2c-7', 'i2c-8'] +DEVICE_ADDRESS = ['0x72', '0x73', '0x58', '0x59', '0x54', '0x70', '0x74', '0x75', '0x76', '0x50', '0x50', '0x50'] +def checkDevicePosition(num): + for i in range(0, len(I2C_CHECK_NODE)): + status, output = common.doBash("echo " + MODULE_ID[num] + " " + DEVICE_ADDRESS[num] + " > " + common.I2C_PREFIX + I2C_CHECK_NODE[i] + "/new_device") + status, output = common.doBash("ls " + common.I2C_PREFIX + DEVICE_NODE[num]) + DEVICE_NODE[num] = I2C_CHECK_NODE[i] + + if status: + status, output = common.doBash("echo " + DEVICE_ADDRESS[num] + " > " + common.I2C_PREFIX + I2C_CHECK_NODE[i] + "/delete_device") + else: + return + return + +def installDevice(): + for i in range(0, len(MODULE_ID)): + if MODULE_ID[i] == "pca9544": + checkDevicePosition(i) + else: + status, output = common.doBash("echo " + MODULE_ID[i] + " " + DEVICE_ADDRESS[i] + " > " + common.I2C_PREFIX + DEVICE_NODE[i] + "/new_device") + return + +def checkDriver(): + for i in range(0, len(KERNEL_MODULE)): + status, output = 
common.doBash("lsmod | grep " + KERNEL_MODULE[i]) + if status: + status, output = common.doBash("modprobe " + KERNEL_MODULE[i]) + return + +def doInstall(): + status, output = common.doBash("depmod -a") + checkDriver() + installDevice() + status, output = common.doBash("systemctl status flnet_s8930_54n-platform-main.service | grep inactive") + return + +def setupThreads(): + global THREADS, QUEUES + + # Queues + # Global + queueGlobal = common.Queue.Queue() + QUEUES.append(queueGlobal) + FUNCS['global'] = queueGlobal + + # Device + queueDevice = common.Queue.Queue() + QUEUES.append(queueDevice) + FUNCS['device'] = queueDevice + + # Threads + # Global + threadGlobal = GlobalThread('Global Handler', queueGlobal) + THREADS.append(threadGlobal) + + # Device + threadDevice = device.DeviceThread('Device Handler', queueDevice) + THREADS.append(threadDevice) + + # Check platform status + threadPlatformStatus = device.PlatformStatusThread('Platform Status Handler', 0.3) + THREADS.append(threadPlatformStatus) +def functionInit(): + setupThreads() + for thread in THREADS: + thread.start() + return + +def deviceInit(): + msg = messageObject(['init'], None) + FUNCS['device'].put(msg) + return + +# Platform uninitialize +def doUninstall(): + for i in range(0, len(KERNEL_MODULE)): + status, output = common.doBash("modprobe -rq " + KERNEL_MODULE[i]) + for i in range(0, len(MODULE_ID)): + if MODULE_ID[i] == "pca9544": + for node in range(0, len(I2C_CHECK_NODE)): + status, output = common.doBash("ls " + common.I2C_PREFIX + str(node) + UNINSTALL_CHECK_NODE[i]) + if not status: + status, output = common.doBash("echo " + DEVICE_ADDRESS[i] + " > " + common.I2C_PREFIX + I2C_CHECK_NODE[node] + "/delete_device") + else: + status, output = common.doBash("echo " + DEVICE_ADDRESS[i] + " > " + common.I2C_PREFIX + DEVICE_NODE[i] + "/delete_device") + return + +def main(): + args = common.sys.argv[1:] + + if len(args[0:]) < 1: + common.sys.exit(0) + + if args[0] == 'install': + common.RUN = True 
+ doInstall() + functionInit() + deviceInit() + messageHandler() + + common.sys.exit(0) + +if __name__ == "__main__": + main() diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/scripts/sensors b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/scripts/sensors index 6cd3af6feb23..d3af6e31dded 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/scripts/sensors +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/scripts/sensors @@ -2,6 +2,4 @@ docker exec -i pmon sensors "$@" #To probe sensors not part of lm-sensors -if [ -r /usr/local/bin/pegatron_fn_6254_dn_f_sensors.py ]; then - python /usr/local/bin/pegatron_fn_6254_dn_f_sensors.py get_sensors -fi +pegatron_fn_6254_dn_f_util.py cmd device sensors diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-init.service b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-init.service index 209edc0af6e7..b80b3d39588e 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-init.service +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-init.service @@ -4,8 +4,8 @@ Before=network.target DefaultDependencies=no [Service] -Type=oneshot -ExecStart=/usr/local/bin/pegatron_fn_6254_dn_f_util.py install +Type=simple +ExecStart=/usr/local/bin/pegaProcess/main.py install ExecStop=/usr/local/bin/pegatron_fn_6254_dn_f_util.py uninstall RemainAfterExit=yes diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-status.service b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-status.service deleted file mode 100644 index bfbfcb5e7067..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/service/fn_6254_dn_f-platform-status.service +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] 
-Description=Pegastron fn-6254-dn-f Platform status service -After=fn_6254_dn_f-platform-init.service -DefaultDependencies=no - -[Service] -Type=simple -ExecStart=/usr/local/bin/pegatron_fn_6254_dn_f_status.py run -ExecStop=/usr/local/bin/pegatron_fn_6254_dn_f_status.py stop - -[Install] -WantedBy=multi-user.target diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_sensors.py b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_sensors.py deleted file mode 100755 index 5e3f511cd4d5..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_sensors.py +++ /dev/null @@ -1,141 +0,0 @@ -#!/usr/bin/python - -import os -import sys -import logging - -FAN_NUM = 5 -sensors_path = '/sys/bus/i2c/devices/5-0070/' -sensors_nodes = {'fan_rpm': ['_inner_rpm', '_outer_rpm'], - 'fan_vol': ['ADC8_vol', 'ADC7_vol','ADC6_vol', 'ADC5_vol','ADC4_vol', 'ADC3_vol'], - 'temp':['lm75_48_temp', 'lm75_49_temp', 'lm75_4a_temp']} -sensors_type = {'fan_rpm': ['Inner RPM', 'Outer RPM'], - 'fan_vol': ['P0.2', 'P0.6','P0.1', 'P1.5','P0.7', 'P1.6'], - 'temp':['lm75_48_temp', 'lm75_49_temp', 'lm75_4a_temp']} - -# Get sysfs attribute -def get_attr_value(attr_path): - retval = 'ERR' - if (not os.path.isfile(attr_path)): - return retval - - try: - with open(attr_path, 'r') as fd: - retval = fd.read() - except Exception as error: - logging.error("Unable to open ", attr_path, " file !") - - retval = retval.rstrip('\r\n') - fd.close() - return retval - -def get_fan_status(number): - attr_value = get_attr_value(sensors_path + "fan" + str(number+1) + "_present") - if (attr_value != 'ERR'): - attr_value = int(attr_value, 16) - - if(attr_value == 0): - string = "Connect" - else: - string = "Disconnect" - return string - -def get_fan_alert(number): - attr_value = get_attr_value(sensors_path + "fan" + str(number+1) + "_status_alert") - if (attr_value != 'ERR'): - attr_value = 
int(attr_value, 16) - - if(attr_value == 0): - string = "Normal" - else: - string = "Abnormal" - return string - -def get_fan_inner_rpm(number): - return get_attr_value(sensors_path + "fan" + str(number+1) + "_inner_rpm") - -def get_fan_outer_rpm(number): - return get_attr_value(sensors_path + "fan" + str(number+1) + "_outer_rpm") - -def get_fan(): - for i in range(0,FAN_NUM): - print " " - #status - string = get_fan_status(i) - print "FAN " + str(i+1) + ":" + ' ' + string - if string=='Disconnect': - continue - - #alert - string = get_fan_alert(i) - print " Status:"+ ' ' + string - - #inner rpm - string = get_fan_inner_rpm(i) - print " Inner RPM:"+ string.rjust(10) + ' RPM' - - #outer rpm - string = get_fan_outer_rpm(i) - print " Outer RPM:"+ string.rjust(10) + ' RPM' - - return - -def get_hwmon(): - temp_type = sensors_type['temp'] - print " " - - for types in temp_type: - string = get_attr_value(sensors_path + types) - print types + ": " + string + " C" - - return - -def get_voltage(): - print " " - nodes = sensors_nodes['fan_vol'] - types = sensors_type['fan_vol'] - for i in range(0,len(nodes)): - string = get_attr_value(sensors_path + nodes[i]) - print types[i] + ': ' + string + " V" - - return - -def init_fan(): - return - -def main(): - """ - Usage: %(scriptName)s command object - - command: - install : install drivers and generate related sysfs nodes - clean : uninstall drivers and remove related sysfs nodes - show : show all systen status - set : change board setting with fan|led|sfp - """ - - if len(sys.argv)<2: - print main.__doc__ - - for arg in sys.argv[1:]: - if arg == 'fan_init': - init_fan() - elif arg == 'get_sensors': - ver = get_attr_value(sensors_path + "mb_fw_version") - print 'MB-SW Version: ' + ver - ver = get_attr_value(sensors_path + "fb_fw_version") - print 'FB-SW Version: ' + ver - get_fan() - get_hwmon() - get_voltage() - elif arg == 'fan_set': - if len(sys.argv[1:])<1: - print main.__doc__ - else: - set_fan(sys.argv[1:]) - return - 
else: - print main.__doc__ - -if __name__ == "__main__": - main() diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_status.py b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_status.py deleted file mode 100755 index 9ad398052c8e..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_status.py +++ /dev/null @@ -1,164 +0,0 @@ -#!/usr/bin/env python -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -import sys, getopt -import logging -import os -import commands -import threading -import time -import syslog - -DEBUG = False -STOP = False -FAN_NUM = 5 -PASS = 0 -FAIL = 1 - -i2c_prefix = '/sys/bus/i2c/devices/' -status_nodes = {'fan': ['5-0070'], - 'psu':['2-0058', '3-0059']} -system_led_node = '7-0075' -status_alert = {'fan': ['wrongAirflow_alert', 'outerRPMOver_alert', 'outerRPMUnder_alert', 'outerRPMZero_alert', 'innerRPMOver_alert', 'innerRPMUnder_alert', 'innerRPMZero_alert', 'notconnect_alert'], - 'psu': ['vout_over_voltage', 'iout_over_current_fault', 'iout_over_current_warning', 'iput_over_current_warning', 'iput_insufficient', 'temp_over_temp_fault', 'temp_over_temp_warning']} - -led_command = {'sys_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, - 'pwr_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, - 'loc_led': {'on':'0', 'off':'1', 'blink':'2'}, - 'fan_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, - 'cpld_allled_ctrl': {'off':'0', 'mix':'1', 'amber':'2', 'normal':'3'}, - 'serial_led_enable': {'disable':'0', 'enable':'1'}} - -pre_fan_led_status = 0 -pre_pwr_led_status = 0 - -def dbg_print(string): - if DEBUG == True: - print string - return - -def do_cmd(cmd, show): - logging.info('Run :' + cmd) - status, output = commands.getstatusoutput(cmd) - dbg_print(cmd + "with result:" + str(status)) - dbg_print("output:" + output) - if status: - logging.info('Failed :' + cmd) - if show: - print('Failed :' + cmd) - return status, output - -def read_file(path): - try: - file = open(path) - except IOError as e: - print "Error: unable to open file: %s" % str(e) - return False - - value = int(file.readline().rstrip()) - file.close() - - return value - -def write_file(path, value): - try: - file = open(path, "r+") - except IOError as e: - print "Error: unable to open file: %s" % str(e) - return False - - file.seek(0) - file.write(str(value)) - file.close() - 
- return - -def check_platform_fan(): - global pre_fan_led_status - fan_result = 0 - fan_status_node = status_nodes['fan'] - fan_alert = status_alert['fan'] - fan_led = led_command['fan_led'] - - status, output = do_cmd("ls " + i2c_prefix, 1) - if output.find(fan_status_node[0]) != -1: - for num in range(0,FAN_NUM): - for alert_type in fan_alert: - path = i2c_prefix + fan_status_node[0] + "/fan" + str(num+1) + "_" + alert_type - fan_result += read_file(path) - - if fan_result != PASS: - if pre_fan_led_status != fan_led["blink_amber"]: - path = i2c_prefix + system_led_node + "/fan_led" - write_file(path, fan_led["blink_amber"]) - pre_fan_led_status = fan_led["blink_amber"] - syslog.syslog(syslog.LOG_ERR, 'FAN Status Error !!!') - return FAIL - - if pre_fan_led_status != fan_led["green"]: - path = i2c_prefix + system_led_node + "/fan_led" - write_file(path, fan_led["green"]) - pre_fan_led_status = fan_led["green"] - syslog.syslog(syslog.LOG_WARNING, 'FAN Status Normal !!!') - return PASS - -def check_platform_psu(): - global pre_pwr_led_status - psu_result = 0 - psu_status_node = status_nodes['psu'] - psu_alert = status_alert['psu'] - psu_led = led_command['pwr_led'] - - status, output = do_cmd("ls " + i2c_prefix, 1) - if output.find(psu_status_node[0]) != -1 and output.find(psu_status_node[1]) != -1: - for nodes in psu_status_node: - for alert_type in psu_alert: - path = i2c_prefix + nodes + "/" + alert_type - psu_result += read_file(path) - - if psu_result != PASS: - if pre_pwr_led_status != psu_led["blink_amber"]: - path = i2c_prefix + system_led_node + "/pwr_led" - write_file(path, psu_led["blink_amber"]) - pre_pwr_led_status = psu_led["blink_amber"] - syslog.syslog(syslog.LOG_ERR, 'PSU Status Error !!!') - return FAIL - - if pre_pwr_led_status != psu_led["green"]: - path = i2c_prefix + system_led_node + "/pwr_led" - write_file(path, psu_led["green"]) - pre_pwr_led_status = psu_led["green"] - syslog.syslog(syslog.LOG_WARNING, 'PSU Status Normal !!!') - return 
PASS - -def pega_check_platform_status(): - while(True): - total_result = 0 - if STOP == True: - return - total_result += check_platform_fan() - total_result += check_platform_psu() - time.sleep(1) - return - -def main(): - for arg in sys.argv[1:]: - if arg == 'run': - pega_check_platform_status() - elif arg == 'stop': - STOP = True - -if __name__ == "__main__": - main() diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_util.py b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_util.py index fb108f12af60..ce539edf22fa 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_util.py +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/utils/pegatron_fn_6254_dn_f_util.py @@ -1,6 +1,6 @@ #!/usr/bin/env python # -# Copyright (C) 2018 Pegatron, Inc. +# Copyright (C) 2019 Pegatron, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -15,251 +15,44 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-import sys, getopt -import logging -import os -import commands -import threading -import time +from pegaProcess import common -DEBUG = False - -SFP_MAX_NUM = 48 -TOTAL_PORT_NUM = 54 -CPLDA_SFP_NUM = 24 -CPLDB_SFP_NUM = 12 -CPLDC_SFP_NUM = 18 - -kernel_module = ['i2c_dev', 'i2c-mux-pca954x force_deselect_on_exit=1', 'at24', 'pegatron_fn_6254_dn_f_cpld', 'pegatron_hwmon_mcu', 'pegatron_fn_6254_dn_f_psu', 'pegatron_fn_6254_dn_f_sfp', 'pegatron_fn_6254_dn_f_ixgbe'] -moduleID = ['pca9544', 'pca9544', 'fn_6254_dn_f_psu', 'fn_6254_dn_f_psu', '24c02', 'pega_hwmon_mcu', 'fn_6254_dn_f_cpld', 'fn_6254_dn_f_cpld', 'fn_6254_dn_f_cpld', 'fn_6254_dn_f_sfpA', 'fn_6254_dn_f_sfpB', 'fn_6254_dn_f_sfpC'] -i2c_check_node = ['i2c-0', 'i2c-1'] -uninstall_check_node = ['-0072', '-0073'] -device_address = ['0x72', '0x73', '0x58', '0x59', '0x54', '0x70', '0x74', '0x75', '0x76', '0x50', '0x50', '0x50'] -device_node= ['i2c-2', 'i2c-6', 'i2c-2', 'i2c-3', 'i2c-4', 'i2c-5', 'i2c-6', 'i2c-7', 'i2c-8', 'i2c-6', 'i2c-7', 'i2c-8'] - -i2c_prefix = '/sys/bus/i2c/devices/' -cpld_bus = ['6-0074', '7-0075', '8-0076'] -led_nodes = ['sys_led', 'pwr_led', 'loc_led', 'fan_led', "cpld_allled_ctrl", "serial_led_enable"] - -def dbg_print(string): - if DEBUG == True: - print string - return - -def do_cmd(cmd, show): - logging.info('Run :' + cmd) - status, output = commands.getstatusoutput(cmd) - dbg_print(cmd + "with result:" + str(status)) - dbg_print("output:" + output) - if status: - logging.info('Failed :' + cmd) - if show: - print('Failed :' + cmd) - return status, output - -def install_driver(): - status, output = do_cmd("depmod -a", 1) - - for i in range(0, len(kernel_module)): - status, output = do_cmd("modprobe " + kernel_module[i], 1) - if status: - return status - - return - -def check_device_position(num): - for i in range(0, len(i2c_check_node)): - status, output = do_cmd("echo " + moduleID[num] + " " + device_address[num] + " > " + i2c_prefix + i2c_check_node[i] + "/new_device", 0) - status, 
output = do_cmd("ls " + i2c_prefix + device_node[num], 0) - device_node[num] = i2c_check_node[i] - - if status: - status, output = do_cmd("echo " + device_address[num] + " > " + i2c_prefix + i2c_check_node[i] + "/delete_device", 0) - else: - return - - return - -def install_device(): - for i in range(0, len(moduleID)): - if moduleID[i] == "pca9544": - check_device_position(i) - else: - status, output = do_cmd("echo " + moduleID[i] + " " + device_address[i] + " > " + i2c_prefix + device_node[i] + "/new_device", 1) - - return - -def check_driver(): - for i in range(0, len(kernel_module)): - status, output = do_cmd("lsmod | grep " + kernel_module[i], 0) - if status: - status, output = do_cmd("modprobe " + kernel_module[i], 1) - - return - -def do_install(): - status, output = do_cmd("depmod -a", 1) - - check_driver() - install_device() - return - -def do_uninstall(): - for i in range(0, len(kernel_module)): - status, output = do_cmd("modprobe -rq " + kernel_module[i], 0) - - for i in range(0, len(moduleID)): - if moduleID[i] == "pca9544": - for node in range(0, len(i2c_check_node)): - status, output = do_cmd("ls " + i2c_prefix + str(node) + uninstall_check_node[i], 0) - if not status: - status, output = do_cmd("echo " + device_address[i] + " > " + i2c_prefix + i2c_check_node[node] + "/delete_device", 0) - - else: - status, output = do_cmd("echo " + device_address[i] + " > " + i2c_prefix + device_node[i] + "/delete_device", 0) - - return - -led_command = {'sys_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, - 'pwr_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, - 'loc_led': {'on':'0', 'off':'1', 'blink':'2'}, - 'fan_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, - 'cpld_allled_ctrl': {'off':'0', 'mix':'1', 'amber':'2', 'normal':'3'}, - 'serial_led_enable': {'disable':'0', 'enable':'1'}} - -def set_led(args): +#message handler +def doCommand(cmd): """ - command: - 
sys_led : set SYS led [off | green | amber | blink_green | blink_amber] - pwr_led : set PWR led [off | green | amber | blink_green | blink_amber] - loc_led : set LOCATOR led [off | on | blink] - fan_led : set FAN led [off | green | amber | blink_green | blink_amber] + Command: + global : Set global config + device : Set device config """ - if len(args) < 1: - print set_led.__doc__ - sys.exit(0) - - if args[0] not in led_command: - print set_led.__doc__ - sys.exit(0) - - for i in range(0,len(led_nodes)): - if args[0] == led_nodes[i]: - node = i2c_prefix + cpld_bus[1] + '/'+ led_nodes[i] - - command = led_command[args[0]] - data = command[args[1]] - - status, output = do_cmd("echo "+ str(data) + " > "+ node, 1) - - return - -def set_device(args): - """ - command: - led : set status led - """ - - if len(args[0:]) < 1: - print set_device.__doc__ - sys.exit(0) - - if args[0] == 'led': - set_led(args[1:]) - else: - print set_device.__doc__ - - return - -device_init = {'led': [['led', 'sys_led', 'green'], ['led', 'pwr_led', 'green'], ['led', 'fan_led', 'green'], ['led', 'cpld_allled_ctrl', 'normal'], ['led', 'serial_led_enable', 'enable']]} - -def pega_init(): - #set led - for i in range(0,len(device_init['led'])): - set_device(device_init['led'][i]) - - #set tx_disable - for x in range(0, SFP_MAX_NUM): - if x < CPLDB_SFP_NUM: - bus = cpld_bus[1] - elif x < CPLDB_SFP_NUM + CPLDA_SFP_NUM: - bus = cpld_bus[0] - else: - bus = cpld_bus[2] - - nodes = i2c_prefix + bus + '/sfp' + str(x+1) + '_tx_disable' - dbg_print("SFP_TX_DISABLE NODES: " + nodes) - status, output = do_cmd("echo 0 > "+ nodes, 1) - - #set QSFP reset to normal - for x in range(SFP_MAX_NUM, TOTAL_PORT_NUM): - nodes = i2c_prefix + cpld_bus[2] + '/sfp' + str(x+1) + '_reset' - dbg_print("SFP_RESET NODES: " + nodes) - status, output = do_cmd("echo 1 > "+ nodes, 1) + if len(cmd[0:]) < 1 or cmd[0] not in common.CMD_TYPE: + print doCommand.__doc__ + return + msg = ' '.join(str(data) for data in cmd) + result = 
common.doSend(msg, common.SOCKET_PORT) + print result - #set QSFP I2c enable - for x in range(SFP_MAX_NUM, TOTAL_PORT_NUM): - nodes = i2c_prefix + cpld_bus[2] + '/sfp' + str(x+1) + '_modeseln' - dbg_print("SFP_MODSEL NODES: " + nodes) - status, output = do_cmd("echo 0 > "+ nodes, 1) - return - -def pega_cmd(args): - """ - command: - locate : blink locate LED for searching - """ - - if len(args) < 1: - print pega_cmd.__doc__ - sys.exit(0) - - if args[0] == 'locate': - set_led(['loc_led', 'blink']) - time.sleep(20) - set_led(['loc_led', 'off']) - else: - print pega_cmd.__doc__ - sys.exit(0) return def main(): """ - command: - install : install drivers - uninstall : uninstall drivers - set : change board settings - cmd : do command - debug : show debug info [on/off] - """ - - if len(sys.argv[1:]) < 1: - print main.__doc__ - sys.exit(0) + Command: + install : Install drivers + uninstall : Uninstall drivers + cmd : Commands + """ + args = common.sys.argv[1:] - arg = sys.argv[1] - if arg == 'install': - do_install() - pega_init() - elif arg == 'uninstall': - do_uninstall() - elif arg == 'set': - set_device(sys.argv[2:]) - elif arg == 'cmd': - pega_cmd(sys.argv[2:]) - elif arg == 'debug': - if len(sys.argv[2:]) < 1: - print main.__doc__ - sys.exit(0) - if sys.argv[2] == 'on': - DEBUG = True - else: - DEBUG = False + if len(args[0:]) < 1: + print main.__doc__ + return + + if args[0] == 'uninstall': + doCommand(['global', 'uninstall']) + elif args[0] == 'cmd': + doCommand(args[1:]) else: - print main.__doc__ - sys.exit(0) - - return + print main.__doc__ + common.sys.exit(0) if __name__ == "__main__": main() From 06493c79c4fd7322bdd97f11e3f407f3c0f18610 Mon Sep 17 00:00:00 2001 From: PeterLin Date: Tue, 13 Aug 2019 13:49:28 +0800 Subject: [PATCH 17/20] Modify porsche project utility --- platform/nephos/one-image.mk | 1 - .../debian/changelog | 7 + .../porsche/modules/Makefile | 2 +- .../porsche/modules/pegatron_porsche_psu.c | 329 ++++++++++++++++ 
.../porsche/pegaProcess/__init__.py | 0 .../porsche/pegaProcess/common.py | 87 ++++ .../porsche/pegaProcess/device.py | 370 ++++++++++++++++++ .../porsche/pegaProcess/main.py | 205 ++++++++++ .../porsche/scripts/sensors | 4 +- .../service/porsche-platform-init.service | 4 +- .../porsche/utils/pegatron_porsche_util.py | 229 ++--------- .../porsche/utils/porsche_sensors.py | 141 ------- 12 files changed, 1032 insertions(+), 347 deletions(-) create mode 100644 platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_psu.c create mode 100755 platform/nephos/sonic-platform-modules-pegatron/porsche/pegaProcess/__init__.py create mode 100755 platform/nephos/sonic-platform-modules-pegatron/porsche/pegaProcess/common.py create mode 100755 platform/nephos/sonic-platform-modules-pegatron/porsche/pegaProcess/device.py create mode 100755 platform/nephos/sonic-platform-modules-pegatron/porsche/pegaProcess/main.py delete mode 100755 platform/nephos/sonic-platform-modules-pegatron/porsche/utils/porsche_sensors.py diff --git a/platform/nephos/one-image.mk b/platform/nephos/one-image.mk index 0500e8315268..883dfbada2e2 100644 --- a/platform/nephos/one-image.mk +++ b/platform/nephos/one-image.mk @@ -17,5 +17,4 @@ $(SONIC_ONE_IMAGE)_DOCKERS += $(filter-out $(patsubst %-$(DBG_IMAGE_MARK).gz,%.g else $(SONIC_ONE_IMAGE)_DOCKERS = $(SONIC_INSTALL_DOCKER_IMAGES) endif ->>>>>>> cc312793b57e04420f9aea8e0d86dbc6fa74de25 SONIC_INSTALLERS += $(SONIC_ONE_IMAGE) diff --git a/platform/nephos/sonic-platform-modules-pegatron/debian/changelog b/platform/nephos/sonic-platform-modules-pegatron/debian/changelog index 39ecd34d960c..cd7e09adb563 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/debian/changelog +++ b/platform/nephos/sonic-platform-modules-pegatron/debian/changelog @@ -1,3 +1,10 @@ +sonic-pegatron-platform-modules (1.0.0) unstable; urgency=low + + * Add fn-6254-dn-f + * Modify porsche utility + + -- Pegatron Mon, 12 Aug 2019 17:41:32 +0800 + 
sonic-pegatron-platform-modules (0.1) unstable; urgency=low * Initial release diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/Makefile b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/Makefile index 60e882a586d9..8b543bd5918a 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/Makefile +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/Makefile @@ -1 +1 @@ -obj-m:=pegatron_porsche_cpld.o pegatron_hwmon_mcu.o pegatron_porsche_sfp.o +obj-m:=pegatron_porsche_cpld.o pegatron_hwmon_mcu.o pegatron_porsche_sfp.o pegatron_porsche_psu.o diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_psu.c b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_psu.c new file mode 100644 index 000000000000..c990a0ea905c --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_psu.c @@ -0,0 +1,329 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef PEGA_DEBUG +/*#define pega_DEBUG*/ +#ifdef PEGA_DEBUG +#define DBG(x) x +#else +#define DBG(x) +#endif /* DEBUG */ + +#define PSU_58_ADDRESS 0x58 +#define PSU_59_ADDRESS 0x59 +#define PSU_VOUT_REG 0x7A +#define PSU_IOUT_REG 0x7B +#define PSU_IPUT_REG 0x7C +#define PSU_TEMP_REG 0x7D +#define PSU_VOUT_OVER_VOLTAGE_BIT 7 +#define PSU_IOUT_OVER_CURRENT_FAULT_BIT 7 +#define PSU_IOUT_OVER_CURRENT_WARNING_BIT 5 +#define PSU_IPUT_OVER_CURRENT_WARNING_BIT 1 +#define PSU_IPUT_INSUFFICIENT_BIT 3 +#define PSU_TEMP_OVER_TEMP_FAULT_BIT 7 +#define PSU_TEMP_OVER_TEMP_WARNING_BIT 6 + +#define GET_BIT(data, bit, value) value = (data >> bit) & 0x1 +#define SET_BIT(data, bit) data |= (1 << bit) +#define CLEAR_BIT(data, bit) data &= ~(1 << bit) + +struct psu_client_node { + struct i2c_client *client; + struct list_head list; +}; + +static const unsigned short normal_i2c[] = { PSU_58_ADDRESS, PSU_59_ADDRESS, I2C_CLIENT_END }; +static LIST_HEAD(psu_client_list); +static struct mutex list_lock; + +static int pega_porsche_psu_read(unsigned short addr, u8 reg) +{ + struct list_head *list_node = NULL; + struct psu_client_node *psu_node = NULL; + int data = -EPERM; + + mutex_lock(&list_lock); + + list_for_each(list_node, &psu_client_list) + { + psu_node = list_entry(list_node, struct psu_client_node, list); + + if (psu_node->client->addr == addr) { + data = i2c_smbus_read_byte_data(psu_node->client, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, addr, reg, data)); + break; + } + } + + mutex_unlock(&list_lock); + + return data; +} + +static ssize_t read_psu_vout_over_voltage(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = PSU_VOUT_REG, val = 0; + + data = pega_porsche_psu_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", 
__func__, client->addr, reg, data)); + GET_BIT(data, PSU_VOUT_OVER_VOLTAGE_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t read_psu_iout_over_current_fault(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = PSU_IOUT_REG, val = 0; + + data = pega_porsche_psu_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, PSU_IOUT_OVER_CURRENT_FAULT_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t read_psu_iout_over_current_warning(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = PSU_IOUT_REG, val = 0; + + data = pega_porsche_psu_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, PSU_IOUT_OVER_CURRENT_WARNING_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t read_psu_iput_over_current_warning(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = PSU_IPUT_REG, val = 0; + + data = pega_porsche_psu_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, PSU_IPUT_OVER_CURRENT_WARNING_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t read_psu_iput_insufficient(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = PSU_IPUT_REG, val = 0; + + data = pega_porsche_psu_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, PSU_IPUT_INSUFFICIENT_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t 
read_psu_temp_over_temp_fault(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = PSU_TEMP_REG, val = 0; + + data = pega_porsche_psu_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, PSU_TEMP_OVER_TEMP_FAULT_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t read_psu_temp_over_temp_warning(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 data = 0, reg = PSU_TEMP_REG, val = 0; + + data = pega_porsche_psu_read(client->addr, reg); + DBG(printk(KERN_ALERT "%s - addr: 0x%x, reg: %x, data: %x\r\n", __func__, client->addr, reg, data)); + GET_BIT(data, PSU_TEMP_OVER_TEMP_WARNING_BIT, val); + + return sprintf(buf, "%d\n", val); +} + +static SENSOR_DEVICE_ATTR(vout_over_voltage, S_IRUGO, read_psu_vout_over_voltage, NULL, 0); +static SENSOR_DEVICE_ATTR(iout_over_current_fault, S_IRUGO, read_psu_iout_over_current_fault, NULL, 0); +static SENSOR_DEVICE_ATTR(iout_over_current_warning, S_IRUGO, read_psu_iout_over_current_warning, NULL, 0); +static SENSOR_DEVICE_ATTR(iput_over_current_warning, S_IRUGO, read_psu_iput_over_current_warning, NULL, 0); +static SENSOR_DEVICE_ATTR(iput_insufficient, S_IRUGO, read_psu_iput_insufficient, NULL, 0); +static SENSOR_DEVICE_ATTR(temp_over_temp_fault, S_IRUGO, read_psu_temp_over_temp_fault, NULL, 0); +static SENSOR_DEVICE_ATTR(temp_over_temp_warning, S_IRUGO, read_psu_temp_over_temp_warning, NULL, 0); + +static struct attribute *pega_porsche_psu_attributes[] = { + &sensor_dev_attr_vout_over_voltage.dev_attr.attr, + &sensor_dev_attr_iout_over_current_fault.dev_attr.attr, + &sensor_dev_attr_iout_over_current_warning.dev_attr.attr, + &sensor_dev_attr_iput_over_current_warning.dev_attr.attr, + &sensor_dev_attr_iput_insufficient.dev_attr.attr, + 
&sensor_dev_attr_temp_over_temp_fault.dev_attr.attr, + &sensor_dev_attr_temp_over_temp_warning.dev_attr.attr, + NULL +}; + +static const struct attribute_group pega_porsche_psu_group = { .attrs = pega_porsche_psu_attributes}; + +static void pega_porsche_psu_add_client(struct i2c_client *client) +{ + struct psu_client_node *node = kzalloc(sizeof(struct psu_client_node), GFP_KERNEL); + + if (!node) { + dev_dbg(&client->dev, "Can't allocate psu_client_node (0x%x)\n", client->addr); + return; + } + + node->client = client; + + mutex_lock(&list_lock); + list_add(&node->list, &psu_client_list); + mutex_unlock(&list_lock); +} + +static void pega_porsche_psu_remove_client(struct i2c_client *client) +{ + struct list_head *list_node = NULL; + struct psu_client_node *psu_node = NULL; + int found = 0; + + mutex_lock(&list_lock); + + list_for_each(list_node, &psu_client_list) + { + psu_node = list_entry(list_node, struct psu_client_node, list); + + if (psu_node->client == client) { + found = 1; + break; + } + } + + if (found) { + list_del(list_node); + kfree(psu_node); + } + + mutex_unlock(&list_lock); +} + +static int pega_porsche_psu_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { + dev_dbg(&client->dev, "i2c_check_functionality failed (0x%x)\n", client->addr); + status = -EIO; + goto exit; + } + + /* Register sysfs hooks */ + switch(client->addr) + { + case PSU_58_ADDRESS: + case PSU_59_ADDRESS: + status = sysfs_create_group(&client->dev.kobj, &pega_porsche_psu_group); + break; + default: + dev_dbg(&client->dev, "i2c_check_psu failed (0x%x)\n", client->addr); + status = -EIO; + goto exit; + break; + } + + if (status) { + goto exit; + } + + dev_info(&client->dev, "chip found\n"); + pega_porsche_psu_add_client(client); + + return 0; + +exit: + return status; +} + +static int pega_porsche_psu_remove(struct i2c_client *client) +{ + switch(client->addr) + { + 
case PSU_58_ADDRESS: + case PSU_59_ADDRESS: + sysfs_remove_group(&client->dev.kobj, &pega_porsche_psu_group); + break; + default: + dev_dbg(&client->dev, "i2c_remove_psu failed (0x%x)\n", client->addr); + break; + } + + pega_porsche_psu_remove_client(client); + return 0; +} + +static const struct i2c_device_id pega_porsche_psu_id[] = { + { "porsche_psu", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, pega_porsche_psu_id); + +static struct i2c_driver pega_porsche_psu_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "pegatron_porsche_psu", + }, + .probe = pega_porsche_psu_probe, + .remove = pega_porsche_psu_remove, + .id_table = pega_porsche_psu_id, + .address_list = normal_i2c, +}; + +static int __init pega_porsche_psu_init(void) +{ + mutex_init(&list_lock); + + return i2c_add_driver(&pega_porsche_psu_driver); +} + +static void __exit pega_porsche_psu_exit(void) +{ + i2c_del_driver(&pega_porsche_psu_driver); +} + +MODULE_AUTHOR("Peter5 Lin "); +MODULE_DESCRIPTION("pega_porsche_psu driver"); +MODULE_LICENSE("GPL"); + +module_init(pega_porsche_psu_init); +module_exit(pega_porsche_psu_exit); diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/pegaProcess/__init__.py b/platform/nephos/sonic-platform-modules-pegatron/porsche/pegaProcess/__init__.py new file mode 100755 index 000000000000..e69de29bb2d1 diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/pegaProcess/common.py b/platform/nephos/sonic-platform-modules-pegatron/porsche/pegaProcess/common.py new file mode 100755 index 000000000000..58335b06eb15 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/pegaProcess/common.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import sys, os, syslog +import getopt, commands, threading +import time, socket, Queue +import signal, select +from functools import partial + +RUN = False +PASS = 0 +FAIL = 1 + +SOCKET_PORT = 50000 +SOCKET_RECV_BUFFER = 4096 +SOCKET_TIME_OUT = 20 +FILE_LOCK = threading.Lock() +SOCKET_LOCK = threading.Lock() + +CMD_TYPE = ['global', 'device'] +RESPONSE_ERROR_PARAMETER = "Parameters error" +I2C_PREFIX = '/sys/bus/i2c/devices/' + +def doBash(cmd): + status, output = commands.getstatusoutput(cmd) + + return status, output + +def doSend(msg, port): + if SOCKET_LOCK.acquire(): + host = socket.gethostname() + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.settimeout(SOCKET_TIME_OUT) + + try: + s.connect((host, port)) + except: + sys.exit(0) + + s.sendall(msg) + result = s.recv(SOCKET_RECV_BUFFER) + s.close() + SOCKET_LOCK.release() + return result + +def readFile(path): + if FILE_LOCK.acquire(): + try: + file = open(path) + except IOError as e: + print "Error: unable to open file: %s" % str(e) + FILE_LOCK.release() + return 'Error' + + value = file.readline().rstrip() + file.close() + FILE_LOCK.release() + + return value + +def writeFile(path, value): + if FILE_LOCK.acquire(): + try: + file = open(path, "r+") + except IOError as e: + print "Error: unable to open file: %s" % str(e) + FILE_LOCK.release() + return 'Error' + + file.seek(0) + file.write(str(value)) + file.close() + FILE_LOCK.release() + + return "Success" diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/pegaProcess/device.py b/platform/nephos/sonic-platform-modules-pegatron/porsche/pegaProcess/device.py new file mode 
100755 index 000000000000..6b08dd1511b0 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/pegaProcess/device.py @@ -0,0 +1,370 @@ +#!/usr/bin/env python +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import common + +TOTAL_PORT_NUM = 54 +SFP_MAX_NUM = 48 +CPLDA_SFP_NUM = 24 +CPLDB_SFP_NUM = 12 +CPLDC_SFP_NUM = 18 +FAN_NUM = 5 + +DEVICE_BUS = {'cpld': ['6-0074', '7-0075', '8-0076'], + 'fan': ['5-0070'], + 'psu': ['2-0058', '3-0059'], + 'status_led': ['7-0075']} + +class DeviceThread(common.threading.Thread): + def __init__(self,threadname, q): + common.threading.Thread.__init__(self,name = threadname) + self.queue = q + + def run(self): + while common.RUN: + message = self.queue.get() + self.onMessage(message) + + def onMessage(self, message): + """ + Commands: + led : Led controls + locate : Blink locator LED + sensors : Read HW monitors + """ + + if len(message.command) < 1: + result = self.onMessage.__doc__ + else: + if message.command[0] == 'init': + result = deviceInit() + elif message.command[0] == 'led': + result = ledControls(message.command[1:]) + elif message.command[0] == 'locate': + locatethread = common.threading.Thread(target = locateDeviceLed) + locatethread.start() + result = 'Success' + elif message.command[0] == 'sensors': + result = getSensors() + else: + result = self.onMessage.__doc__ + + if (message.callback is not None): + message.callback(result) + 
+STATUS_ALERT = {'fan': ['wrongAirflow_alert', 'outerRPMOver_alert', 'outerRPMUnder_alert', 'outerRPMZero_alert', + 'innerRPMOver_alert', 'innerRPMUnder_alert', 'innerRPMZero_alert', 'notconnect_alert'], + 'psu': ['vout_over_voltage', 'iout_over_current_fault', 'iout_over_current_warning', + 'iput_over_current_warning', 'iput_insufficient', 'temp_over_temp_fault', 'temp_over_temp_warning']} +class PlatformStatusThread(common.threading.Thread): + def __init__(self,threadname, timer): + self.running = True + common.threading.Thread.__init__(self,name = threadname) + self.timer = timer + self.fan_led_status = 'off' + self.psu_led_status = 'off' + + def run(self): + while common.RUN: + self.checkPlatformStatus() + common.time.sleep(self.timer) + + def checkPlatformStatus(self): + total_result = common.PASS + total_result += self.checkFanStatus() + total_result += self.checkPsuStatus() + + def checkFanStatus(self): + fan_result = common.PASS + fan_bus = DEVICE_BUS['fan'] + fan_alert = STATUS_ALERT['fan'] + fan_led = LED_COMMAND['fan_led'] + fan_normal = 'green' + fan_abnormal = 'blink_amber' + led_bus = DEVICE_BUS['status_led'] + led_path = common.I2C_PREFIX + led_bus[0] + '/' + LED_NODES[3] + + status, output = common.doBash("ls " + common.I2C_PREFIX) + if output.find(fan_bus[0]) != -1: + for num in range(0,FAN_NUM): + for alert_type in fan_alert: + path = common.I2C_PREFIX + fan_bus[0] + "/fan" + str(num+1) + "_" + alert_type + result = common.readFile(path) + if result != 'Error': + fan_result += int(result) + if fan_result != common.PASS: + if self.fan_led_status != fan_abnormal: + common.writeFile(led_path, fan_led[fan_abnormal]) + self.fan_led_status = fan_abnormal + common.syslog.syslog(common.syslog.LOG_ERR, 'FAN Status Error !!!') + return common.FAIL + + if self.fan_led_status != fan_normal: + common.writeFile(led_path, fan_led[fan_normal]) + self.fan_led_status = fan_normal + common.syslog.syslog(common.syslog.LOG_ERR, 'FAN Status Normal !!!') + return 
common.PASS + + def checkPsuStatus(self): + psu_result = common.PASS + psu_bus = DEVICE_BUS['psu'] + psu_alert = STATUS_ALERT['psu'] + psu_led = LED_COMMAND['pwr_led'] + psu_normal = 'green' + psu_abnormal = 'blink_amber' + led_bus = DEVICE_BUS['status_led'] + led_path = common.I2C_PREFIX + led_bus[0] + '/' + LED_NODES[1] + + status, output = common.doBash("ls " + common.I2C_PREFIX) + if output.find(psu_bus[0]) != -1 and output.find(psu_bus[1]) != -1: + for nodes in psu_bus: + for alert_type in psu_alert: + path = common.I2C_PREFIX + nodes + "/" + alert_type + result = common.readFile(path) + if result != 'Error': + psu_result += int(result) + if psu_result != common.PASS: + if self.psu_led_status != psu_abnormal: + common.writeFile(led_path, psu_led[psu_abnormal]) + self.psu_led_status = psu_abnormal + common.syslog.syslog(common.syslog.LOG_ERR, 'PSU Status Error !!!') + return common.FAIL + + if self.psu_led_status != psu_normal: + common.writeFile(led_path, psu_led[psu_normal]) + self.psu_led_status = psu_normal + common.syslog.syslog(common.syslog.LOG_ERR, 'PSU Status Normal !!!') + return common.PASS + +LED_COMMAND = {'sys_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'pwr_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'loc_led': {'on':'0', 'off':'1', 'blink':'2'}, + 'fan_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, + 'cpld_allled_ctrl': {'off':'0', 'mix':'1', 'amber':'2', 'normal':'3'}, + 'serial_led_enable': {'disable':'0', 'enable':'1'}} +LED_NODES = ['sys_led', 'pwr_led', 'loc_led', 'fan_led', "cpld_allled_ctrl", "serial_led_enable"] +def ledControls(args): + """ + Commands: + set : Set led config + get : Get led status + """ + COMMAND_TYPE = ['set', 'get'] + if len(args) < 1 or args[0] not in COMMAND_TYPE: + return ledControls.__doc__ + + result = setGetLed(args[0:]) + + return result + +def setGetLed(args): + """ + Commands: + sys_led : 
System status led [green/amber/off/blink_green/blink_amber] + pwr_led : Power status led [green/amber/off/blink_green/blink_amber] + loc_led : Locator led [on/off/blink] + fan_led : Fan led [green/amber/off/blink_green/blink_amber] + """ + if len(args) < 3 or args[1] not in LED_COMMAND: + return setGetLed.__doc__ + + led_bus = DEVICE_BUS['status_led'] + for i in range(0,len(LED_NODES)): + if args[1] == LED_NODES[i]: + path = common.I2C_PREFIX + led_bus[0] + '/' + LED_NODES[i] + command = LED_COMMAND[args[1]] + + if args[0] == 'set': + if args[2] in command: + data = command[args[2]] + result = common.writeFile(path, data) + else: + result = setGetLed.__doc__ + else: + result = common.readFile(node) + if result != "Error": + result = list (command.keys()) [list (command.values()).index (result)] + + return result + +def locateDeviceLed(): + setGetLed(['set', 'loc_led', 'blink']) + common.time.sleep(20) + setGetLed(['set', 'loc_led', 'off']) + +SENSORS_PATH = common.I2C_PREFIX + '5-0070/' +SENSORS_NODES = {'fan_rpm': ['_inner_rpm', '_outer_rpm'], + 'fan_vol': ['ADC8_vol', 'ADC7_vol','ADC6_vol', 'ADC5_vol','ADC4_vol', 'ADC3_vol'], + 'temp':['lm75_48_temp', 'lm75_49_temp', 'lm75_4a_temp'], + 'fan_alert':['_status_alert', '_wrongAirflow_alert', '_outerRPMOver_alert', '_outerRPMUnder_alert', + '_outerRPMZero_alert', '_innerRPMOver_alert', '_innerRPMUnder_alert', '_innerRPMZero_alert', '_notconnect_alert'], + 'vol_alert':['_under_alert', '_over_alert'], + 'temp_alert':['lm75_48_temp_alert', 'lm75_49_temp_alert', 'lm75_4a_temp_alert', 'sa56004x_Ltemp_alert', 'sa56004x_Rtemp_alert']} +SENSORS_TYPE = {'fan_rpm': ['Inner RPM', 'Outer RPM'], + 'fan_vol': ['P0.2', 'P0.6','P0.1', 'P1.5','P0.7', 'P1.6'], + 'temp':['lm75_48_temp', 'lm75_49_temp', 'lm75_4a_temp']} +def getSensors(): + string = '' + # Firmware version + val = common.readFile(SENSORS_PATH + 'mb_fw_version') + string = '\n' + "MB-SW Version: " + val + + val = common.readFile(SENSORS_PATH + 'fb_fw_version') + string += 
'\n' + "FB-SW Version: " + val + + # Fan + string += getFan() + + # HW Monitor + string += '\n' + getHWMonitor() + + # Voltage + string += '\n' + getVoltage() + + return string + +def getFan(): + string = '' + for i in range(0,FAN_NUM): + # Status + result = getFanStatus(i) + string += '\n\n' + "FAN " + str(i+1) + ": " + result + + if result == 'Disconnect': + continue + + # Alert + result = getFanAlert(i) + string += '\n' + " Status: " + result + + # Inner RPM + result = getFanInnerRPM(i) + string += '\n' + " Inner RPM: " + result.rjust(10) + " RPM" + + # Outer RPM + result = getFanOuterRPM(i) + string += '\n' + " Outer RPM: " + result.rjust(10) + " RPM" + + return string + +def getFanStatus(num): + val = common.readFile(SENSORS_PATH + 'fan' + str(num+1) + '_present') + if val != 'Error': + if int(val, 16) == 0: + result = 'Connect' + else: + result = 'Disconnect' + else: + result = val + return result + +def getFanAlert(num): + alert = 0 + alert_types = SENSORS_NODES['fan_alert'] + for alert_type in alert_types: + val = common.readFile(SENSORS_PATH + 'fan' + str(num+1) + alert_type) + if val != 'Error': + alert += int(val, 16) + else: + return val + + if alert > 0: + result = 'Warning' + else: + result = 'Normal' + + return result + +def getFanInnerRPM(num): + return common.readFile(SENSORS_PATH + 'fan' + str(num+1) + '_inner_rpm') + +def getFanOuterRPM(num): + return common.readFile(SENSORS_PATH + 'fan' + str(num+1) + '_outer_rpm') + +def getHWMonitor(): + string = '' + temp_type = SENSORS_TYPE['temp'] + for types in temp_type: + val = common.readFile(SENSORS_PATH + types) + val_alert = common.readFile(SENSORS_PATH + types + '_alert') + if val_alert != 'Error': + if int(val_alert, 16) == 1: + alert = 'Warning' + else: + alert = 'Normal' + else: + alert = val_alert + string += '\n' + types + ": " + val + " C" + " ( " + alert + " )" + + return string + +def getVoltage(): + string = '' + nodes = SENSORS_NODES['fan_vol'] + types = SENSORS_TYPE['fan_vol'] + for i in 
range(0,len(nodes)): + val = common.readFile(SENSORS_PATH + nodes[i]) + alert = getVoltageAlert(i) + string += '\n' + types[i] + ": " + val + " V ( " + alert + " )" + + return string + +def getVoltageAlert(num): + alert = 0 + nodes = SENSORS_NODES['vol_alert'] + for node in nodes: + val = common.readFile(SENSORS_PATH + 'ADC' + str(num+1) + node) + if val != 'Error': + alert += int(val, 16) + else: + return val + + if alert > 0: + result = 'Warning' + else: + result = 'Normal' + + return result + +DEVICE_INIT = {'led': [['set', 'sys_led', 'green'], ['set', 'pwr_led', 'green'], ['set', 'fan_led', 'green'], ['set', 'cpld_allled_ctrl', 'normal'], ['set', 'serial_led_enable', 'enable']]} +def deviceInit(): + # Set led + for i in range(0,len(DEVICE_INIT['led'])): + setGetLed(DEVICE_INIT['led'][i]) + + # Set tx disable + cpld_bus = DEVICE_BUS['cpld'] + for x in range(0, SFP_MAX_NUM): + if x < CPLDB_SFP_NUM: + bus = cpld_bus[1] + elif x < CPLDB_SFP_NUM + CPLDA_SFP_NUM: + bus = cpld_bus[0] + else: + bus = cpld_bus[2] + + path = common.I2C_PREFIX + bus + '/sfp' + str(x+1) + '_tx_disable' + result = common.writeFile(path, "0") + + # Set QSFP reset to normal + for x in range(SFP_MAX_NUM, TOTAL_PORT_NUM): + path = common.I2C_PREFIX + cpld_bus[2] + '/sfp' + str(x+1) + '_reset' + result = common.writeFile(path, "1") + + # Set QSFP I2C enable + for x in range(SFP_MAX_NUM, TOTAL_PORT_NUM): + path = common.I2C_PREFIX + cpld_bus[2] + '/sfp' + str(x+1) + '_modeseln' + result = common.writeFile(path, "0") + + return diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/pegaProcess/main.py b/platform/nephos/sonic-platform-modules-pegatron/porsche/pegaProcess/main.py new file mode 100755 index 000000000000..3a1e46b11452 --- /dev/null +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/pegaProcess/main.py @@ -0,0 +1,205 @@ +#!/usr/bin/env python +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General 
Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import common, device + +HOST = '' +SOCKET_LIST = [] +SOCKET_MAX_CLIENT = 10 +QUEUES = [] +THREADS = [] +FUNCS = {} + +class GlobalThread(common.threading.Thread): + def __init__(self,threadname, q): + common.threading.Thread.__init__(self,name = threadname) + self.queue = q + + def run(self): + while common.RUN: + message = self.queue.get() + self.onMessage(message) + def onMessage(self, message): + """ + Commands: + uninstall : Uninstall platform drivers + """ + if len(message.command) < 1: + result = self.onMessage.__doc__ + else: + if message.command[0] == 'uninstall': + common.RUN = False + doUninstall() + result = 'Success' + else: + result = self.onMessage.__doc__ + if (message.callback is not None): + message.callback(result) + +class messageObject(object): + def __init__(self, command, callback): + super(messageObject, self).__init__() + self.command = command + self.callback = callback + +def callback(sock, result): + sock.sendall(result) + +def messageHandler(): + server_socket = common.socket.socket(common.socket.AF_INET, common.socket.SOCK_STREAM) + server_socket.setsockopt(common.socket.SOL_SOCKET, common.socket.SO_REUSEADDR, 1) + server_socket.bind((HOST, common.SOCKET_PORT)) + server_socket.listen(SOCKET_MAX_CLIENT) + SOCKET_LIST.append(server_socket) + + while(common.RUN): + ready_to_read,ready_to_write,in_error = common.select.select(SOCKET_LIST,[],[],0) + for sock in ready_to_read: + if sock == server_socket: + sockfd, addr = server_socket.accept() + 
SOCKET_LIST.append(sockfd) + else: + try: + data = sock.recv(common.SOCKET_RECV_BUFFER) + if data: + cb = common.partial(callback, sock) + cmdlist = data.split() + + if cmdlist[0] not in common.CMD_TYPE: + callback(sock, 'Fail') + continue + + msg = messageObject(cmdlist[1:], cb) + FUNCS[cmdlist[0]].put(msg) + continue + else: + if sock in SOCKET_LIST: + SOCKET_LIST.remove(sock) + except: + raise + continue + common.time.sleep(0.2) + + server_socket.close() + +# Platform initialize +KERNEL_MODULE = ['i2c_dev', 'i2c-mux-pca954x force_deselect_on_exit=1', 'at24', 'pegatron_porsche_cpld', 'pegatron_hwmon_mcu', 'pegatron_porsche_psu', 'pegatron_porsche_sfp'] +MODULE_ID = ['pca9544', 'pca9544', 'porsche_psu', 'porsche_psu', '24c02', 'pega_hwmon_mcu', 'porsche_cpld', 'porsche_cpld', 'porsche_cpld', 'porsche_sfpA', 'porsche_sfpB', 'porsche_sfpC'] +I2C_CHECK_NODE = ['i2c-0', 'i2c-1'] +UNINSTALL_CHECK_NODE = ['-0072', '-0073'] +DEVICE_NODE= ['i2c-2', 'i2c-6', 'i2c-2', 'i2c-3', 'i2c-4', 'i2c-5', 'i2c-6', 'i2c-7', 'i2c-8', 'i2c-6', 'i2c-7', 'i2c-8'] +DEVICE_ADDRESS = ['0x72', '0x73', '0x58', '0x59', '0x54', '0x70', '0x74', '0x75', '0x76', '0x50', '0x50', '0x50'] +def checkDevicePosition(num): + for i in range(0, len(I2C_CHECK_NODE)): + status, output = common.doBash("echo " + MODULE_ID[num] + " " + DEVICE_ADDRESS[num] + " > " + common.I2C_PREFIX + I2C_CHECK_NODE[i] + "/new_device") + status, output = common.doBash("ls " + common.I2C_PREFIX + DEVICE_NODE[num]) + DEVICE_NODE[num] = I2C_CHECK_NODE[i] + + if status: + status, output = common.doBash("echo " + DEVICE_ADDRESS[num] + " > " + common.I2C_PREFIX + I2C_CHECK_NODE[i] + "/delete_device") + else: + return + return + +def installDevice(): + for i in range(0, len(MODULE_ID)): + if MODULE_ID[i] == "pca9544": + checkDevicePosition(i) + else: + status, output = common.doBash("echo " + MODULE_ID[i] + " " + DEVICE_ADDRESS[i] + " > " + common.I2C_PREFIX + DEVICE_NODE[i] + "/new_device") + return + +def checkDriver(): + for i in 
range(0, len(KERNEL_MODULE)): + status, output = common.doBash("lsmod | grep " + KERNEL_MODULE[i]) + if status: + status, output = common.doBash("modprobe " + KERNEL_MODULE[i]) + return + +def doInstall(): + status, output = common.doBash("depmod -a") + checkDriver() + installDevice() + status, output = common.doBash("systemctl status flnet_s8930_54n-platform-main.service | grep inactive") + return + +def setupThreads(): + global THREADS, QUEUES + + # Queues + # Global + queueGlobal = common.Queue.Queue() + QUEUES.append(queueGlobal) + FUNCS['global'] = queueGlobal + + # Device + queueDevice = common.Queue.Queue() + QUEUES.append(queueDevice) + FUNCS['device'] = queueDevice + + # Threads + # Global + threadGlobal = GlobalThread('Global Handler', queueGlobal) + THREADS.append(threadGlobal) + + # Device + threadDevice = device.DeviceThread('Device Handler', queueDevice) + THREADS.append(threadDevice) + + # Check platform status + threadPlatformStatus = device.PlatformStatusThread('Platform Status Handler', 0.3) + THREADS.append(threadPlatformStatus) +def functionInit(): + setupThreads() + for thread in THREADS: + thread.start() + return + +def deviceInit(): + msg = messageObject(['init'], None) + FUNCS['device'].put(msg) + return + +# Platform uninitialize +def doUninstall(): + for i in range(0, len(KERNEL_MODULE)): + status, output = common.doBash("modprobe -rq " + KERNEL_MODULE[i]) + for i in range(0, len(MODULE_ID)): + if MODULE_ID[i] == "pca9544": + for node in range(0, len(I2C_CHECK_NODE)): + status, output = common.doBash("ls " + common.I2C_PREFIX + str(node) + UNINSTALL_CHECK_NODE[i]) + if not status: + status, output = common.doBash("echo " + DEVICE_ADDRESS[i] + " > " + common.I2C_PREFIX + I2C_CHECK_NODE[node] + "/delete_device") + else: + status, output = common.doBash("echo " + DEVICE_ADDRESS[i] + " > " + common.I2C_PREFIX + DEVICE_NODE[i] + "/delete_device") + return + +def main(): + args = common.sys.argv[1:] + + if len(args[0:]) < 1: + common.sys.exit(0) 
+ + if args[0] == 'install': + common.RUN = True + doInstall() + functionInit() + deviceInit() + messageHandler() + + common.sys.exit(0) + +if __name__ == "__main__": + main() diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/scripts/sensors b/platform/nephos/sonic-platform-modules-pegatron/porsche/scripts/sensors index 7f9426a0c5ec..821c1ff9acba 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/porsche/scripts/sensors +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/scripts/sensors @@ -2,6 +2,4 @@ docker exec -i pmon sensors "$@" #To probe sensors not part of lm-sensors -if [ -r /usr/local/bin/porsche_sensors.py ]; then - python /usr/local/bin/porsche_sensors.py get_sensors -fi +pegatron_porsche_util.py cmd device sensors diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/service/porsche-platform-init.service b/platform/nephos/sonic-platform-modules-pegatron/porsche/service/porsche-platform-init.service index 8e6f4344715f..96a1ff5eebcf 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/porsche/service/porsche-platform-init.service +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/service/porsche-platform-init.service @@ -4,8 +4,8 @@ After=local-fs.target DefaultDependencies=no [Service] -Type=oneshot -ExecStart=/usr/local/bin/pegatron_porsche_util.py install +Type=simple +ExecStart=/usr/local/bin/pegaProcess/main.py install ExecStop=/usr/local/bin/pegatron_porsche_util.py uninstall RemainAfterExit=yes diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/pegatron_porsche_util.py b/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/pegatron_porsche_util.py index d590dbbcbe32..01cb993ae7ce 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/pegatron_porsche_util.py +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/pegatron_porsche_util.py @@ -15,213 +15,44 @@ # You should have received a copy of the GNU General 
Public License # along with this program. If not, see . -import sys, getopt -import logging -import os -import commands -import threading +from pegaProcess import common -DEBUG = False - -SFP_MAX_NUM = 48 -CPLDA_SFP_NUM = 24 -CPLDB_SFP_NUM = 12 -CPLDC_SFP_NUM = 18 - -kernel_module = ['i2c_dev', 'i2c-mux-pca954x force_deselect_on_exit=1', 'at24', 'pegatron_porsche_cpld', 'pegatron_hwmon_mcu', 'pegatron_porsche_sfp'] -moduleID = ['pca9544', 'pca9544', '24c02', 'porsche_hwmon_mcu', 'porsche_cpld', 'porsche_cpld', 'porsche_cpld', 'porsche_sfpA', 'porsche_sfpB', 'porsche_sfpC'] -i2c_check_node = ['i2c-0', 'i2c-1'] -uninstall_check_node = ['-0072', '-0073'] -device_address = ['0x72', '0x73', '0x54', '0x70', '0x74', '0x75', '0x76', '0x50', '0x50', '0x50'] -device_node= ['i2c-2', 'i2c-6', 'i2c-4', 'i2c-5', 'i2c-6', 'i2c-7', 'i2c-8', 'i2c-6', 'i2c-7', 'i2c-8'] - -i2c_prefix = '/sys/bus/i2c/devices/' -cpld_bus = ['6-0074', '7-0075', '8-0076'] -led_nodes = ['sys_led', 'pwr_led', 'loc_led', 'fan_led', "cpld_allled_ctrl", "serial_led_enable"] - -def dbg_print(string): - if DEBUG == True: - print string - return - -def do_cmd(cmd, show): - logging.info('Run :' + cmd) - status, output = commands.getstatusoutput(cmd) - dbg_print(cmd + "with result:" + str(status)) - dbg_print("output:" + output) - if status: - logging.info('Failed :' + cmd) - if show: - print('Failed :' + cmd) - return status, output - -def install_driver(): - status, output = do_cmd("depmod -a", 1) - - for i in range(0, len(kernel_module)): - status, output = do_cmd("modprobe " + kernel_module[i], 1) - if status: - return status - - return - -def check_device_position(num): - for i in range(0, len(i2c_check_node)): - status, output = do_cmd("echo " + moduleID[num] + " " + device_address[num] + " > " + i2c_prefix + i2c_check_node[i] + "/new_device", 0) - status, output = do_cmd("ls " + i2c_prefix + device_node[num], 0) - device_node[num] = i2c_check_node[i] - - if status: - status, output = do_cmd("echo " + 
device_address[num] + " > " + i2c_prefix + i2c_check_node[i] + "/delete_device", 0) - else: - return - - return - -def install_device(): - for i in range(0, len(moduleID)): - if moduleID[i] == "pca9544": - check_device_position(i) - else: - status, output = do_cmd("echo " + moduleID[i] + " " + device_address[i] + " > " + i2c_prefix + device_node[i] + "/new_device", 1) - - return - -def check_driver(): - for i in range(0, len(kernel_module)): - status, output = do_cmd("lsmod | grep " + kernel_module[i], 0) - if status: - status, output = do_cmd("modprobe " + kernel_module[i], 1) - - return - -def do_install(): - status, output = do_cmd("depmod -a", 1) - - check_driver() - install_device() - - return - -def do_uninstall(): - for i in range(0, len(kernel_module)): - status, output = do_cmd("modprobe -rq " + kernel_module[i], 0) - - for i in range(0, len(moduleID)): - if moduleID[i] == "pca9544": - for node in range(0, len(i2c_check_node)): - status, output = do_cmd("ls " + i2c_prefix + str(node) + uninstall_check_node[i], 0) - if not status: - status, output = do_cmd("echo " + device_address[i] + " > " + i2c_prefix + i2c_check_node[node] + "/delete_device", 0) - - else: - status, output = do_cmd("echo " + device_address[i] + " > " + i2c_prefix + device_node[i] + "/delete_device", 0) - - return - -led_command = {'sys_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, - 'pwr_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, - 'loc_led': {'on':'0', 'off':'1', 'blink':'2'}, - 'fan_led': {'green':'0', 'amber':'1', 'off':'2', 'blink_green':'3', 'blink_amber':'4'}, - 'cpld_allled_ctrl': {'off':'0', 'mix':'1', 'amber':'2', 'normal':'3'}, - 'serial_led_enable': {'disable':'0', 'enable':'1'}} - -def set_led(args): +#message handler +def doCommand(cmd): """ - Usage: %(scriptName)s set led object command - - object: - sys_led : set SYS led [command: off|green|amber|blink_green|blink_amber] - pwr_led : set PWR led 
[command: off|green|amber|blink_green|blink_amber] - loc_led : set LOCATOR led [command: off|on|blink] - fan_led : set FAN led [command: off|green|amber|blink_green|blink_amber] - """ - if args[0] not in led_command: - print set_led.__doc__ - sys.exit(0) - - for i in range(0,len(led_nodes)): - if args[0] == led_nodes[i]: - node = i2c_prefix + cpld_bus[1] + '/'+ led_nodes[i] - - command = led_command[args[0]] - data = command[args[1]] - - status, output = do_cmd("echo "+ str(data) + " > "+ node, 1) - - return - -def set_device(args): - """ - Usage: %(scriptName)s command object - - command: - led : set status led sys_led|pwr_led|loc_led|mst_led|fan_led|digit_led + Command: + global : Set global config + device : Set device config """ - - if args[0] == 'led': - set_led(args[1:]) + if len(cmd[0:]) < 1 or cmd[0] not in common.CMD_TYPE: + print doCommand.__doc__ return - else: - print set_device.__doc__ - - return - -device_init = {'led': [['led', 'sys_led', 'green'], ['led', 'pwr_led', 'green'], ['led', 'fan_led', 'green'], ['led', 'cpld_allled_ctrl', 'normal'], ['led', 'serial_led_enable', 'enable']]} - -def pega_init(): - #set led - for i in range(0,len(device_init['led'])): - set_device(device_init['led'][i]) - - #set tx_disable - for x in range(0, SFP_MAX_NUM-1): - if x < CPLDB_SFP_NUM: - bus = cpld_bus[1] - elif x < CPLDB_SFP_NUM + CPLDA_SFP_NUM: - bus = cpld_bus[0] - else: - bus = cpld_bus[2] - - nodes = i2c_prefix + bus + '/sfp' + str(x+1) + '_tx_disable' - dbg_print("SFP_TX_DISABLE NODES: " + nodes) - status, output = do_cmd("echo 0 > "+ nodes, 1) - + msg = ' '.join(str(data) for data in cmd) + result = common.doSend(msg, common.SOCKET_PORT) + print result + return def main(): """ - Usage: %(scriptName)s command object - - command: - install : install drivers and generate related sysfs nodes - uninstall : uninstall drivers and remove related sysfs nodes - set : change board setting [led] - debug : debug info [on/off] - """ - - if len(sys.argv)<2: - print 
main.__doc__ - - for arg in sys.argv[1:]: - if arg == 'install': - do_install() - pega_init() - elif arg == 'uninstall': - do_uninstall() - elif arg == 'set': - if len(sys.argv[2:])<1: - print main.__doc__ - else: - set_device(sys.argv[2:]) - return - elif arg == 'debug': - if sys.argv[2] == 'on': - DEBUG = True - else: - DEBUG = False - else: - print main.__doc__ + Command: + install : Install drivers + uninstall : Uninstall drivers + cmd : Commands + """ + args = common.sys.argv[1:] + + if len(args[0:]) < 1: + print main.__doc__ + return + + if args[0] == 'uninstall': + doCommand(['global', 'uninstall']) + elif args[0] == 'cmd': + doCommand(args[1:]) + else: + print main.__doc__ + common.sys.exit(0) if __name__ == "__main__": main() diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/porsche_sensors.py b/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/porsche_sensors.py deleted file mode 100755 index 40e23ef01b7e..000000000000 --- a/platform/nephos/sonic-platform-modules-pegatron/porsche/utils/porsche_sensors.py +++ /dev/null @@ -1,141 +0,0 @@ -#!/usr/bin/python - -import os -import sys -import logging - -FAN_NUM = 5 -sensors_path = '/sys/bus/i2c/devices/5-0070/' -sensors_nodes = {'fan_rpm': ['_inner_rpm', '_outer_rpm'], - 'fan_vol': ['ADC8_vol', 'ADC7_vol','ADC6_vol', 'ADC5_vol','ADC4_vol', 'ADC3_vol'], - 'temp':['lm75_49_temp', 'lm75_48_temp', 'SA56004_local_temp','SA56004_remote_temp']} -sensors_type = {'fan_rpm': ['Inner RPM', 'Outer RPM'], - 'fan_vol': ['P0.2', 'P0.6','P0.1', 'P1.5','P0.7', 'P1.6'], - 'temp':['lm75_49_temp', 'lm75_48_temp', 'SA56004_local_temp','SA56004_remote_temp']} - -# Get sysfs attribute -def get_attr_value(attr_path): - retval = 'ERR' - if (not os.path.isfile(attr_path)): - return retval - - try: - with open(attr_path, 'r') as fd: - retval = fd.read() - except Exception as error: - logging.error("Unable to open ", attr_path, " file !") - - retval = retval.rstrip('\r\n') - fd.close() - return retval 
- -def get_fan_status(number): - attr_value = get_attr_value(sensors_path + "fan" + str(number+1) + "_present") - if (attr_value != 'ERR'): - attr_value = int(attr_value, 16) - - if(attr_value == 0): - string = "Connect" - else: - string = "Disconnect" - return string - -def get_fan_alert(number): - attr_value = get_attr_value(sensors_path + "fan" + str(number+1) + "_status_alert") - if (attr_value != 'ERR'): - attr_value = int(attr_value, 16) - - if(attr_value == 0): - string = "Normal" - else: - string = "Abnormal" - return string - -def get_fan_inner_rpm(number): - return get_attr_value(sensors_path + "fan" + str(number+1) + "_inner_rpm") - -def get_fan_outer_rpm(number): - return get_attr_value(sensors_path + "fan" + str(number+1) + "_outer_rpm") - -def get_fan(): - for i in range(0,FAN_NUM): - print " " - #status - string = get_fan_status(i) - print "FAN " + str(i+1) + ":" + ' ' + string - if string=='Disconnect': - continue - - #alert - string = get_fan_alert(i) - print " Status:"+ ' ' + string - - #inner rpm - string = get_fan_inner_rpm(i) - print " Inner RPM:"+ string.rjust(10) + ' RPM' - - #outer rpm - string = get_fan_outer_rpm(i) - print " Outer RPM:"+ string.rjust(10) + ' RPM' - - return - -def get_hwmon(): - print " " - string = get_attr_value(sensors_path + "lm75_48_temp") - print "Sensor A: " + string + " C" - - string = get_attr_value(sensors_path + "lm75_49_temp") - print "Sensor B: " + string + " C" - - return - -def get_voltage(): - print " " - nodes = sensors_nodes['fan_vol'] - types = sensors_type['fan_vol'] - for i in range(0,len(nodes)): - string = get_attr_value(sensors_path + nodes[i]) - print types[i] + ': ' + string + " V" - - return - -def init_fan(): - return - -def main(): - """ - Usage: %(scriptName)s command object - - command: - install : install drivers and generate related sysfs nodes - clean : uninstall drivers and remove related sysfs nodes - show : show all systen status - set : change board setting with fan|led|sfp - """ - - 
if len(sys.argv)<2: - print main.__doc__ - - for arg in sys.argv[1:]: - if arg == 'fan_init': - init_fan() - elif arg == 'get_sensors': - ver = get_attr_value(sensors_path + "fb_hw_version") - print 'HW Version: ' + ver - ver = get_attr_value(sensors_path + "fb_fw_version") - print 'SW Version: ' + ver - get_fan() - get_hwmon() - get_voltage() - elif arg == 'fan_set': - if len(sys.argv[1:])<1: - print main.__doc__ - else: - set_fan(sys.argv[1:]) - return - else: - print main.__doc__ - -if __name__ == "__main__": - main() From c2014a73cb5055518ed0d3032a9723f5f524e965 Mon Sep 17 00:00:00 2001 From: PeterLin Date: Tue, 13 Aug 2019 16:10:15 +0800 Subject: [PATCH 18/20] remove unuse command --- .../fn-6254-dn-f/pegaProcess/main.py | 1 - 1 file changed, 1 deletion(-) diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/main.py b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/main.py index 2074e533e0d4..1a24150004bf 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/main.py +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/pegaProcess/main.py @@ -132,7 +132,6 @@ def doInstall(): status, output = common.doBash("depmod -a") checkDriver() installDevice() - status, output = common.doBash("systemctl status flnet_s8930_54n-platform-main.service | grep inactive") return def setupThreads(): From 0857ab700a9c591992b579da60657596da171997 Mon Sep 17 00:00:00 2001 From: PeterLin Date: Wed, 21 Aug 2019 13:53:49 +0800 Subject: [PATCH 19/20] change sfp eeprom driver from at24 to optoe --- .../modules/pegatron_fn_6254_dn_f_sfp.c | 768 +++++++++++++++--- 1 file changed, 639 insertions(+), 129 deletions(-) diff --git a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_sfp.c b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_sfp.c index c9a5d576ce71..f35b420b513d 100644 --- 
a/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_sfp.c +++ b/platform/nephos/sonic-platform-modules-pegatron/fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_sfp.c @@ -1,23 +1,119 @@ /* - * A SFP driver for the fn_6254_dn_f platform + * pegatron_fn_6254_dn_f_sfp.c - A driver to read and write the EEPROM on optical transceivers + * (SFP and QSFP) * - * Copyright (C) 2018 Pegatron Corporation. - * Peter5_Lin + * Copyright (C) 2014 Cumulus networks Inc. + * Copyright (C) 2017 Finisar Corp. + * Copyright (C) 2019 Pegatron Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +/* + * Description: + * a) Optical transceiver EEPROM read/write transactions are just like + * the at24 eeproms managed by the at24.c i2c driver + * b) The register/memory layout is up to 256 128 byte pages defined by + * a "pages valid" register and switched via a "page select" + * register as explained in below diagram. + * c) 256 bytes are mapped at a time. 'Lower page 00h' is the first 128 + * bytes of address space, and always references the same + * location, independent of the page select register. + * All mapped pages are mapped into the upper 128 bytes + * (offset 128-255) of the i2c address. + * d) Devices with one I2C address (eg QSFP) use I2C address 0x50 + * (A0h in the spec), and map all pages in the upper 128 bytes + * of that address. + * e) Devices with two I2C addresses (eg SFP) have 256 bytes of data + * at I2C address 0x50, and 256 bytes of data at I2C address + * 0x51 (A2h in the spec). Page selection and paged access + * only apply to this second I2C address (0x51). + * e) The address space is presented, by the driver, as a linear + * address space.
For devices with one I2C client at address + * 0x50 (eg QSFP), offset 0-127 are in the lower + * half of address 50/A0h/client[0]. Offset 128-255 are in + * page 0, 256-383 are page 1, etc. More generally, offset + * 'n' resides in page (n/128)-1. ('page -1' is the lower + * half, offset 0-127). + * f) For devices with two I2C clients at address 0x50 and 0x51 (eg SFP), + * the address space places offset 0-127 in the lower + * half of 50/A0/client[0], offset 128-255 in the upper + * half. Offset 256-383 is in the lower half of 51/A2/client[1]. + * Offset 384-511 is in page 0, in the upper half of 51/A2/... + * Offset 512-639 is in page 1, in the upper half of 51/A2/... + * Offset 'n' is in page (n/128)-3 (for n > 383) * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * One I2c addressed (eg QSFP) Memory Map * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ + * 2-Wire Serial Address: 1010000x + * + * Lower Page 00h (128 bytes) + * ===================== + * | | + * | | + * | | + * | | + * | | + * | | + * | | + * | | + * | | + * | | + * |Page Select Byte(127)| + * ===================== + * | + * | + * | + * | + * V + * ------------------------------------------------------------ + * | | | | + * | | | | + * | | | | + * | | | | + * | | | | + * | | | | + * | | | | + * | | | | + * | | | | + * V V V V + * ------------ -------------- --------------- -------------- + * | | | | | | | | + * | Upper | | Upper | | Upper | | Upper | + * | Page 00h | | Page 01h | | Page 02h | | Page 03h | + * | | | (Optional) | | (Optional) | | (Optional | + * | | | | | | | for Cable | + * | | | | | | | Assemblies) | + * | ID | | AST | | User | | | + * | Fields | | Table | | EEPROM Data | | | + * | | | | | | | | + * | | | | | | | | + * | | | | | | | | + * ------------ -------------- --------------- -------------- + * + * The SFF 8436 (QSFP) spec only defines the 4 pages described above. + * In anticipation of future applications and devices, this driver + * supports access to the full architected range, 256 pages. + * + * The CMIS (Common Management Interface Specification) defines use of + * considerably more pages (at least to page 0xAF), which this driver + * supports. + * + * NOTE: This version of the driver ONLY SUPPORTS BANK 0 PAGES on CMIS + * devices. 
+ * + **/ + +#undef PEGA_DEBUG +/*#define pega_DEBUG*/ +#ifdef PEGA_DEBUG +#define DBG(x) x +#else +#define DBG(x) +#endif /* DEBUG */ #include #include @@ -26,28 +122,63 @@ #include #include #include -#include -#include -#include #include -#include #include +#include -#undef PEGA_DEBUG -/*#define PEGA_DEBUG*/ -#ifdef PEGA_DEBUG -#define DBG(x) x -#else -#define DBG(x) -#endif /* DEBUG */ +#define NUM_ADDRESS 2 + +/* The maximum length of a port name */ +#define MAX_PORT_NAME_LEN 20 + +/* fundamental unit of addressing for EEPROM */ +#define SFP_PAGE_SIZE 128 -#define SFP_EEPROM_SIZE 256 +/* + * Single address devices (eg QSFP) have 256 pages, plus the unpaged + * low 128 bytes. If the device does not support paging, it is + * only 2 'pages' long. + */ +#define SFP_ARCH_PAGES 256 +#define ONE_ADDR_EEPROM_SIZE ((1 + SFP_ARCH_PAGES) * SFP_PAGE_SIZE) +#define ONE_ADDR_EEPROM_UNPAGED_SIZE (2 * SFP_PAGE_SIZE) + +/* + * Dual address devices (eg SFP) have 256 pages, plus the unpaged + * low 128 bytes, plus 256 bytes at 0x50. If the device does not + * support paging, it is 4 'pages' long. 
+ */ +#define TWO_ADDR_EEPROM_SIZE ((3 + SFP_ARCH_PAGES) * SFP_PAGE_SIZE) +#define TWO_ADDR_EEPROM_UNPAGED_SIZE (4 * SFP_PAGE_SIZE) +#define TWO_ADDR_NO_0X51_SIZE (2 * SFP_PAGE_SIZE) + +/* + * flags to distinguish one-address (QSFP family) from two-address (SFP family) + * If the family is not known, figure it out when the device is accessed + */ +#define ONE_ADDR 1 +#define TWO_ADDR 2 +#define CMIS_ADDR 3 + +/* a few constants to find our way around the EEPROM */\ #define SFP_EEPROM_A0_ADDR 0x50 #define SFP_EEPROM_A2_ADDR 0x51 -#define SFP_EEPROM_BUS_TYPE I2C_SMBUS_I2C_BLOCK_DATA +#define SFP_PAGE_SELECT_REG 0x7F +#define ONE_ADDR_PAGEABLE_REG 0x02 +#define QSFP_NOT_PAGEABLE (1<<2) +#define CMIS_NOT_PAGEABLE (1<<7) +#define TWO_ADDR_PAGEABLE_REG 0x40 +#define TWO_ADDR_PAGEABLE (1<<4) +#define TWO_ADDR_0X51_REG 92 +#define TWO_ADDR_0X51_SUPP (1<<6) +#define SFP_ID_REG 0 +#define SFP_READ_OP 0 +#define SFP_WRITE_OP 1 +#define SFP_EOF 0 /* used for access beyond end of device */ #define CPLDA_SFP_NUM 24 #define CPLDB_SFP_NUM 12 #define CPLDC_SFP_NUM 18 +#define MAX_PORT_NUM CPLDA_SFP_NUM #define CPLDA_ADDRESS 0x74 #define CPLDB_ADDRESS 0x75 #define CPLDC_ADDRESS 0x76 @@ -61,10 +192,31 @@ enum cpld_croups { cpld_group_a, cpld_group_b, cpld_group_c}; -static const unsigned short normal_i2c[] = { SFP_EEPROM_A0_ADDR, SFP_EEPROM_A2_ADDR, I2C_CLIENT_END }; -static char SFP_CPLD_GROUPA_MAPPING[CPLDA_SFP_NUM][16]={0}; -static char SFP_CPLD_GROUPB_MAPPING[CPLDB_SFP_NUM][16]={0}; -static char SFP_CPLD_GROUPC_MAPPING[CPLDC_SFP_NUM][16]={0}; +struct fn_6254_dn_f_sfp_platform_data { + u32 byte_len; /* size (sum of all addr) */ + u16 page_size; /* for writes */ + u8 flags; + void *dummy1; /* backward compatibility */ + void *dummy2; /* backward compatibility */ + + /* dev_class: ONE_ADDR (QSFP) or TWO_ADDR (SFP) */ + int dev_class; + unsigned int write_max; +}; + +struct fn_6254_dn_f_sfp_data { + struct fn_6254_dn_f_sfp_platform_data chip[MAX_PORT_NUM]; + + /* + * Lock protects 
against activities from other Linux tasks, + * but not from changes by other I2C masters. + */ + struct mutex lock; + struct bin_attribute bin; + kernel_ulong_t driver_data; + + struct i2c_client *client[]; +}; /* * This parameter is to help this driver avoid blocking other drivers out @@ -75,48 +227,91 @@ static char SFP_CPLD_GROUPC_MAPPING[CPLDC_SFP_NUM][16]={0}; * * This value is forced to be a power of two so that writes align on pages. */ -static unsigned io_limit = 128; -module_param(io_limit, uint, 0); -MODULE_PARM_DESC(io_limit, "Maximum bytes per I/O (default 128)"); +static unsigned int io_limit = SFP_PAGE_SIZE; /* - * Specs often allow 5 msec for a page write, sometimes 20 msec; + * specs often allow 5 msec for a page write, sometimes 20 msec; * it's important to recover from write timeouts. */ -static unsigned write_timeout = 25; -module_param(write_timeout, uint, 0); -MODULE_PARM_DESC(write_timeout, "Time (in ms) to try writes (default 25)"); - - -struct fn_6254_dn_f_sfp_data { - struct mutex lock; - struct bin_attribute bin; - int use_smbus; - kernel_ulong_t driver_data; +static unsigned int write_timeout = 25; - struct i2c_client *client; -}; +static char SFP_CPLD_GROUPA_MAPPING[CPLDA_SFP_NUM][16]={0}; +static char SFP_CPLD_GROUPB_MAPPING[CPLDB_SFP_NUM][16]={0}; +static char SFP_CPLD_GROUPC_MAPPING[CPLDC_SFP_NUM][16]={0}; extern int pegatron_fn_6254_dn_f_cpld_read(unsigned short cpld_addr, u8 reg); extern int pegatron_fn_6254_dn_f_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -static ssize_t fn_6254_dn_f_sfp_eeprom_read(struct fn_6254_dn_f_sfp_data *data, char *buf, - unsigned offset, size_t count) +/*-------------------------------------------------------------------------*/ +/* + * This routine computes the addressing information to be used for + * a given r/w request. + * + * Task is to calculate the client (0 = i2c addr 50, 1 = i2c addr 51), + * the page, and the offset. 
+ * + * Handles both single address (eg QSFP) and two address (eg SFP). + * For SFP, offset 0-255 are on client[0], >255 is on client[1] + * Offset 256-383 are on the lower half of client[1] + * Pages are accessible on the upper half of client[1]. + * Offset >383 are in 128 byte pages mapped into the upper half + * + * For QSFP, all offsets are on client[0] + * offset 0-127 are on the lower half of client[0] (no paging) + * Pages are accessible on the upper half of client[1]. + * Offset >127 are in 128 byte pages mapped into the upper half + * + * Callers must not read/write beyond the end of a client or a page + * without recomputing the client/page. Hence offset (within page) + * plus length must be less than or equal to 128. (Note that this + * routine does not have access to the length of the call, hence + * cannot do the validity check.) + * + * Offset within Lower Page 00h and Upper Page 00h are not recomputed + */ + +static uint8_t fn_6254_dn_f_sfp_translate_offset(struct fn_6254_dn_f_sfp_data *sfp, + loff_t *offset, struct i2c_client **client, int num) { - struct i2c_msg msg[2]; - u8 msgbuf[2]; - struct i2c_client *client = data->client; - unsigned long timeout, read_time; - int status; + unsigned int page = 0; + + *client = sfp->client[0]; - memset(msg, 0, sizeof(msg)); + /* if SFP style, offset > 255, shift to i2c addr 0x51 */ + if (sfp->chip[num].dev_class == TWO_ADDR) { + if (*offset > 255) { + /* like QSFP, but shifted to client[1] */ + *client = sfp->client[1]; + *offset -= 256; + } + } + + /* + * if offset is in the range 0-128... + * page doesn't matter (using lower half), return 0. 
+ * offset is already correct (don't add 128 to get to paged area) + */ + if (*offset < SFP_PAGE_SIZE) + return page; + + /* note, page will always be positive since *offset >= 128 */ + page = (*offset >> 7)-1; + /* 0x80 places the offset in the top half, offset is last 7 bits */ + *offset = SFP_PAGE_SIZE + (*offset & 0x7f); - if (count > io_limit) - count = io_limit; + return page; /* note also returning client and offset */ +} + +static ssize_t fn_6254_dn_f_sfp_eeprom_read(struct fn_6254_dn_f_sfp_data *sfp, + struct i2c_client *client, + char *buf, unsigned int offset, size_t count) +{ + unsigned long timeout, read_time; + int status, i; - /* Smaller eeproms can work given some SMBus extension calls */ - if (count > I2C_SMBUS_BLOCK_MAX) - count = I2C_SMBUS_BLOCK_MAX; + /* smaller eeproms can work given some SMBus extension calls */ + if (count > I2C_SMBUS_BLOCK_MAX) + count = I2C_SMBUS_BLOCK_MAX; /* * Reads fail if the previous write didn't complete yet.
We may @@ -126,75 +321,323 @@ static ssize_t fn_6254_dn_f_sfp_eeprom_read(struct fn_6254_dn_f_sfp_data *data, timeout = jiffies + msecs_to_jiffies(write_timeout); do { read_time = jiffies; - switch (data->use_smbus) { - case I2C_SMBUS_I2C_BLOCK_DATA: - status = i2c_smbus_read_i2c_block_data(client, offset, - count, buf); - break; - case I2C_SMBUS_WORD_DATA: - status = i2c_smbus_read_word_data(client, offset); - if (status >= 0) { - buf[0] = status & 0xff; - if (count == 2) - buf[1] = status >> 8; - status = count; - } - break; - case I2C_SMBUS_BYTE_DATA: - status = i2c_smbus_read_byte_data(client, offset); - if (status >= 0) { - buf[0] = status; - status = count; - } - break; - default: - status = i2c_transfer(client->adapter, msg, 2); - if (status == 2) - status = count; - } - dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n", + + status = i2c_smbus_read_i2c_block_data(client, offset, count, buf); + + dev_dbg(&client->dev, "eeprom read %zu@%d --> %d (%ld)\n", count, offset, status, jiffies); - if (status == count) + if (status == count) /* happy path */ return count; + if (status == -ENXIO) /* no module present */ + return status; + /* REVISIT: at HZ=100, this is sloooow */ - msleep(1); + usleep_range(1000, 2000); } while (time_before(read_time, timeout)); return -ETIMEDOUT; } -static ssize_t fn_6254_dn_f_sfp_read(struct fn_6254_dn_f_sfp_data *data, - char *buf, loff_t off, size_t count) +static ssize_t fn_6254_dn_f_sfp_eeprom_write(struct fn_6254_dn_f_sfp_data *sfp, + struct i2c_client *client, + const char *buf, + unsigned int offset, size_t count) { - ssize_t retval = 0; + ssize_t status; + unsigned long timeout, write_time; + unsigned int next_page_start; + int i = 0; + + /* write max is at most a page + * (In this driver, write_max is actually one byte!) 
+ */ + if (count > sfp->chip[i].write_max) + count = sfp->chip[i].write_max; + + /* shorten count if necessary to avoid crossing page boundary */ + next_page_start = roundup(offset + 1, SFP_PAGE_SIZE); + if (offset + count > next_page_start) + count = next_page_start - offset; - if (unlikely(!count)) - return count; + if (count > I2C_SMBUS_BLOCK_MAX) + count = I2C_SMBUS_BLOCK_MAX; /* - * Read data from chip, protecting against concurrent updates - * from this host, but not from other I2C masters. + * Reads fail if the previous write didn't complete yet. We may + * loop a few times until this one succeeds, waiting at least + * long enough for one entire page write to work. */ - mutex_lock(&data->lock); + timeout = jiffies + msecs_to_jiffies(write_timeout); + do { + write_time = jiffies; + + status = i2c_smbus_write_i2c_block_data(client, + offset, count, buf); + if (status == 0) + return count; + + /* REVISIT: at HZ=100, this is sloooow */ + usleep_range(1000, 2000); + } while (time_before(write_time, timeout)); + + return -ETIMEDOUT; +} + +static ssize_t fn_6254_dn_f_sfp_eeprom_update_client(struct fn_6254_dn_f_sfp_data *sfp, + char *buf, loff_t off, size_t count, int num) +{ + struct i2c_client *client; + ssize_t retval = 0; + uint8_t page = 0; + loff_t phy_offset = off; + int ret = 0; + + page = fn_6254_dn_f_sfp_translate_offset(sfp, &phy_offset, &client, num); + + dev_dbg(&client->dev, + "%s off %lld page:%d phy_offset:%lld, count:%ld\n", + __func__, off, page, phy_offset, (long int) count); + if (page > 0) { + ret = fn_6254_dn_f_sfp_eeprom_write(sfp, client, &page, + SFP_PAGE_SELECT_REG, 1); + if (ret < 0) { + dev_dbg(&client->dev, + "Write page register for page %d failed ret:%d!\n", + page, ret); + return ret; + } + } while (count) { ssize_t status; - status = fn_6254_dn_f_sfp_eeprom_read(data, buf, off, count); + status = fn_6254_dn_f_sfp_eeprom_read(sfp, client, + buf, phy_offset, count); + if (status <= 0) { if (retval == 0) retval = status; break; } buf 
+= status; - off += status; + phy_offset += status; count -= status; retval += status; } - mutex_unlock(&data->lock); + + if (page > 0) { + /* return the page register to page 0 (why?) */ + page = 0; + ret = fn_6254_dn_f_sfp_eeprom_write(sfp, client, &page, + SFP_PAGE_SELECT_REG, 1); + if (ret < 0) { + dev_err(&client->dev, + "Restore page register to 0 failed:%d!\n", ret); + /* error only if nothing has been transferred */ + if (retval == 0) + retval = ret; + } + } + return retval; +} + +/* + * Figure out if this access is within the range of supported pages. + * Note this is called on every access because we don't know if the + * module has been replaced since the last call. + * If/when modules support more pages, this is the routine to update + * to validate and allow access to additional pages. + * + * Returns updated len for this access: + * - entire access is legal, original len is returned. + * - access begins legal but is too long, len is truncated to fit. + * - initial offset exceeds supported pages, return OPTOE_EOF (zero) + */ +static ssize_t fn_6254_dn_f_sfp_page_legal(struct fn_6254_dn_f_sfp_data *sfp, + loff_t off, size_t len, int num) +{ + struct i2c_client *client = sfp->client[0]; + u8 regval; + int not_pageable; + int status; + size_t maxlen; + + if (off < 0) + return -EINVAL; + + if (sfp->chip[num].dev_class == TWO_ADDR) { + /* SFP case */ + /* if only using addr 0x50 (first 256 bytes) we're good */ + if ((off + len) <= TWO_ADDR_NO_0X51_SIZE) + return len; + /* if offset exceeds possible pages, we're not good */ + if (off >= TWO_ADDR_EEPROM_SIZE) + return SFP_EOF; + /* in between, are pages supported? */ + status = fn_6254_dn_f_sfp_eeprom_read(sfp, client, ®val, + TWO_ADDR_PAGEABLE_REG, 1); + if (status < 0) + return status; /* error out (no module?) 
*/ + if (regval & TWO_ADDR_PAGEABLE) { + /* Pages supported, trim len to the end of pages */ + maxlen = TWO_ADDR_EEPROM_SIZE - off; + } else { + /* pages not supported, trim len to unpaged size */ + if (off >= TWO_ADDR_EEPROM_UNPAGED_SIZE) + return SFP_EOF; + + /* will be accessing addr 0x51, is that supported? */ + /* byte 92, bit 6 implies DDM support, 0x51 support */ + status = fn_6254_dn_f_sfp_eeprom_read(sfp, client, ®val, + TWO_ADDR_0X51_REG, 1); + if (status < 0) + return status; + if (regval & TWO_ADDR_0X51_SUPP) { + /* addr 0x51 is OK */ + maxlen = TWO_ADDR_EEPROM_UNPAGED_SIZE - off; + } else { + /* addr 0x51 NOT supported, trim to 256 max */ + if (off >= TWO_ADDR_NO_0X51_SIZE) + return SFP_EOF; + maxlen = TWO_ADDR_NO_0X51_SIZE - off; + } + } + len = (len > maxlen) ? maxlen : len; + dev_dbg(&client->dev, + "page_legal, SFP, off %lld len %ld\n", + off, (long int) len); + } else { + /* QSFP case, CMIS case */ + /* if no pages needed, we're good */ + if ((off + len) <= ONE_ADDR_EEPROM_UNPAGED_SIZE) + return len; + /* if offset exceeds possible pages, we're not good */ + if (off >= ONE_ADDR_EEPROM_SIZE) + return SFP_EOF; + /* in between, are pages supported? */ + status = fn_6254_dn_f_sfp_eeprom_read(sfp, client, ®val, + ONE_ADDR_PAGEABLE_REG, 1); + if (status < 0) + return status; /* error out (no module?) */ + + if (sfp->chip[num].dev_class == ONE_ADDR) { + not_pageable = QSFP_NOT_PAGEABLE; + } else { + not_pageable = CMIS_NOT_PAGEABLE; + } + dev_dbg(&client->dev, + "Paging Register: 0x%x; not_pageable mask: 0x%x\n", + regval, not_pageable); + + if (regval & not_pageable) { + /* pages not supported, trim len to unpaged size */ + if (off >= ONE_ADDR_EEPROM_UNPAGED_SIZE) + return SFP_EOF; + maxlen = ONE_ADDR_EEPROM_UNPAGED_SIZE - off; + } else { + /* Pages supported, trim len to the end of pages */ + maxlen = ONE_ADDR_EEPROM_SIZE - off; + } + len = (len > maxlen) ? 
maxlen : len; + dev_dbg(&client->dev, + "page_legal, QSFP, off %lld len %ld\n", + off, (long int) len); + } + return len; +} + +static ssize_t fn_6254_dn_f_sfp_read(struct fn_6254_dn_f_sfp_data *sfp, + char *buf, loff_t off, size_t len, int num) +{ + int chunk; + int status = 0; + ssize_t retval; + size_t pending_len = 0, chunk_len = 0; + loff_t chunk_offset = 0, chunk_start_offset = 0; + loff_t chunk_end_offset = 0; + + if (unlikely(!len)) + return len; + + /* + * Read data from chip, protecting against concurrent updates + * from this host, but not from other I2C masters. + */ + mutex_lock(&sfp->lock); + + /* + * Confirm this access fits within the device suppored addr range + */ + status = fn_6254_dn_f_sfp_page_legal(sfp, off, len, num); + if ((status == SFP_EOF) || (status < 0)) { + mutex_unlock(&sfp->lock); + return status; + } + len = status; + + /* + * For each (128 byte) chunk involved in this request, issue a + * separate call to sff_eeprom_update_client(), to + * ensure that each access recalculates the client/page + * and writes the page register as needed. + * Note that chunk to page mapping is confusing, is different for + * QSFP and SFP, and never needs to be done. Don't try! + */ + pending_len = len; /* amount remaining to transfer */ + retval = 0; /* amount transferred */ + for (chunk = off >> 7; chunk <= (off + len - 1) >> 7; chunk++) { + + /* + * Compute the offset and number of bytes to be read/write + * + * 1. start at an offset not equal to 0 (within the chunk) + * and read/write less than the rest of the chunk + * 2. start at an offset not equal to 0 and read/write the rest + * of the chunk + * 3. start at offset 0 (within the chunk) and read/write less + * than entire chunk + * 4. 
start at offset 0 (within the chunk), and read/write + * the entire chunk + */ + chunk_start_offset = chunk * SFP_PAGE_SIZE; + chunk_end_offset = chunk_start_offset + SFP_PAGE_SIZE; + + if (chunk_start_offset < off) { + chunk_offset = off; + if ((off + pending_len) < chunk_end_offset) + chunk_len = pending_len; + else + chunk_len = chunk_end_offset - off; + } else { + chunk_offset = chunk_start_offset; + if (pending_len < SFP_PAGE_SIZE) + chunk_len = pending_len; + else + chunk_len = SFP_PAGE_SIZE; + } + + /* + * note: chunk_offset is from the start of the EEPROM, + * not the start of the chunk + */ + status = fn_6254_dn_f_sfp_eeprom_update_client(sfp, buf, + chunk_offset, chunk_len, num); + if (status != chunk_len) { + /* This is another 'no device present' path */ + if (status > 0) + retval += status; + if (retval == 0) + retval = status; + break; + } + buf += status; + pending_len -= status; + retval += status; + } + mutex_unlock(&sfp->lock); return retval; } @@ -206,7 +649,9 @@ fn_6254_dn_f_sfp_bin_read(struct file *filp, struct kobject *kobj, { int i; u8 cpldData = 0; - struct fn_6254_dn_f_sfp_data *data; + struct fn_6254_dn_f_sfp_data *sfp; + + sfp = dev_get_drvdata(container_of(kobj, struct device, kobj)); /*SFP 1-12*/ for(i=0; ilock); - data->use_smbus = use_smbus; - /* - * Export the EEPROM bytes through sysfs, since that's convenient. 
- * By default, only root should see the data (maybe passwords etc) - */ + int i, err; + struct fn_6254_dn_f_sfp_platform_data chip[MAX_PORT_NUM]; + struct fn_6254_dn_f_sfp_data *sfp; + + if (client->addr != SFP_EEPROM_A0_ADDR) { + DBG(printk(KERN_ALERT "%s - probe, bad i2c addr: 0x%x\n", __func__, client->addr)); + err = -EINVAL; + goto exit; + } - data->client = client; - data->driver_data = dev_id->driver_data; + sfp = kzalloc(sizeof(struct fn_6254_dn_f_sfp_data) + + NUM_ADDRESS * sizeof(struct i2c_client *), + GFP_KERNEL); + if (!sfp) { + err = -ENOMEM; + goto exit; + } - sysfs_bin_attr_init(&data->bin); + mutex_init(&sfp->lock); switch(dev_id->driver_data) { case cpld_group_a: + for(i=0; idev.kobj, &fn_6254_dn_f_sfpA_group); if (err) goto err_clients; break; case cpld_group_b: + for(i=0; idev.kobj, &fn_6254_dn_f_sfpB_group); if (err) goto err_clients; break; case cpld_group_c: + for(i=0; idev.kobj, &fn_6254_dn_f_sfpC_group); if (err) goto err_clients; @@ -339,23 +806,56 @@ static int fn_6254_dn_f_sfp_device_probe(struct i2c_client *client, const struct break; } - i2c_set_clientdata(client, data); + unsigned int write_max = 1; + + if (write_max > io_limit) + write_max = io_limit; + + if (write_max > I2C_SMBUS_BLOCK_MAX) + write_max = I2C_SMBUS_BLOCK_MAX; + + chip[i].write_max = write_max; + + for(i=0; ichip[i] = chip[i]; + + sfp->driver_data = dev_id->driver_data; + sfp->client[0] = client; + + /* SFF-8472 spec requires that the second I2C address be 0x51 */ + if (NUM_ADDRESS == 2) { + sfp->client[1] = i2c_new_dummy(client->adapter, SFP_EEPROM_A2_ADDR); + if (!sfp->client[1]) { + printk(KERN_ALERT "%s - address 0x51 unavailable\n", __func__); + err = -EADDRINUSE; + goto err_struct; + } + } + + i2c_set_clientdata(client, sfp); return 0; +err_struct: + if (NUM_ADDRESS == 2) { + if (sfp->client[1]) + i2c_unregister_device(sfp->client[1]); + } err_clients: - kfree(data); + kfree(sfp); +exit: + DBG(printk(KERN_ALERT "%s - probe error %d\n", __func__, err)); return 
err; } static int fn_6254_dn_f_sfp_device_remove(struct i2c_client *client) { - struct fn_6254_dn_f_sfp_data *data; + struct fn_6254_dn_f_sfp_data *sfp; int i; - data = i2c_get_clientdata(client); + sfp = i2c_get_clientdata(client); - switch(data->driver_data) + switch(sfp->driver_data) { case cpld_group_a: sysfs_remove_group(&client->dev.kobj, &fn_6254_dn_f_sfpA_group); @@ -371,6 +871,10 @@ static int fn_6254_dn_f_sfp_device_remove(struct i2c_client *client) break; } + for (i = 1; i < NUM_ADDRESS; i++) + i2c_unregister_device(sfp->client[i]); + + kfree(sfp); return 0; } @@ -390,7 +894,6 @@ static struct i2c_driver fn_6254_dn_f_sfp_driver = { .probe = fn_6254_dn_f_sfp_device_probe, .remove = fn_6254_dn_f_sfp_device_remove, .id_table = fn_6254_dn_f_sfp_id, - .address_list = normal_i2c, }; static int __init fn_6254_dn_f_sfp_init(void) @@ -414,6 +917,13 @@ static int __init fn_6254_dn_f_sfp_init(void) sprintf(SFP_CPLD_GROUPC_MAPPING[i], "sfp%d_eeprom",i+1+CPLDA_SFP_NUM+CPLDB_SFP_NUM); } + if (!io_limit) { + pr_err("optoe: io_limit must not be 0!\n"); + return -EINVAL; + } + + io_limit = rounddown_pow_of_two(io_limit); + return i2c_add_driver(&fn_6254_dn_f_sfp_driver); } @@ -423,7 +933,7 @@ static void __exit fn_6254_dn_f_sfp_exit(void) } MODULE_AUTHOR("Peter5 Lin "); -MODULE_DESCRIPTION("fn_6254_dn_f_cpld_mux driver"); +MODULE_DESCRIPTION("fn_6254_dn_f_sfp driver"); MODULE_LICENSE("GPL"); module_init(fn_6254_dn_f_sfp_init); From 3953574a9810fa1640969d3255005dd69a775214 Mon Sep 17 00:00:00 2001 From: PeterLin Date: Thu, 22 Aug 2019 09:27:38 +0800 Subject: [PATCH 20/20] 1. get ixgbe driver from sonic storage 2. 
change sfp eeprom driver of Porsche project from at24 to optoe --- .../debian/rules | 17 +- .../porsche/modules/pegatron_porsche_sfp.c | 768 +++++++++++++++--- 2 files changed, 651 insertions(+), 134 deletions(-) diff --git a/platform/nephos/sonic-platform-modules-pegatron/debian/rules b/platform/nephos/sonic-platform-modules-pegatron/debian/rules index 11f0ca097d7c..059dc84bffeb 100755 --- a/platform/nephos/sonic-platform-modules-pegatron/debian/rules +++ b/platform/nephos/sonic-platform-modules-pegatron/debian/rules @@ -26,7 +26,7 @@ SERVICE_DIR := service SCRIPTS_DIR := scripts CONF_DIR := conf PROCESS_DIR := pegaProcess - +IXGBE_VERSION := 5.2.4 %: dh $@ --with systemd,python2,python3 --buildsystem=pybuild @@ -37,10 +37,17 @@ clean: build: # For fn-6254-dn-f ixgbe driver - git clone https://github.com/Peter5Lin/kernelDriver - git --git-dir=kernelDriver/.git/ --work-tree=kernelDriver am ../fn-6254-dn-f/$(MODULE_DIR)/pegatron_fn_6254_dn_f_ixgbe/0001-modify-Intel-ixgbe-driver-for-fn-6254-dn-f.patch - cp kernelDriver/* $(MOD_SRC_DIR)/fn-6254-dn-f/$(MODULE_DIR)/pegatron_fn_6254_dn_f_ixgbe/ - rm -rf kernelDriver + rm -rf ./ixgbe-$(IXGBE_VERSION) + wget -O ixgbe.tar.gz "https://sonicstorage.blob.core.windows.net/packages/ixgbe-5.2.4.tar.gz?sv=2015-04-05&sr=b&sig=AaqJHHaPiJRp8R3HKobi0GNDgHAVnqijk6hpahwJ0Mg%3D&se=2154-10-05T22%3A19%3A29Z&sp=r" + tar xzf ixgbe.tar.gz + rm ixgbe-$(IXGBE_VERSION)/src/Makefile ixgbe-$(IXGBE_VERSION)/src/common.mk ixgbe-$(IXGBE_VERSION)/src/Module.supported + git init ixgbe-$(IXGBE_VERSION)/src + git --git-dir=./ixgbe-$(IXGBE_VERSION)/src/.git --work-tree=ixgbe-$(IXGBE_VERSION)/src add --all + git --git-dir=./ixgbe-$(IXGBE_VERSION)/src/.git --work-tree=ixgbe-$(IXGBE_VERSION)/src commit -m "unmodified ixgbe source" + cp ./fn-6254-dn-f/modules/pegatron_fn_6254_dn_f_ixgbe/0001-modify-Intel-ixgbe-driver-for-fn-6254-dn-f.patch ./ixgbe-$(IXGBE_VERSION)/src/ + git --git-dir=./ixgbe-$(IXGBE_VERSION)/src/.git --work-tree=ixgbe-$(IXGBE_VERSION)/src am 
0001-modify-Intel-ixgbe-driver-for-fn-6254-dn-f.patch + cp ./ixgbe-$(IXGBE_VERSION)/src/* $(MOD_SRC_DIR)/fn-6254-dn-f/$(MODULE_DIR)/pegatron_fn_6254_dn_f_ixgbe/ + (for mod in $(MODULE_DIRS); do \ make modules -C $(KERNEL_SRC)/build M=$(MOD_SRC_DIR)/$${mod}/modules; \ done) diff --git a/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_sfp.c b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_sfp.c index 9e4b8dbb975d..49a3e162bf0e 100644 --- a/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_sfp.c +++ b/platform/nephos/sonic-platform-modules-pegatron/porsche/modules/pegatron_porsche_sfp.c @@ -1,23 +1,119 @@ /* - * A SFP driver for the porsche platform + * pegatron_porsche_sfp.c - A driver to read and write the EEPROM on optical transceivers + * (SFP and QSFP) * - * Copyright (C) 2018 Pegatron Corporation. - * Peter5_Lin + * Copyright (C) 2014 Cumulus networks Inc. + * Copyright (C) 2017 Finisar Corp. + * Copyright (C) 2019 Pegatron Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or + * the Freeoftware Foundation; either version 2 of the License, or * (at your option) any later version. + */ + +/* + * Description: + * a) Optical transceiver EEPROM read/write transactions are just like + * the at24 eeproms managed by the at24.c i2c driver + * b) The register/memory layout is up to 256 128 byte pages defined by + * a "pages valid" register and switched via a "page select" + * register as explained in below diagram. + * c) 256 bytes are mapped at a time. 'Lower page 00h' is the first 128 + * bytes of address space, and always references the same + * location, independent of the page select register. + * All mapped pages are mapped into the upper 128 bytes + * (offset 128-255) of the i2c address. 
+ * d) Devices with one I2C address (eg QSFP) use I2C address 0x50 + * (A0h in the spec), and map all pages in the upper 128 bytes + * of that address. + * e) Devices with two I2C addresses (eg SFP) have 256 bytes of data + * at I2C address 0x50, and 256 bytes of data at I2C address + * 0x51 (A2h in the spec). Page selection and paged access + * only apply to this second I2C address (0x51). + * e) The address space is presented, by the driver, as a linear + * address space. For devices with one I2C client at address + * 0x50 (eg QSFP), offset 0-127 are in the lower + * half of address 50/A0h/client[0]. Offset 128-255 are in + * page 0, 256-383 are page 1, etc. More generally, offset + * 'n' resides in page (n/128)-1. ('page -1' is the lower + * half, offset 0-127). + * f) For devices with two I2C clients at address 0x50 and 0x51 (eg SFP), + * the address space places offset 0-127 in the lower + * half of 50/A0/client[0], offset 128-255 in the upper + * half. Offset 256-383 is in the lower half of 51/A2/client[1]. + * Offset 384-511 is in page 0, in the upper half of 51/A2/... + * Offset 512-639 is in page 1, in the upper half of 51/A2/... + * Offset 'n' is in page (n/128)-3 (for n > 383) * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * One I2c addressed (eg QSFP) Memory Map * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ + * 2-Wire Serial Address: 1010000x + * + * Lower Page 00h (128 bytes) + * ===================== + * | | + * | | + * | | + * | | + * | | + * | | + * | | + * | | + * | | + * | | + * |Page Select Byte(127)| + * ===================== + * | + * | + * | + * | + * V + * ------------------------------------------------------------ + * | | | | + * | | | | + * | | | | + * | | | | + * | | | | + * | | | | + * | | | | + * | | | | + * | | | | + * V V V V + * ------------ -------------- --------------- -------------- + * | | | | | | | | + * | Upper | | Upper | | Upper | | Upper | + * | Page 00h | | Page 01h | | Page 02h | | Page 03h | + * | | | (Optional) | | (Optional) | | (Optional | + * | | | | | | | for Cable | + * | | | | | | | Assemblies) | + * | ID | | AST | | User | | | + * | Fields | | Table | | EEPROM Data | | | + * | | | | | | | | + * | | | | | | | | + * | | | | | | | | + * ------------ -------------- --------------- -------------- + * + * The SFF 8436 (QSFP) spec only defines the 4 pages described above. + * In anticipation of future applications and devices, this driver + * supports access to the full architected range, 256 pages. + * + * The CMIS (Common Management Interface Specification) defines use of + * considerably more pages (at least to page 0xAF), which this driver + * supports. + * + * NOTE: This version of the driver ONLY SUPPORTS BANK 0 PAGES on CMIS + * devices. 
+ * + **/ + +#undef PEGA_DEBUG +/*#define pega_DEBUG*/ +#ifdef PEGA_DEBUG +#define DBG(x) x +#else +#define DBG(x) +#endif /* DEBUG */ #include #include @@ -26,28 +122,63 @@ #include #include #include -#include -#include -#include #include -#include #include +#include -#undef PEGA_DEBUG -/*#define PEGA_DEBUG*/ -#ifdef PEGA_DEBUG -#define DBG(x) x -#else -#define DBG(x) -#endif /* DEBUG */ +#define NUM_ADDRESS 2 + +/* The maximum length of a port name */ +#define MAX_PORT_NAME_LEN 20 + +/* fundamental unit of addressing for EEPROM */ +#define SFP_PAGE_SIZE 128 -#define SFP_EEPROM_SIZE 256 +/* + * Single address devices (eg QSFP) have 256 pages, plus the unpaged + * low 128 bytes. If the device does not support paging, it is + * only 2 'pages' long. + */ +#define SFP_ARCH_PAGES 256 +#define ONE_ADDR_EEPROM_SIZE ((1 + SFP_ARCH_PAGES) * SFP_PAGE_SIZE) +#define ONE_ADDR_EEPROM_UNPAGED_SIZE (2 * SFP_PAGE_SIZE) + +/* + * Dual address devices (eg SFP) have 256 pages, plus the unpaged + * low 128 bytes, plus 256 bytes at 0x50. If the device does not + * support paging, it is 4 'pages' long. 
+ */ +#define TWO_ADDR_EEPROM_SIZE ((3 + SFP_ARCH_PAGES) * SFP_PAGE_SIZE) +#define TWO_ADDR_EEPROM_UNPAGED_SIZE (4 * SFP_PAGE_SIZE) +#define TWO_ADDR_NO_0X51_SIZE (2 * SFP_PAGE_SIZE) + +/* + * flags to distinguish one-address (QSFP family) from two-address (SFP family) + * If the family is not known, figure it out when the device is accessed + */ +#define ONE_ADDR 1 +#define TWO_ADDR 2 +#define CMIS_ADDR 3 + +/* a few constants to find our way around the EEPROM */\ #define SFP_EEPROM_A0_ADDR 0x50 #define SFP_EEPROM_A2_ADDR 0x51 -#define SFP_EEPROM_BUS_TYPE I2C_SMBUS_I2C_BLOCK_DATA +#define SFP_PAGE_SELECT_REG 0x7F +#define ONE_ADDR_PAGEABLE_REG 0x02 +#define QSFP_NOT_PAGEABLE (1<<2) +#define CMIS_NOT_PAGEABLE (1<<7) +#define TWO_ADDR_PAGEABLE_REG 0x40 +#define TWO_ADDR_PAGEABLE (1<<4) +#define TWO_ADDR_0X51_REG 92 +#define TWO_ADDR_0X51_SUPP (1<<6) +#define SFP_ID_REG 0 +#define SFP_READ_OP 0 +#define SFP_WRITE_OP 1 +#define SFP_EOF 0 /* used for access beyond end of device */ #define CPLDA_SFP_NUM 24 #define CPLDB_SFP_NUM 12 #define CPLDC_SFP_NUM 18 +#define MAX_PORT_NUM CPLDA_SFP_NUM #define CPLDA_ADDRESS 0x74 #define CPLDB_ADDRESS 0x75 #define CPLDC_ADDRESS 0x76 @@ -61,10 +192,31 @@ enum cpld_croups { cpld_group_a, cpld_group_b, cpld_group_c}; -static const unsigned short normal_i2c[] = { SFP_EEPROM_A0_ADDR, SFP_EEPROM_A2_ADDR, I2C_CLIENT_END }; -static char SFP_CPLD_GROUPA_MAPPING[CPLDA_SFP_NUM][16]={0}; -static char SFP_CPLD_GROUPB_MAPPING[CPLDB_SFP_NUM][16]={0}; -static char SFP_CPLD_GROUPC_MAPPING[CPLDC_SFP_NUM][16]={0}; +struct porsche_sfp_platform_data { + u32 byte_len; /* size (sum of all addr) */ + u16 page_size; /* for writes */ + u8 flags; + void *dummy1; /* backward compatibility */ + void *dummy2; /* backward compatibility */ + + /* dev_class: ONE_ADDR (QSFP) or TWO_ADDR (SFP) */ + int dev_class; + unsigned int write_max; +}; + +struct porsche_sfp_data { + struct porsche_sfp_platform_data chip[MAX_PORT_NUM]; + + /* + * Lock protects against 
activities from other Linux tasks, + * but not from changes by other I2C masters. + */ + struct mutex lock; + struct bin_attribute bin; + kernel_ulong_t driver_data; + + struct i2c_client *client[]; +}; /* * This parameter is to help this driver avoid blocking other drivers out @@ -75,48 +227,91 @@ static char SFP_CPLD_GROUPC_MAPPING[CPLDC_SFP_NUM][16]={0}; * * This value is forced to be a power of two so that writes align on pages. */ -static unsigned io_limit = 128; -module_param(io_limit, uint, 0); -MODULE_PARM_DESC(io_limit, "Maximum bytes per I/O (default 128)"); +static unsigned int io_limit = SFP_PAGE_SIZE; /* - * Specs often allow 5 msec for a page write, sometimes 20 msec; + * specs often allow 5 msec for a page write, sometimes 20 msec; * it's important to recover from write timeouts. */ -static unsigned write_timeout = 25; -module_param(write_timeout, uint, 0); -MODULE_PARM_DESC(write_timeout, "Time (in ms) to try writes (default 25)"); - - -struct porsche_sfp_data { - struct mutex lock; - struct bin_attribute bin; - int use_smbus; - kernel_ulong_t driver_data; +static unsigned int write_timeout = 25; - struct i2c_client *client; -}; +static char SFP_CPLD_GROUPA_MAPPING[CPLDA_SFP_NUM][16]={0}; +static char SFP_CPLD_GROUPB_MAPPING[CPLDB_SFP_NUM][16]={0}; +static char SFP_CPLD_GROUPC_MAPPING[CPLDC_SFP_NUM][16]={0}; extern int pegatron_porsche_cpld_read(unsigned short cpld_addr, u8 reg); extern int pegatron_porsche_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -static ssize_t porsche_sfp_eeprom_read(struct porsche_sfp_data *data, char *buf, - unsigned offset, size_t count) +/*-------------------------------------------------------------------------*/ +/* + * This routine computes the addressing information to be used for + * a given r/w request. + * + * Task is to calculate the client (0 = i2c addr 50, 1 = i2c addr 51), + * the page, and the offset. + * + * Handles both single address (eg QSFP) and two address (eg SFP). 
+ * For SFP, offset 0-255 are on client[0], >255 is on client[1] + * Offset 256-383 are on the lower half of client[1] + * Pages are accessible on the upper half of client[1]. + * Offset >383 are in 128 byte pages mapped into the upper half + * + * For QSFP, all offsets are on client[0] + * offset 0-127 are on the lower half of client[0] (no paging) + * Pages are accessible on the upper half of client[1]. + * Offset >127 are in 128 byte pages mapped into the upper half + * + * Callers must not read/write beyond the end of a client or a page + * without recomputing the client/page. Hence offset (within page) + * plus length must be less than or equal to 128. (Note that this + * routine does not have access to the length of the call, hence + * cannot do the validity check.) + * + * Offset within Lower Page 00h and Upper Page 00h are not recomputed + */ + +static uint8_t porsche_sfp_translate_offset(struct porsche_sfp_data *sfp, + loff_t *offset, struct i2c_client **client, int num) { - struct i2c_msg msg[2]; - u8 msgbuf[2]; - struct i2c_client *client = data->client; - unsigned long timeout, read_time; - int status; + unsigned int page = 0; + + *client = sfp->client[0]; - memset(msg, 0, sizeof(msg)); + /* if SFP style, offset > 255, shift to i2c addr 0x51 */ + if (sfp->chip[num].dev_class == TWO_ADDR) { + if (*offset > 255) { + /* like QSFP, but shifted to client[1] */ + *client = sfp->client[1]; + *offset -= 256; + } + } + + /* + * if offset is in the range 0-128... + * page doesn't matter (using lower half), return 0. 
+ * offset is already correct (don't add 128 to get to paged area) + */ + if (*offset < SFP_PAGE_SIZE) + return page; + + /* note, page will always be positive since *offset >= 128 */ + page = (*offset >> 7)-1; + /* 0x80 places the offset in the top half, offset is last 7 bits */ + *offset = SFP_PAGE_SIZE + (*offset & 0x7f); - if (count > io_limit) - count = io_limit; + return page; /* note also returning cliporsche_sfp_translate_offsetent and offset */ +} + +static ssize_t porsche_sfp_eeprom_read(struct porsche_sfp_data *sfp, + struct i2c_client *client, + char *buf, unsigned int offset, size_t count) +{ + unsigned long timeout, read_time; + int status, i; - /* Smaller eeproms can work given some SMBus extension calls */ - if (count > I2C_SMBUS_BLOCK_MAX) - count = I2C_SMBUS_BLOCK_MAX; + /*smaller eeproms can work given some SMBus extension calls */ + if (count > I2C_SMBUS_BLOCK_MAX) + count = I2C_SMBUS_BLOCK_MAX; /* * Reads fail if the previous write didn't complete yet. We may @@ -126,75 +321,323 @@ static ssize_t porsche_sfp_eeprom_read(struct porsche_sfp_data *data, char *buf, timeout = jiffies + msecs_to_jiffies(write_timeout); do { read_time = jiffies; - switch (data->use_smbus) { - case I2C_SMBUS_I2C_BLOCK_DATA: - status = i2c_smbus_read_i2c_block_data(client, offset, - count, buf); - break; - case I2C_SMBUS_WORD_DATA: - status = i2c_smbus_read_word_data(client, offset); - if (status >= 0) { - buf[0] = status & 0xff; - if (count == 2) - buf[1] = status >> 8; - status = count; - } - break; - case I2C_SMBUS_BYTE_DATA: - status = i2c_smbus_read_byte_data(client, offset); - if (status >= 0) { - buf[0] = status; - status = count; - } - break; - default: - status = i2c_transfer(client->adapter, msg, 2); - if (status == 2) - status = count; - } - dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n", + + status = i2c_smbus_read_i2c_block_data(client, offset, count, buf); + + dev_dbg(&client->dev, "eeprom read %zu@%d --> %d (%ld)\n", count, offset, status, jiffies); - 
if (status == count) + if (status == count) /* happy path */ return count; + if (status == -ENXIO) /* no module present */ + return status; + /* REVISIT: at HZ=100, this is sloooow */ - msleep(1); + usleep_range(1000, 2000); } while (time_before(read_time, timeout)); return -ETIMEDOUT; } -static ssize_t porsche_sfp_read(struct porsche_sfp_data *data, - char *buf, loff_t off, size_t count) +static ssize_t porsche_sfp_eeprom_write(struct porsche_sfp_data *sfp, + struct i2c_client *client, + const char *buf, + unsigned int offset, size_t count) { - ssize_t retval = 0; + ssize_t status; + unsigned long timeout, write_time; + unsigned int next_page_start; + int i = 0; + + /* write max is at most a page + * (In this driver, write_max is actually one byte!) + */ + if (count > sfp->chip[i].write_max) + count = sfp->chip[i].write_max; + + /* shorten count if necessary to avoid crossing page boundary */ + next_page_start = roundup(offset + 1, SFP_PAGE_SIZE); + if (offset + count > next_page_start) + count = next_page_start - offset; - if (unlikely(!count)) - return count; + if (count > I2C_SMBUS_BLOCK_MAX) + count = I2C_SMBUS_BLOCK_MAX; /* - * Read data from chip, protecting against concurrent updates - * from this host, but not from other I2C masters. + * Reads fail if the previous write didn't complete yet. We may + * loop a few times until this one succeeds, waiting at least + * long enough for one entire page write to work. 
*/ - mutex_lock(&data->lock); + timeout = jiffies + msecs_to_jiffies(write_timeout); + do { + write_time = jiffies; + + status = i2c_smbus_write_i2c_block_data(client, + offset, count, buf); + if (status == 0) + return count; + + /* REVISIT: at HZ=100, this is sloooow */ + usleep_range(1000, 2000); + } while (time_before(write_time, timeout)); + + return -ETIMEDOUT; +} + +static ssize_t porsche_sfp_eeprom_update_client(struct porsche_sfp_data *sfp, + char *buf, loff_t off, size_t count, int num) +{ + struct i2c_client *client; + ssize_t retval = 0; + uint8_t page = 0; + loff_t phy_offset = off; + int ret = 0; + + page = porsche_sfp_translate_offset(sfp, &phy_offset, &client, num); + + dev_dbg(&client->dev, + "%s off %lld page:%d phy_offset:%lld, count:%ld\n", + __func__, off, page, phy_offset, (long int) count); + if (page > 0) { + ret = porsche_sfp_eeprom_write(sfp, client, &page, + SFP_PAGE_SELECT_REG, 1); + if (ret < 0) { + dev_dbg(&client->dev, + "Write page register for page %d failed ret:%d!\n", + page, ret); + return ret; + } + } while (count) { ssize_t status; - status = porsche_sfp_eeprom_read(data, buf, off, count); + status = porsche_sfp_eeprom_read(sfp, client, + buf, phy_offset, count); + if (status <= 0) { if (retval == 0) retval = status; break; } buf += status; - off += status; + phy_offset += status; count -= status; retval += status; } - mutex_unlock(&data->lock); + + if (page > 0) { + /* return the page register to page 0 (why?) */ + page = 0; + ret = porsche_sfp_eeprom_write(sfp, client, &page, + SFP_PAGE_SELECT_REG, 1); + if (ret < 0) { + dev_err(&client->dev, + "Restore page register to 0 failed:%d!\n", ret); + /* error only if nothing has been transferred */ + if (retval == 0) + retval = ret; + } + } + return retval; +} + +/* + * Figure out if this access is within the range of supported pages. + * Note this is called on every access because we don't know if the + * module has been replaced since the last call. 
+ * If/when modules support more pages, this is the routine to update + * to validate and allow access to additional pages. + * + * Returns updated len for this access: + * - entire access is legal, original len is returned. + * - access begins legal but is too long, len is truncated to fit. + * - initial offset exceeds supported pages, return OPTOE_EOF (zero) + */ +static ssize_t porsche_sfp_page_legal(struct porsche_sfp_data *sfp, + loff_t off, size_t len, int num) +{ + struct i2c_client *client = sfp->client[0]; + u8 regval; + int not_pageable; + int status; + size_t maxlen; + + if (off < 0) + return -EINVAL; + + if (sfp->chip[num].dev_class == TWO_ADDR) { + /* SFP case */ + /* if only using addr 0x50 (first 256 bytes) we're good */ + if ((off + len) <= TWO_ADDR_NO_0X51_SIZE) + return len; + /* if offset exceeds possible pages, we're not good */ + if (off >= TWO_ADDR_EEPROM_SIZE) + return SFP_EOF; + /* in between, are pages supported? */ + status = porsche_sfp_eeprom_read(sfp, client, ®val, + TWO_ADDR_PAGEABLE_REG, 1); + if (status < 0) + return status; /* error out (no module?) */ + if (regval & TWO_ADDR_PAGEABLE) { + /* Pages supported, trim len to the end of pages */ + maxlen = TWO_ADDR_EEPROM_SIZE - off; + } else { + /* pages not supported, trim len to unpaged size */ + if (off >= TWO_ADDR_EEPROM_UNPAGED_SIZE) + return SFP_EOF; + + /* will be accessing addr 0x51, is that supported? */ + /* byte 92, bit 6 implies DDM support, 0x51 support */ + status = porsche_sfp_eeprom_read(sfp, client, ®val, + TWO_ADDR_0X51_REG, 1); + if (status < 0) + return status; + if (regval & TWO_ADDR_0X51_SUPP) { + /* addr 0x51 is OK */ + maxlen = TWO_ADDR_EEPROM_UNPAGED_SIZE - off; + } else { + /* addr 0x51 NOT supported, trim to 256 max */ + if (off >= TWO_ADDR_NO_0X51_SIZE) + return SFP_EOF; + maxlen = TWO_ADDR_NO_0X51_SIZE - off; + } + } + len = (len > maxlen) ? 
maxlen : len; + dev_dbg(&client->dev, + "page_legal, SFP, off %lld len %ld\n", + off, (long int) len); + } else { + /* QSFP case, CMIS case */ + /* if no pages needed, we're good */ + if ((off + len) <= ONE_ADDR_EEPROM_UNPAGED_SIZE) + return len; + /* if offset exceeds possible pages, we're not good */ + if (off >= ONE_ADDR_EEPROM_SIZE) + return SFP_EOF; + /* in between, are pages supported? */ + status = porsche_sfp_eeprom_read(sfp, client, ®val, + ONE_ADDR_PAGEABLE_REG, 1); + if (status < 0) + return status; /* error out (no module?) */ + + if (sfp->chip[num].dev_class == ONE_ADDR) { + not_pageable = QSFP_NOT_PAGEABLE; + } else { + not_pageable = CMIS_NOT_PAGEABLE; + } + dev_dbg(&client->dev, + "Paging Register: 0x%x; not_pageable mask: 0x%x\n", + regval, not_pageable); + + if (regval & not_pageable) { + /* pages not supported, trim len to unpaged size */ + if (off >= ONE_ADDR_EEPROM_UNPAGED_SIZE) + return SFP_EOF; + maxlen = ONE_ADDR_EEPROM_UNPAGED_SIZE - off; + } else { + /* Pages supported, trim len to the end of pages */ + maxlen = ONE_ADDR_EEPROM_SIZE - off; + } + len = (len > maxlen) ? maxlen : len; + dev_dbg(&client->dev, + "page_legal, QSFP, off %lld len %ld\n", + off, (long int) len); + } + return len; +} + +static ssize_t porsche_sfp_read(struct porsche_sfp_data *sfp, + char *buf, loff_t off, size_t len, int num) +{ + int chunk; + int status = 0; + ssize_t retval; + size_t pending_len = 0, chunk_len = 0; + loff_t chunk_offset = 0, chunk_start_offset = 0; + loff_t chunk_end_offset = 0; + + if (unlikely(!len)) + return len; + + /* + * Read data from chip, protecting against concurrent updates + * from this host, but not from other I2C masters. 
+ */ + mutex_lock(&sfp->lock); + + /* + * Confirm this access fits within the device suppored addr range + */ + status = porsche_sfp_page_legal(sfp, off, len, num); + if ((status == SFP_EOF) || (status < 0)) { + mutex_unlock(&sfp->lock); + return status; + } + len = status; + + /* + * For each (128 byte) chunk involved in this request, issue a + * separate call to sff_eeprom_update_client(), to + * ensure that each access recalculates the client/page + * and writes the page register as needed. + * Note that chunk to page mapping is confusing, is different for + * QSFP and SFP, and never needs to be done. Don't try! + */ + pending_len = len; /* amount remaining to transfer */ + retval = 0; /* amount transferred */ + for (chunk = off >> 7; chunk <= (off + len - 1) >> 7; chunk++) { + + /* + * Compute the offset and number of bytes to be read/write + * + * 1. start at an offset not equal to 0 (within the chunk) + * and read/write less than the rest of the chunk + * 2. start at an offset not equal to 0 and read/write the rest + * of the chunk + * 3. start at offset 0 (within the chunk) and read/write less + * than entire chunk + * 4. 
start at offset 0 (within the chunk), and read/write + * the entire chunk + */ + chunk_start_offset = chunk * SFP_PAGE_SIZE; + chunk_end_offset = chunk_start_offset + SFP_PAGE_SIZE; + + if (chunk_start_offset < off) { + chunk_offset = off; + if ((off + pending_len) < chunk_end_offset) + chunk_len = pending_len; + else + chunk_len = chunk_end_offset - off; + } else { + chunk_offset = chunk_start_offset; + if (pending_len < SFP_PAGE_SIZE) + chunk_len = pending_len; + else + chunk_len = SFP_PAGE_SIZE; + } + + /* + * note: chunk_offset is from the start of the EEPROM, + * not the start of the chunk + */ + status = porsche_sfp_eeprom_update_client(sfp, buf, + chunk_offset, chunk_len, num); + if (status != chunk_len) { + /* This is another 'no device present' path */ + if (status > 0) + retval += status; + if (retval == 0) + retval = status; + break; + } + buf += status; + pending_len -= status; + retval += status; + } + mutex_unlock(&sfp->lock); return retval; } @@ -206,7 +649,9 @@ porsche_sfp_bin_read(struct file *filp, struct kobject *kobj, { int i; u8 cpldData = 0; - struct porsche_sfp_data *data; + struct porsche_sfp_data *sfp; + + sfp = dev_get_drvdata(container_of(kobj, struct device, kobj)); /*SFP 1-12*/ for(i=0; ilock); - data->use_smbus = use_smbus; - /* - * Export the EEPROM bytes through sysfs, since that's convenient. 
- * By default, only root should see the data (maybe passwords etc) - */ + int i, err; + struct porsche_sfp_platform_data chip[MAX_PORT_NUM]; + struct porsche_sfp_data *sfp; + + if (client->addr != SFP_EEPROM_A0_ADDR) { + DBG(printk(KERN_ALERT "%s - probe, bad i2c addr: 0x%x\n", __func__, client->addr)); + err = -EINVAL; + goto exit; + } - data->client = client; - data->driver_data = dev_id->driver_data; + sfp = kzalloc(sizeof(struct porsche_sfp_data) + + NUM_ADDRESS * sizeof(struct i2c_client *), + GFP_KERNEL); + if (!sfp) { + err = -ENOMEM; + goto exit; + } - sysfs_bin_attr_init(&data->bin); + mutex_init(&sfp->lock); switch(dev_id->driver_data) { case cpld_group_a: + for(i=0; idev.kobj, &porsche_sfpA_group); if (err) goto err_clients; break; case cpld_group_b: + for(i=0; idev.kobj, &porsche_sfpB_group); if (err) goto err_clients; break; case cpld_group_c: + for(i=0; idev.kobj, &porsche_sfpC_group); if (err) goto err_clients; @@ -339,23 +806,56 @@ static int porsche_sfp_device_probe(struct i2c_client *client, const struct i2c_ break; } - i2c_set_clientdata(client, data); + unsigned int write_max = 1; + + if (write_max > io_limit) + write_max = io_limit; + + if (write_max > I2C_SMBUS_BLOCK_MAX) + write_max = I2C_SMBUS_BLOCK_MAX; + + chip[i].write_max = write_max; + + for(i=0; ichip[i] = chip[i]; + + sfp->driver_data = dev_id->driver_data; + sfp->client[0] = client; + + /* SFF-8472 spec requires that the second I2C address be 0x51 */ + if (NUM_ADDRESS == 2) { + sfp->client[1] = i2c_new_dummy(client->adapter, SFP_EEPROM_A2_ADDR); + if (!sfp->client[1]) { + printk(KERN_ALERT "%s - address 0x51 unavailable\n", __func__); + err = -EADDRINUSE; + goto err_struct; + } + } + + i2c_set_clientdata(client, sfp); return 0; +err_struct: + if (NUM_ADDRESS == 2) { + if (sfp->client[1]) + i2c_unregister_device(sfp->client[1]); + } err_clients: - kfree(data); + kfree(sfp); +exit: + DBG(printk(KERN_ALERT "%s - probe error %d\n", __func__, err)); return err; } static int 
porsche_sfp_device_remove(struct i2c_client *client) { - struct porsche_sfp_data *data; + struct porsche_sfp_data *sfp; int i; - data = i2c_get_clientdata(client); + sfp = i2c_get_clientdata(client); - switch(data->driver_data) + switch(sfp->driver_data) { case cpld_group_a: sysfs_remove_group(&client->dev.kobj, &porsche_sfpA_group); @@ -371,6 +871,10 @@ static int porsche_sfp_device_remove(struct i2c_client *client) break; } + for (i = 1; i < NUM_ADDRESS; i++) + i2c_unregister_device(sfp->client[i]); + + kfree(sfp); return 0; } @@ -390,7 +894,6 @@ static struct i2c_driver porsche_sfp_driver = { .probe = porsche_sfp_device_probe, .remove = porsche_sfp_device_remove, .id_table = porsche_sfp_id, - .address_list = normal_i2c, }; static int __init porsche_sfp_init(void) @@ -414,6 +917,13 @@ static int __init porsche_sfp_init(void) sprintf(SFP_CPLD_GROUPC_MAPPING[i], "sfp%d_eeprom",i+1+CPLDA_SFP_NUM+CPLDB_SFP_NUM); } + if (!io_limit) { + pr_err("optoe: io_limit must not be 0!\n"); + return -EINVAL; + } + + io_limit = rounddown_pow_of_two(io_limit); + return i2c_add_driver(&porsche_sfp_driver); } @@ -423,7 +933,7 @@ static void __exit porsche_sfp_exit(void) } MODULE_AUTHOR("Peter5 Lin "); -MODULE_DESCRIPTION("porsche_cpld_mux driver"); +MODULE_DESCRIPTION("porsche_sfp driver"); MODULE_LICENSE("GPL"); module_init(porsche_sfp_init);