Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next
@@ -656,25 +656,30 @@ static void efx_stop_datapath(struct efx_nic *efx)
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct pci_dev *dev = efx->pci_dev;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	rc = efx_nic_flush_queues(efx);
	if (rc && EFX_WORKAROUND_7803(efx)) {
		/* Schedule a reset to recover from the flush failure. The
		 * descriptor caches reference memory we're about to free,
		 * but falcon_reconfigure_mac_wrapper() won't reconnect
		 * the MACs because of the pending reset. */
		netif_err(efx, drv, efx->net_dev,
			  "Resetting to recover from flush failure\n");
		efx_schedule_reset(efx, RESET_TYPE_ALL);
	} else if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	/* Only perform flush if dma is enabled */
	if (dev->is_busmaster) {
		rc = efx_nic_flush_queues(efx);

		if (rc && EFX_WORKAROUND_7803(efx)) {
			/* Schedule a reset to recover from the flush failure. The
			 * descriptor caches reference memory we're about to free,
			 * but falcon_reconfigure_mac_wrapper() won't reconnect
			 * the MACs because of the pending reset. */
			netif_err(efx, drv, efx->net_dev,
				  "Resetting to recover from flush failure\n");
			efx_schedule_reset(efx, RESET_TYPE_ALL);
		} else if (rc) {
			netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
		} else {
			netif_dbg(efx, drv, efx->net_dev,
				  "successfully flushed all queues\n");
		}
	}

	efx_for_each_channel(channel, efx) {
@@ -2492,8 +2497,8 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
	efx_fini_io(efx);
	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");

	pci_set_drvdata(pci_dev, NULL);
	efx_fini_struct(efx);
	pci_set_drvdata(pci_dev, NULL);
	free_netdev(efx->net_dev);
};

@@ -2695,6 +2700,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 fail2:
	efx_fini_struct(efx);
 fail1:
	pci_set_drvdata(pci_dev, NULL);
	WARN_ON(rc > 0);
	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);

@@ -1108,6 +1108,39 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
	return 0;
}

static int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
					 struct ethtool_eeprom *ee,
					 u8 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int ret;

	if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
		return -EOPNOTSUPP;

	mutex_lock(&efx->mac_lock);
	ret = efx->phy_op->get_module_eeprom(efx, ee, data);
	mutex_unlock(&efx->mac_lock);

	return ret;
}

static int efx_ethtool_get_module_info(struct net_device *net_dev,
				       struct ethtool_modinfo *modinfo)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int ret;

	if (!efx->phy_op || !efx->phy_op->get_module_info)
		return -EOPNOTSUPP;

	mutex_lock(&efx->mac_lock);
	ret = efx->phy_op->get_module_info(efx, modinfo);
	mutex_unlock(&efx->mac_lock);

	return ret;
}

const struct ethtool_ops efx_ethtool_ops = {
	.get_settings = efx_ethtool_get_settings,
	.set_settings = efx_ethtool_set_settings,

@@ -1137,4 +1170,6 @@ const struct ethtool_ops efx_ethtool_ops = {
	.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
	.get_rxfh_indir = efx_ethtool_get_rxfh_indir,
	.set_rxfh_indir = efx_ethtool_set_rxfh_indir,
	.get_module_info = efx_ethtool_get_module_info,
	.get_module_eeprom = efx_ethtool_get_module_eeprom,
};

@@ -739,6 +739,80 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
	return NULL;
}

#define SFP_PAGE_SIZE 128
#define SFP_NUM_PAGES 2
static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
					  struct ethtool_eeprom *ee, u8 *data)
{
	u8 outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX];
	u8 inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN];
	size_t outlen;
	int rc;
	unsigned int payload_len;
	unsigned int space_remaining = ee->len;
	unsigned int page;
	unsigned int page_off;
	unsigned int to_copy;
	u8 *user_data = data;

	BUILD_BUG_ON(SFP_PAGE_SIZE * SFP_NUM_PAGES != ETH_MODULE_SFF_8079_LEN);

	page_off = ee->offset % SFP_PAGE_SIZE;
	page = ee->offset / SFP_PAGE_SIZE;

	while (space_remaining && (page < SFP_NUM_PAGES)) {
		MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);

		rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO,
				  inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf),
				  &outlen);
		if (rc)
			return rc;

		if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST +
			      SFP_PAGE_SIZE))
			return -EIO;

		payload_len = MCDI_DWORD(outbuf,
					 GET_PHY_MEDIA_INFO_OUT_DATALEN);
		if (payload_len != SFP_PAGE_SIZE)
			return -EIO;

		/* Copy as much as we can into data */
		payload_len -= page_off;
		to_copy = (space_remaining < payload_len) ?
			space_remaining : payload_len;

		memcpy(user_data,
		       outbuf + page_off +
		       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST,
		       to_copy);

		space_remaining -= to_copy;
		user_data += to_copy;
		page_off = 0;
		page++;
	}

	return 0;
}

static int efx_mcdi_phy_get_module_info(struct efx_nic *efx,
					struct ethtool_modinfo *modinfo)
{
	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;

	switch (phy_cfg->media) {
	case MC_CMD_MEDIA_SFP_PLUS:
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

const struct efx_phy_operations efx_mcdi_phy_ops = {
	.probe = efx_mcdi_phy_probe,
	.init = efx_port_dummy_op_int,

@@ -751,4 +825,6 @@ const struct efx_phy_operations efx_mcdi_phy_ops = {
	.test_alive = efx_mcdi_phy_test_alive,
	.run_tests = efx_mcdi_phy_run_tests,
	.test_name = efx_mcdi_phy_test_name,
	.get_module_eeprom = efx_mcdi_phy_get_module_eeprom,
	.get_module_info = efx_mcdi_phy_get_module_info,
};

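[Note: the MCDI implementation above fetches the SFF-8079 image one 128-byte page per MC_CMD_GET_PHY_MEDIA_INFO call and copies only the requested window out of each page. The standalone sketch below restates just that offset-to-page walk for illustration; it is not part of the patch, and the example read in main() is made up.]

/* Illustrative sketch (not from the patch): the page/offset arithmetic used
 * by efx_mcdi_phy_get_module_eeprom(), shown in isolation.
 */
#include <stdio.h>

#define SFP_PAGE_SIZE 128
#define SFP_NUM_PAGES 2

static void walk_pages(unsigned int offset, unsigned int len)
{
	unsigned int space_remaining = len;
	unsigned int page_off = offset % SFP_PAGE_SIZE;
	unsigned int page = offset / SFP_PAGE_SIZE;

	while (space_remaining && page < SFP_NUM_PAGES) {
		/* Each MCDI response carries one full 128-byte page;
		 * copy only the part the caller asked for.
		 */
		unsigned int avail = SFP_PAGE_SIZE - page_off;
		unsigned int to_copy =
			space_remaining < avail ? space_remaining : avail;

		printf("page %u: copy %u bytes starting at offset %u\n",
		       page, to_copy, page_off);

		space_remaining -= to_copy;
		page_off = 0;
		page++;
	}
}

int main(void)
{
	/* e.g. a 100-byte read starting at offset 130 touches only
	 * page 1, beginning 2 bytes in.
	 */
	walk_pages(130, 100);
	return 0;
}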
@@ -252,8 +252,6 @@ struct efx_rx_page_state {
 * @max_fill: RX descriptor maximum fill level (<= ring size)
 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
 *	(<= @max_fill)
 * @fast_fill_limit: The level to which a fast fill will fill
 *	(@fast_fill_trigger <= @fast_fill_limit <= @max_fill)
 * @min_fill: RX descriptor minimum non-zero fill level.
 *	This records the minimum fill level observed when a ring
 *	refill was triggered.

@@ -274,7 +272,6 @@ struct efx_rx_queue {
	int removed_count;
	unsigned int max_fill;
	unsigned int fast_fill_trigger;
	unsigned int fast_fill_limit;
	unsigned int min_fill;
	unsigned int min_overfill;
	unsigned int alloc_page_count;

@@ -522,6 +519,11 @@ struct efx_phy_operations {
	int (*test_alive) (struct efx_nic *efx);
	const char *(*test_name) (struct efx_nic *efx, unsigned int index);
	int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
	int (*get_module_eeprom) (struct efx_nic *efx,
				  struct ethtool_eeprom *ee,
				  u8 *data);
	int (*get_module_info) (struct efx_nic *efx,
				struct ethtool_modinfo *modinfo);
};

/**

@@ -449,6 +449,37 @@ static void qt202x_phy_remove(struct efx_nic *efx)
	efx->phy_data = NULL;
}

static int qt202x_phy_get_module_info(struct efx_nic *efx,
				      struct ethtool_modinfo *modinfo)
{
	modinfo->type = ETH_MODULE_SFF_8079;
	modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	return 0;
}

static int qt202x_phy_get_module_eeprom(struct efx_nic *efx,
					struct ethtool_eeprom *ee, u8 *data)
{
	int mmd, reg_base, rc, i;

	if (efx->phy_type == PHY_TYPE_QT2025C) {
		mmd = MDIO_MMD_PCS;
		reg_base = 0xd000;
	} else {
		mmd = MDIO_MMD_PMAPMD;
		reg_base = 0x8007;
	}

	for (i = 0; i < ee->len; i++) {
		rc = efx_mdio_read(efx, mmd, reg_base + ee->offset + i);
		if (rc < 0)
			return rc;
		data[i] = rc;
	}

	return 0;
}

const struct efx_phy_operations falcon_qt202x_phy_ops = {
	.probe = qt202x_phy_probe,
	.init = qt202x_phy_init,

@@ -459,4 +490,6 @@ const struct efx_phy_operations falcon_qt202x_phy_ops = {
	.get_settings = qt202x_phy_get_settings,
	.set_settings = efx_mdio_set_settings,
	.test_alive = efx_mdio_test_alive,
	.get_module_eeprom = qt202x_phy_get_module_eeprom,
	.get_module_info = qt202x_phy_get_module_info,
};

@@ -76,12 +76,7 @@ static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold = 90;

/* This is the percentage fill level to which an RX queue will be refilled
 * when the "RX refill threshold" is reached.
 */
static unsigned int rx_refill_limit = 95;
static unsigned int rx_refill_threshold;

/*
 * RX maximum head room required.

@@ -342,7 +337,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@fast_fill_limit. If there is insufficient atomic
 * @rx_queue->@max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practise,

@@ -367,15 +362,14 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
		rx_queue->min_fill = fill_level;
	}

	space = rx_queue->fast_fill_limit - fill_level;
	if (space < EFX_RX_BATCH)
		goto out;
	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d using %s allocation\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->fast_fill_limit,
		   rx_queue->max_fill,
		   channel->rx_alloc_push_pages ? "page" : "skb");

	do {

@@ -681,7 +675,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, limit;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

@@ -694,12 +688,17 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
	limit = max_fill * min(rx_refill_limit, 100U) / 100U;
	max_trigger = max_fill - EFX_RX_BATCH;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->fast_fill_limit = limit;

	/* Set up RX descriptor ring */
	rx_queue->enabled = true;

@@ -746,5 +745,5 @@ MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring fast/slow fill threshold (%)");
		 "RX descriptor ring refill threshold (%)");

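[Note: the rx.c changes above drop the rx_refill_limit percentage and derive the fast-fill trigger from a fixed watermark instead: by default the queue refills as soon as one batch of descriptors is free, and a non-zero rx_refill_threshold module parameter can only lower that trigger. The sketch below restates the calculation outside the driver; the ring size, head-room and batch values in main() are illustrative assumptions, not the driver's actual constants.]

/* Illustrative sketch (not from the patch): the fast-fill trigger
 * computation performed in efx_init_rx_queue() after this change.
 */
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int fill_trigger(unsigned int rxq_entries,
				 unsigned int head_room,
				 unsigned int rx_batch,
				 unsigned int refill_threshold_pct)
{
	unsigned int max_fill = rxq_entries - head_room;
	unsigned int max_trigger = max_fill - rx_batch;
	unsigned int trigger;

	if (refill_threshold_pct != 0) {
		/* Module parameter given: percentage of max_fill, clamped */
		trigger = max_fill * min_u(refill_threshold_pct, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		/* Default: refill whenever one batch of slots is free */
		trigger = max_trigger;
	}
	return trigger;
}

int main(void)
{
	/* Example only: 512-entry ring, 8 slots head room, batches of 8 */
	printf("default trigger: %u\n", fill_trigger(512, 8, 8, 0));
	printf("90%% threshold:   %u\n", fill_trigger(512, 8, 8, 90));
	return 0;
}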
@@ -136,6 +136,23 @@ struct ethtool_eeprom {
	__u8	data[0];
};

/**
 * struct ethtool_modinfo - plugin module eeprom information
 * @cmd: %ETHTOOL_GMODULEINFO
 * @type: Standard the module information conforms to %ETH_MODULE_SFF_xxxx
 * @eeprom_len: Length of the eeprom
 *
 * This structure is used to return the information to
 * properly size memory for a subsequent call to %ETHTOOL_GMODULEEEPROM.
 * The type code indicates the eeprom data format
 */
struct ethtool_modinfo {
	__u32	cmd;
	__u32	type;
	__u32	eeprom_len;
	__u32	reserved[8];
};

/**
 * struct ethtool_coalesce - coalescing parameters for IRQs and stats updates
 * @cmd: ETHTOOL_{G,S}COALESCE

@@ -920,6 +937,9 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
 * @get_ts_info: Get the time stamping and PTP hardware clock capabilities.
 *	Drivers supporting transmit time stamps in software should set this to
 *	ethtool_op_get_ts_info().
 * @get_module_info: Get the size and type of the eeprom contained within
 *	a plug-in module.
 * @get_module_eeprom: Get the eeprom information from the plug-in module
 *
 * All operations are optional (i.e. the function pointer may be set
 * to %NULL) and callers must take this into account. Callers must

@@ -982,6 +1002,11 @@ struct ethtool_ops {
			    struct ethtool_dump *, void *);
	int	(*set_dump)(struct net_device *, struct ethtool_dump *);
	int	(*get_ts_info)(struct net_device *, struct ethtool_ts_info *);
	int	(*get_module_info)(struct net_device *,
				   struct ethtool_modinfo *);
	int	(*get_module_eeprom)(struct net_device *,
				     struct ethtool_eeprom *, u8 *);


};
#endif /* __KERNEL__ */

@@ -1057,6 +1082,8 @@ struct ethtool_ops {
#define ETHTOOL_GET_DUMP_FLAG	0x0000003f /* Get dump settings */
#define ETHTOOL_GET_DUMP_DATA	0x00000040 /* Get dump data */
#define ETHTOOL_GET_TS_INFO	0x00000041 /* Get time stamping and PHC info */
#define ETHTOOL_GMODULEINFO	0x00000042 /* Get plug-in module information */
#define ETHTOOL_GMODULEEEPROM	0x00000043 /* Get plug-in module eeprom */

/* compatibility with older code */
#define SPARC_ETH_GSET		ETHTOOL_GSET

@@ -1206,6 +1233,12 @@ struct ethtool_ops {
#define RX_CLS_LOC_FIRST	0xfffffffe
#define RX_CLS_LOC_LAST		0xfffffffd

/* EEPROM Standards for plug in modules */
#define ETH_MODULE_SFF_8079		0x1
#define ETH_MODULE_SFF_8079_LEN		256
#define ETH_MODULE_SFF_8472		0x2
#define ETH_MODULE_SFF_8472_LEN		512

/* Reset flags */
/* The reset() operation must clear the flags for the components which
 * were actually reset. On successful return, the flags indicate the

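[Note: for context, userspace reaches the two new commands through the usual SIOCETHTOOL ioctl: ETHTOOL_GMODULEINFO first reports the module type and EEPROM size, then ETHTOOL_GMODULEEEPROM reads the image itself. The program below is a minimal sketch of that two-step flow, not part of the patch; the interface name "eth0" and the terse error handling are assumptions. Later releases of the ethtool utility expose the same pair of commands as ethtool -m / --dump-module-eeprom.]

/* Minimal userspace sketch (illustrative only): query module info, then
 * dump the module EEPROM, using the ioctl interface added by this merge.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_modinfo modinfo;
	struct ethtool_eeprom *eeprom;
	struct ifreq ifr;
	unsigned int i;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface */

	/* Step 1: ask the driver for the module type and EEPROM size */
	memset(&modinfo, 0, sizeof(modinfo));
	modinfo.cmd = ETHTOOL_GMODULEINFO;
	ifr.ifr_data = (void *)&modinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GMODULEINFO");
		return 1;
	}
	printf("module type 0x%x, eeprom length %u\n",
	       modinfo.type, modinfo.eeprom_len);

	/* Step 2: read the whole image the driver just described */
	eeprom = calloc(1, sizeof(*eeprom) + modinfo.eeprom_len);
	if (!eeprom)
		return 1;
	eeprom->cmd = ETHTOOL_GMODULEEEPROM;
	eeprom->offset = 0;
	eeprom->len = modinfo.eeprom_len;
	ifr.ifr_data = (void *)eeprom;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GMODULEEEPROM");
		return 1;
	}

	/* Dump the first 16 bytes of the SFF-8079 identification area */
	for (i = 0; i < 16 && i < eeprom->len; i++)
		printf("%02x ", eeprom->data[i]);
	printf("\n");

	free(eeprom);
	close(fd);
	return 0;
}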
@@ -751,18 +751,17 @@ static int ethtool_get_link(struct net_device *dev, char __user *useraddr)
	return 0;
}

static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr,
				  int (*getter)(struct net_device *,
						struct ethtool_eeprom *, u8 *),
				  u32 total_len)
{
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = dev->ethtool_ops;
	void __user *userbuf = useraddr + sizeof(eeprom);
	u32 bytes_remaining;
	u8 *data;
	int ret = 0;

	if (!ops->get_eeprom || !ops->get_eeprom_len)
		return -EOPNOTSUPP;

	if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
		return -EFAULT;

@@ -771,7 +770,7 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
		return -EINVAL;

	/* Check for exceeding total eeprom len */
	if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
	if (eeprom.offset + eeprom.len > total_len)
		return -EINVAL;

	data = kmalloc(PAGE_SIZE, GFP_USER);

@@ -782,7 +781,7 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
	while (bytes_remaining > 0) {
		eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);

		ret = ops->get_eeprom(dev, &eeprom, data);
		ret = getter(dev, &eeprom, data);
		if (ret)
			break;
		if (copy_to_user(userbuf, data, eeprom.len)) {

@@ -803,6 +802,17 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
	return ret;
}

static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
{
	const struct ethtool_ops *ops = dev->ethtool_ops;

	if (!ops->get_eeprom || !ops->get_eeprom_len)
		return -EOPNOTSUPP;

	return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom,
				      ops->get_eeprom_len(dev));
}

static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
{
	struct ethtool_eeprom eeprom;

@@ -1325,6 +1335,47 @@ static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
	return err;
}

static int ethtool_get_module_info(struct net_device *dev,
				   void __user *useraddr)
{
	int ret;
	struct ethtool_modinfo modinfo;
	const struct ethtool_ops *ops = dev->ethtool_ops;

	if (!ops->get_module_info)
		return -EOPNOTSUPP;

	if (copy_from_user(&modinfo, useraddr, sizeof(modinfo)))
		return -EFAULT;

	ret = ops->get_module_info(dev, &modinfo);
	if (ret)
		return ret;

	if (copy_to_user(useraddr, &modinfo, sizeof(modinfo)))
		return -EFAULT;

	return 0;
}

static int ethtool_get_module_eeprom(struct net_device *dev,
				     void __user *useraddr)
{
	int ret;
	struct ethtool_modinfo modinfo;
	const struct ethtool_ops *ops = dev->ethtool_ops;

	if (!ops->get_module_info || !ops->get_module_eeprom)
		return -EOPNOTSUPP;

	ret = ops->get_module_info(dev, &modinfo);
	if (ret)
		return ret;

	return ethtool_get_any_eeprom(dev, useraddr, ops->get_module_eeprom,
				      modinfo.eeprom_len);
}

/* The main entry point in this file. Called from net/core/dev.c */

int dev_ethtool(struct net *net, struct ifreq *ifr)

@@ -1549,6 +1600,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
	case ETHTOOL_GET_TS_INFO:
		rc = ethtool_get_ts_info(dev, useraddr);
		break;
	case ETHTOOL_GMODULEINFO:
		rc = ethtool_get_module_info(dev, useraddr);
		break;
	case ETHTOOL_GMODULEEEPROM:
		rc = ethtool_get_module_eeprom(dev, useraddr);
		break;
	default:
		rc = -EOPNOTSUPP;
	}