Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Merge 'net' into 'net-next' to get the AF_PACKET bug fix that
Daniel's direct transmit changes depend upon.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2013-12-09 20:20:14 -05:00
commit 34f9f43710
37 changed files with 310 additions and 204 deletions


@@ -4,7 +4,7 @@ This file provides information, what the device node
 for the davinci_emac interface contains.
 
 Required properties:
-- compatible: "ti,davinci-dm6467-emac";
+- compatible: "ti,davinci-dm6467-emac" or "ti,am3517-emac"
 - reg: Offset and length of the register set for the device
 - ti,davinci-ctrl-reg-offset: offset to control register
 - ti,davinci-ctrl-mod-reg-offset: offset to control module register


@@ -123,6 +123,16 @@ Transmission process is similar to capture as shown below.
 [shutdown]  close() --------> destruction of the transmission socket and
                               deallocation of all associated resources.
 
+Socket creation and destruction is also straight forward, and is done
+the same way as in capturing described in the previous paragraph:
+
+ int fd = socket(PF_PACKET, mode, 0);
+
+The protocol can optionally be 0 in case we only want to transmit
+via this socket, which avoids an expensive call to packet_rcv().
+In this case, you also need to bind(2) the TX_RING with sll_protocol = 0
+set. Otherwise, htons(ETH_P_ALL) or any other protocol, for example.
+
 Binding the socket to your network interface is mandatory (with zero copy) to
 know the header size of frames used in the circular buffer.
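
The following user-space sketch ties the documentation text above together: a TX-only
PF_PACKET socket created with protocol 0 and then bound with sll_protocol = 0. It is
an illustration, not part of the patch; the interface name "eth0" and the error
handling are assumptions made for the example.

    /* TX-only PF_PACKET socket sketch (illustrative, not from the patch).
     * Protocol 0 at socket() time and sll_protocol = 0 in bind() keep
     * packet_rcv() out of the picture, as described above.
     * The interface name "eth0" is an assumption for the example.
     */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_packet.h>
    #include <net/if.h>

    int main(void)
    {
            int fd = socket(PF_PACKET, SOCK_RAW, 0);   /* 0: transmit only */
            if (fd < 0) {
                    perror("socket");
                    return 1;
            }

            struct sockaddr_ll ll;
            memset(&ll, 0, sizeof(ll));
            ll.sll_family   = AF_PACKET;
            ll.sll_protocol = 0;                       /* matches the 0 used above */
            ll.sll_ifindex  = if_nametoindex("eth0");

            if (bind(fd, (struct sockaddr *)&ll, sizeof(ll)) < 0) {
                    perror("bind");
                    close(fd);
                    return 1;
            }

            /* PACKET_TX_RING setup and transmission would follow here. */

            close(fd);
            return 0;
    }

From this point the PACKET_TX_RING setsockopt(), the mmap() of the ring and the actual
transmission follow the steps already described for capture earlier in this document.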


@@ -4450,10 +4450,8 @@ M: Bruce Allan <bruce.w.allan@intel.com>
 M: Carolyn Wyborny <carolyn.wyborny@intel.com>
 M: Don Skidmore <donald.c.skidmore@intel.com>
 M: Greg Rose <gregory.v.rose@intel.com>
-M: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
 M: Alex Duyck <alexander.h.duyck@intel.com>
 M: John Ronciak <john.ronciak@intel.com>
-M: Tushar Dave <tushar.n.dave@intel.com>
 L: e1000-devel@lists.sourceforge.net
 W: http://www.intel.com/support/feedback.htm
 W: http://e1000.sourceforge.net/


@@ -4199,9 +4199,9 @@ static int bond_check_params(struct bond_params *params)
          (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
         /* not complete check, but should be good enough to
            catch mistakes */
-        __be32 ip = in_aton(arp_ip_target[i]);
-        if (!isdigit(arp_ip_target[i][0]) || ip == 0 ||
-            ip == htonl(INADDR_BROADCAST)) {
+        __be32 ip;
+        if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
+            IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
             pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
                    arp_ip_target[i]);
             arp_interval = 0;


@@ -1634,12 +1634,12 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
                           char *buf)
 {
     struct bonding *bond = to_bond(d);
-    int packets_per_slave = bond->params.packets_per_slave;
+    unsigned int packets_per_slave = bond->params.packets_per_slave;
 
     if (packets_per_slave > 1)
         packets_per_slave = reciprocal_value(packets_per_slave);
 
-    return sprintf(buf, "%d\n", packets_per_slave);
+    return sprintf(buf, "%u\n", packets_per_slave);
 }
 
 static ssize_t bonding_store_packets_per_slave(struct device *d,


@@ -3114,6 +3114,11 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
 {
     struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
 
+    if (!IS_SRIOV(bp)) {
+        BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
+        return -EINVAL;
+    }
+
     DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
        num_vfs_param, BNX2X_NR_VIRTFN(bp));


@@ -64,6 +64,9 @@
 #define SLIPORT_ERROR_NO_RESOURCE1  0x2
 #define SLIPORT_ERROR_NO_RESOURCE2  0x9
 
+#define SLIPORT_ERROR_FW_RESET1     0x2
+#define SLIPORT_ERROR_FW_RESET2     0x0
+
 /********* Memory BAR register ************/
 #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET  0xfc
 /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt


@@ -2464,8 +2464,16 @@ void be_detect_error(struct be_adapter *adapter)
      */
     if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
         adapter->hw_error = true;
-        dev_err(&adapter->pdev->dev,
-            "Error detected in the card\n");
+        /* Do not log error messages if its a FW reset */
+        if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
+            sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
+            dev_info(&adapter->pdev->dev,
+                 "Firmware update in progress\n");
+            return;
+        } else {
+            dev_err(&adapter->pdev->dev,
+                "Error detected in the card\n");
+        }
     }
 
     if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
@@ -2932,28 +2940,35 @@ static void be_cancel_worker(struct be_adapter *adapter)
     }
 }
 
-static int be_clear(struct be_adapter *adapter)
+static void be_mac_clear(struct be_adapter *adapter)
 {
     int i;
 
+    if (adapter->pmac_id) {
+        for (i = 0; i < (adapter->uc_macs + 1); i++)
+            be_cmd_pmac_del(adapter, adapter->if_handle,
+                    adapter->pmac_id[i], 0);
+        adapter->uc_macs = 0;
+
+        kfree(adapter->pmac_id);
+        adapter->pmac_id = NULL;
+    }
+}
+
+static int be_clear(struct be_adapter *adapter)
+{
     be_cancel_worker(adapter);
 
     if (sriov_enabled(adapter))
         be_vf_clear(adapter);
 
     /* delete the primary mac along with the uc-mac list */
-    for (i = 0; i < (adapter->uc_macs + 1); i++)
-        be_cmd_pmac_del(adapter, adapter->if_handle,
-                adapter->pmac_id[i], 0);
-    adapter->uc_macs = 0;
+    be_mac_clear(adapter);
 
     be_cmd_if_destroy(adapter, adapter->if_handle, 0);
 
     be_clear_queues(adapter);
 
-    kfree(adapter->pmac_id);
-    adapter->pmac_id = NULL;
-
     be_msix_disable(adapter);
     return 0;
 }
@@ -3812,6 +3827,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
     }
 
     if (change_status == LANCER_FW_RESET_NEEDED) {
+        dev_info(&adapter->pdev->dev,
+             "Resetting adapter to activate new FW\n");
         status = lancer_physdev_ctrl(adapter,
                          PHYSDEV_CONTROL_FW_RESET_MASK);
         if (status) {
@@ -4363,13 +4380,13 @@ static int lancer_recover_func(struct be_adapter *adapter)
             goto err;
     }
 
-    dev_err(dev, "Error recovery successful\n");
+    dev_err(dev, "Adapter recovery successful\n");
     return 0;
 err:
     if (status == -EAGAIN)
         dev_err(dev, "Waiting for resource provisioning\n");
     else
-        dev_err(dev, "Error recovery failed\n");
+        dev_err(dev, "Adapter recovery failed\n");
 
     return status;
 }


@@ -1378,7 +1378,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 
         dev_kfree_skb_any(skb);
         dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
-                 rx_desc->data_size, DMA_FROM_DEVICE);
+                 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
     }
 
     if (rx_done)
@@ -1424,7 +1424,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
         }
 
         dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
-                 rx_desc->data_size, DMA_FROM_DEVICE);
+                 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
 
         rx_bytes = rx_desc->data_size -
                (ETH_FCS_LEN + MVNETA_MH_SIZE);


@@ -5149,8 +5149,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 {
     struct fe_priv *np = netdev_priv(dev);
     u8 __iomem *base = get_hwbase(dev);
-    int result;
-    memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
+    int result, count;
+
+    count = nv_get_sset_count(dev, ETH_SS_TEST);
+    memset(buffer, 0, count * sizeof(u64));
 
     if (!nv_link_test(dev)) {
         test->flags |= ETH_TEST_FL_FAILED;
@@ -5194,7 +5196,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
             return;
         }
 
-        if (!nv_loopback_test(dev)) {
+        if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
             test->flags |= ETH_TEST_FL_FAILED;
             buffer[3] = 1;
         }


@@ -18,7 +18,7 @@
  */
 #define DRV_NAME    "qlge"
 #define DRV_STRING  "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "1.00.00.33"
+#define DRV_VERSION "1.00.00.34"
 
 #define WQ_ADDR_ALIGN   0x3 /* 4 byte alignment */


@@ -181,6 +181,7 @@ static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
 };
 #define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
 #define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
+#define QLGE_RCV_MAC_ERR_STATS 7
 
 static int ql_update_ring_coalescing(struct ql_adapter *qdev)
 {
@@ -280,6 +281,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
             iter++;
     }
 
+    /* Update receive mac error statistics */
+    iter += QLGE_RCV_MAC_ERR_STATS;
+
     /*
      * Get Per-priority TX pause frame counter statistics.
      */


@@ -2376,14 +2376,6 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
     netdev_features_t features)
 {
     int err;
-    /*
-     * Since there is no support for separate rx/tx vlan accel
-     * enable/disable make sure tx flag is always in same state as rx.
-     */
-    if (features & NETIF_F_HW_VLAN_CTAG_RX)
-        features |= NETIF_F_HW_VLAN_CTAG_TX;
-    else
-        features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 
     /* Update the behavior of vlan accel in the adapter */
     err = qlge_update_hw_vlan_features(ndev, features);


@@ -61,6 +61,7 @@
 #include <linux/davinci_emac.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/of_net.h>
@@ -1752,10 +1753,14 @@ static const struct net_device_ops emac_netdev_ops = {
 #endif
 };
 
+static const struct of_device_id davinci_emac_of_match[];
+
 static struct emac_platform_data *
 davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
 {
     struct device_node *np;
+    const struct of_device_id *match;
+    const struct emac_platform_data *auxdata;
     struct emac_platform_data *pdata = NULL;
     const u8 *mac_addr;
@@ -1793,7 +1798,20 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
 
     priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
     if (!priv->phy_node)
-        pdata->phy_id = "";
+        pdata->phy_id = NULL;
+
+    auxdata = pdev->dev.platform_data;
+    if (auxdata) {
+        pdata->interrupt_enable = auxdata->interrupt_enable;
+        pdata->interrupt_disable = auxdata->interrupt_disable;
+    }
+
+    match = of_match_device(davinci_emac_of_match, &pdev->dev);
+    if (match && match->data) {
+        auxdata = match->data;
+        pdata->version = auxdata->version;
+        pdata->hw_ram_addr = auxdata->hw_ram_addr;
+    }
 
     pdev->dev.platform_data = pdata;
@@ -2020,8 +2038,14 @@ static const struct dev_pm_ops davinci_emac_pm_ops = {
 };
 
 #if IS_ENABLED(CONFIG_OF)
+static const struct emac_platform_data am3517_emac_data = {
+    .version        = EMAC_VERSION_2,
+    .hw_ram_addr    = 0x01e20000,
+};
+
 static const struct of_device_id davinci_emac_of_match[] = {
     {.compatible = "ti,davinci-dm6467-emac", },
+    {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, },
     {},
 };
 MODULE_DEVICE_TABLE(of, davinci_emac_of_match);


@@ -872,6 +872,8 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
     ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK);
     ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
+    if (ret > 0)
+        iocb->ki_pos = ret;
 out:
     return ret;
 }


@@ -1355,6 +1355,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
     ret = tun_do_read(tun, tfile, iv, len,
               file->f_flags & O_NONBLOCK);
     ret = min_t(ssize_t, ret, len);
+    if (ret > 0)
+        iocb->ki_pos = ret;
 out:
     tun_put(tun);
     return ret;


@@ -425,10 +425,10 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
     if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
         pr_debug("%s: short packet %i\n", dev->name, len);
         dev->stats.rx_length_errors++;
-        if (vi->big_packets)
-            give_pages(rq, buf);
-        else if (vi->mergeable_rx_bufs)
+        if (vi->mergeable_rx_bufs)
             put_page(virt_to_head_page(buf));
+        else if (vi->big_packets)
+            give_pages(rq, buf);
         else
             dev_kfree_skb(buf);
         return;
@@ -1366,6 +1366,11 @@ static void virtnet_config_changed(struct virtio_device *vdev)
 
 static void virtnet_free_queues(struct virtnet_info *vi)
 {
+    int i;
+
+    for (i = 0; i < vi->max_queue_pairs; i++)
+        netif_napi_del(&vi->rq[i].napi);
+
     kfree(vi->rq);
     kfree(vi->sq);
 }
@@ -1395,10 +1400,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
         struct virtqueue *vq = vi->rq[i].vq;
 
         while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-            if (vi->big_packets)
-                give_pages(&vi->rq[i], buf);
-            else if (vi->mergeable_rx_bufs)
+            if (vi->mergeable_rx_bufs)
                 put_page(virt_to_head_page(buf));
+            else if (vi->big_packets)
+                give_pages(&vi->rq[i], buf);
             else
                 dev_kfree_skb(buf);
             --vi->rq[i].num;


@@ -1149,49 +1149,72 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
     return 0;
 }
 
-static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len)
+static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len,
+                  unsigned int max)
 {
-    if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) {
-        /* If we need to pullup then pullup to the max, so we
-         * won't need to do it again.
-         */
-        int target = min_t(int, skb->len, MAX_TCP_HEADER);
-        __pskb_pull_tail(skb, target - skb_headlen(skb));
-    }
+    if (skb_headlen(skb) >= len)
+        return 0;
+
+    /* If we need to pullup then pullup to the max, so we
+     * won't need to do it again.
+     */
+    if (max > skb->len)
+        max = skb->len;
+
+    if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
+        return -ENOMEM;
+
+    if (skb_headlen(skb) < len)
+        return -EPROTO;
+
+    return 0;
 }
 
+/* This value should be large enough to cover a tagged ethernet header plus
+ * maximally sized IP and TCP or UDP headers.
+ */
+#define MAX_IP_HDR_LEN 128
+
 static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
                  int recalculate_partial_csum)
 {
-    struct iphdr *iph = (void *)skb->data;
-    unsigned int header_size;
     unsigned int off;
-    int err = -EPROTO;
+    bool fragment;
+    int err;
 
-    off = sizeof(struct iphdr);
+    fragment = false;
 
-    header_size = skb->network_header + off + MAX_IPOPTLEN;
-    maybe_pull_tail(skb, header_size);
+    err = maybe_pull_tail(skb,
+                  sizeof(struct iphdr),
+                  MAX_IP_HDR_LEN);
+    if (err < 0)
+        goto out;
 
-    off = iph->ihl * 4;
+    if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
+        fragment = true;
 
-    switch (iph->protocol) {
+    off = ip_hdrlen(skb);
+
+    err = -EPROTO;
+
+    switch (ip_hdr(skb)->protocol) {
     case IPPROTO_TCP:
         if (!skb_partial_csum_set(skb, off,
                       offsetof(struct tcphdr, check)))
             goto out;
 
         if (recalculate_partial_csum) {
-            struct tcphdr *tcph = tcp_hdr(skb);
+            err = maybe_pull_tail(skb,
+                          off + sizeof(struct tcphdr),
+                          MAX_IP_HDR_LEN);
+            if (err < 0)
+                goto out;
 
-            header_size = skb->network_header +
-                off +
-                sizeof(struct tcphdr);
-            maybe_pull_tail(skb, header_size);
-
-            tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-                             skb->len - off,
-                             IPPROTO_TCP, 0);
+            tcp_hdr(skb)->check =
+                ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+                           ip_hdr(skb)->daddr,
+                           skb->len - off,
+                           IPPROTO_TCP, 0);
         }
         break;
     case IPPROTO_UDP:
@@ -1200,24 +1223,20 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
             goto out;
 
         if (recalculate_partial_csum) {
-            struct udphdr *udph = udp_hdr(skb);
+            err = maybe_pull_tail(skb,
+                          off + sizeof(struct udphdr),
+                          MAX_IP_HDR_LEN);
+            if (err < 0)
+                goto out;
 
-            header_size = skb->network_header +
-                off +
-                sizeof(struct udphdr);
-            maybe_pull_tail(skb, header_size);
-
-            udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-                             skb->len - off,
-                             IPPROTO_UDP, 0);
+            udp_hdr(skb)->check =
+                ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+                           ip_hdr(skb)->daddr,
+                           skb->len - off,
+                           IPPROTO_UDP, 0);
         }
         break;
     default:
-        if (net_ratelimit())
-            netdev_err(vif->dev,
-                   "Attempting to checksum a non-TCP/UDP packet, "
-                   "dropping a protocol %d packet\n",
-                   iph->protocol);
         goto out;
     }
@@ -1227,75 +1246,99 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
     return err;
 }
 
+/* This value should be large enough to cover a tagged ethernet header plus
+ * an IPv6 header, all options, and a maximal TCP or UDP header.
+ */
+#define MAX_IPV6_HDR_LEN 256
+
+#define OPT_HDR(type, skb, off) \
+    (type *)(skb_network_header(skb) + (off))
+
 static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
                    int recalculate_partial_csum)
 {
-    int err = -EPROTO;
-    struct ipv6hdr *ipv6h = (void *)skb->data;
+    int err;
     u8 nexthdr;
-    unsigned int header_size;
     unsigned int off;
+    unsigned int len;
     bool fragment;
     bool done;
 
+    fragment = false;
     done = false;
 
     off = sizeof(struct ipv6hdr);
 
-    header_size = skb->network_header + off;
-    maybe_pull_tail(skb, header_size);
+    err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
+    if (err < 0)
+        goto out;
 
-    nexthdr = ipv6h->nexthdr;
+    nexthdr = ipv6_hdr(skb)->nexthdr;
 
-    while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) &&
-           !done) {
+    len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
+    while (off <= len && !done) {
         switch (nexthdr) {
         case IPPROTO_DSTOPTS:
         case IPPROTO_HOPOPTS:
         case IPPROTO_ROUTING: {
-            struct ipv6_opt_hdr *hp = (void *)(skb->data + off);
+            struct ipv6_opt_hdr *hp;
 
-            header_size = skb->network_header +
-                off +
-                sizeof(struct ipv6_opt_hdr);
-            maybe_pull_tail(skb, header_size);
+            err = maybe_pull_tail(skb,
+                          off +
+                          sizeof(struct ipv6_opt_hdr),
+                          MAX_IPV6_HDR_LEN);
+            if (err < 0)
+                goto out;
 
+            hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
             nexthdr = hp->nexthdr;
             off += ipv6_optlen(hp);
             break;
         }
         case IPPROTO_AH: {
-            struct ip_auth_hdr *hp = (void *)(skb->data + off);
+            struct ip_auth_hdr *hp;
 
-            header_size = skb->network_header +
-                off +
-                sizeof(struct ip_auth_hdr);
-            maybe_pull_tail(skb, header_size);
+            err = maybe_pull_tail(skb,
+                          off +
+                          sizeof(struct ip_auth_hdr),
+                          MAX_IPV6_HDR_LEN);
+            if (err < 0)
+                goto out;
 
+            hp = OPT_HDR(struct ip_auth_hdr, skb, off);
             nexthdr = hp->nexthdr;
-            off += (hp->hdrlen+2)<<2;
+            off += ipv6_authlen(hp);
+            break;
+        }
+        case IPPROTO_FRAGMENT: {
+            struct frag_hdr *hp;
+
+            err = maybe_pull_tail(skb,
+                          off +
+                          sizeof(struct frag_hdr),
+                          MAX_IPV6_HDR_LEN);
+            if (err < 0)
+                goto out;
+
+            hp = OPT_HDR(struct frag_hdr, skb, off);
+
+            if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
+                fragment = true;
+
+            nexthdr = hp->nexthdr;
+            off += sizeof(struct frag_hdr);
             break;
         }
-        case IPPROTO_FRAGMENT:
-            fragment = true;
-            /* fall through */
         default:
            done = true;
            break;
        }
     }
 
-    if (!done) {
-        if (net_ratelimit())
-            netdev_err(vif->dev, "Failed to parse packet header\n");
-        goto out;
-    }
+    err = -EPROTO;
 
-    if (fragment) {
-        if (net_ratelimit())
-            netdev_err(vif->dev, "Packet is a fragment!\n");
+    if (!done || fragment)
         goto out;
-    }
 
     switch (nexthdr) {
     case IPPROTO_TCP:
@@ -1304,17 +1347,17 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
             goto out;
 
         if (recalculate_partial_csum) {
-            struct tcphdr *tcph = tcp_hdr(skb);
+            err = maybe_pull_tail(skb,
+                          off + sizeof(struct tcphdr),
+                          MAX_IPV6_HDR_LEN);
+            if (err < 0)
+                goto out;
 
-            header_size = skb->network_header +
-                off +
-                sizeof(struct tcphdr);
-            maybe_pull_tail(skb, header_size);
-
-            tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
-                           &ipv6h->daddr,
-                           skb->len - off,
-                           IPPROTO_TCP, 0);
+            tcp_hdr(skb)->check =
+                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                         &ipv6_hdr(skb)->daddr,
+                         skb->len - off,
+                         IPPROTO_TCP, 0);
         }
         break;
     case IPPROTO_UDP:
@@ -1323,25 +1366,20 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
             goto out;
 
         if (recalculate_partial_csum) {
-            struct udphdr *udph = udp_hdr(skb);
+            err = maybe_pull_tail(skb,
+                          off + sizeof(struct udphdr),
+                          MAX_IPV6_HDR_LEN);
+            if (err < 0)
+                goto out;
 
-            header_size = skb->network_header +
-                off +
-                sizeof(struct udphdr);
-            maybe_pull_tail(skb, header_size);
-
-            udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
-                           &ipv6h->daddr,
-                           skb->len - off,
-                           IPPROTO_UDP, 0);
+            udp_hdr(skb)->check =
+                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                         &ipv6_hdr(skb)->daddr,
+                         skb->len - off,
+                         IPPROTO_UDP, 0);
         }
         break;
     default:
-        if (net_ratelimit())
-            netdev_err(vif->dev,
-                   "Attempting to checksum a non-TCP/UDP packet, "
-                   "dropping a protocol %d packet\n",
-                   nexthdr);
         goto out;
     }


@@ -4,6 +4,7 @@
 #include <uapi/linux/ipv6.h>
 
 #define ipv6_optlen(p)  (((p)->hdrlen+1) << 3)
+#define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
 /*
  * This structure contains configuration options per IPv6 link.
  */


@@ -110,7 +110,8 @@ struct frag_hdr {
     __be32  identification;
 };
 
 #define IP6_MF      0x0001
+#define IP6_OFFSET  0xFFF8
 
 #include <net/sock.h>


@@ -1035,7 +1035,6 @@ enum cg_proto_flags {
 };
 
 struct cg_proto {
-    void            (*enter_memory_pressure)(struct sock *sk);
     struct res_counter      memory_allocated;   /* Current allocated memory. */
     struct percpu_counter   sockets_allocated;  /* Current number of sockets. */
     int                     memory_pressure;
@@ -1155,8 +1154,7 @@ static inline void sk_leave_memory_pressure(struct sock *sk)
         struct proto *prot = sk->sk_prot;
 
         for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-            if (cg_proto->memory_pressure)
-                cg_proto->memory_pressure = 0;
+            cg_proto->memory_pressure = 0;
     }
 }
@@ -1171,7 +1169,7 @@ static inline void sk_enter_memory_pressure(struct sock *sk)
         struct proto *prot = sk->sk_prot;
 
         for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-            cg_proto->enter_memory_pressure(sk);
+            cg_proto->memory_pressure = 1;
     }
 
     sk->sk_prot->enter_memory_pressure(sk);


@@ -426,6 +426,16 @@ netdev_features_t br_features_recompute(struct net_bridge *br,
 int br_handle_frame_finish(struct sk_buff *skb);
 rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
 
+static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
+{
+    return rcu_dereference(dev->rx_handler) == br_handle_frame;
+}
+
+static inline struct net_bridge_port *br_port_get_check_rcu(const struct net_device *dev)
+{
+    return br_rx_handler_check_rcu(dev) ? br_port_get_rcu(dev) : NULL;
+}
+
 /* br_ioctl.c */
 int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,


@@ -153,7 +153,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
     if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
         goto err;
 
-    p = br_port_get_rcu(dev);
+    p = br_port_get_check_rcu(dev);
     if (!p)
         goto err;


@@ -3584,6 +3584,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
     skb->tstamp.tv64 = 0;
     skb->pkt_type = PACKET_HOST;
     skb->skb_iif = 0;
+    skb->local_df = 0;
     skb_dst_drop(skb);
     skb->mark = 0;
     secpath_reset(skb);


@@ -6,13 +6,6 @@
 #include <linux/memcontrol.h>
 #include <linux/module.h>
 
-static void memcg_tcp_enter_memory_pressure(struct sock *sk)
-{
-    if (sk->sk_cgrp->memory_pressure)
-        sk->sk_cgrp->memory_pressure = 1;
-}
-EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
-
 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
     /*


@@ -237,6 +237,30 @@ struct packet_skb_cb {
 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 static void __fanout_link(struct sock *sk, struct packet_sock *po);
 
+static struct net_device *packet_cached_dev_get(struct packet_sock *po)
+{
+    struct net_device *dev;
+
+    rcu_read_lock();
+    dev = rcu_dereference(po->cached_dev);
+    if (likely(dev))
+        dev_hold(dev);
+    rcu_read_unlock();
+
+    return dev;
+}
+
+static void packet_cached_dev_assign(struct packet_sock *po,
+                     struct net_device *dev)
+{
+    rcu_assign_pointer(po->cached_dev, dev);
+}
+
+static void packet_cached_dev_reset(struct packet_sock *po)
+{
+    RCU_INIT_POINTER(po->cached_dev, NULL);
+}
+
 /* register_prot_hook must be invoked with the po->bind_lock held,
  * or from a context in which asynchronous accesses to the packet
  * socket is not possible (packet_create()).
@@ -246,12 +270,10 @@ static void register_prot_hook(struct sock *sk)
     struct packet_sock *po = pkt_sk(sk);
 
     if (!po->running) {
-        if (po->fanout) {
+        if (po->fanout)
             __fanout_link(sk, po);
-        } else {
+        else
             dev_add_pack(&po->prot_hook);
-            rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
-        }
+
         sock_hold(sk);
         po->running = 1;
@@ -270,12 +292,11 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
     struct packet_sock *po = pkt_sk(sk);
 
     po->running = 0;
-    if (po->fanout) {
+
+    if (po->fanout)
         __fanout_unlink(sk, po);
-    } else {
+    else
         __dev_remove_pack(&po->prot_hook);
-        RCU_INIT_POINTER(po->cached_dev, NULL);
-    }
 
     __sock_put(sk);
@@ -2061,19 +2082,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
     return tp_len;
 }
 
-static struct net_device *packet_cached_dev_get(struct packet_sock *po)
-{
-    struct net_device *dev;
-
-    rcu_read_lock();
-    dev = rcu_dereference(po->cached_dev);
-    if (dev)
-        dev_hold(dev);
-    rcu_read_unlock();
-
-    return dev;
-}
-
 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 {
     struct sk_buff *skb;
@@ -2090,7 +2098,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 
     mutex_lock(&po->pg_vec_lock);
 
-    if (saddr == NULL) {
+    if (likely(saddr == NULL)) {
         dev = packet_cached_dev_get(po);
         proto = po->num;
         addr = NULL;
@@ -2244,7 +2252,7 @@ static int packet_snd(struct socket *sock,
      *  Get and verify the address.
      */
 
-    if (saddr == NULL) {
+    if (likely(saddr == NULL)) {
         dev = packet_cached_dev_get(po);
         proto = po->num;
         addr = NULL;
@@ -2453,6 +2461,8 @@ static int packet_release(struct socket *sock)
 
     spin_lock(&po->bind_lock);
     unregister_prot_hook(sk, false);
+    packet_cached_dev_reset(po);
+
     if (po->prot_hook.dev) {
         dev_put(po->prot_hook.dev);
         po->prot_hook.dev = NULL;
@@ -2508,14 +2518,17 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
     spin_lock(&po->bind_lock);
     unregister_prot_hook(sk, true);
+
     po->num = protocol;
     po->prot_hook.type = protocol;
     if (po->prot_hook.dev)
         dev_put(po->prot_hook.dev);
-    po->prot_hook.dev = dev;
 
+    po->prot_hook.dev = dev;
     po->ifindex = dev ? dev->ifindex : 0;
+    packet_cached_dev_assign(po, dev);
 
     if (protocol == 0)
         goto out_unlock;
@@ -2628,7 +2641,8 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
     po = pkt_sk(sk);
     sk->sk_family = PF_PACKET;
     po->num = proto;
-    RCU_INIT_POINTER(po->cached_dev, NULL);
+
+    packet_cached_dev_reset(po);
 
     sk->sk_destruct = packet_sock_destruct;
     sk_refcnt_debug_inc(sk);
@@ -3339,6 +3353,7 @@ static int packet_notifier(struct notifier_block *this,
                 sk->sk_error_report(sk);
             }
             if (msg == NETDEV_UNREGISTER) {
+                packet_cached_dev_reset(po);
                 po->ifindex = -1;
                 if (po->prot_hook.dev)
                     dev_put(po->prot_hook.dev);


@@ -270,6 +270,16 @@ int tcf_register_action(struct tc_action_ops *act)
 {
     struct tc_action_ops *a, **ap;
 
+    /* Must supply act, dump, cleanup and init */
+    if (!act->act || !act->dump || !act->cleanup || !act->init)
+        return -EINVAL;
+
+    /* Supply defaults */
+    if (!act->lookup)
+        act->lookup = tcf_hash_search;
+    if (!act->walk)
+        act->walk = tcf_generic_walker;
+
     write_lock(&act_mod_lock);
     for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) {
         if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
@@ -381,7 +391,7 @@ int tcf_action_exec(struct sk_buff *skb, const struct tc_action *act,
     }
     while ((a = act) != NULL) {
 repeat:
-        if (a->ops && a->ops->act) {
+        if (a->ops) {
             ret = a->ops->act(skb, a, res);
             if (TC_MUNGED & skb->tc_verd) {
                 /* copied already, allow trampling */
@@ -405,7 +415,7 @@ void tcf_action_destroy(struct tc_action *act, int bind)
     struct tc_action *a;
 
     for (a = act; a; a = act) {
-        if (a->ops && a->ops->cleanup) {
+        if (a->ops) {
             if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
                 module_put(a->ops->owner);
             act = act->next;
@@ -424,7 +434,7 @@ tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
     int err = -EINVAL;
 
-    if (a->ops == NULL || a->ops->dump == NULL)
+    if (a->ops == NULL)
         return err;
     return a->ops->dump(skb, a, bind, ref);
 }
@@ -436,7 +446,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
     unsigned char *b = skb_tail_pointer(skb);
     struct nlattr *nest;
 
-    if (a->ops == NULL || a->ops->dump == NULL)
+    if (a->ops == NULL)
         return err;
 
     if (nla_put_string(skb, TCA_KIND, a->ops->kind))
@@ -723,8 +733,6 @@ tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
     a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
     if (a->ops == NULL)
         goto err_free;
-    if (a->ops->lookup == NULL)
-        goto err_mod;
     err = -ENOENT;
     if (a->ops->lookup(a, index) == 0)
         goto err_mod;
@@ -1084,12 +1092,6 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
     memset(&a, 0, sizeof(struct tc_action));
     a.ops = a_o;
 
-    if (a_o->walk == NULL) {
-        WARN(1, "tc_dump_action: %s !capable of dumping table\n",
-             a_o->kind);
-        goto out_module_put;
-    }
-
     nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
             cb->nlh->nlmsg_type, sizeof(*t), 0);
     if (!nlh)


@@ -585,9 +585,7 @@ static struct tc_action_ops act_csum_ops = {
     .act        = tcf_csum,
     .dump       = tcf_csum_dump,
     .cleanup    = tcf_csum_cleanup,
-    .lookup     = tcf_hash_search,
     .init       = tcf_csum_init,
-    .walk       = tcf_generic_walker
 };
 
 MODULE_DESCRIPTION("Checksum updating actions");


@@ -206,9 +206,7 @@ static struct tc_action_ops act_gact_ops = {
     .act        = tcf_gact,
     .dump       = tcf_gact_dump,
     .cleanup    = tcf_gact_cleanup,
-    .lookup     = tcf_hash_search,
     .init       = tcf_gact_init,
-    .walk       = tcf_generic_walker
 };
 
 MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");


@@ -298,9 +298,7 @@ static struct tc_action_ops act_ipt_ops = {
     .act        = tcf_ipt,
     .dump       = tcf_ipt_dump,
     .cleanup    = tcf_ipt_cleanup,
-    .lookup     = tcf_hash_search,
     .init       = tcf_ipt_init,
-    .walk       = tcf_generic_walker
 };
 
 static struct tc_action_ops act_xt_ops = {
@@ -312,9 +310,7 @@ static struct tc_action_ops act_xt_ops = {
     .act        = tcf_ipt,
     .dump       = tcf_ipt_dump,
     .cleanup    = tcf_ipt_cleanup,
-    .lookup     = tcf_hash_search,
     .init       = tcf_ipt_init,
-    .walk       = tcf_generic_walker
 };
 
 MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");


@@ -271,9 +271,7 @@ static struct tc_action_ops act_mirred_ops = {
     .act        = tcf_mirred,
     .dump       = tcf_mirred_dump,
     .cleanup    = tcf_mirred_cleanup,
-    .lookup     = tcf_hash_search,
     .init       = tcf_mirred_init,
-    .walk       = tcf_generic_walker
 };
 
 MODULE_AUTHOR("Jamal Hadi Salim(2002)");


@@ -308,9 +308,7 @@ static struct tc_action_ops act_nat_ops = {
     .act        = tcf_nat,
     .dump       = tcf_nat_dump,
     .cleanup    = tcf_nat_cleanup,
-    .lookup     = tcf_hash_search,
     .init       = tcf_nat_init,
-    .walk       = tcf_generic_walker
 };
 
 MODULE_DESCRIPTION("Stateless NAT actions");


@@ -243,9 +243,7 @@ static struct tc_action_ops act_pedit_ops = {
     .act        = tcf_pedit,
     .dump       = tcf_pedit_dump,
     .cleanup    = tcf_pedit_cleanup,
-    .lookup     = tcf_hash_search,
     .init       = tcf_pedit_init,
-    .walk       = tcf_generic_walker
 };
 
 MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");


@@ -407,7 +407,6 @@ static struct tc_action_ops act_police_ops = {
     .act        = tcf_act_police,
     .dump       = tcf_act_police_dump,
     .cleanup    = tcf_act_police_cleanup,
-    .lookup     = tcf_hash_search,
     .init       = tcf_act_police_locate,
     .walk       = tcf_act_police_walker
 };


@@ -201,7 +201,6 @@ static struct tc_action_ops act_simp_ops = {
     .dump       = tcf_simp_dump,
     .cleanup    = tcf_simp_cleanup,
     .init       = tcf_simp_init,
-    .walk       = tcf_generic_walker,
 };
 
 MODULE_AUTHOR("Jamal Hadi Salim(2005)");


@@ -202,7 +202,6 @@ static struct tc_action_ops act_skbedit_ops = {
     .dump       = tcf_skbedit_dump,
     .cleanup    = tcf_skbedit_cleanup,
     .init       = tcf_skbedit_init,
-    .walk       = tcf_generic_walker,
 };
 
 MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");


@@ -572,7 +572,7 @@ void sctp_transport_burst_limited(struct sctp_transport *t)
     u32 old_cwnd = t->cwnd;
     u32 max_burst_bytes;
 
-    if (t->burst_limited)
+    if (t->burst_limited || asoc->max_burst == 0)
         return;
 
     max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);