From: Scott Moser <smoser@redhat.com>
Date: Sun, 14 Oct 2007 13:04:42 -0400
Subject: [net] ibmveth: Checksum offload support
Message-id: Pine.LNX.4.64.0710141303250.3159@squad5-lp1.lab.boston.redhat.com
O-Subject: [PATCH RHEL5u2] bz254035 Checksum offload support for ibmveth
Bugzilla: 254035

Bug 254035 [1]
---------------

Description:
-----------
This patchset enables TCP checksum offload support for IPv4 on ibmveth.
It completely eliminates the generation and checking of the checksum for
packets that are entirely virtual and never touch a physical network. A
simple TCP_STREAM netperf run on a virtual network with the maximum MTU
set yielded a significant increase in throughput. The feature is enabled
by default on systems that support it, but can be disabled with a module
option.

Kernel Version:
--------------
Patch built against 2.6.18-52

Upstream Status:
---------------
These changes were recently accepted into mainline for inclusion in
2.6.24 with the following git commits:
f4ff28720f45354573dcf4e0eb5a2dc5452cb3e1
5fc7e01cb77132f96e171a37f9f792270b1603f6
80e536770c2fcb8d2b7be9f5a36b85c36fd5943a
ddbb4de9672097da2c0f19c6ebca0ebb5672e9b8
3449a2ab31681420515e242920e755262b4f41e9
79ef4a4dd44cd4f9942975b0f625bd01549a2aa9

Test Status:
------------
To ensure a cross-platform build, a brew scratch build has been done
against 2.6.18-52 at [2].

I've tested these changes in the kernel listed above using the script
attached to the bug (254035-nettest). No regressions were found, and
netperf tests with checksum offload show a small performance increase
with an MTU of 1500 (~5%) and a significant increase with a large MTU
of 64000 (~40%).

Brian King of IBM has run the LTP test 'networktest.sh' to verify no
regressions.
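
The new ethtool hooks (.get_rx_csum/.set_rx_csum/.set_tx_csum) can also be
exercised directly from user space through the standard SIOCETHTOOL ioctl.
A minimal sketch is below; the interface name is only an example and error
handling is abbreviated:

/* query and toggle RX checksum offload via ETHTOOL_GRXCSUM/ETHTOOL_SRXCSUM */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct ethtool_value eval;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example interface */
	ifr.ifr_data = (char *)&eval;

	/* read the current state (handled by ibmveth_get_rx_csum) */
	eval.cmd = ETHTOOL_GRXCSUM;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx-checksumming: %s\n", eval.data ? "on" : "off");

	/* disable it (ibmveth_set_rx_csum -> h_illan_attributes) */
	eval.cmd = ETHTOOL_SRXCSUM;
	eval.data = 0;
	if (ioctl(fd, SIOCETHTOOL, &ifr) != 0)
		perror("ETHTOOL_SRXCSUM");

	close(fd);
	return 0;
}

The same driver paths are hit by 'ethtool -K <dev> rx off' and
'ethtool -K <dev> tx off'.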

Please review patch below for RHEL5u2

--
[1]:https://bugzilla.redhat.com/show_bug.cgi?id=254035
[2]:http://brewweb.devel.redhat.com/brew/taskinfo?taskID=1004258

Acked-by: Pete Zaitcev <zaitcev@redhat.com>

diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index a6e864e..25b59ae 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -28,7 +28,6 @@
 /**************************************************************************/
 /*
   TODO:
-  - remove frag processing code - no longer needed
   - add support for sysfs
   - possibly remove procfs support
 */
@@ -47,6 +46,8 @@
 #include <linux/mm.h>
 #include <linux/ethtool.h>
 #include <linux/proc_fs.h>
+#include <linux/in.h>
+#include <linux/ip.h>
 #include <asm/semaphore.h>
 #include <asm/hvcall.h>
 #include <asm/atomic.h>
@@ -111,20 +112,49 @@ MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(ibmveth_driver_version);
 
+struct ibmveth_stat {
+	char name[ETH_GSTRING_LEN];
+	int offset;
+};
+
+#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
+#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
+
+struct ibmveth_stat ibmveth_stats[] = {
+	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
+	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
+	{ "replenish_add_buff_failure", IBMVETH_STAT_OFF(replenish_add_buff_failure) },
+	{ "replenish_add_buff_success", IBMVETH_STAT_OFF(replenish_add_buff_success) },
+	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
+	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
+	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
+	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
+};
+
 /* simple methods of getting data from the current rxq entry */
+static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
+{
+	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
+}
+
+static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
+{
+	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT;
+}
+
 static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
 {
-	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].toggle == adapter->rx_queue.toggle);
+	return (ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle);
 }
 
 static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
 {
-	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].valid);
+	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID);
 }
 
 static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
 {
-	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].offset);
+	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK);
 }
 
 static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
@@ -132,6 +162,11 @@ static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
 	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
 }
 
+static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
+{
+	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD);
+}
+
 /* setup the initial settings for a buffer pool */
 static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
 {
@@ -229,9 +264,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 		correlator = ((u64)pool->index << 32) | index;
 		*(u64*)skb->data = correlator;
 
-		desc.desc = 0;
-		desc.fields.valid = 1;
-		desc.fields.length = pool->buff_size;
+		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
 		desc.fields.address = dma_addr;
 
 		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
@@ -372,9 +405,8 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 		return;
 	}
 
-	desc.desc = 0;
-	desc.fields.valid = 1;
-	desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
+	desc.fields.flags_len = IBMVETH_BUF_VALID |
+		adapter->rx_buff_pool[pool].buff_size;
 	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
 
 	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
@@ -525,9 +557,7 @@ static int ibmveth_open(struct net_device *netdev)
 	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
 	mac_address = mac_address >> 16;
 
-	rxq_desc.desc = 0;
-	rxq_desc.fields.valid = 1;
-	rxq_desc.fields.length = adapter->rx_queue.queue_len;
+	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len;
 	rxq_desc.fields.address = adapter->rx_queue.queue_dma;
 
 	ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
@@ -635,12 +665,163 @@ static u32 netdev_get_link(struct net_device *dev) {
 	return 1;
 }
 
+static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
+{
+	struct ibmveth_adapter *adapter = dev->priv;
+
+	if (data)
+		adapter->rx_csum = 1;
+	else {
+		/*
+		 * Since the ibmveth firmware interface does not have the concept of
+		 * separate tx/rx checksum offload enable, if rx checksum is disabled
+		 * we also have to disable tx checksum offload. Once we disable rx
+		 * checksum offload, we are no longer allowed to send tx buffers that
+		 * are not properly checksummed.
+		 */
+		adapter->rx_csum = 0;
+		dev->features &= ~NETIF_F_IP_CSUM;
+	}
+}
+
+static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
+{
+	struct ibmveth_adapter *adapter = dev->priv;
+
+	if (data) {
+		dev->features |= NETIF_F_IP_CSUM;
+		adapter->rx_csum = 1;
+	} else
+		dev->features &= ~NETIF_F_IP_CSUM;
+}
+
+static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
+				    void (*done) (struct net_device *, u32))
+{
+	struct ibmveth_adapter *adapter = dev->priv;
+	u64 set_attr, clr_attr, ret_attr;
+	long ret;
+	int rc1 = 0, rc2 = 0;
+	int restart = 0;
+
+	if (netif_running(dev)) {
+		restart = 1;
+		adapter->pool_config = 1;
+		ibmveth_close(dev);
+		adapter->pool_config = 0;
+	}
+
+	set_attr = 0;
+	clr_attr = 0;
+
+	if (data)
+		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
+	else
+		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
+
+	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
+	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
+	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
+		ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+					 set_attr, &ret_attr);
+
+		if (ret != H_SUCCESS) {
+			rc1 = -EIO;
+			ibmveth_error_printk("unable to change checksum offload settings."
+					     " %d rc=%ld\n", data, ret);
+
+			ret = h_illan_attributes(adapter->vdev->unit_address,
+						 set_attr, clr_attr, &ret_attr);
+		} else
+			done(dev, data);
+	} else {
+		rc1 = -EIO;
+		ibmveth_error_printk("unable to change checksum offload settings."
+				     " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr);
+	}
+
+	if (restart)
+		rc2 = ibmveth_open(dev);
+
+	return rc1 ? rc1 : rc2;
+}
+
+static int ibmveth_set_rx_csum(struct net_device *dev, u32 data)
+{
+	struct ibmveth_adapter *adapter = dev->priv;
+
+	if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum))
+		return 0;
+
+	return ibmveth_set_csum_offload(dev, data, ibmveth_set_rx_csum_flags);
+}
+
+static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
+{
+	struct ibmveth_adapter *adapter = dev->priv;
+	int rc = 0;
+
+	if (data && (dev->features & NETIF_F_IP_CSUM))
+		return 0;
+	if (!data && !(dev->features & NETIF_F_IP_CSUM))
+		return 0;
+
+	if (data && !adapter->rx_csum)
+		rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags);
+	else
+		ibmveth_set_tx_csum_flags(dev, data);
+
+	return rc;
+}
+
+static u32 ibmveth_get_rx_csum(struct net_device *dev)
+{
+	struct ibmveth_adapter *adapter = dev->priv;
+	return adapter->rx_csum;
+}
+
+static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	int i;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
+		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
+}
+
+static int ibmveth_get_stats_count(struct net_device *dev)
+{
+	return ARRAY_SIZE(ibmveth_stats);
+}
+
+static void ibmveth_get_ethtool_stats(struct net_device *dev,
+				      struct ethtool_stats *stats, u64 *data)
+{
+	int i;
+	struct ibmveth_adapter *adapter = dev->priv;
+
+	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
+		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
+}
+
 static struct ethtool_ops netdev_ethtool_ops = {
 	.get_drvinfo = netdev_get_drvinfo,
 	.get_settings = netdev_get_settings,
 	.get_link = netdev_get_link,
 	.get_sg = ethtool_op_get_sg,
 	.get_tx_csum = ethtool_op_get_tx_csum,
+	.set_tx_csum = ibmveth_set_tx_csum,
+	.get_rx_csum = ibmveth_get_rx_csum,
+	.set_rx_csum = ibmveth_set_rx_csum,
+	.get_tso = ethtool_op_get_tso,
+	.get_ufo = ethtool_op_get_ufo,
+	.get_strings = ibmveth_get_strings,
+	.get_stats_count = ibmveth_get_stats_count,
+	.get_ethtool_stats = ibmveth_get_ethtool_stats,
 };
 
 static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -653,9 +834,8 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct ibmveth_adapter *adapter = netdev->priv;
-	union ibmveth_buf_desc desc[IbmVethMaxSendFrags];
+	union ibmveth_buf_desc desc;
 	unsigned long lpar_rc;
-	int nfrags = 0, curfrag;
 	unsigned long correlator;
 	unsigned long flags;
 	unsigned int retry_count;
@@ -665,82 +845,47 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int tx_send_failed = 0;
 	unsigned int tx_map_failed = 0;
 
+	desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
+	desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
+					     skb->len, DMA_TO_DEVICE);
 
-	if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
+	if (skb->ip_summed == CHECKSUM_HW &&
+	    skb->nh.iph->protocol != IPPROTO_TCP && skb_checksum_help(skb, 0)) {
+		ibmveth_error_printk("tx: failed to checksum packet\n");
 		tx_dropped++;
 		goto out;
 	}
 
-	memset(&desc, 0, sizeof(desc));
-
-	/* nfrags = number of frags after the initial fragment */
-	nfrags = skb_shinfo(skb)->nr_frags;
+	if (skb->ip_summed == CHECKSUM_HW) {
+		unsigned char *buf = skb->h.raw + skb->csum;
 
-	if(nfrags)
-		adapter->tx_multidesc_send++;
+		desc.fields.flags_len |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
 
-	/* map the initial fragment */
-	desc[0].fields.length = nfrags ? skb->len - skb->data_len : skb->len;
-	desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
-					desc[0].fields.length, DMA_TO_DEVICE);
-	desc[0].fields.valid = 1;
+		/* Need to zero out the checksum */
+		buf[0] = 0;
+		buf[1] = 0;
+	}
 
-	if(dma_mapping_error(desc[0].fields.address)) {
-		ibmveth_error_printk("tx: unable to map initial fragment\n");
+	if (dma_mapping_error(desc.fields.address)) {
+		ibmveth_error_printk("tx: unable to map xmit buffer\n");
 		tx_map_failed++;
 		tx_dropped++;
 		goto out;
 	}
 
-	curfrag = nfrags;
-
-	/* map fragments past the initial portion if there are any */
-	while(curfrag--) {
-		skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag];
-		desc[curfrag+1].fields.address
-			= dma_map_single(&adapter->vdev->dev,
-				page_address(frag->page) + frag->page_offset,
-				frag->size, DMA_TO_DEVICE);
-		desc[curfrag+1].fields.length = frag->size;
-		desc[curfrag+1].fields.valid = 1;
-
-		if(dma_mapping_error(desc[curfrag+1].fields.address)) {
-			ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
-			tx_map_failed++;
-			tx_dropped++;
-			/* Free all the mappings we just created */
-			while(curfrag < nfrags) {
-				dma_unmap_single(&adapter->vdev->dev,
-						 desc[curfrag+1].fields.address,
-						 desc[curfrag+1].fields.length,
-						 DMA_TO_DEVICE);
-				curfrag++;
-			}
-			goto out;
-		}
-	}
-
 	/* send the frame. Arbitrarily set retrycount to 1024 */
 	correlator = 0;
 	retry_count = 1024;
 	do {
 		lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
-					     desc[0].desc,
-					     desc[1].desc,
-					     desc[2].desc,
-					     desc[3].desc,
-					     desc[4].desc,
-					     desc[5].desc,
-					     correlator);
+					     desc.desc, 0, 0, 0, 0, 0, correlator);
 	} while ((lpar_rc == H_BUSY) && (retry_count--));
 
 	if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
-		int i;
 		ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
-		for(i = 0; i < 6; i++) {
-			ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i,
-					     desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
-		}
+		ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n",
				     (desc.fields.flags_len & IBMVETH_BUF_VALID) ? 1 : 0,
				     skb->len, desc.fields.address);
 		tx_send_failed++;
 		tx_dropped++;
 	} else {
@@ -749,11 +894,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		netdev->trans_start = jiffies;
 	}
 
-	do {
-		dma_unmap_single(&adapter->vdev->dev,
-				 desc[nfrags].fields.address,
-				 desc[nfrags].fields.length, DMA_TO_DEVICE);
-	} while(--nfrags >= 0);
+	dma_unmap_single(&adapter->vdev->dev, desc.fields.address,
+			 skb->len, DMA_TO_DEVICE);
 
 out:	spin_lock_irqsave(&adapter->stats_lock, flags);
 	adapter->stats.tx_dropped += tx_dropped;
@@ -792,7 +934,11 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
 		} else {
 			int length = ibmveth_rxq_frame_length(adapter);
 			int offset = ibmveth_rxq_frame_offset(adapter);
+			int csum_good = ibmveth_rxq_csum_good(adapter);
+
 			skb = ibmveth_rxq_get_buffer(adapter);
+			if (csum_good)
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 			ibmveth_rxq_harvest_buffer(adapter);
 
@@ -963,8 +1109,10 @@ static void ibmveth_poll_controller(struct net_device *dev)
 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
 	int rc, i;
+	long ret;
 	struct net_device *netdev;
 	struct ibmveth_adapter *adapter = NULL;
+	u64 set_attr, ret_attr;
 	unsigned char *mac_addr_p;
 	unsigned int *mcastFilterSize_p;
 
@@ -1059,6 +1207,22 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 
 	ibmveth_debug_printk("registering netdev...\n");
 
+	ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr);
+
+	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
+	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
+	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
+		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
+
+		ret = h_illan_attributes(dev->unit_address, 0, set_attr, &ret_attr);
+
+		if (ret == H_SUCCESS) {
+			adapter->rx_csum = 1;
+			netdev->features |= NETIF_F_IP_CSUM;
+		} else
+			ret = h_illan_attributes(dev->unit_address, set_attr, 0, &ret_attr);
+	}
+
 	rc = register_netdev(netdev);
 
 	if(rc) {
@@ -1142,10 +1306,7 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v)
 		   firmware_mac[3], firmware_mac[4], firmware_mac[5]);
 
 	seq_printf(seq, "\nAdapter Statistics:\n");
-	seq_printf(seq, " TX: skbuffs linearized: %ld\n", adapter->tx_linearized);
-	seq_printf(seq, " multi-descriptor sends: %ld\n", adapter->tx_multidesc_send);
-	seq_printf(seq, " skb_linearize failures: %ld\n", adapter->tx_linearize_failed);
-	seq_printf(seq, " vio_map_single failres: %ld\n", adapter->tx_map_failed);
+	seq_printf(seq, " TX: vio_map_single failres: %ld\n", adapter->tx_map_failed);
 	seq_printf(seq, " send failures: %ld\n", adapter->tx_send_failed);
 	seq_printf(seq, " RX: replenish task cycles: %ld\n", adapter->replenish_task_cycles);
 	seq_printf(seq, " alloc_skb_failures: %ld\n", adapter->replenish_no_mem);
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 413002f..62d1c44 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -25,8 +25,6 @@
 #ifndef _IBMVETH_H
 #define _IBMVETH_H
 
-#define IbmVethMaxSendFrags 6
-
 /* constants for H_MULTICAST_CTRL */
 #define IbmVethMcastReceptionModifyBit 0x80000UL
 #define IbmVethMcastReceptionEnableBit 0x20000UL
@@ -50,6 +48,13 @@
 #define H_MULTICAST_CTRL 0x130
 #define H_CHANGE_LOGICAL_LAN_MAC 0x14C
 #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
+#define H_ILLAN_ATTRIBUTES 0x244
+
+#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000ULL
+#define IBMVETH_ILLAN_TRUNK_PRI_MASK 0x0000000000000F00ULL
+#define IBMVETH_ILLAN_IPV6_TCP_CSUM 0x0000000000000004ULL
+#define IBMVETH_ILLAN_IPV4_TCP_CSUM 0x0000000000000002ULL
+#define IBMVETH_ILLAN_ACTIVE_TRUNK 0x0000000000000001ULL
 
 /* hcall macros */
 #define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
@@ -64,6 +69,21 @@
 #define h_send_logical_lan(ua, buf1, buf2, buf3, buf4, buf5, buf6, correlator) \
 	plpar_hcall_8arg_2ret(H_SEND_LOGICAL_LAN, ua, buf1, buf2, buf3, buf4, buf5, buf6, correlator, &correlator)
 
+static inline long h_illan_attributes(unsigned long unit_address,
+				      unsigned long reset_mask, unsigned long set_mask,
+				      unsigned long *ret_attributes)
+{
+	long rc;
+	unsigned long retbuf[3];
+
+	rc = plpar_hcall(H_ILLAN_ATTRIBUTES, unit_address, reset_mask, set_mask,
+			 0, &retbuf[0], &retbuf[1], &retbuf[2]);
+
+	*ret_attributes = retbuf[0];
+
+	return rc;
+}
+
 #define h_multicast_ctrl(ua, cmd, mac) \
 	plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac)
 
@@ -120,6 +140,7 @@ struct ibmveth_adapter {
 	struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
 	struct ibmveth_rx_q rx_queue;
 	int pool_config;
+	int rx_csum;
 
 	/* adapter specific stats */
 	u64 replenish_task_cycles;
@@ -128,20 +149,19 @@ struct ibmveth_adapter {
 	u64 replenish_add_buff_success;
 	u64 rx_invalid_buffer;
 	u64 rx_no_buffer;
-	u64 tx_multidesc_send;
-	u64 tx_linearized;
-	u64 tx_linearize_failed;
 	u64 tx_map_failed;
 	u64 tx_send_failed;
 	spinlock_t stats_lock;
 };
 
 struct ibmveth_buf_desc_fields {
-	u32 valid : 1;
-	u32 toggle : 1;
-	u32 reserved : 6;
-	u32 length : 24;
-	u32 address;
+	u32 flags_len;
+#define IBMVETH_BUF_VALID 0x80000000
+#define IBMVETH_BUF_TOGGLE 0x40000000
+#define IBMVETH_BUF_NO_CSUM 0x02000000
+#define IBMVETH_BUF_CSUM_GOOD 0x01000000
+#define IBMVETH_BUF_LEN_MASK 0x00FFFFFF
+	u32 address;
 };
 
 union ibmveth_buf_desc {
@@ -150,12 +170,16 @@ union ibmveth_buf_desc {
 };
 
 struct ibmveth_rx_q_entry {
-	u16 toggle : 1;
-	u16 valid : 1;
-	u16 reserved : 14;
-	u16 offset;
-	u32 length;
-	u64 correlator;
+	u32 flags_off;
+#define IBMVETH_RXQ_TOGGLE 0x80000000
+#define IBMVETH_RXQ_TOGGLE_SHIFT 31
+#define IBMVETH_RXQ_VALID 0x40000000
+#define IBMVETH_RXQ_NO_CSUM 0x02000000
+#define IBMVETH_RXQ_CSUM_GOOD 0x01000000
+#define IBMVETH_RXQ_OFF_MASK 0x0000FFFF
+
+	u32 length;
+	u64 correlator;
 };
 
 #endif /* _IBMVETH_H */
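
The descriptor and rx-queue bitfields are replaced above by plain u32 words
plus explicit masks (IBMVETH_BUF_* / IBMVETH_RXQ_*), so the valid bit and the
24-bit length are packed with a simple OR, and the rx helpers decode with
AND/shift. A standalone user-space sketch of that encode/decode, with values
and types approximated for illustration only:

#include <stdint.h>
#include <stdio.h>

#define IBMVETH_BUF_VALID        0x80000000u
#define IBMVETH_BUF_LEN_MASK     0x00FFFFFFu

#define IBMVETH_RXQ_TOGGLE       0x80000000u
#define IBMVETH_RXQ_TOGGLE_SHIFT 31
#define IBMVETH_RXQ_CSUM_GOOD    0x01000000u
#define IBMVETH_RXQ_OFF_MASK     0x0000FFFFu

int main(void)
{
	/* tx side: equivalent of desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len
	 * (the driver relies on skb->len fitting in the low 24 bits) */
	uint32_t len = 9000;
	uint32_t flags_len = IBMVETH_BUF_VALID | (len & IBMVETH_BUF_LEN_MASK);

	/* rx side: decode a flags_off word the way the new ibmveth_rxq_* helpers do */
	uint32_t flags_off = 0x81000042u;	/* made-up example value */

	printf("tx: valid=%u len=%u\n",
	       (flags_len & IBMVETH_BUF_VALID) ? 1u : 0u,
	       flags_len & IBMVETH_BUF_LEN_MASK);
	printf("rx: toggle=%u csum_good=%u offset=%u\n",
	       (flags_off & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT,
	       (flags_off & IBMVETH_RXQ_CSUM_GOOD) ? 1u : 0u,
	       flags_off & IBMVETH_RXQ_OFF_MASK);
	return 0;
}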