40 commits
67bcf37
net: intel: introduce Intel Ethernet common library
alobakin Mar 8, 2023
0e490c5
iavf: kill "legacy-rx" for good
alobakin Feb 1, 2023
88798b9
iavf: optimize Rx buffer allocation a bunch
alobakin Feb 2, 2023
46eb61c
iavf: remove page splitting/recycling
alobakin Feb 17, 2023
95a993b
iavf: always use a full order-0 page
alobakin Feb 3, 2023
49b4e5c
net: page_pool: allow DMA mapping with %DMA_ATTR_WEAK_ORDERING
alobakin Feb 3, 2023
b71da32
net: page_pool: add DMA-sync-for-CPU inline helpers
alobakin Mar 9, 2023
b71ce3c
iavf: switch to Page Pool
alobakin Mar 9, 2023
3d884e3
libie: add common queue stats
alobakin Mar 14, 2023
16d126c
libie: add per-queue Page Pool stats
alobakin Mar 16, 2023
1f934b6
iavf: switch queue stats to libie
alobakin Mar 15, 2023
4e60236
selftests/bpf: robustify test_xdp_do_redirect with more payload magics
alobakin Mar 13, 2023
2e28dff
net: page_pool, skbuff: make skb_mark_for_recycle() always available
alobakin Mar 3, 2023
df09c63
xdp: recycle Page Pool backed skbs built from XDP frames
alobakin Mar 1, 2023
4fcbfcd
xdp: remove unused {__,}xdp_release_frame()
alobakin Mar 1, 2023
897dcfd
iavf: optimize Rx hotpath a bunch -- vol. 2
alobakin Mar 17, 2023
d6ea05c
iavf: fixup for optimize vol. 2
alobakin Mar 23, 2023
8aca6e1
i40e: Unify handling of zero ring length in 'configure queue'
michalQb Oct 7, 2022
557b392
iavf: Remove IAVF_TX_FLAGS_FD_SB flag
walking-machine Feb 22, 2023
80f873c
iavf: Use separate ring masks for TX and RX in q_vector
michalQb Feb 15, 2023
455dee1
iavf: Prepare VIRTCHNL functions to support XDP
michalQb Nov 29, 2022
d1aea70
iavf: Refactor ring initialization functions to handle XDP
michalQb Nov 29, 2022
76bf498
iavf: Prepare rings to support XDP
michalQb Dec 2, 2022
629e722
iavf: don't hardcode DMA direction, headroom and buffer len on Rx
alobakin Feb 22, 2023
9f85b9d
iavf: Handle XDP_SETUP_PROG command in .ndo_bpf
michalQb Dec 2, 2022
c59cad5
iavf: Add XDP_PASS and XDP_DROP support
walking-machine Nov 29, 2022
19cfad9
iavf: Implement XDP_TX action
walking-machine Nov 30, 2022
ed6e942
iavf: Implement XDP redirect path
walking-machine Nov 30, 2022
55adf95
iavf: Allow XDP TxQ sharing
walking-machine Nov 30, 2022
8f9b548
iavf: Enable XDP netdev features
michalQb Mar 9, 2023
7910142
iavf: Add AF_XDP initialization
michalQb Nov 30, 2022
af588dd
iavf: Implement Tx path for AF_XDP
michalQb Nov 30, 2022
af38da2
iavf: Implement AF_XDP RX processing
walking-machine Nov 30, 2022
f057680
iavf: consolidate skb fields processing
alobakin Feb 23, 2023
7f53f95
iavf: Implement XDP_PASS path in AF_XDP processing
walking-machine Nov 30, 2022
3e2fafc
iavf: Make request and free traffic irqs symmetric
michalQb Dec 6, 2022
a3e00d6
iavf: Do not reset the number of requested queues
michalQb Dec 13, 2022
c784562
iavf: Limit number of channels in ethtool when XDP is enabled
walking-machine Jan 24, 2023
b7fbed3
iavf: lock XDP queue while using in ZC mode
walking-machine Feb 24, 2023
f71cafe
iavf: Enable AF_XDP zero-copy feature in netdev
michalQb Mar 9, 2023
13 changes: 13 additions & 0 deletions drivers/net/ethernet/intel/iavf/iavf.h
@@ -529,6 +529,19 @@ static inline bool iavf_adapter_xdp_active(struct iavf_adapter *adapter)
return !!READ_ONCE(adapter->xdp_prog);
}

static inline struct xsk_buff_pool *iavf_xsk_pool(struct iavf_ring *ring)
Author comment: Unused?

{
struct iavf_adapter *adapter = ring->vsi->back;
struct iavf_vsi *vsi = ring->vsi;
u16 qid = ring->queue_index;

if (!iavf_adapter_xdp_active(adapter) ||
!test_bit(qid, adapter->af_xdp_zc_qps))
return NULL;

return xsk_get_pool_from_qid(vsi->netdev, qid);
}

int iavf_up(struct iavf_adapter *adapter);
void iavf_down(struct iavf_adapter *adapter);
int iavf_process_config(struct iavf_adapter *adapter);
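Context for the "Unused?" note above: this helper is expected to be consumed by the XSK ring-setup path (iavf_xsk_setup_rx_ring() is called from iavf_setup_rx_descriptors() later in this diff, though its body is not shown here). A minimal, hypothetical sketch of such a call site follows; the function name and the exact field assignments are assumptions, not taken from the patch.

/* Hypothetical sketch, not part of this diff: how iavf_xsk_pool() would
 * typically be consumed when deciding whether an Rx ring runs in
 * zero-copy mode.
 */
static void iavf_xsk_setup_rx_ring_sketch(struct iavf_ring *rx_ring)
{
	struct xsk_buff_pool *pool = iavf_xsk_pool(rx_ring);

	if (!pool)
		return;	/* not a ZC queue: keep the regular Page Pool path */

	rx_ring->xsk_pool = pool;
	rx_ring->flags |= IAVF_TXRX_FLAGS_XSK;
}
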
32 changes: 25 additions & 7 deletions drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -780,14 +780,29 @@ void iavf_configure_rx_ring(struct iavf_adapter *adapter,
rx_ring->queue_index,
rx_ring->q_vector->napi.napi_id);

err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_POOL,
rx_ring->pool);
if (err)
netdev_err(adapter->netdev, "Could not register XDP memory model for RX queue %u, error: %d\n",
queue_idx, err);
if (rx_ring->flags & IAVF_TXRX_FLAGS_XSK) {
err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
if (err)
netdev_err(adapter->netdev, "xdp_rxq_info_reg_mem_model returned %d\n",
err);

xsk_pool_set_rxq_info(rx_ring->xsk_pool, &rx_ring->xdp_rxq);

iavf_check_alloc_rx_buffers_zc(adapter, rx_ring);
} else {
err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
MEM_TYPE_PAGE_POOL,
rx_ring->pool);
if (err)
netdev_err(adapter->netdev, "Could not register XDP memory model for RX queue %u, error: %d\n",
queue_idx, err);

iavf_alloc_rx_pages(rx_ring);
}

RCU_INIT_POINTER(rx_ring->xdp_prog, adapter->xdp_prog);
iavf_alloc_rx_pages(rx_ring);
}

/**
@@ -3657,10 +3672,13 @@ static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
**/
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
{
struct iavf_ring *rx_ring;
int i, err = 0;

for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].count = adapter->rx_desc_count;
rx_ring = &adapter->rx_rings[i];
rx_ring->count = adapter->rx_desc_count;

err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
if (!err)
continue;
8 changes: 8 additions & 0 deletions drivers/net/ethernet/intel/iavf/iavf_trace.h
@@ -145,6 +145,14 @@

TP_ARGS(ring, desc, skb));

DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq_zc,
TP_PROTO(struct iavf_ring *ring,
union iavf_32byte_rx_desc *desc,
struct sk_buff *skb),

TP_ARGS(ring, desc, skb));

DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq_rx,
TP_PROTO(struct iavf_ring *ring,
82 changes: 45 additions & 37 deletions drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -134,6 +134,11 @@ void iavf_free_tx_resources(struct iavf_ring *tx_ring)
kfree(tx_ring->tx_bi);
tx_ring->tx_bi = NULL;

if (tx_ring->flags & IAVF_TXRX_FLAGS_XSK) {
tx_ring->dev = tx_ring->xsk_pool->dev;
tx_ring->flags &= ~IAVF_TXRX_FLAGS_XSK;
}

if (tx_ring->desc) {
dma_free_coherent(tx_ring->dev, tx_ring->size,
tx_ring->desc, tx_ring->dma);
@@ -734,6 +739,22 @@ int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
return -ENOMEM;
}

static void iavf_clean_rx_pages(struct iavf_ring *rx_ring)
{
for (u32 i = 0; i < rx_ring->count; i++) {
struct page *page = rx_ring->rx_pages[i];

if (!page)
continue;

/* Invalidate cache lines that may have been written to by
* device so that we avoid corrupting memory.
*/
page_pool_dma_sync_full_for_cpu(rx_ring->pool, page);
page_pool_put_full_page(rx_ring->pool, page, false);
}
}

/**
* iavf_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
@@ -749,19 +770,10 @@ void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
rx_ring->skb = NULL;
}

/* Free all the Rx ring sk_buffs */
for (u32 i = 0; i < rx_ring->count; i++) {
struct page *page = rx_ring->rx_pages[i];

if (!page)
continue;

/* Invalidate cache lines that may have been written to by
* device so that we avoid corrupting memory.
*/
page_pool_dma_sync_full_for_cpu(rx_ring->pool, page);
page_pool_put_full_page(rx_ring->pool, page, false);
}
if (rx_ring->flags & IAVF_TXRX_FLAGS_XSK)
iavf_xsk_clean_rx_ring(rx_ring);
else
iavf_clean_rx_pages(rx_ring);

rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -775,7 +787,7 @@ void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
**/
void iavf_free_rx_resources(struct iavf_ring *rx_ring)
{
struct device *dev = rx_ring->pool->p.dev;
struct device *dev;

iavf_clean_rx_ring(rx_ring);
kfree(rx_ring->rx_pages);
@@ -785,7 +797,14 @@ void iavf_free_rx_resources(struct iavf_ring *rx_ring)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

libie_rx_page_pool_destroy(rx_ring->pool, &rx_ring->rq_stats);
if (rx_ring->flags & IAVF_TXRX_FLAGS_XSK) {
dev = rx_ring->xsk_pool->dev;
rx_ring->flags &= ~IAVF_TXRX_FLAGS_XSK;
} else {
dev = rx_ring->pool->p.dev;
libie_rx_page_pool_destroy(rx_ring->pool, &rx_ring->rq_stats);
}

rx_ring->dev = dev;

if (rx_ring->desc) {
@@ -820,6 +839,8 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)

/* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_pages);

/* Both iavf_ring::rx_pages and ::xdp_buff are arrays of pointers */
rx_ring->rx_pages = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_pages),
GFP_KERNEL);
if (!rx_ring->rx_pages)
@@ -837,6 +858,10 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
goto err;
}

iavf_xsk_setup_rx_ring(rx_ring);
if (rx_ring->flags & IAVF_TXRX_FLAGS_XSK)
goto finish;

pool = libie_rx_page_pool_create(rx_ring->netdev, rx_ring->count,
iavf_is_xdp_enabled(rx_ring));
if (IS_ERR(pool)) {
@@ -846,6 +871,7 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)

rx_ring->pool = pool;

finish:
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;

@@ -860,24 +886,6 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
return ret;
}

/**
* iavf_release_rx_desc - Store the new tail and head values
* @rx_ring: ring to bump
* @val: new head index
**/
static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;

/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();
writel(val, rx_ring->tail);
}

/**
* iavf_receive_skb - Send a completed packet up the stack
* @rx_ring: rx ring in play
@@ -1372,9 +1380,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
}
}

libie_rq_napi_stats_add(&rx_ring->rq_stats, &stats);
rx_ring->q_vector->rx.total_packets += stats.packets;
rx_ring->q_vector->rx.total_bytes += stats.bytes;
iavf_update_rx_ring_stats(rx_ring, &stats);

return cleaned_count;
}
@@ -1534,7 +1540,9 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
rcu_read_lock();

iavf_for_each_ring(ring, q_vector->rx) {
int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);
int cleaned = !!(ring->flags & IAVF_TXRX_FLAGS_XSK) ?
iavf_clean_rx_irq_zc(ring, budget_per_ring) :
iavf_clean_rx_irq(ring, budget_per_ring);

work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
53 changes: 47 additions & 6 deletions drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -210,6 +210,7 @@ struct iavf_tx_buffer {
struct sk_buff *skb; /* used for .ndo_start_xmit() */
struct page *page; /* used for XDP_TX */
struct xdp_frame *xdpf; /* used for .ndo_xdp_xmit() */
struct xdp_buff *xdp; /* used for XDP_TX in ZC mode */
};
unsigned int bytecount;
unsigned short gso_segs;
@@ -243,6 +244,7 @@ struct iavf_ring {
struct net_device *netdev; /* netdev ring maps to */
union {
struct iavf_tx_buffer *tx_bi;
struct xdp_buff **xdp_buff;
struct page **rx_pages;
};
u8 __iomem *tail;
@@ -370,7 +372,7 @@ static inline __le64 iavf_build_ctob(u32 td_cmd, u32 td_offset,
* Returns number of data descriptors needed for this skb. Returns 0 to indicate
* there is not enough descriptors available in this ring since we need at least
* one descriptor.
**/
*/
static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{
const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
@@ -395,7 +397,7 @@ static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
* @size: the size buffer we want to assure is available
*
* Returns 0 if stop is not needed
**/
*/
static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
@@ -411,7 +413,7 @@ static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
* Note: Our HW can't scatter-gather more than 8 fragments to build
* a packet on the wire and so we need to figure out the cases where we
* need to linearize the skb.
**/
*/
static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
{
/* Both TSO and single send will work if count is less than 8 */
@@ -427,7 +429,7 @@ static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
/**
* txring_txq - helper to convert from a ring to a queue
* @ring: Tx ring to find the netdev equivalent of
**/
*/
static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
{
return netdev_get_tx_queue(ring->netdev, ring->queue_index);
@@ -454,7 +456,7 @@ static inline void iavf_xdp_ring_update_tail(const struct iavf_ring *xdp_ring)
* @tc: TODO
* @total_pkts: Number of packets transmitted since the last update
* @total_bytes: Number of bytes transmitted since the last update
**/
*/
static inline void
__iavf_update_tx_ring_stats(struct iavf_ring *tx_ring,
struct iavf_ring_container *tc,
@@ -468,8 +470,47 @@ __iavf_update_tx_ring_stats(struct iavf_ring *tx_ring,
#define iavf_update_tx_ring_stats(r, s) \
__iavf_update_tx_ring_stats(r, &(r)->q_vector->tx, s)

/**
* iavf_update_rx_ring_stats - Update RX ring stats
* @rx_ring: ring to update the stats for
* @rc: RX ring container from the ring's q_vector
* @stats: number of packets and bytes processed since the last update
*/
static inline void
__iavf_update_rx_ring_stats(struct iavf_ring *rx_ring,
struct iavf_ring_container *rc,
const struct libie_rq_onstack_stats *stats)
{
libie_rq_napi_stats_add(&rx_ring->rq_stats, stats);
rc->total_packets += stats->packets;
rc->total_bytes += stats->bytes;
}

#define iavf_update_rx_ring_stats(r, s) \
__iavf_update_rx_ring_stats(r, &(r)->q_vector->rx, s)

/**
* iavf_release_rx_desc - Store the new tail and head values
* @rx_ring: ring to bump
* @val: new head index
*/
static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;

/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();
writel(val, rx_ring->tail);
}

#define IAVF_RXQ_XDP_ACT_FINALIZE_TX BIT(0)
#define IAVF_RXQ_XDP_ACT_FINALIZE_REDIR BIT(1)
#define IAVF_RXQ_XDP_ACT_STOP_NOW BIT(2)

/**
* iavf_set_rs_bit - set RS bit on last produced descriptor.
Expand All @@ -495,7 +536,7 @@ static inline u16 iavf_set_rs_bit(struct iavf_ring *xdp_ring)
* @xdp_ring: XDP TX queue assigned to a given RX ring
* @rxq_xdp_act: Logical OR of flags of XDP actions that require finalization
* @first_idx: index of the first frame in the transmitted batch on XDP queue
**/
*/
static inline void iavf_finalize_xdp_rx(struct iavf_ring *xdp_ring,
u32 rxq_xdp_act, u32 first_idx)
{
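Context for the IAVF_RXQ_XDP_ACT_* bits and iavf_finalize_xdp_rx() above (the function body is collapsed in this view): the Rx clean loop is expected to OR the per-frame verdict flags into a single mask and finalize XDP Tx tail bumps and redirect flushes once per NAPI poll rather than per packet. A rough, hypothetical sketch of that caller side follows; iavf_run_xdp() and the loop shape are assumptions, not taken from this series.

/* Hypothetical sketch, not part of this diff. */
static u32 iavf_clean_rx_xdp_sketch(struct iavf_ring *rx_ring,
				    struct iavf_ring *xdp_ring, u32 budget)
{
	u32 first_idx = xdp_ring->next_to_use;
	u32 rxq_xdp_act = 0;
	u32 cleaned = 0;

	while (cleaned < budget) {
		/* ... fetch the next descriptor and build an xdp_buff ... */
		rxq_xdp_act |= iavf_run_xdp(rx_ring, xdp_ring);	/* assumed helper */

		/* e.g. the XDP Tx ring is full: leave the loop early */
		if (rxq_xdp_act & IAVF_RXQ_XDP_ACT_STOP_NOW)
			break;
		cleaned++;
	}

	/* Bump the XDP Tx tail and/or flush redirects only once per poll */
	iavf_finalize_xdp_rx(xdp_ring, rxq_xdp_act, first_idx);

	return cleaned;
}
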
14 changes: 11 additions & 3 deletions drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -415,8 +415,8 @@ static void iavf_set_qp_config_info(struct virtchnl_queue_pair_info *vqpi,
bool xdp_pair)
{
struct iavf_ring *rxq = &adapter->rx_rings[queue_index];
const struct page_pool_params *pp = &rxq->pool->p;
struct iavf_ring *txq;
u32 hr, max_len;
int xdpq_idx;

if (xdp_pair) {
@@ -437,12 +437,20 @@ static void iavf_set_qp_config_info(struct virtchnl_queue_pair_info *vqpi,
return;
}

max_frame = min_not_zero(max_frame, LIBIE_MAX_RX_FRM_LEN(pp->offset));
if (rxq->flags & IAVF_TXRX_FLAGS_XSK) {
hr = xsk_pool_get_headroom(rxq->xsk_pool);
max_len = xsk_pool_get_rx_frame_size(rxq->xsk_pool);
} else {
hr = rxq->pool->p.offset;
max_len = rxq->pool->p.max_len;
}

max_frame = min_not_zero(max_frame, LIBIE_MAX_RX_FRM_LEN(hr));

vqpi->rxq.ring_len = rxq->count;
vqpi->rxq.dma_ring_addr = rxq->dma;
vqpi->rxq.max_pkt_size = max_frame;
vqpi->rxq.databuffer_size = pp->max_len;
vqpi->rxq.databuffer_size = max_len;
}

/**
Expand Down
Loading