immortalwrt/package/kernel/qca-nss-dp/patches/0012-01-syn-gmac-use-standard-DMA-api.patch
commit a76651a24a (Kristian Skramstad)
kernel: qca-nss-dp: update to 12.5.5
There are some new changes between 12.5.r2 and 12.5.5, so refresh
and update the patches.

Changes:
modified:   package/kernel/qca-nss-dp/Makefile
modified:   package/kernel/qca-nss-dp/patches/0006-nss_dp_main-Use-a-phy-handle-property-to-connect-to-.patch
modified:   package/kernel/qca-nss-dp/patches/0008-nss-dp-allow-setting-netdev-name-from-DTS.patch
deleted:    package/kernel/qca-nss-dp/patches/0011-01-edma_v1-rework-hw_reset-logic-to-permit-rmmod-and-in.patch
deleted:    package/kernel/qca-nss-dp/patches/0011-02-nss_dp_switchdev-correctly-unregister-notifier-on-dp.patch
deleted:    package/kernel/qca-nss-dp/patches/0011-03-nss_dp_main-swap-dp_exit-function-call.patch
deleted:    package/kernel/qca-nss-dp/patches/0011-04-nss_dp_main-call-unregister_netdev-first-in-dp_remov.patch
deleted:    package/kernel/qca-nss-dp/patches/0011-05-nss_dp_main-use-phy_detach-instead-of-disconnect-in-.patch
deleted:    package/kernel/qca-nss-dp/patches/0011-06-edma_v1-skip-edma_disable_port-in-edma_cleanup-subse.patch

Log:
2024-10-16 |4c9f671| [qca-nss-dp] removed the calling of fal_port_autoneg_status_get
2024-10-16 |fb33119| Merge "[qca-nss-dp] Change Debug Level of TX Complete Errors"
2024-10-15 |e197b9d| [qca-nss-dp] Change Debug Level of TX Complete Errors
2024-10-03 |1b7aeb1| Merge remote-tracking branch origin/AU_LINUX_QSDK_GINGER_OPEN_TARGET_ALL.12.5.5.230.918 into HEAD
2024-10-01 |830ac73| Merge "[qca-nss-dp] Enable rmmod support for qca-nss-dp module."
2024-06-05 |aef242d| [qca-nss-dp] Enable rmmod support for qca-nss-dp module.
2024-09-18 |b1bfeb6| Merge "[qca-nss-dp] Move pr_info to pr_debug"
2024-09-12 |54aee26| Merge "[qca-nss-dp] Fix Loopback ring teardown path"
2024-09-10 |21f6567| Merge "[qca-nss-dp] Extend Tx Complete errors to track individual error type"
2024-09-10 |7fee76f| Merge "[qca-nss-dp] Destination VP xmit support"
2024-09-09 |0d4ecfd| [qca-nss-dp] Fix Loopback ring teardown path
2024-08-29 |438ab8e| [qca-nss-dp] Move pr_info to pr_debug
2024-08-29 |3f45e3d| Merge "[qca-nss-dp] Fix EDMA Recovery API to Prevent Multiple RX NAPI Add Calls"
2024-08-23 |7a792b1| [qca-nss-dp] Fix EDMA Recovery API to Prevent Multiple RX NAPI Add Calls
2024-07-08 |1227746| [qca-nss-dp] Add support for DDR extended buffer
2024-08-20 |d45bceb| [qca-nss-dp] Extend Tx Complete errors to track individual error type
2024-06-17 |a8f7155| [qca-nss-dp] Destination VP xmit support
2024-08-21 |a5d189b| Merge "[qca-nss-dp] Support for the PPE-VP Qdisc feature"
2024-08-16 |da97bc7| Merge "[qca-nss-dp] Restrict compilation of a file in DP module"
2024-06-22 |bf90fe9| [qca-nss-dp] Support for the PPE-VP Qdisc feature
2024-02-07 |6ceeb8f| [qca-nss-dp] Enable EDMA driver to allocate from beyond 4GB space.
2024-03-19 |4a7ff28| [qca-nss-dp] Restrict compilation of a file in DP module
2024-07-09 |73bad34| [qca-nss-dp] Check for DS node state before getting ppe queue
2024-05-03 |22cdbd6| [qca-nss-dp] Add PPE-DS Enqueue vp to queue mapping
2024-07-04 |5d6ef18| Merge "[qca-nss-dp] NAPI Budget change for KPI improvement"
2024-07-02 |cd0b543| Merge "[qca-nss-dp] Read fake_mac header indication from rx_desc"
2024-07-01 |160b988| [qca-nss-dp] NAPI Budget change for KPI improvement
2024-05-22 |54c2fd1| [qca-nss-dp]: Fixing SA warnings as part of qca-nss-dp module.
2024-06-22 |602534b| [qca-nss-dp] Read fake_mac header indication from rx_desc
2023-11-24 |10210e2| [qca-nss-dp] EDMA register changes for ipq54xx
2024-05-22 |1af0d03| Merge "[qca-nss-dp]: Change to support PPE-VP path for MLO Assist"
2024-05-15 |2acd9f3| [qca-nss-dp]: Change to support PPE-VP path for MLO Assist
2023-09-27 |1ca59f2| [qca-nss-dp] DP changes for ipq54xx
2024-04-18 |bc60c5a| Merge "[qca-nss-dp] Add support for the XGMAC latency computation"
2024-04-18 |6a67d6d| Merge "[qca-nss-dp] Fix the packets processing in the Rx NAPI"

Signed-off-by: Kristian Skramstad <kristian+github@83.no>
Link: https://github.com/openwrt/openwrt/pull/17731
Signed-off-by: Robert Marko <robimarko@gmail.com>

From 5ad8cf24897ff903112967a9662cb13ed4cbbf57 Mon Sep 17 00:00:00 2001
From: Ziyang Huang <hzyitc@outlook.com>
Date: Mon, 22 Apr 2024 21:47:58 +0800
Subject: [PATCH] syn-gmac: use standard DMA api

The SSDK uses several old DMA helpers built on raw cache-maintenance
assembly (dmac_clean_range_no_dsb() and friends), which have been
deprecated. Convert to the standard Linux DMA API instead.
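
As a rough before/after illustration, adapted from the hunks below
rather than quoted verbatim (the error check is illustrative; the TX
hunks themselves do not add one):

	/* Before: physical address plus manual cache maintenance */
	dma_addr = (dma_addr_t)virt_to_phys(skb->data);
	dmac_clean_range_no_dsb((void *)skb->data, (void *)(skb->data + skb->len));

	/* After: standard streaming DMA API */
	dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr))
		return NETDEV_TX_BUSY;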
Signed-off-by: Ziyang Huang <hzyitc@outlook.com>
Signed-off-by: George Moussalem <george.moussalem@outlook.com>
---
hal/dp_ops/syn_gmac_dp/syn_dp_cfg_rx.c | 14 ++++++--
hal/dp_ops/syn_gmac_dp/syn_dp_cfg_tx.c | 2 ++
hal/dp_ops/syn_gmac_dp/syn_dp_rx.c | 47 +++++++++++++-------------
hal/dp_ops/syn_gmac_dp/syn_dp_tx.c | 23 ++++---------
4 files changed, 42 insertions(+), 44 deletions(-)
--- a/hal/dp_ops/syn_gmac_dp/syn_dp_cfg_rx.c
+++ b/hal/dp_ops/syn_gmac_dp/syn_dp_cfg_rx.c
@@ -26,6 +26,7 @@ static int syn_dp_cfg_rx_setup_desc_queu
{
struct syn_dp_info_rx *rx_info = &dev_info->dp_info_rx;
struct dma_desc_rx *first_desc = NULL;
+ dma_addr_t dma_addr;
struct net_device *netdev = rx_info->netdev;
netdev_dbg(netdev, "Total size of memory required for Rx Descriptors in Ring Mode = %u\n", (uint32_t)((sizeof(struct dma_desc_rx) * SYN_DP_RX_DESC_SIZE)));
@@ -33,13 +34,15 @@ static int syn_dp_cfg_rx_setup_desc_queu
/*
* Allocate cacheable descriptors for Rx
*/
- first_desc = kzalloc(sizeof(struct dma_desc_rx) * SYN_DP_RX_DESC_SIZE, GFP_KERNEL);
+ first_desc = dma_alloc_coherent(rx_info->dev,
+ sizeof(struct dma_desc_rx) * SYN_DP_RX_DESC_SIZE,
+ &dma_addr, GFP_KERNEL);
if (!first_desc) {
netdev_dbg(netdev, "Error in Rx Descriptor Memory allocation in Ring mode\n");
return -ENOMEM;
}
- dev_info->rx_desc_dma_addr = (dma_addr_t)virt_to_phys(first_desc);
+ dev_info->rx_desc_dma_addr = dma_addr;
rx_info->rx_desc = first_desc;
syn_dp_gmac_rx_desc_init_ring(rx_info->rx_desc, SYN_DP_RX_DESC_SIZE);
@@ -98,6 +101,10 @@ void syn_dp_cfg_rx_cleanup_rings(struct
for (i = 0; i < rx_info->busy_rx_desc_cnt; i++) {
rx_skb_index = (rx_skb_index + i) & SYN_DP_RX_DESC_MAX_INDEX;
rxdesc = rx_info->rx_desc;
+
+ dma_unmap_single(rx_info->dev, rxdesc->buffer1,
+ rxdesc->length, DMA_FROM_DEVICE);
+
skb = rx_info->rx_buf_pool[rx_skb_index].skb;
if (unlikely(skb != NULL)) {
dev_kfree_skb_any(skb);
@@ -105,7 +112,8 @@ void syn_dp_cfg_rx_cleanup_rings(struct
}
}
- kfree(rx_info->rx_desc);
+ dma_free_coherent(rx_info->dev, (sizeof(struct dma_desc_rx) * SYN_DP_RX_DESC_SIZE),
+ rx_info->rx_desc, dev_info->rx_desc_dma_addr);
rx_info->rx_desc = NULL;
dev_info->rx_desc_dma_addr = (dma_addr_t)0;
}
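
For reference, the coherent-ring pattern the hunks above switch to looks
roughly like this; a minimal self-contained sketch with illustrative
names (demo_ring, demo_ring_alloc), not the driver's actual structures:

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	struct demo_ring {
		void *desc;		/* CPU view of the descriptor array */
		dma_addr_t desc_dma;	/* device (bus) address of the same memory */
		size_t size;
	};

	static int demo_ring_alloc(struct device *dev, struct demo_ring *r, size_t size)
	{
		/* Coherent memory gives CPU and device a consistent view, so
		 * no manual dmac_*_range() cache maintenance is needed on it. */
		r->desc = dma_alloc_coherent(dev, size, &r->desc_dma, GFP_KERNEL);
		if (!r->desc)
			return -ENOMEM;
		r->size = size;
		return 0;
	}

	static void demo_ring_free(struct device *dev, struct demo_ring *r)
	{
		dma_free_coherent(dev, r->size, r->desc, r->desc_dma);
		r->desc = NULL;
		r->desc_dma = (dma_addr_t)0;
	}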
--- a/hal/dp_ops/syn_gmac_dp/syn_dp_cfg_tx.c
+++ b/hal/dp_ops/syn_gmac_dp/syn_dp_cfg_tx.c
@@ -91,6 +91,8 @@ void syn_dp_cfg_tx_cleanup_rings(struct
tx_skb_index = syn_dp_tx_inc_index(tx_skb_index, i);
txdesc = tx_info->tx_desc;
+ dma_unmap_single(tx_info->dev, txdesc->buffer1, txdesc->length, DMA_TO_DEVICE);
+
skb = tx_info->tx_buf_pool[tx_skb_index].skb;
if (unlikely(skb != NULL)) {
dev_kfree_skb_any(skb);
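
The TX side pairs each successful map with exactly one unmap, whether on
completion or on teardown as above. A hedged sketch of that lifecycle
(illustrative demo_* names, not the driver's functions):

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	static int demo_tx_map(struct device *dev, struct sk_buff *skb, dma_addr_t *addr)
	{
		/* DMA_TO_DEVICE: the API performs whatever cache cleaning the
		 * architecture needs before the device reads the buffer. */
		*addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
		return dma_mapping_error(dev, *addr) ? -ENOMEM : 0;
	}

	static void demo_tx_done(struct device *dev, dma_addr_t addr, size_t len,
				 struct sk_buff *skb)
	{
		/* Every successful dma_map_single() needs a matching unmap. */
		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}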
--- a/hal/dp_ops/syn_gmac_dp/syn_dp_rx.c
+++ b/hal/dp_ops/syn_gmac_dp/syn_dp_rx.c
@@ -73,16 +73,6 @@ static inline void syn_dp_rx_refill_one_
*/
static inline void syn_dp_rx_inval_and_flush(struct syn_dp_info_rx *rx_info, uint32_t start, uint32_t end)
{
- /*
- * Batched flush and invalidation of the rx descriptors
- */
- if (end > start) {
- dmac_flush_range_no_dsb((void *)&rx_info->rx_desc[start], (void *)&rx_info->rx_desc[end] + sizeof(struct dma_desc_rx));
- } else {
- dmac_flush_range_no_dsb((void *)&rx_info->rx_desc[start], (void *)&rx_info->rx_desc[SYN_DP_RX_DESC_MAX_INDEX] + sizeof(struct dma_desc_rx));
- dmac_flush_range_no_dsb((void *)&rx_info->rx_desc[0], (void *)&rx_info->rx_desc[end] + sizeof(struct dma_desc_rx));
- }
-
dsb(st);
}
@@ -124,15 +114,19 @@ int syn_dp_rx_refill_page_mode(struct sy
break;
}
+ skb_fill_page_desc(skb, 0, pg, 0, PAGE_SIZE);
+
/*
* Get virtual address of allocated page.
*/
page_addr = page_address(pg);
- dma_addr = (dma_addr_t)virt_to_phys(page_addr);
-
- skb_fill_page_desc(skb, 0, pg, 0, PAGE_SIZE);
+ dma_addr = dma_map_page(rx_info->dev, pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rx_info->dev, dma_addr))) {
+ dev_kfree_skb(skb);
+ netdev_dbg(netdev, "DMA mapping failed for empty buffer\n");
+ break;
+ }
- dmac_inv_range_no_dsb(page_addr, (page_addr + PAGE_SIZE));
rx_refill_idx = rx_info->rx_refill_idx;
rx_desc = rx_info->rx_desc + rx_refill_idx;
@@ -181,8 +175,15 @@ int syn_dp_rx_refill(struct syn_dp_info_
skb_reserve(skb, SYN_DP_SKB_HEADROOM + NET_IP_ALIGN);
- dma_addr = (dma_addr_t)virt_to_phys(skb->data);
- dmac_inv_range_no_dsb((void *)skb->data, (void *)(skb->data + inval_len));
+ dma_addr = dma_map_single(rx_info->dev, skb->data,
+ inval_len,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rx_info->dev, dma_addr))) {
+ dev_kfree_skb(skb);
+ netdev_dbg(netdev, "DMA mapping failed for empty buffer\n");
+ break;
+ }
+
rx_refill_idx = rx_info->rx_refill_idx;
rx_desc = rx_info->rx_desc + rx_refill_idx;
@@ -407,12 +408,6 @@ int syn_dp_rx(struct syn_dp_info_rx *rx_
* this code is executing.
*/
end = syn_dp_rx_inc_index(rx_info->rx_idx, busy);
- if (end > start) {
- dmac_inv_range_no_dsb((void *)&rx_info->rx_desc[start], (void *)&rx_info->rx_desc[end] + sizeof(struct dma_desc_rx));
- } else {
- dmac_inv_range_no_dsb((void *)&rx_info->rx_desc[start], (void *)&rx_info->rx_desc[SYN_DP_RX_DESC_MAX_INDEX] + sizeof(struct dma_desc_rx));
- dmac_inv_range_no_dsb((void *)&rx_info->rx_desc[0], (void *)&rx_info->rx_desc[end] + sizeof(struct dma_desc_rx));
- }
dsb(st);
@@ -439,8 +434,12 @@ int syn_dp_rx(struct syn_dp_info_rx *rx_
* speculative prefetch by CPU may have occurred.
*/
frame_length = syn_dp_gmac_get_rx_desc_frame_length(status);
- dmac_inv_range((void *)rx_buf->map_addr_virt,
- (void *)(((uint8_t *)rx_buf->map_addr_virt) + frame_length));
+ if (likely(!rx_info->page_mode))
+ dma_unmap_single(rx_info->dev, rx_desc->buffer1,
+ rx_info->alloc_buf_len, DMA_FROM_DEVICE);
+ else
+ dma_unmap_page(rx_info->dev, rx_desc->buffer1,
+ PAGE_SIZE, DMA_FROM_DEVICE);
prefetch((void *)rx_buf->map_addr_virt);
rx_next_idx = syn_dp_rx_inc_index(rx_idx, 1);
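
On the RX side, page-mode buffers use the page variants of the same
streaming API. Roughly, assuming a valid struct device pointer and
illustrative demo_* names:

	#include <linux/dma-mapping.h>
	#include <linux/mm.h>

	static int demo_rx_map_page(struct device *dev, struct page *pg, dma_addr_t *addr)
	{
		/* DMA_FROM_DEVICE: the API invalidates stale cache lines so
		 * the CPU later reads what the device actually wrote. */
		*addr = dma_map_page(dev, pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		return dma_mapping_error(dev, *addr) ? -ENOMEM : 0;
	}

	static void demo_rx_done_page(struct device *dev, dma_addr_t addr)
	{
		/* Unmap before the CPU touches the payload. */
		dma_unmap_page(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
	}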
--- a/hal/dp_ops/syn_gmac_dp/syn_dp_tx.c
+++ b/hal/dp_ops/syn_gmac_dp/syn_dp_tx.c
@@ -104,9 +104,7 @@ static inline struct dma_desc_tx *syn_dp
BUG_ON(!length);
#endif
- dma_addr = (dma_addr_t)virt_to_phys(frag_addr);
-
- dmac_clean_range_no_dsb(frag_addr, frag_addr + length);
+ dma_addr = dma_map_single(tx_info->dev, frag_addr, length, DMA_TO_DEVICE);
*total_length += length;
tx_desc = syn_dp_tx_set_desc_sg(tx_info, dma_addr, length, DESC_OWN_BY_DMA);
@@ -150,8 +148,7 @@ int syn_dp_tx_nr_frags(struct syn_dp_inf
/*
* Flush the dma for non-paged skb data
*/
- dma_addr = (dma_addr_t)virt_to_phys(skb->data);
- dmac_clean_range_no_dsb((void *)skb->data, (void *)(skb->data + length));
+ dma_addr = dma_map_single(tx_info->dev, skb->data, length, DMA_TO_DEVICE);
total_len = length;
@@ -256,12 +253,7 @@ int syn_dp_tx_frag_list(struct syn_dp_in
return NETDEV_TX_BUSY;
}
- dma_addr = (dma_addr_t)virt_to_phys(skb->data);
-
- /*
- * Flush the data area of the head skb
- */
- dmac_clean_range_no_dsb((void *)skb->data, (void *)(skb->data + length));
+ dma_addr = dma_map_single(tx_info->dev, skb->data, length, DMA_TO_DEVICE);
total_len = length;
@@ -290,9 +282,7 @@ int syn_dp_tx_frag_list(struct syn_dp_in
BUG_ON(!length);
#endif
- dma_addr = (dma_addr_t)virt_to_phys(iter_skb->data);
-
- dmac_clean_range_no_dsb((void *)iter_skb->data, (void *)(iter_skb->data + length));
+ dma_addr = dma_map_single(tx_info->dev, iter_skb->data, length, DMA_TO_DEVICE);
total_len += length;
@@ -445,6 +435,7 @@ int syn_dp_tx_complete(struct syn_dp_inf
break;
}
+ dma_unmap_single(tx_info->dev, desc->buffer1, desc->length, DMA_TO_DEVICE);
if (likely(status & DESC_TX_LAST)) {
tx_skb_index = syn_dp_tx_comp_index_get(tx_info);
@@ -571,9 +562,7 @@ int syn_dp_tx(struct syn_dp_info_tx *tx_
return NETDEV_TX_BUSY;
}
- dma_addr = (dma_addr_t)virt_to_phys(skb->data);
-
- dmac_clean_range_no_dsb((void *)skb->data, (void *)(skb->data + skb->len));
+ dma_addr = dma_map_single(tx_info->dev, skb->data, skb->len, DMA_TO_DEVICE);
/*
* Queue packet to the GMAC rings