From: Marcin Wojtas <mw@semihalf.com>
To: edk2-devel@lists.01.org
Cc: leif.lindholm@linaro.org, ard.biesheuvel@linaro.org,
nadavh@marvell.com, neta@marvell.com, kostap@marvell.com,
jinghua@marvell.com, agraf@suse.de, mw@semihalf.com,
jsd@semihalf.com, Joe Zhou <shjzhou@marvell.com>
Subject: [platforms: PATCH v2 3/7] Drivers/Net/Pp2Dxe: Support multiple ethernet ports simultaneously
Date: Fri, 1 Sep 2017 13:17:55 +0200
Message-ID: <1504264679-13613-4-git-send-email-mw@semihalf.com>
In-Reply-To: <1504264679-13613-1-git-send-email-mw@semihalf.com>
From: Joe Zhou <shjzhou@marvell.com>
In order for multiple ports to operate simultaneously, each
port should use its own resources (BM pool and queues) instead
of sharing them.
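For illustration only (not part of the change itself), the plain-C sketch
below mirrors how Pp2DxeInitialise() now carves the single preallocated
descriptor/buffer area into per-port regions. Every constant except
MVPP2_MAX_PORT is a placeholder value, not the driver's real one:

  #include <stdint.h>
  #include <stdio.h>

  /* From Pp2Dxe.h in this patch. */
  #define MVPP2_MAX_PORT      3
  /* Placeholder values, not the driver's real ones. */
  #define MVPP2_MAX_TXD       16
  #define MVPP2_AGGR_TXQ_SIZE 16
  #define MVPP2_MAX_RXD       16
  #define MVPP2_BM_SIZE       32
  #define RX_BUFFER_SIZE      2048
  #define TX_DESC_SIZE        32   /* stands in for sizeof(MVPP2_TX_DESC) */
  #define RX_DESC_SIZE        32   /* stands in for sizeof(MVPP2_RX_DESC) */

  int main(void)
  {
    uintptr_t Base = 0;  /* would be BufferSpace in the driver */
    /* The shared aggregated TXQ follows the per-port TX descriptor rings. */
    uintptr_t Aggr       = Base + MVPP2_MAX_TXD * MVPP2_MAX_PORT * TX_DESC_SIZE;
    uintptr_t RxDescBase = Aggr + MVPP2_AGGR_TXQ_SIZE * TX_DESC_SIZE;
    uintptr_t RxBufBase  = RxDescBase + MVPP2_MAX_RXD * MVPP2_MAX_PORT * RX_DESC_SIZE;

    for (int Port = 0; Port < MVPP2_MAX_PORT; Port++) {
      uintptr_t TxDescs   = Base + Port * MVPP2_MAX_TXD * TX_DESC_SIZE;
      uintptr_t RxDescs   = RxDescBase + Port * MVPP2_MAX_RXD * RX_DESC_SIZE;
      uintptr_t RxBuffers = RxBufBase + Port * MVPP2_BM_SIZE * RX_BUFFER_SIZE;
      printf("port %d: TxDescs @ 0x%lx, RxDescs @ 0x%lx, RxBuffers @ 0x%lx\n",
             Port, (unsigned long)TxDescs, (unsigned long)RxDescs,
             (unsigned long)RxBuffers);
    }
    printf("shared AggrTxDescs @ 0x%lx\n", (unsigned long)Aggr);
    return 0;
  }

Running it prints one region set per port: each port gets disjoint TX/RX
descriptor rings and RX buffers, while the aggregated TXQ stays shared.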
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Joe Zhou <shjzhou@marvell.com>
Signed-off-by: Marcin Wojtas <mw@semihalf.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
Platform/Marvell/Drivers/Net/Pp2Dxe/Mvpp2Lib.c | 2 +-
Platform/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.c | 110 ++++++++++++++++---------
Platform/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.h | 10 ++-
3 files changed, 77 insertions(+), 45 deletions(-)
diff --git a/Platform/Marvell/Drivers/Net/Pp2Dxe/Mvpp2Lib.c b/Platform/Marvell/Drivers/Net/Pp2Dxe/Mvpp2Lib.c
index 27ae6b8..53154db 100644
--- a/Platform/Marvell/Drivers/Net/Pp2Dxe/Mvpp2Lib.c
+++ b/Platform/Marvell/Drivers/Net/Pp2Dxe/Mvpp2Lib.c
@@ -2747,7 +2747,7 @@ Mvpp2BmStop (
UINT32 Val, i;
for (i = 0; i < MVPP2_BM_SIZE; i++) {
- Mvpp2Read (Priv, MVPP2_BM_PHY_ALLOC_REG(0));
+ Mvpp2Read (Priv, MVPP2_BM_PHY_ALLOC_REG(Pool));
}
Val = Mvpp2Read (Priv, MVPP2_BM_POOL_CTRL_REG(Pool));
diff --git a/Platform/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.c b/Platform/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.c
index bdaf1a0..42cf0f9 100644
--- a/Platform/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.c
+++ b/Platform/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.c
@@ -189,32 +189,43 @@ Pp2DxeBmPoolInit (
Mvpp2BmIrqClear(Mvpp2Shared, Index);
}
- Mvpp2Shared->BmPools = AllocateZeroPool (sizeof(MVPP2_BMS_POOL));
+ for (Index = 0; Index < MVPP2_MAX_PORT; Index++) {
+ Mvpp2Shared->BmPools[Index] = AllocateZeroPool (sizeof(MVPP2_BMS_POOL));
- if (Mvpp2Shared->BmPools == NULL) {
- return EFI_OUT_OF_RESOURCES;
- }
+ if (Mvpp2Shared->BmPools[Index] == NULL) {
+ Status = EFI_OUT_OF_RESOURCES;
+ goto FreePools;
+ }
- Status = DmaAllocateAlignedBuffer (EfiBootServicesData,
- EFI_SIZE_TO_PAGES (PoolSize),
- MVPP2_BM_POOL_PTR_ALIGN,
- (VOID **)&PoolAddr);
- if (EFI_ERROR (Status)) {
- goto FreePools;
- }
+ Status = DmaAllocateAlignedBuffer (EfiBootServicesData,
+ EFI_SIZE_TO_PAGES (PoolSize),
+ MVPP2_BM_POOL_PTR_ALIGN,
+ (VOID **)&PoolAddr);
+ if (EFI_ERROR (Status)) {
+ goto FreeBmPools;
+ }
- ZeroMem (PoolAddr, PoolSize);
+ ZeroMem (PoolAddr, PoolSize);
- Mvpp2Shared->BmPools->Id = MVPP2_BM_POOL;
- Mvpp2Shared->BmPools->VirtAddr = (UINT32 *)PoolAddr;
- Mvpp2Shared->BmPools->PhysAddr = (UINTN)PoolAddr;
+ Mvpp2Shared->BmPools[Index]->Id = Index;
+ Mvpp2Shared->BmPools[Index]->VirtAddr = (UINT32 *)PoolAddr;
+ Mvpp2Shared->BmPools[Index]->PhysAddr = (UINTN)PoolAddr;
- Mvpp2BmPoolHwCreate(Mvpp2Shared, Mvpp2Shared->BmPools, MVPP2_BM_SIZE);
+ Mvpp2BmPoolHwCreate(Mvpp2Shared, Mvpp2Shared->BmPools[Index], MVPP2_BM_SIZE);
+ }
return EFI_SUCCESS;
+FreeBmPools:
+ FreePool (Mvpp2Shared->BmPools[Index]);
FreePools:
- FreePool (Mvpp2Shared->BmPools);
+ while (Index-- > 0) {
+ DmaFreeBuffer (
+ EFI_SIZE_TO_PAGES (PoolSize),
+ Mvpp2Shared->BmPools[Index]->VirtAddr
+ );
+ FreePool (Mvpp2Shared->BmPools[Index]);
+ }
return Status;
}
@@ -226,22 +237,24 @@ Pp2DxeBmStart (
)
{
UINT8 *Buff, *BuffPhys;
- INTN Index;
+ INTN Index, Pool;
ASSERT(BM_ALIGN >= sizeof(UINTN));
- Mvpp2BmPoolCtrl(Mvpp2Shared, MVPP2_BM_POOL, MVPP2_START);
- Mvpp2BmPoolBufsizeSet(Mvpp2Shared, Mvpp2Shared->BmPools, RX_BUFFER_SIZE);
+ for (Pool = 0; Pool < MVPP2_MAX_PORT; Pool++) {
+ Mvpp2BmPoolCtrl(Mvpp2Shared, Pool, MVPP2_START);
+ Mvpp2BmPoolBufsizeSet(Mvpp2Shared, Mvpp2Shared->BmPools[Pool], RX_BUFFER_SIZE);
- /* Fill BM pool with Buffers */
- for (Index = 0; Index < MVPP2_BM_SIZE; Index++) {
- Buff = (UINT8 *)(BufferLocation.RxBuffers + (Index * RX_BUFFER_SIZE));
- if (Buff == NULL) {
- return EFI_OUT_OF_RESOURCES;
- }
+ /* Fill BM pool with Buffers */
+ for (Index = 0; Index < MVPP2_BM_SIZE; Index++) {
+ Buff = (UINT8 *)(BufferLocation.RxBuffers[Pool] + (Index * RX_BUFFER_SIZE));
+ if (Buff == NULL) {
+ return EFI_OUT_OF_RESOURCES;
+ }
- BuffPhys = ALIGN_POINTER(Buff, BM_ALIGN);
- Mvpp2BmPoolPut(Mvpp2Shared, MVPP2_BM_POOL, (UINTN)BuffPhys, (UINTN)BuffPhys);
+ BuffPhys = ALIGN_POINTER(Buff, BM_ALIGN);
+ Mvpp2BmPoolPut(Mvpp2Shared, Pool, (UINTN)BuffPhys, (UINTN)BuffPhys);
+ }
}
return EFI_SUCCESS;
@@ -415,7 +428,7 @@ Pp2DxeLatePortInitialize (
}
/* Use preallocated area */
- Port->Txqs[0].Descs = BufferLocation.TxDescs;
+ Port->Txqs[0].Descs = BufferLocation.TxDescs[Port->Id];
for (Queue = 0; Queue < TxqNumber; Queue++) {
MVPP2_TX_QUEUE *Txq = &Port->Txqs[Queue];
@@ -431,7 +444,7 @@ Pp2DxeLatePortInitialize (
return EFI_OUT_OF_RESOURCES;
}
- Port->Rxqs[0].Descs = BufferLocation.RxDescs;
+ Port->Rxqs[0].Descs = BufferLocation.RxDescs[Port->Id];
for (Queue = 0; Queue < TxqNumber; Queue++) {
MVPP2_RX_QUEUE *Rxq = &Port->Rxqs[Queue];
@@ -465,8 +478,8 @@ Pp2DxeLateInitialize (
}
/* Attach pool to Rxq */
- Mvpp2RxqLongPoolSet(Port, 0, MVPP2_BM_POOL);
- Mvpp2RxqShortPoolSet(Port, 0, MVPP2_BM_POOL);
+ Mvpp2RxqLongPoolSet(Port, 0, Port->Id);
+ Mvpp2RxqShortPoolSet(Port, 0, Port->Id);
/*
* Mark this port being fully initialized,
@@ -654,9 +667,13 @@ Pp2DxeHalt (
PP2DXE_CONTEXT *Pp2Context = Context;
PP2DXE_PORT *Port = &Pp2Context->Port;
STATIC BOOLEAN CommonPartHalted = FALSE;
+ INTN Index;
if (!CommonPartHalted) {
- Mvpp2BmStop(Mvpp2Shared, MVPP2_BM_POOL);
+ for (Index = 0; Index < MVPP2_MAX_PORT; Index++) {
+ Mvpp2BmStop(Mvpp2Shared, Index);
+ }
+
CommonPartHalted = TRUE;
}
@@ -1188,13 +1205,26 @@ Pp2DxeInitialise (
ZeroMem (BufferSpace, BD_SPACE);
- BufferLocation.TxDescs = BufferSpace;
- BufferLocation.AggrTxDescs = (MVPP2_TX_DESC *)((UINTN)BufferSpace + MVPP2_MAX_TXD * sizeof(MVPP2_TX_DESC));
- BufferLocation.RxDescs = (MVPP2_RX_DESC *)((UINTN)BufferSpace +
- (MVPP2_MAX_TXD + MVPP2_AGGR_TXQ_SIZE) * sizeof(MVPP2_TX_DESC));
- BufferLocation.RxBuffers = (DmaAddrT)(BufferSpace +
- (MVPP2_MAX_TXD + MVPP2_AGGR_TXQ_SIZE) * sizeof(MVPP2_TX_DESC) +
- MVPP2_MAX_RXD * sizeof(MVPP2_RX_DESC));
+ for (Index = 0; Index < MVPP2_MAX_PORT; Index++) {
+ BufferLocation.TxDescs[Index] = (MVPP2_TX_DESC *)
+ (BufferSpace + Index * MVPP2_MAX_TXD * sizeof(MVPP2_TX_DESC));
+ }
+
+ BufferLocation.AggrTxDescs = (MVPP2_TX_DESC *)
+ ((UINTN)BufferSpace + MVPP2_MAX_TXD * MVPP2_MAX_PORT * sizeof(MVPP2_TX_DESC));
+
+ for (Index = 0; Index < MVPP2_MAX_PORT; Index++) {
+ BufferLocation.RxDescs[Index] = (MVPP2_RX_DESC *)
+ ((UINTN)BufferSpace + (MVPP2_MAX_TXD * MVPP2_MAX_PORT + MVPP2_AGGR_TXQ_SIZE) *
+ sizeof(MVPP2_TX_DESC) + Index * MVPP2_MAX_RXD * sizeof(MVPP2_RX_DESC));
+ }
+
+ for (Index = 0; Index < MVPP2_MAX_PORT; Index++) {
+ BufferLocation.RxBuffers[Index] = (DmaAddrT)
+ (BufferSpace + (MVPP2_MAX_TXD * MVPP2_MAX_PORT + MVPP2_AGGR_TXQ_SIZE) *
+ sizeof(MVPP2_TX_DESC) + MVPP2_MAX_RXD * MVPP2_MAX_PORT * sizeof(MVPP2_RX_DESC) +
+ Index * MVPP2_BM_SIZE * RX_BUFFER_SIZE);
+ }
/* Initialize HW */
Mvpp2AxiConfig(Mvpp2Shared);
diff --git a/Platform/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.h b/Platform/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.h
index 1e03a69..b85cff7 100644
--- a/Platform/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.h
+++ b/Platform/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.h
@@ -56,6 +56,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "Mvpp2LibHw.h"
+#define MVPP2_MAX_PORT 3
+
#define PP2DXE_SIGNATURE SIGNATURE_32('P', 'P', '2', 'D')
#define INSTANCE_FROM_SNP(a) CR((a), PP2DXE_CONTEXT, Snp, PP2DXE_SIGNATURE)
@@ -276,7 +278,7 @@ typedef struct {
MVPP2_TX_QUEUE *AggrTxqs;
/* BM pools */
- MVPP2_BMS_POOL *BmPools;
+ MVPP2_BMS_POOL *BmPools[MVPP2_MAX_PORT];
/* PRS shadow table */
MVPP2_PRS_SHADOW *PrsShadow;
@@ -330,10 +332,10 @@ struct Pp2DxePort {
/* Structure for preallocation for buffer */
typedef struct {
- MVPP2_TX_DESC *TxDescs;
+ MVPP2_TX_DESC *TxDescs[MVPP2_MAX_PORT];
MVPP2_TX_DESC *AggrTxDescs;
- MVPP2_RX_DESC *RxDescs;
- DmaAddrT RxBuffers;
+ MVPP2_RX_DESC *RxDescs[MVPP2_MAX_PORT];
+ DmaAddrT RxBuffers[MVPP2_MAX_PORT];
} BUFFER_LOCATION;
typedef struct {
--
1.8.3.1
Thread overview:
2017-09-01 11:17 [platforms: PATCH v2 0/7] Armada 70x0/80x0 network improvements Marcin Wojtas
2017-09-01 11:17 ` [platforms: PATCH v2 1/7] Drivers/Net/Pp2Dxe: Move registers' description to macros Marcin Wojtas
2017-09-01 11:17 ` [platforms: PATCH v2 2/7] Drivers/Net/Pp2Dxe: Add SFI support Marcin Wojtas
2017-09-01 11:17 ` Marcin Wojtas [this message]
2017-09-01 11:17 ` [platforms: PATCH v2 4/7] Drivers/Net/Pp2Dxe: Increase amount of ingress resources Marcin Wojtas
2017-09-01 11:17 ` [platforms: PATCH v2 5/7] Platforms/Marvell: Update ethernet ports types on A70x0 DB Marcin Wojtas
2017-09-01 11:17 ` [platforms: PATCH v2 6/7] Drivers/Net/Pp2Dxe: Move devices description to MvHwDescLib Marcin Wojtas
2017-09-01 11:17 ` [platforms: PATCH v2 7/7] Drivers/Net/Pp2Dxe: Enable using ports from different controllers Marcin Wojtas
2017-09-01 12:07 ` [platforms: PATCH v2 0/7] Armada 70x0/80x0 network improvements Ard Biesheuvel
2017-09-01 12:41 ` Marcin Wojtas