libchip: Reduce tx interrupts

Reduce the number of packet-transmitted interrupts by using the interrupt mechanism only
if we run out of DMA descriptors.
Under normal conditions, regaining DMA descriptors, mbufs and clusters is handled
via a counter.
This commit is contained in:
Ralf Kirchner
2014-04-10 17:06:14 +02:00
committed by Sebastian Huber
parent f28b8d4595
commit 1613a01bb0
4 changed files with 282 additions and 242 deletions

View File

@@ -147,7 +147,7 @@ static int dwmac1000_dma_init(
/* Mask interrupts by writing to CSR7 */ /* Mask interrupts by writing to CSR7 */
dwmac_core_enable_dma_irq_rx( self ); dwmac_core_enable_dma_irq_rx( self );
dwmac_core_enable_dma_irq_tx( self ); dwmac_core_enable_dma_irq_tx_default( self );
/* The base address of the RX/TX descriptor lists must be written into /* The base address of the RX/TX descriptor lists must be written into
* DMA CSR3 and CSR4, respectively. */ * DMA CSR3 and CSR4, respectively. */

View File

@@ -41,12 +41,17 @@
#define DWMAC_CORE_INTR_ENABLE_DEFAULT_MASK_TX \ #define DWMAC_CORE_INTR_ENABLE_DEFAULT_MASK_TX \
( \ ( \
DMAGRP_INTERRUPT_ENABLE_NIE \ DMAGRP_INTERRUPT_ENABLE_NIE \
| DMAGRP_INTERRUPT_ENABLE_TIE \
| DMAGRP_INTERRUPT_ENABLE_FBE \ | DMAGRP_INTERRUPT_ENABLE_FBE \
| DMAGRP_INTERRUPT_ENABLE_UNE \ | DMAGRP_INTERRUPT_ENABLE_UNE \
| DMAGRP_INTERRUPT_ENABLE_AIE \ | DMAGRP_INTERRUPT_ENABLE_AIE \
) )
#define DWMAC_CORE_INTR_ENABLE_ALL_MASK_TX \
( \
DWMAC_CORE_INTR_ENABLE_DEFAULT_MASK_TX \
| DMAGRP_INTERRUPT_ENABLE_TIE \
)
#define DWMAC_CORE_INTR_STATUS_DEFAULT_MASK_RX \ #define DWMAC_CORE_INTR_STATUS_DEFAULT_MASK_RX \
( \ ( \
DMAGRP_STATUS_NIS \ DMAGRP_STATUS_NIS \
@@ -67,19 +72,29 @@ void dwmac_core_dma_restart_tx( dwmac_common_context *self )
self->dmagrp->transmit_poll_demand = 1; self->dmagrp->transmit_poll_demand = 1;
} }
void dwmac_core_enable_dma_irq_tx( dwmac_common_context *self ) void dwmac_core_enable_dma_irq_tx_default( dwmac_common_context *self )
{ {
self->dmagrp->interrupt_enable |= DWMAC_CORE_INTR_ENABLE_DEFAULT_MASK_TX; self->dmagrp->interrupt_enable |= DWMAC_CORE_INTR_ENABLE_DEFAULT_MASK_TX;
} }
void dwmac_core_enable_dma_irq_tx_transmitted( dwmac_common_context *self )
{
self->dmagrp->interrupt_enable |= DMAGRP_INTERRUPT_ENABLE_TIE;
}
void dwmac_core_enable_dma_irq_rx( dwmac_common_context *self ) void dwmac_core_enable_dma_irq_rx( dwmac_common_context *self )
{ {
self->dmagrp->interrupt_enable |= DWMAC_CORE_INTR_ENABLE_DEFAULT_MASK_RX; self->dmagrp->interrupt_enable |= DWMAC_CORE_INTR_ENABLE_DEFAULT_MASK_RX;
} }
void dwmac_core_disable_dma_irq_tx( dwmac_common_context *self ) void dwmac_core_disable_dma_irq_tx_all( dwmac_common_context *self )
{ {
self->dmagrp->interrupt_enable &= ~DWMAC_CORE_INTR_ENABLE_DEFAULT_MASK_TX; self->dmagrp->interrupt_enable &= ~DWMAC_CORE_INTR_ENABLE_ALL_MASK_TX;
}
void dwmac_core_disable_dma_irq_tx_transmitted( dwmac_common_context *self )
{
self->dmagrp->interrupt_enable &= ~DMAGRP_INTERRUPT_ENABLE_TIE;
} }
void dwmac_core_reset_dma_irq_status_tx( dwmac_common_context *self ) void dwmac_core_reset_dma_irq_status_tx( dwmac_common_context *self )

View File

@@ -55,9 +55,13 @@ void dwmac_core_dma_restart_tx( dwmac_common_context *self );
void dwmac_core_enable_dma_irq_rx( dwmac_common_context *self ); void dwmac_core_enable_dma_irq_rx( dwmac_common_context *self );
void dwmac_core_enable_dma_irq_tx( dwmac_common_context *self ); void dwmac_core_enable_dma_irq_tx_default( dwmac_common_context *self );
void dwmac_core_disable_dma_irq_tx( dwmac_common_context *self ); void dwmac_core_enable_dma_irq_tx_transmitted( dwmac_common_context *self );
void dwmac_core_disable_dma_irq_tx_all( dwmac_common_context *self );
void dwmac_core_disable_dma_irq_tx_transmitted( dwmac_common_context *self );
void dwmac_core_disable_dma_irq_rx( dwmac_common_context *self ); void dwmac_core_disable_dma_irq_rx( dwmac_common_context *self );

View File

@@ -262,9 +262,14 @@ static inline void dwmac_enable_irq_rx( dwmac_common_context *self )
dwmac_core_enable_dma_irq_rx( self ); dwmac_core_enable_dma_irq_rx( self );
} }
static inline void dwmac_enable_irq_tx( dwmac_common_context *self ) static inline void dwmac_enable_irq_tx_default( dwmac_common_context *self )
{ {
dwmac_core_enable_dma_irq_tx( self ); dwmac_core_enable_dma_irq_tx_default( self );
}
static inline void dwmac_enable_irq_tx_transmitted( dwmac_common_context *self )
{
dwmac_core_enable_dma_irq_tx_transmitted( self );
} }
static inline void dwmac_disable_irq_rx( dwmac_common_context *self ) static inline void dwmac_disable_irq_rx( dwmac_common_context *self )
@@ -272,16 +277,20 @@ static inline void dwmac_disable_irq_rx( dwmac_common_context *self )
dwmac_core_disable_dma_irq_rx( self ); dwmac_core_disable_dma_irq_rx( self );
} }
static inline void dwmac_diable_irq_tx( dwmac_common_context *self ) static inline void dwmac_disable_irq_tx_all( dwmac_common_context *self )
{ {
dwmac_core_disable_dma_irq_tx( self ); dwmac_core_disable_dma_irq_tx_all( self );
}
static inline void dwmac_disable_irq_tx_transmitted ( dwmac_common_context *self )
{
dwmac_core_disable_dma_irq_tx_transmitted( self );
} }
static void dwmac_control_request_complete( const dwmac_common_context *self ) static void dwmac_control_request_complete( const dwmac_common_context *self )
{ {
rtems_status_code sc = rtems_event_transient_send( self->task_id_control ); rtems_status_code sc = rtems_event_transient_send( self->task_id_control );
assert( sc == RTEMS_SUCCESSFUL ); assert( sc == RTEMS_SUCCESSFUL );
} }
@@ -1342,8 +1351,10 @@ static void dwmac_task_tx( void *arg )
); );
assert( sc == RTEMS_SUCCESSFUL ); assert( sc == RTEMS_SUCCESSFUL );
while( events != 0 ) {
/* Handle a status change of the ethernet PHY */ /* Handle a status change of the ethernet PHY */
if ( ( events & DWMAC_COMMON_EVENT_TX_PHY_STATUS_CHANGE ) != 0 ) { if ( ( events & DWMAC_COMMON_EVENT_TX_PHY_STATUS_CHANGE ) != 0 ) {
events &= ~DWMAC_COMMON_EVENT_TX_PHY_STATUS_CHANGE;
dwmac_common_phy_status_counts *counts = dwmac_common_phy_status_counts *counts =
&self->stats.phy_status_counts; &self->stats.phy_status_counts;
dwmac_phy_event phy_events = 0; dwmac_phy_event phy_events = 0;
@@ -1380,7 +1391,7 @@ static void dwmac_task_tx( void *arg )
/* Stop the task */ /* Stop the task */
if ( ( events & DWMAC_COMMON_EVENT_TASK_STOP ) != 0 ) { if ( ( events & DWMAC_COMMON_EVENT_TASK_STOP ) != 0 ) {
dwmac_core_dma_stop_tx( self ); dwmac_core_dma_stop_tx( self );
dwmac_diable_irq_tx( self ); dwmac_disable_irq_tx_all( self );
/* Release all tx mbufs at the risk of data loss */ /* Release all tx mbufs at the risk of data loss */
( DESC_OPS->release_tx_bufs )( self ); ( DESC_OPS->release_tx_bufs )( self );
@@ -1389,11 +1400,13 @@ static void dwmac_task_tx( void *arg )
/* Return to events reception without re-enabling the interrupts /* Return to events reception without re-enabling the interrupts
* The task needs a re-initialization to resume work */ * The task needs a re-initialization to resume work */
events = 0;
continue; continue;
} }
/* Initialize / Re-initialize transmission handling */ /* Initialize / Re-initialize transmission handling */
if ( ( events & DWMAC_COMMON_EVENT_TASK_INIT ) != 0 ) { if ( ( events & DWMAC_COMMON_EVENT_TASK_INIT ) != 0 ) {
events &= ~DWMAC_COMMON_EVENT_TASK_INIT;
(void) dwmac_update_autonegotiation_params( self ); (void) dwmac_update_autonegotiation_params( self );
dwmac_core_dma_stop_tx( self ); dwmac_core_dma_stop_tx( self );
( DESC_OPS->release_tx_bufs )( self ); ( DESC_OPS->release_tx_bufs )( self );
@@ -1411,12 +1424,14 @@ static void dwmac_task_tx( void *arg )
/* Clear our interrupt statuses */ /* Clear our interrupt statuses */
dwmac_core_reset_dma_irq_status_tx( self ); dwmac_core_reset_dma_irq_status_tx( self );
dwmac_enable_irq_tx_default( self );
dwmac_control_request_complete( self ); dwmac_control_request_complete( self );
} }
/* Try to bump up the dma threshold due to a failure */ /* Try to bump up the dma threshold due to a failure */
if ( ( events & DWMAC_COMMON_EVENT_TX_BUMP_UP_DMA_THRESHOLD ) != 0 ) { if ( ( events & DWMAC_COMMON_EVENT_TX_BUMP_UP_DMA_THRESHOLD ) != 0 ) {
events &= ~DWMAC_COMMON_EVENT_TX_BUMP_UP_DMA_THRESHOLD;
if ( self->dma_threshold_control if ( self->dma_threshold_control
!= DWMAC_COMMON_DMA_MODE_STORE_AND_FORWARD != DWMAC_COMMON_DMA_MODE_STORE_AND_FORWARD
&& self->dma_threshold_control <= 256 ) { && self->dma_threshold_control <= 256 ) {
@@ -1431,13 +1446,17 @@ static void dwmac_task_tx( void *arg )
/* Handle one or more transmitted frames */ /* Handle one or more transmitted frames */
if ( ( events & DWMAC_COMMON_EVENT_TX_FRAME_TRANSMITTED ) != 0 ) { if ( ( events & DWMAC_COMMON_EVENT_TX_FRAME_TRANSMITTED ) != 0 ) {
events &= ~DWMAC_COMMON_EVENT_TX_FRAME_TRANSMITTED;
dwmac_common_tx_frame_counts *counts = &self->stats.frame_counts_tx; dwmac_common_tx_frame_counts *counts = &self->stats.frame_counts_tx;
dwmac_disable_irq_tx_transmitted( self );
/* Next index to be transmitted */ /* Next index to be transmitted */
unsigned int idx_transmitted_next = dwmac_increment( unsigned int idx_transmitted_next = dwmac_increment(
idx_transmitted, INDEX_MAX ); idx_transmitted, INDEX_MAX );
/* Free consumed fragments */ /* Free consumed fragments */
if( idx_release != idx_transmitted_next
&& ( DESC_OPS->am_i_tx_owner )( self, idx_release ) ) {
while ( idx_release != idx_transmitted_next while ( idx_release != idx_transmitted_next
&& ( DESC_OPS->am_i_tx_owner )( self, idx_release ) ) { && ( DESC_OPS->am_i_tx_owner )( self, idx_release ) ) {
/* Status handling per packet */ /* Status handling per packet */
@@ -1469,20 +1488,23 @@ static void dwmac_task_tx( void *arg )
idx_release = dwmac_increment( idx_release = dwmac_increment(
idx_release, INDEX_MAX ); idx_release, INDEX_MAX );
} }
/* Clear transmit interrupt status */
self->dmagrp->status = DMAGRP_STATUS_TI;
if ( ( self->arpcom.ac_if.if_flags & IFF_OACTIVE ) != 0 ) { if ( ( self->arpcom.ac_if.if_flags & IFF_OACTIVE ) != 0 ) {
/* The last transmission has been incomplete /* The last transmission has been incomplete
* (for example due to lack of DMA descriptors). * (for example due to lack of DMA descriptors).
* Continue it now! */ * Continue it now! */
events |= DWMAC_COMMON_EVENT_TX_TRANSMIT_FRAME; events |= DWMAC_COMMON_EVENT_TX_TRANSMIT_FRAME;
} }
} else {
/* Clear transmit interrupt status */
self->dmagrp->status = DMAGRP_STATUS_TI;
/* Get re-activated by the next interrupt */
dwmac_enable_irq_tx_transmitted( self );
}
} }
/* There are one or more frames to be transmitted. */ /* There are one or more frames to be transmitted. */
if ( ( events & DWMAC_COMMON_EVENT_TX_TRANSMIT_FRAME ) != 0 ) { if ( ( events & DWMAC_COMMON_EVENT_TX_TRANSMIT_FRAME ) != 0 ) {
events &= ~DWMAC_COMMON_EVENT_TX_TRANSMIT_FRAME;
dwmac_common_tx_frame_counts *counts = &self->stats.frame_counts_tx; dwmac_common_tx_frame_counts *counts = &self->stats.frame_counts_tx;
if ( p_m != NULL ) { if ( p_m != NULL ) {
@@ -1590,15 +1612,14 @@ static void dwmac_task_tx( void *arg )
* We will continue sending once descriptors * We will continue sending once descriptors
* have been freed due to a transmitted interrupt */ * have been freed due to a transmitted interrupt */
DWMAC_PRINT_DBG( "tx: transmission incomplete\n" ); DWMAC_PRINT_DBG( "tx: transmission incomplete\n" );
events |= DWMAC_COMMON_EVENT_TX_FRAME_TRANSMITTED;
} }
/* TODO: Add handling */ /* TODO: Add handling */
} }
DWMAC_PRINT_DBG( "tx: enable transmit interrupts\n" ); DWMAC_PRINT_DBG( "tx: enable transmit interrupts\n" );
}
/* Re-enable transmit interrupts */
dwmac_enable_irq_tx( self );
} }
} }