@@ -5,6 +5,12 @@
 *
 * Inspiration for this driver came from the original ADMtek 2.4
 * driver, Copyright ADMtek Inc.
 *
 * NAPI extensions by Thomas Langer (Thomas.Langer@infineon.com)
 * and Friedrich Beckmann (Friedrich.Beckmann@infineon.com), 2007
 *
 * TODO: Add support for high prio queues (currently disabled)
 *
 */
#include <linux/autoconf.h>
#include <linux/module.h>
@@ -48,6 +54,9 @@ static unsigned char bw_matrix[SW_DEVS] = {
static int adm5120_nrdevs;

static struct net_device *adm5120_devs[SW_DEVS];
/* Lookup table port -> device */
static struct net_device *adm5120_port[SW_DEVS];

static struct adm5120_dma
        adm5120_dma_txh_v[ADM5120_DMA_TXH] __attribute__((aligned(16))),
        adm5120_dma_txl_v[ADM5120_DMA_TXL] __attribute__((aligned(16))),
@@ -62,13 +71,9 @@ static struct sk_buff
        *adm5120_skb_rxl[ADM5120_DMA_RXL],
        *adm5120_skb_txh[ADM5120_DMA_TXH],
        *adm5120_skb_txl[ADM5120_DMA_TXL];
static int adm5120_rxhi = 0;
static int adm5120_rxli = 0;
/* We don't use high priority tx for now */
/*static int adm5120_txhi = 0;*/
static int adm5120_txli = 0;
static int adm5120_txhit = 0;
static int adm5120_txlit = 0;
/*static int adm5120_txhi = 0;*/
static int adm5120_if_open = 0;

static inline void adm5120_set_reg(unsigned int reg, unsigned long val)
@@ -81,47 +86,50 @@ static inline unsigned long adm5120_get_reg(unsigned int reg)
        return *(volatile unsigned long*)(SW_BASE+reg);
}

static inline void adm5120_rxfixup(struct adm5120_dma *dma,
        struct sk_buff **skbl, int num)
static inline void adm5120_rx_dma_update(struct adm5120_dma *dma,
        struct sk_buff *skb, int end)
{
        int i;

        /* Resubmit the entire ring */
        for (i=0; i<num; i++) {
                dma[i].status = 0;
                dma[i].cntl = 0;
                dma[i].len = ADM5120_DMA_RXSIZE;
                dma[i].data = ADM5120_DMA_ADDR(skbl[i]->data) |
                        ADM5120_DMA_OWN | (i==num-1 ? ADM5120_DMA_RINGEND : 0);
        }
        dma->status = 0;
        dma->cntl = 0;
        dma->len = ADM5120_DMA_RXSIZE;
        dma->data = ADM5120_DMA_ADDR(skb->data) |
                ADM5120_DMA_OWN | (end ? ADM5120_DMA_RINGEND : 0);
}

static inline void adm5120_rx(struct adm5120_dma *dma, struct sk_buff **skbl,
        int *index, int num)
static int adm5120_rx(struct net_device *dev,int *budget)
{
        struct sk_buff *skb, *skbn;
        struct adm5120_sw *priv;
        struct net_device *dev;
        int port, vlan, len;

        while (!(dma[*index].data & ADM5120_DMA_OWN)) {
                port = (dma[*index].status & ADM5120_DMA_PORTID);
        struct net_device *cdev;
        struct adm5120_dma *dma;
        int port, len, quota;

        quota = min(dev->quota, *budget);
        dma = &adm5120_dma_rxl[adm5120_rxli];
        while (!(dma->data & ADM5120_DMA_OWN) && quota) {
                port = (dma->status & ADM5120_DMA_PORTID);
                port >>= ADM5120_DMA_PORTSHIFT;
                for (vlan = 0; vlan < adm5120_nrdevs; vlan++) {
                        if ((1<<port) & vlan_matrix[vlan])
                                break;
                cdev = adm5120_port[port];
                if (cdev != dev) { /* The current packet belongs to a different device */
                        if ((cdev==NULL) || !netif_running(cdev)) {
                                /* discard (update with old skb) */
                                skb = skbn = NULL;
                                goto rx_skip;
                        }
                        else {
                                netif_rx_schedule(cdev);/* Start polling next device */
                                return 1; /* return 1 -> More packets to process */
                        }

                }
                if (vlan == adm5120_nrdevs)
                        vlan = 0;
                dev = adm5120_devs[vlan];
                skb = skbl[*index];
                len = (dma[*index].status & ADM5120_DMA_LEN);
                skb = adm5120_skb_rxl[adm5120_rxli];
                len = (dma->status & ADM5120_DMA_LEN);
                len >>= ADM5120_DMA_LENSHIFT;
                len -= ETH_FCS;

                priv = netdev_priv(dev);
                if (len <= 0 || len > ADM5120_DMA_RXSIZE ||
                        dma[*index].status & ADM5120_DMA_FCSERR) {
                        dma->status & ADM5120_DMA_FCSERR) {
                        priv->stats.rx_errors++;
                        skbn = NULL;
                } else {
@@ -133,69 +141,80 @@ static inline void adm5120_rx(struct adm5120_dma *dma, struct sk_buff **skbl,
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                                dev->last_rx = jiffies;
                                priv->stats.rx_packets++;
                                priv->stats.rx_bytes+=len;
                                skb_reserve(skbn, 2);
                                skbl[*index] = skbn;
                                priv->stats.rx_bytes += len;
                                skb_reserve(skbn, NET_IP_ALIGN);
                                adm5120_skb_rxl[adm5120_rxli] = skbn;
                        } else {
                                printk(KERN_INFO "%s recycling!\n", dev->name);
                        }
                }
rx_skip:
                adm5120_rx_dma_update(&adm5120_dma_rxl[adm5120_rxli],
                        adm5120_skb_rxl[adm5120_rxli],
                        (ADM5120_DMA_RXL-1==adm5120_rxli));
                if (ADM5120_DMA_RXL == ++adm5120_rxli)
                        adm5120_rxli = 0;
                dma = &adm5120_dma_rxl[adm5120_rxli];
                if (skbn){
                        netif_receive_skb(skb);
                        dev->quota--;
                        (*budget)--;
                        quota--;
                }
        } /* while */
        /* If there are still packets to process, return 1 */
        if (quota){
                /* No more packets to process, so disable the polling and reenable the interrupts */
                netif_rx_complete(dev);
                adm5120_set_reg(ADM5120_INT_MASK,
                        adm5120_get_reg(ADM5120_INT_MASK) &
                        ~(ADM5120_INT_RXL|ADM5120_INT_LFULL));
                return 0;

                dma[*index].status = 0;
                dma[*index].cntl = 0;
                dma[*index].len = ADM5120_DMA_RXSIZE;
                dma[*index].data = ADM5120_DMA_ADDR(skbl[*index]->data) |
                        ADM5120_DMA_OWN |
                        (num-1==*index ? ADM5120_DMA_RINGEND : 0);
                if (num == ++*index)
                        *index = 0;
                if (skbn)
                        netif_rx(skb);
        }
}

static inline void adm5120_tx(struct adm5120_dma *dma, struct sk_buff **skbl,
        int *index, int num)
{
        while((dma[*index].data & ADM5120_DMA_OWN) == 0 && skbl[*index]) {
                dev_kfree_skb_irq(skbl[*index]);
                skbl[*index] = NULL;
                if (++*index == num)
                        *index = 0;
        }
        return 1;
}

static irqreturn_t adm5120_sw_irq(int irq, void *dev_id)
{
        unsigned long intreg;

        adm5120_set_reg(ADM5120_INT_MASK,
                adm5120_get_reg(ADM5120_INT_MASK) | ADM5120_INTHANDLE);

        intreg = adm5120_get_reg(ADM5120_INT_ST);
        adm5120_set_reg(ADM5120_INT_ST, intreg);

        if (intreg & ADM5120_INT_RXH)
                adm5120_rx(adm5120_dma_rxh, adm5120_skb_rxh, &adm5120_rxhi,
                        ADM5120_DMA_RXH);
        if (intreg & ADM5120_INT_HFULL)
                adm5120_rxfixup(adm5120_dma_rxh, adm5120_skb_rxh,
                        ADM5120_DMA_RXH);
        if (intreg & ADM5120_INT_RXL)
                adm5120_rx(adm5120_dma_rxl, adm5120_skb_rxl, &adm5120_rxli,
                        ADM5120_DMA_RXL);
        if (intreg & ADM5120_INT_LFULL)
                adm5120_rxfixup(adm5120_dma_rxl, adm5120_skb_rxl,
                        ADM5120_DMA_RXL);
        if (intreg & ADM5120_INT_TXH)
                adm5120_tx(adm5120_dma_txh, adm5120_skb_txh, &adm5120_txhit,
                        ADM5120_DMA_TXH);
        if (intreg & ADM5120_INT_TXL)
                adm5120_tx(adm5120_dma_txl, adm5120_skb_txl, &adm5120_txlit,
                        ADM5120_DMA_TXL);

        adm5120_set_reg(ADM5120_INT_MASK,
                adm5120_get_reg(ADM5120_INT_MASK) & ~ADM5120_INTHANDLE);
        unsigned long intreg, intmask;
        int port;
        struct net_device *dev;

        intmask = adm5120_get_reg(ADM5120_INT_MASK); /* Remember interrupt mask */
        adm5120_set_reg(ADM5120_INT_MASK, ADM5120_INTMASKALL); /* Disable interrupts */

        intreg = adm5120_get_reg(ADM5120_INT_ST); /* Read interrupt status */
        adm5120_set_reg(ADM5120_INT_ST, intreg); /* Clear interrupt status */

        /* In NAPI operation the interrupts are disabled and the polling mechanism
         * is activated. The interrupts are finally enabled again in the polling routine.
         */
        if (intreg & (ADM5120_INT_RXL|ADM5120_INT_LFULL)) {
                /* check rx buffer for port number */
                port = adm5120_dma_rxl[adm5120_rxli].status & ADM5120_DMA_PORTID;
                port >>= ADM5120_DMA_PORTSHIFT;
                dev = adm5120_port[port];
                if ((dev==NULL) || !netif_running(dev)) {
                        /* discard (update with old skb) */
                        adm5120_rx_dma_update(&adm5120_dma_rxl[adm5120_rxli],
                                adm5120_skb_rxl[adm5120_rxli],
                                (ADM5120_DMA_RXL-1==adm5120_rxli));
                        if (ADM5120_DMA_RXL == ++adm5120_rxli)
                                adm5120_rxli = 0;
                }
                else {
                        netif_rx_schedule(dev);
                        intmask |= (ADM5120_INT_RXL|ADM5120_INT_LFULL); /* Disable RX interrupts */
                }
        }
#ifdef CONFIG_DEBUG
        if (intreg & ~(intmask))
                printk(KERN_INFO "adm5120sw: IRQ 0x%08X unexpected!\n", (unsigned int)(intreg & ~(intmask)));
#endif

        adm5120_set_reg(ADM5120_INT_MASK, intmask);

        return IRQ_HANDLED;
}
@@ -203,11 +222,20 @@ static irqreturn_t adm5120_sw_irq(int irq, void *dev_id)
static void adm5120_set_vlan(char *matrix)
{
        unsigned long val;
        int vlan_port, port;

        val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
        adm5120_set_reg(ADM5120_VLAN_GI, val);
        val = matrix[4] + (matrix[5]<<8);
        adm5120_set_reg(ADM5120_VLAN_GII, val);
        /* Now set/update the port vs. device lookup table */
        for (port=0; port<SW_DEVS; port++) {
                for (vlan_port=0; vlan_port<SW_DEVS && !(matrix[vlan_port] & (0x00000001 << port)); vlan_port++);
                if (vlan_port <SW_DEVS)
                        adm5120_port[port] = adm5120_devs[vlan_port];
                else
                        adm5120_port[port] = NULL;
        }
}

static void adm5120_set_bw(char *matrix)
@@ -225,70 +253,143 @@ static void adm5120_set_bw(char *matrix)
        else
                adm5120_set_reg(ADM5120_BW_CTL1, val & ~0x8000000);

        printk(KERN_DEBUG "D: ctl0 0x%x, ctl1 0x%x\n",
        printk(KERN_DEBUG "D: ctl0 0x%lx, ctl1 0x%lx\n",
                adm5120_get_reg(ADM5120_BW_CTL0),
                adm5120_get_reg(ADM5120_BW_CTL1));
}

static int adm5120_sw_open(struct net_device *dev)
{
        if (!adm5120_if_open++)
                adm5120_set_reg(ADM5120_INT_MASK,
                        adm5120_get_reg(ADM5120_INT_MASK) & ~ADM5120_INTHANDLE);
        unsigned long val;
        int i;

        netif_start_queue(dev);
        if (!adm5120_if_open++) {
                /* enable interrupts on first open */
                adm5120_set_reg(ADM5120_INT_MASK,
                        adm5120_get_reg(ADM5120_INT_MASK) &
                        ~(ADM5120_INT_RXL|ADM5120_INT_LFULL));
        }
        /* enable (additional) port */
        val = adm5120_get_reg(ADM5120_PORT_CONF0);
        for (i=0; i<SW_DEVS; i++) {
                if (dev == adm5120_devs[i])
                        val &= ~vlan_matrix[i];
        }
        adm5120_set_reg(ADM5120_PORT_CONF0, val);
        return 0;
}

static int adm5120_sw_stop(struct net_device *dev)
{
        unsigned long val;
        int i;

        if (!--adm5120_if_open) {
                adm5120_set_reg(ADM5120_INT_MASK, ADM5120_INTMASKALL);
        }
        /* disable port if not assigned to other devices */
        val = adm5120_get_reg(ADM5120_PORT_CONF0) | ADM5120_PORTDISALL;
        for (i=0; i<SW_DEVS; i++) {
                if ((dev != adm5120_devs[i]) && netif_running(adm5120_devs[i]))
                        val &= ~vlan_matrix[i];
        }
        adm5120_set_reg(ADM5120_PORT_CONF0, val);
        netif_stop_queue(dev);
        if (!--adm5120_if_open)
                adm5120_set_reg(ADM5120_INT_MASK,
                        adm5120_get_reg(ADM5120_INT_MASK) | ADM5120_INTMASKALL);
        return 0;
}

static int adm5120_sw_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct adm5120_dma *dma = adm5120_dma_txl;
        struct adm5120_dma *dma;
        struct sk_buff **skbl = adm5120_skb_txl;
        struct adm5120_sw *priv = netdev_priv(dev);
        int *index = &adm5120_txli;
        int num = ADM5120_DMA_TXL;
        int trigger = ADM5120_SEND_TRIG_L;
        unsigned long data;

        dev->trans_start = jiffies;
        if (dma[*index].data & ADM5120_DMA_OWN) {
        dma = &adm5120_dma_txl[adm5120_txli];
        if (dma->data & ADM5120_DMA_OWN) {
                /* We want to write a packet but the TX queue is still
                 * occupied by the DMA. We are faster than the DMA... */
                dev_kfree_skb(skb);
                priv->stats.tx_dropped++;
                return 0;
        }

        dma[*index].data = ADM5120_DMA_ADDR(skb->data) | ADM5120_DMA_OWN;
        if (*index == num-1)
                dma[*index].data |= ADM5120_DMA_RINGEND;
        dma[*index].status =
        data = ADM5120_DMA_ADDR(skb->data) | ADM5120_DMA_OWN;
        if (adm5120_txli == ADM5120_DMA_TXL-1)
                data |= ADM5120_DMA_RINGEND;
        dma->status =
                ((skb->len<ETH_ZLEN?ETH_ZLEN:skb->len) << ADM5120_DMA_LENSHIFT) |
                (0x1 << priv->port);
        dma[*index].len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

        dma->len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
        priv->stats.tx_packets++;
        priv->stats.tx_bytes += skb->len;
        skbl[*index]=skb;

        if (++*index == num)
                *index = 0;
        adm5120_set_reg(ADM5120_SEND_TRIG, trigger);
        /* free old skbs here instead of tx completion interrupt:
         * will hold some more memory allocated but reduces interrupts */
        if (skbl[adm5120_txli]){
                dev_kfree_skb(skbl[adm5120_txli]);
        }
        skbl[adm5120_txli] = skb;

        dma->data = data; /* Here we enable the buffer for the TX DMA machine */
        adm5120_set_reg(ADM5120_SEND_TRIG, ADM5120_SEND_TRIG_L);
        if (++adm5120_txli == ADM5120_DMA_TXL)
                adm5120_txli = 0;
        return 0;
}

static void adm5120_tx_timeout(struct net_device *dev)
{
        netif_wake_queue(dev);
        printk(KERN_INFO "%s: TX timeout\n",dev->name);
}

static struct net_device_stats *adm5120_sw_stats(struct net_device *dev)
{
        struct adm5120_sw *priv = netdev_priv(dev);
        int portmask;
        unsigned long adm5120_cpup_conf_reg;

        portmask = vlan_matrix[priv->port] & 0x3f;

        adm5120_cpup_conf_reg = adm5120_get_reg(ADM5120_CPUP_CONF);

        if (dev->flags & IFF_PROMISC)
                adm5120_cpup_conf_reg &= ~((portmask << ADM5120_DISUNSHIFT) & ADM5120_DISUNALL);
        else
                adm5120_cpup_conf_reg |= (portmask << ADM5120_DISUNSHIFT);

        if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI || dev->mc_count)
                adm5120_cpup_conf_reg &= ~((portmask << ADM5120_DISMCSHIFT) & ADM5120_DISMCALL);
        else
                adm5120_cpup_conf_reg |= (portmask << ADM5120_DISMCSHIFT);

        /* If there is any port configured to be in promiscuous mode, then the */
        /* Bridge Test Mode has to be activated. This will result in */
        /* transporting also packets learned in another VLAN to be forwarded */
        /* to the CPU. */
        /* The difficult scenario is when we want to build a bridge on the CPU.*/
        /* Assume we have port0 and the CPU port in VLAN0 and port1 and the */
        /* CPU port in VLAN1. Now we build a bridge on the CPU between */
        /* VLAN0 and VLAN1. Both ports of the VLANs are set in promisc mode. */
        /* Now assume a packet with ethernet source address 99 enters port 0 */
        /* It will be forwarded to the CPU because it is unknown. Then the */
        /* bridge in the CPU will send it to VLAN1 and it goes out at port 1. */
        /* When now a packet with ethernet destination address 99 comes in at */
        /* port 1 in VLAN1, then the switch has learned that this address is */
        /* located at port 0 in VLAN0. Therefore the switch will drop */
        /* this packet. In order to avoid this and to send the packet still */
        /* to the CPU, the Bridge Test Mode has to be activated. */

        /* Check if there is any vlan in promisc mode. */
        if (~adm5120_cpup_conf_reg & ADM5120_DISUNALL)
                adm5120_cpup_conf_reg |= ADM5120_BTM; /* Set the BTM */
        else
                adm5120_cpup_conf_reg &= ~ADM5120_BTM; /* Disable the BTM */

        adm5120_set_reg(ADM5120_CPUP_CONF,adm5120_cpup_conf_reg);

        return &((struct adm5120_sw *)netdev_priv(dev))->stats;
}
@@ -390,33 +491,29 @@ static int adm5120_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        return 0;
}

static void adm5120_dma_tx_init(struct adm5120_dma *dma, struct sk_buff **skb,
static void adm5120_dma_tx_init(struct adm5120_dma *dma, struct sk_buff **skbl,
        int num)
{
        memset(dma, 0, sizeof(struct adm5120_dma)*num);
        dma[num-1].data |= ADM5120_DMA_RINGEND;
        memset(skb, 0, sizeof(struct skb*)*num);
        memset(skbl, 0, sizeof(struct skb*)*num);
}

static void adm5120_dma_rx_init(struct adm5120_dma *dma, struct sk_buff **skb,
static void adm5120_dma_rx_init(struct adm5120_dma *dma, struct sk_buff **skbl,
        int num)
{
        int i;

        memset(dma, 0, sizeof(struct adm5120_dma)*num);
        for (i=0; i<num; i++) {
                skb[i] = dev_alloc_skb(ADM5120_DMA_RXSIZE+16);
                if (!skb[i]) {
                skbl[i] = dev_alloc_skb(ADM5120_DMA_RXSIZE+16);
                if (!skbl[i]) {
                        i=num;
                        break;
                }
                skb_reserve(skb[i], 2);
                dma[i].data = ADM5120_DMA_ADDR(skb[i]->data) | ADM5120_DMA_OWN;
                dma[i].cntl = 0;
                dma[i].len = ADM5120_DMA_RXSIZE;
                dma[i].status = 0;
                skb_reserve(skbl[i], NET_IP_ALIGN);
                adm5120_rx_dma_update(&dma[i], skbl[i], (num-1==i));
        }
        dma[i-1].data |= ADM5120_DMA_RINGEND;
}

static int __init adm5120_sw_init(void)
@@ -433,7 +530,7 @@ static int __init adm5120_sw_init(void)
        adm5120_set_reg(ADM5120_CPUP_CONF,
                ADM5120_DISCCPUPORT | ADM5120_CRC_PADDING |
                ADM5120_DISUNALL | ADM5120_DISMCALL);
        adm5120_set_reg(ADM5120_PORT_CONF0, ADM5120_ENMC | ADM5120_ENBP);
        adm5120_set_reg(ADM5120_PORT_CONF0, ADM5120_ENMC | ADM5120_ENBP | ADM5120_PORTDISALL);

        adm5120_set_reg(ADM5120_PHY_CNTL2, adm5120_get_reg(ADM5120_PHY_CNTL2) |
                ADM5120_AUTONEG | ADM5120_NORMAL | ADM5120_AUTOMDIX);
@@ -457,8 +554,6 @@ static int __init adm5120_sw_init(void)
        adm5120_set_reg(ADM5120_RECEIVE_HBADDR, KSEG1ADDR(adm5120_dma_rxh));
        adm5120_set_reg(ADM5120_RECEIVE_LBADDR, KSEG1ADDR(adm5120_dma_rxl));

        adm5120_set_vlan(vlan_matrix);

        for (i=0; i<adm5120_nrdevs; i++) {
                adm5120_devs[i] = alloc_etherdev(sizeof(struct adm5120_sw));
                if (!adm5120_devs[i]) {
@@ -481,6 +576,8 @@ static int __init adm5120_sw_init(void)
                dev->tx_timeout = adm5120_tx_timeout;
                dev->watchdog_timeo = ETH_TX_TIMEOUT;
                dev->set_mac_address = adm5120_sw_set_mac_address;
                dev->poll = adm5120_rx;
                dev->weight = 64;

                memcpy(dev->dev_addr, adm5120_eth_macs[i], 6);
                adm5120_write_mac(dev);
@@ -491,6 +588,9 @@ static int __init adm5120_sw_init(void)
                }
                printk(KERN_INFO "%s: ADM5120 switch port%d\n", dev->name, i);
        }
        /* setup vlan/port mapping after devs are filled up */
        adm5120_set_vlan(vlan_matrix);

        adm5120_set_reg(ADM5120_CPUP_CONF,
                ADM5120_CRC_PADDING | ADM5120_DISUNALL | ADM5120_DISMCALL);
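
Note on the NAPI interface assumed above: the patch is written against the old dev->poll/dev->weight API (kernels before the 2.6.24 napi_struct rework), where the poll routine receives the device and a budget pointer. The sketch below is illustrative only and is not part of the driver; the example_* helpers are hypothetical placeholders, while netif_receive_skb(), netif_rx_complete() and the return-value convention are the real kernel contract that adm5120_rx() follows.

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helpers standing in for the driver's ring handling. */
extern int example_ring_has_packet(void);
extern struct sk_buff *example_dequeue(void);
extern void example_unmask_rx_irq(void);

/* Illustrative poll routine for the pre-2.6.24 NAPI model used by this patch:
 * consume at most min(dev->quota, *budget) packets, account for them in both
 * counters, and only once the ring is drained call netif_rx_complete() and
 * re-enable the RX interrupt sources. */
static int example_poll(struct net_device *dev, int *budget)
{
        int quota = min(dev->quota, *budget);
        int done = 0;

        while (quota && example_ring_has_packet()) {
                netif_receive_skb(example_dequeue());
                done++;
                quota--;
        }
        dev->quota -= done;
        *budget -= done;

        if (quota) {
                /* Ring drained before the quota was used up: leave polled
                 * mode and re-enable the RX interrupts (driver specific). */
                netif_rx_complete(dev);
                example_unmask_rx_irq();
                return 0;       /* done, polling stops */
        }
        return 1;               /* budget exhausted, stay on the poll list */
}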