Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/staging/sxg/sxg.c        | 583
-rw-r--r-- | drivers/staging/sxg/sxg.h        | 313
-rw-r--r-- | drivers/staging/sxg/sxg_os.h     |  84
-rw-r--r-- | drivers/staging/sxg/sxgdbg.h     |  76
-rw-r--r-- | drivers/staging/sxg/sxghif.h     | 408
-rw-r--r-- | drivers/staging/sxg/sxghw.h      | 761
-rw-r--r-- | drivers/staging/sxg/sxgphycode.h |   5
7 files changed, 1264 insertions, 966 deletions
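The hunks below are almost entirely checkpatch-style cleanup: function prototypes, calls, and string literals that run past 80 columns are wrapped at the argument list, and trailing comments are moved onto their own line above the code they describe. A minimal sketch of that transformation follows, using a hypothetical prototype rather than one taken from the patch:

    /* Before: a declaration over 80 columns with a trailing comment. */
    static void example_complete(struct adapter_t *adapter, void *Buffer, dma_addr_t PhysicalAddress, u32 Length);	/* completion handler */

    /* After: continuation lines wrapped at the argument list, and the
     * comment placed above the declaration.
     */
    /* completion handler */
    static void example_complete(struct adapter_t *adapter, void *Buffer,
    			     dma_addr_t PhysicalAddress, u32 Length);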
diff --git a/drivers/staging/sxg/sxg.c b/drivers/staging/sxg/sxg.c index 815bfd2365e..679961f4233 100644 --- a/drivers/staging/sxg/sxg.c +++ b/drivers/staging/sxg/sxg.c @@ -82,28 +82,30 @@ static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size, enum sxg_buffer_type BufferType); -static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter, void *RcvBlock, - dma_addr_t PhysicalAddress, - u32 Length); +static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter, + void *RcvBlock, + dma_addr_t PhysicalAddress, + u32 Length); static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter, struct sxg_scatter_gather *SxgSgl, dma_addr_t PhysicalAddress, u32 Length); static void sxg_mcast_init_crc32(void); - static int sxg_entry_open(struct net_device *dev); static int sxg_entry_halt(struct net_device *dev); static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev); static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb); -static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *SxgSgl); +static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, + struct sxg_scatter_gather *SxgSgl); static void sxg_handle_interrupt(struct adapter_t *adapter); static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId); static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId); static void sxg_complete_slow_send(struct adapter_t *adapter); -static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_event *Event); +static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, + struct sxg_event *Event); static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus); static bool sxg_mac_filter(struct adapter_t *adapter, struct ether_header *EtherHdr, ushort length); @@ -129,7 +131,8 @@ static int sxg_initialize_link(struct adapter_t *adapter); static int sxg_phy_init(struct adapter_t *adapter); static void sxg_link_event(struct adapter_t *adapter); static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter); -static void sxg_link_state(struct adapter_t *adapter, enum SXG_LINK_STATE LinkState); +static void sxg_link_state(struct adapter_t *adapter, + enum SXG_LINK_STATE LinkState); static int sxg_write_mdio_reg(struct adapter_t *adapter, u32 DevAddr, u32 RegAddr, u32 Value); static int sxg_read_mdio_reg(struct adapter_t *adapter, @@ -137,7 +140,8 @@ static int sxg_read_mdio_reg(struct adapter_t *adapter, static unsigned int sxg_first_init = 1; static char *sxg_banner = - "Alacritech SLIC Technology(tm) Server and Storage 10Gbe Accelerator (Non-Accelerated)\n"; + "Alacritech SLIC Technology(tm) Server and Storage \ + 10Gbe Accelerator (Non-Accelerated)\n"; static int sxg_debug = 1; static int debug = -1; @@ -152,8 +156,10 @@ static u32 dynamic_intagg = 0; #define DRV_NAME "sxg" #define DRV_VERSION "1.0.1" #define DRV_AUTHOR "Alacritech, Inc. Engineering" -#define DRV_DESCRIPTION "Alacritech SLIC Techonology(tm) Non-Accelerated 10Gbe Driver" -#define DRV_COPYRIGHT "Copyright 2000-2008 Alacritech, Inc. All rights reserved." +#define DRV_DESCRIPTION \ + "Alacritech SLIC Techonology(tm) Non-Accelerated 10Gbe Driver" +#define DRV_COPYRIGHT \ + "Copyright 2000-2008 Alacritech, Inc. All rights reserved." 
MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION(DRV_DESCRIPTION); @@ -236,14 +242,15 @@ static struct sxg_trace_buffer *SxgTraceBuffer = NULL; * Return * int */ -static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL UcodeSel) +static bool sxg_download_microcode(struct adapter_t *adapter, + enum SXG_UCODE_SEL UcodeSel) { struct sxg_hw_regs *HwRegs = adapter->HwRegs; u32 Section; u32 ThisSectionSize; u32 *Instruction = NULL; u32 BaseAddress, AddressOffset, Address; -/* u32 Failure; */ + /* u32 Failure; */ u32 ValueRead; u32 i; u32 numSections = 0; @@ -289,7 +296,8 @@ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL break; } BaseAddress = sectionStart[Section]; - ThisSectionSize = sectionSize[Section] / 12; /* Size in instructions */ + /* Size in instructions */ + ThisSectionSize = sectionSize[Section] / 12; for (AddressOffset = 0; AddressOffset < ThisSectionSize; AddressOffset++) { Address = BaseAddress + AddressOffset; @@ -333,7 +341,8 @@ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL break; } BaseAddress = sectionStart[Section]; - ThisSectionSize = sectionSize[Section] / 12; /* Size in instructions */ + /* Size in instructions */ + ThisSectionSize = sectionSize[Section] / 12; for (AddressOffset = 0; AddressOffset < ThisSectionSize; AddressOffset++) { Address = BaseAddress + AddressOffset; @@ -346,7 +355,7 @@ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL DBG_ERROR("sxg: %s PARITY ERROR\n", __func__); - return (FALSE); /* Parity error */ + return FALSE; /* Parity error */ } ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address); /* Read the instruction back and compare */ @@ -354,19 +363,19 @@ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL if (ValueRead != *Instruction) { DBG_ERROR("sxg: %s MISCOMPARE LOW\n", __func__); - return (FALSE); /* Miscompare */ + return FALSE; /* Miscompare */ } READ_REG(HwRegs->UcodeDataMiddle, ValueRead); if (ValueRead != *(Instruction + 1)) { DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n", __func__); - return (FALSE); /* Miscompare */ + return FALSE; /* Miscompare */ } READ_REG(HwRegs->UcodeDataHigh, ValueRead); if (ValueRead != *(Instruction + 2)) { DBG_ERROR("sxg: %s MISCOMPARE HIGH\n", __func__); - return (FALSE); /* Miscompare */ + return FALSE; /* Miscompare */ } /* Advance 3 u32S to start of next instruction */ Instruction += 3; @@ -391,7 +400,7 @@ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL if (i == 10000) { DBG_ERROR("sxg: %s TIMEOUT\n", __func__); - return (FALSE); /* Timeout */ + return FALSE; /* Timeout */ } /* * Now write the LoadSync register. This is used to @@ -413,18 +422,17 @@ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL * sxg_allocate_resources - Allocate memory and locks * * Arguments - - * adapter - A pointer to our adapter structure + * adapter - A pointer to our adapter structure * - * Return - * int + * Return - int */ static int sxg_allocate_resources(struct adapter_t *adapter) { int status; u32 i; u32 RssIds, IsrCount; -/* struct sxg_xmt_ring *XmtRing; */ -/* struct sxg_rcv_ring *RcvRing; */ + /* struct sxg_xmt_ring *XmtRing; */ + /* struct sxg_rcv_ring *RcvRing; */ DBG_ERROR("%s ENTER\n", __func__); @@ -470,14 +478,15 @@ static int sxg_allocate_resources(struct adapter_t *adapter) (unsigned int)(sizeof(struct sxg_xmt_ring) * 1)); /* - * Start with big items first - receive and transmit rings. 
At the moment - * I'm going to keep the ring size fixed and adjust the - * TCBs if we fail. Later we might consider reducing the ring size as well.. + * Start with big items first - receive and transmit rings. + * At the moment I'm going to keep the ring size fixed and + * adjust the TCBs if we fail. Later we might + * consider reducing the ring size as well.. */ adapter->XmtRings = pci_alloc_consistent(adapter->pcidev, - sizeof(struct sxg_xmt_ring) * - 1, - &adapter->PXmtRings); + sizeof(struct sxg_xmt_ring) * + 1, + &adapter->PXmtRings); DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings); if (!adapter->XmtRings) { @@ -533,15 +542,15 @@ static int sxg_allocate_resources(struct adapter_t *adapter) * a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK */ for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS; - i += SXG_RCV_DESCRIPTORS_PER_BLOCK) { - sxg_allocate_buffer_memory(adapter, - SXG_RCV_BLOCK_SIZE(adapter-> - ReceiveBufferSize), + i += SXG_RCV_DESCRIPTORS_PER_BLOCK) { + sxg_allocate_buffer_memory(adapter, + SXG_RCV_BLOCK_SIZE(adapter->ReceiveBufferSize), SXG_BUFFER_TYPE_RCV); } /* - * NBL resource allocation can fail in the 'AllocateComplete' routine, which - * doesn't return status. Make sure we got the number of buffers we requested + * NBL resource allocation can fail in the 'AllocateComplete' routine, + * which doesn't return status. Make sure we got the number of buffers + * we requested */ if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) { SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6", @@ -555,12 +564,13 @@ static int sxg_allocate_resources(struct adapter_t *adapter) /* Allocate event queues. */ adapter->EventRings = pci_alloc_consistent(adapter->pcidev, - sizeof(struct sxg_event_ring) * - RssIds, - &adapter->PEventRings); + sizeof(struct sxg_event_ring) * + RssIds, + &adapter->PEventRings); if (!adapter->EventRings) { - /* Caller will call SxgFreeAdapter to clean up above allocations */ + /* Caller will call SxgFreeAdapter to clean up above + * allocations */ SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8", adapter, SXG_MAX_ENTRIES, 0, 0); status = STATUS_RESOURCES; @@ -573,7 +583,8 @@ static int sxg_allocate_resources(struct adapter_t *adapter) adapter->Isr = pci_alloc_consistent(adapter->pcidev, IsrCount, &adapter->PIsr); if (!adapter->Isr) { - /* Caller will call SxgFreeAdapter to clean up above allocations */ + /* Caller will call SxgFreeAdapter to clean up above + * allocations */ SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9", adapter, SXG_MAX_ENTRIES, 0, 0); status = STATUS_RESOURCES; @@ -620,12 +631,19 @@ static void sxg_config_pci(struct pci_dev *pcidev) pci_read_config_word(pcidev, PCI_COMMAND, &pci_command); DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __func__, pci_command); /* Set the command register */ - new_command = pci_command | (PCI_COMMAND_MEMORY | /* Memory Space Enable */ - PCI_COMMAND_MASTER | /* Bus master enable */ - PCI_COMMAND_INVALIDATE | /* Memory write and invalidate */ - PCI_COMMAND_PARITY | /* Parity error response */ - PCI_COMMAND_SERR | /* System ERR */ - PCI_COMMAND_FAST_BACK); /* Fast back-to-back */ + new_command = pci_command | ( + /* Memory Space Enable */ + PCI_COMMAND_MEMORY | + /* Bus master enable */ + PCI_COMMAND_MASTER | + /* Memory write and invalidate */ + PCI_COMMAND_INVALIDATE | + /* Parity error response */ + PCI_COMMAND_PARITY | + /* System ERR */ + PCI_COMMAND_SERR | + /* Fast back-to-back */ + PCI_COMMAND_FAST_BACK); if (pci_command != new_command) { 
DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n", __func__, pci_command, new_command); @@ -633,7 +651,8 @@ static void sxg_config_pci(struct pci_dev *pcidev) } } -static unsigned char temp_mac_address[6] = { 0x00, 0xab, 0xcd, 0xef, 0x12, 0x69 }; +static unsigned char temp_mac_address[6] = + { 0x00, 0xab, 0xcd, 0xef, 0x12, 0x69 }; /* * sxg_read_config * @adapter : Pointer to the adapter structure for the card @@ -647,13 +666,15 @@ static inline int sxg_read_config(struct adapter_t *adapter) unsigned long status; unsigned long i; - data = pci_alloc_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), &p_addr); + data = pci_alloc_consistent(adapter->pcidev, + sizeof(struct sw_cfg_data), &p_addr); if(!data) { /* * We cant get even this much memory. Raise a hell * Get out of here */ - printk(KERN_ERR"%s : Could not allocate memory for reading EEPROM\n", __FUNCTION__); + printk(KERN_ERR"%s : Could not allocate memory for reading \ + EEPROM\n", __FUNCTION__); return -ENOMEM; } @@ -668,22 +689,26 @@ static inline int sxg_read_config(struct adapter_t *adapter) } switch(status) { - case SXG_CFG_LOAD_EEPROM: /*Config read from EEPROM succeeded */ - case SXG_CFG_LOAD_FLASH: /* onfig read from Flash succeeded */ - /* Copy the MAC address to adapter structure */ - memcpy(temp_mac_address, data->MacAddr[0].MacAddr, 6); - /* TODO: We are not doing the remaining part : FRU, etc */ - break; - - case SXG_CFG_TIMEOUT: - case SXG_CFG_LOAD_INVALID: - case SXG_CFG_LOAD_ERROR: - default: /* Fix default handler later */ - printk(KERN_WARNING"%s : We could not read the config word." - "Status = %ld\n", __FUNCTION__, status); - break; + /* Config read from EEPROM succeeded */ + case SXG_CFG_LOAD_EEPROM: + /* Config read from Flash succeeded */ + case SXG_CFG_LOAD_FLASH: + /* Copy the MAC address to adapter structure */ + memcpy(temp_mac_address, data->MacAddr[0].MacAddr, 6); + /* TODO: We are not doing the remaining part : FRU, + * etc + */ + break; + case SXG_CFG_TIMEOUT: + case SXG_CFG_LOAD_INVALID: + case SXG_CFG_LOAD_ERROR: + default: /* Fix default handler later */ + printk(KERN_WARNING"%s : We could not read the config \ + word. 
Status = %ld\n", __FUNCTION__, status); + break; } - pci_free_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), data, p_addr); + pci_free_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), data, + p_addr); if (adapter->netdev) { memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6); memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6); @@ -782,9 +807,9 @@ static int sxg_entry_probe(struct pci_dev *pcidev, goto err_out_free_mmio_region; } - DBG_ERROR - ("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] len[%lx], IRQ %d.\n", - __func__, memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq); + DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] \ + len[%lx], IRQ %d.\n", __func__, memmapped_ioaddr, mmio_start, + mmio_len, pcidev->irq); adapter->HwRegs = (void *)memmapped_ioaddr; adapter->base_addr = memmapped_ioaddr; @@ -832,12 +857,12 @@ static int sxg_entry_probe(struct pci_dev *pcidev, adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE; } -/* - * status = SXG_READ_EEPROM(adapter); - * if (!status) { - * goto sxg_init_bad; - * } - */ + /* + * status = SXG_READ_EEPROM(adapter); + * if (!status) { + * goto sxg_init_bad; + * } + */ DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__); sxg_config_pci(pcidev); @@ -894,7 +919,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev, netdev->set_multicast_list = sxg_mcast_set_list; strcpy(netdev->name, "eth%d"); -/* strcpy(netdev->name, pci_name(pcidev)); */ + /* strcpy(netdev->name, pci_name(pcidev)); */ if ((err = register_netdev(netdev))) { DBG_ERROR("Cannot register net device, aborting. %s\n", netdev->name); @@ -902,14 +927,15 @@ static int sxg_entry_probe(struct pci_dev *pcidev, } DBG_ERROR - ("sxg: %s addr 0x%lx, irq %d, MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n", + ("sxg: %s addr 0x%lx, irq %d, MAC addr \ + %02X:%02X:%02X:%02X:%02X:%02X\n", netdev->name, netdev->base_addr, pcidev->irq, netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); -/*sxg_init_bad: */ + /* sxg_init_bad: */ ASSERT(status == FALSE); -/* sxg_free_adapter(adapter); */ + /* sxg_free_adapter(adapter); */ DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__, status, jiffies, smp_processor_id()); @@ -991,18 +1017,17 @@ static void sxg_enable_interrupt(struct adapter_t *adapter) * sxg_isr - Process an line-based interrupt * * Arguments: - * Context - Our adapter structure + * Context - Our adapter structure * QueueDefault - Output parameter to queue to default CPU - * TargetCpus - Output bitmap to schedule DPC's + * TargetCpus - Output bitmap to schedule DPC's * - * Return Value: - * TRUE if our interrupt + * Return Value: TRUE if our interrupt */ static irqreturn_t sxg_isr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *) dev_id; struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); -/* u32 CpuMask = 0, i; */ + /* u32 CpuMask = 0, i; */ adapter->Stats.NumInts++; if (adapter->Isr[0] == 0) { @@ -1023,7 +1048,7 @@ static irqreturn_t sxg_isr(int irq, void *dev_id) adapter->IsrCopy[0] = adapter->Isr[0]; adapter->Isr[0] = 0; WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE); -/* ASSERT(adapter->IsrDpcsPending == 0); */ + /* ASSERT(adapter->IsrDpcsPending == 0); */ #if XXXTODO /* RSS Stuff */ /* * If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then @@ -1033,7 +1058,8 @@ static irqreturn_t sxg_isr(int irq, void *dev_id) for (i = 0; i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount; 
i++) { - struct sxg_event_ring *EventRing = &adapter->EventRings[i]; + struct sxg_event_ring *EventRing = + &adapter->EventRings[i]; struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[i]]; unsigned char Cpu = @@ -1044,7 +1070,8 @@ static irqreturn_t sxg_isr(int irq, void *dev_id) } } } - /* Now, either schedule the CPUs specified by the CpuMask, + /* + * Now, either schedule the CPUs specified by the CpuMask, * or queue default */ if (CpuMask) { @@ -1065,7 +1092,7 @@ int debug_inthandler = 0; static void sxg_handle_interrupt(struct adapter_t *adapter) { -/* unsigned char RssId = 0; */ + /* unsigned char RssId = 0; */ u32 NewIsr; if (++debug_inthandler < 20) { @@ -1154,10 +1181,12 @@ static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId) } /* Card crash */ if (Isr & SXG_ISR_DEAD) { - /* Set aside the crash info and set the adapter state to RESET */ - adapter->CrashCpu = - (unsigned char)((Isr & SXG_ISR_CPU) >> - SXG_ISR_CPU_SHIFT); + /* + * Set aside the crash info and set the adapter state + * to RESET + */ + adapter->CrashCpu = (unsigned char) + ((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT); adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH); adapter->Dead = TRUE; DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__, @@ -1188,7 +1217,8 @@ static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId) } /* Dump */ if (Isr & SXG_ISR_UPC) { - ASSERT(adapter->DumpCmdRunning); /* Maybe change when debug is added.. */ + /* Maybe change when debug is added.. */ + ASSERT(adapter->DumpCmdRunning); adapter->DumpCmdRunning = FALSE; } /* Link event */ @@ -1199,8 +1229,8 @@ static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId) if (Isr & SXG_ISR_BREAK) { /* * At the moment AGDB isn't written to support interactive - * debug sessions. When it is, this interrupt will be used - * to signal AGDB that it has hit a breakpoint. For now, ASSERT. + * debug sessions. When it is, this interrupt will be used to + * signal AGDB that it has hit a breakpoint. For now, ASSERT. */ ASSERT(0); } @@ -1261,7 +1291,8 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId) adapter->NextEvent); switch (Event->Code) { case EVENT_CODE_BUFFERS: - ASSERT(!(Event->CommandIndex & 0xFF00)); /* struct sxg_ring_info Head & Tail == unsigned char */ + /* struct sxg_ring_info Head & Tail == unsigned char */ + ASSERT(!(Event->CommandIndex & 0xFF00)); sxg_complete_descriptor_blocks(adapter, Event->CommandIndex); break; @@ -1279,8 +1310,9 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId) * capability of an indication list. */ #else -/* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */ - rx_bytes = Event->Length; /* (rcvbuf->length & IRHDDR_FLEN_MSK); */ + /* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */ + /* (rcvbuf->length & IRHDDR_FLEN_MSK); */ + rx_bytes = Event->Length; adapter->stats.rx_packets++; adapter->stats.rx_bytes += rx_bytes; #if SXG_OFFLOAD_IP_CHECKSUM @@ -1294,7 +1326,7 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId) default: DBG_ERROR("%s: ERROR Invalid EventCode %d\n", __func__, Event->Code); -/* ASSERT(0); */ + /* ASSERT(0); */ } /* * See if we need to restock card receive buffers. @@ -1404,7 +1436,8 @@ static void sxg_complete_slow_send(struct adapter_t *adapter) case SXG_SGL_DUMB: { struct sk_buff *skb; - struct sxg_scatter_gather *SxgSgl = (struct sxg_scatter_gather *)ContextType; + struct sxg_scatter_gather *SxgSgl = + (struct sxg_scatter_gather *)ContextType; /* Dumb-nic send. 
Command context is the dumb-nic SGL */ skb = (struct sk_buff *)ContextType; @@ -1415,13 +1448,14 @@ static void sxg_complete_slow_send(struct adapter_t *adapter) 0, 0); printk("ASK:sxg_complete_slow_send: freeing an skb [%p]\n", skb); ASSERT(adapter->Stats.XmtQLen); - adapter->Stats.XmtQLen--; /* within XmtZeroLock */ + adapter->Stats.XmtQLen--;/* within XmtZeroLock */ adapter->Stats.XmtOk++; /* - * Now drop the lock and complete the send back to - * Microsoft. We need to drop the lock because - * Microsoft can come back with a chimney send, which - * results in a double trip in SxgTcpOuput + * Now drop the lock and complete the send + * back to Microsoft. We need to drop the lock + * because Microsoft can come back with a + * chimney send, which results in a double trip + * in SxgTcpOuput */ spin_unlock(&adapter->XmtZeroLock); SXG_COMPLETE_DUMB_SEND(adapter, skb); @@ -1445,10 +1479,10 @@ static void sxg_complete_slow_send(struct adapter_t *adapter) * adapter - A pointer to our adapter structure * Event - Receive event * - * Return - * skb + * Return - skb */ -static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_event *Event) +static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, + struct sxg_event *Event) { struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr; struct sk_buff *Packet; @@ -1476,12 +1510,15 @@ static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_ev goto drop; } - printk("ASK:sxg_slow_receive: event host handle %p\n", RcvDataBufferHdr); + printk("ASK:sxg_slow_receive:event host handle %p\n", RcvDataBufferHdr); data = SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr); for (i = 0; i < 32; i++) dptr += sprintf(dptr, "%02x ", (unsigned)data[i]); printk("ASK:sxg_slow_receive: data %s\n", dstr); - /* memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr), RcvDataBufferHdr->VirtualAddress, Event->Length);*/ + /* + * memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr), + * RcvDataBufferHdr->VirtualAddress, Event->Length); + */ /* Change buffer state to UPSTREAM */ RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; @@ -1498,8 +1535,9 @@ static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_ev } #if XXXTODO /* VLAN stuff */ /* If there's a VLAN tag, extract it and validate it */ - if (((struct ether_header*) (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))-> - EtherType == ETHERTYPE_VLAN) { + if (((struct ether_header *) + (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->EtherType + == ETHERTYPE_VLAN) { if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) != STATUS_SUCCESS) { SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, @@ -1526,7 +1564,8 @@ static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_ev Packet = RcvDataBufferHdr->SxgDumbRcvPacket; SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event); Packet->protocol = eth_type_trans(Packet, adapter->netdev); - printk("ASK:sxg_slow_receive: protocol %x\n", (unsigned) Packet->protocol); + printk("ASK:sxg_slow_receive: protocol %x\n", + (unsigned) Packet->protocol); SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv", RcvDataBufferHdr, Packet, Event->Length, 0); @@ -1554,8 +1593,7 @@ static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_ev * adapter - Adapter structure * ErrorStatus - 4-byte receive error status * - * Return Value: - * None + * Return Value : None */ static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus) { @@ -1633,11 +1671,10 @@ static void 
sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus) * pether - Ethernet header * length - Frame length * - * Return Value: - * TRUE if the frame is to be allowed + * Return Value : TRUE if the frame is to be allowed */ -static bool sxg_mac_filter(struct adapter_t *adapter, struct ether_header *EtherHdr, - ushort length) +static bool sxg_mac_filter(struct adapter_t *adapter, + struct ether_header *EtherHdr, ushort length) { bool EqualAddr; @@ -1927,17 +1964,19 @@ static int sxg_entry_halt(struct net_device *dev) static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { ASSERT(rq); -/* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev); */ +/* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev);*/ switch (cmd) { case SIOCSLICSETINTAGG: { -/* struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); */ + /* struct adapter_t *adapter = (struct adapter_t *) + * netdev_priv(dev); + */ u32 data[7]; u32 intagg; if (copy_from_user(data, rq->ifr_data, 28)) { - DBG_ERROR - ("copy_from_user FAILED getting initial params\n"); + DBG_ERROR("copy_from_user FAILED getting \ + initial params\n"); return -EFAULT; } intagg = data[0]; @@ -1948,7 +1987,7 @@ static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } default: -/* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */ + /* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */ return -EOPNOTSUPP; } return 0; @@ -1960,8 +1999,8 @@ static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) * sxg_send_packets - Send a skb packet * * Arguments: - * skb - The packet to send - * dev - Our linux net device that refs our adapter + * skb - The packet to send + * dev - Our linux net device that refs our adapter * * Return: * 0 regardless of outcome XXXTODO refer to e1000 driver @@ -2013,7 +2052,7 @@ static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev) /* reject & complete all the packets if they cant be sent */ if (status != STATUS_SUCCESS) { #if XXXTODO -/* sxg_send_packets_fail(adapter, skb, status); */ + /* sxg_send_packets_fail(adapter, skb, status); */ #else SXG_DROP_DUMB_SEND(adapter, skb); adapter->stats.tx_dropped++; @@ -2035,8 +2074,7 @@ static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev) * adapter - Pointer to our adapter structure * skb - The packet to be sent * - * Return - - * STATUS of send + * Return - STATUS of send */ static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb) { @@ -2072,7 +2110,7 @@ static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb) /* Call the common sxg_dumb_sgl routine to complete the send. */ sxg_dumb_sgl(pSgl, SxgSgl); - /* Return success sxg_dumb_sgl (or something later) will complete it. */ + /* Return success sxg_dumb_sgl (or something later) will complete it.*/ return (STATUS_SUCCESS); } @@ -2086,7 +2124,8 @@ static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb) * Return Value: * None. 
*/ -static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *SxgSgl) +static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, + struct sxg_scatter_gather *SxgSgl) { struct adapter_t *adapter = SxgSgl->adapter; struct sk_buff *skb = SxgSgl->DumbPacket; @@ -2094,10 +2133,10 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *Sx struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0]; struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo; struct sxg_cmd *XmtCmd = NULL; -/* u32 Index = 0; */ + /* u32 Index = 0; */ u32 DataLength = skb->len; -/* unsigned int BufLen; */ -/* u32 SglOffset; */ + /* unsigned int BufLen; */ + /* u32 SglOffset; */ u64 phys_addr; unsigned char*data; int i; @@ -2167,8 +2206,7 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *Sx * Fill in the command * Copy out the first SGE to the command and adjust for offset */ - phys_addr = - pci_map_single(adapter->pcidev, skb->data, skb->len, + phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len, PCI_DMA_TODEVICE); memset(XmtCmd, '\0', sizeof(*XmtCmd)); XmtCmd->Buffer.FirstSgeAddress = phys_addr; @@ -2210,8 +2248,8 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *Sx adapter->Stats.XmtErrors++; SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal", pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail); - - SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); /* SxgSgl->DumbPacket is the skb */ + /* SxgSgl->DumbPacket is the skb */ + SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); } /* @@ -2277,15 +2315,24 @@ static int sxg_initialize_link(struct adapter_t *adapter) WRITE_REG(HwRegs->MacConfig0, 0, TRUE); /* Configure MAC */ - WRITE_REG(HwRegs->MacConfig1, (AXGMAC_CFG1_XMT_PAUSE | /* Allow sending of pause */ - AXGMAC_CFG1_XMT_EN | /* Enable XMT */ - AXGMAC_CFG1_RCV_PAUSE | /* Enable detection of pause */ - AXGMAC_CFG1_RCV_EN | /* Enable receive */ - AXGMAC_CFG1_SHORT_ASSERT | /* short frame detection */ - AXGMAC_CFG1_CHECK_LEN | /* Verify frame length */ - AXGMAC_CFG1_GEN_FCS | /* Generate FCS */ - AXGMAC_CFG1_PAD_64), /* Pad frames to 64 bytes */ - TRUE); + WRITE_REG(HwRegs->MacConfig1, ( + /* Allow sending of pause */ + AXGMAC_CFG1_XMT_PAUSE | + /* Enable XMT */ + AXGMAC_CFG1_XMT_EN | + /* Enable detection of pause */ + AXGMAC_CFG1_RCV_PAUSE | + /* Enable receive */ + AXGMAC_CFG1_RCV_EN | + /* short frame detection */ + AXGMAC_CFG1_SHORT_ASSERT | + /* Verify frame length */ + AXGMAC_CFG1_CHECK_LEN | + /* Generate FCS */ + AXGMAC_CFG1_GEN_FCS | + /* Pad frames to 64 bytes */ + AXGMAC_CFG1_PAD_64), + TRUE); /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */ if (adapter->JumboEnabled) { @@ -2314,15 +2361,20 @@ static int sxg_initialize_link(struct adapter_t *adapter) /* * Per information given by Aeluros, wait 100 ms after removing reset. - * It's not enough to wait for the self-clearing reset bit in reg 0 to clear. + * It's not enough to wait for the self-clearing reset bit in reg 0 to + * clear. */ mdelay(100); - /* Verify the PHY has come up by checking that the Reset bit has cleared. */ - status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */ - PHY_PMA_CONTROL1, /* PMA/PMD control register */ - &Value); - DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value, (Value & PMA_CONTROL1_RESET)); + /* Verify the PHY has come up by checking that the Reset bit has + * cleared. 
+ */ + status = sxg_read_mdio_reg(adapter, + MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */ + PHY_PMA_CONTROL1, /* PMA/PMD control register */ + &Value); + DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value, + (Value & PMA_CONTROL1_RESET)); if (status != STATUS_SUCCESS) return (STATUS_FAILURE); if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */ @@ -2343,16 +2395,26 @@ static int sxg_initialize_link(struct adapter_t *adapter) return (STATUS_FAILURE); /* Enable the Link Alarm */ - status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */ - LASI_CONTROL, /* LASI control register */ - LASI_CTL_LS_ALARM_ENABLE); /* enable link alarm bit */ + + /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module + * LASI_CONTROL - LASI control register + * LASI_CTL_LS_ALARM_ENABLE - enable link alarm bit + */ + status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, + LASI_CONTROL, + LASI_CTL_LS_ALARM_ENABLE); if (status != STATUS_SUCCESS) return (STATUS_FAILURE); /* XXXTODO - temporary - verify bit is set */ - status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */ - LASI_CONTROL, /* LASI control register */ + + /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module + * LASI_CONTROL - LASI control register + */ + status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, + LASI_CONTROL, &Value); + if (status != STATUS_SUCCESS) return (STATUS_FAILURE); if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) { @@ -2397,16 +2459,20 @@ static int sxg_phy_init(struct adapter_t *adapter) DBG_ERROR("ENTER %s\n", __func__); - /* Read a register to identify the PHY type */ - status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */ - 0xC205, /* PHY ID register (?) */ - &Value); /* XXXTODO - add def */ + /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module + * 0xC205 - PHY ID register (?) + * &Value - XXXTODO - add def + */ + status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, + 0xC205, + &Value); if (status != STATUS_SUCCESS) return (STATUS_FAILURE); - if (Value == 0x0012) { /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */ - DBG_ERROR - ("AEL2005C PHY detected. Downloading PHY microcode.\n"); + if (Value == 0x0012) { + /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */ + DBG_ERROR("AEL2005C PHY detected. Downloading PHY \ + microcode.\n"); /* Initialize AEL2005C PHY and download PHY microcode */ for (p = PhyUcode; p->Addr != 0xFFFF; p++) { @@ -2414,10 +2480,13 @@ static int sxg_phy_init(struct adapter_t *adapter) /* if address == 0, data == sleep time in ms */ mdelay(p->Data); } else { - /* write the given data to the specified address */ - status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */ - p->Addr, /* PHY address */ - p->Data); /* PHY data */ + /* write the given data to the specified address */ + status = sxg_write_mdio_reg(adapter, + MIIM_DEV_PHY_PMA, + /* PHY address */ + p->Addr, + /* PHY data */ + p->Data); if (status != STATUS_SUCCESS) return (STATUS_FAILURE); } @@ -2458,13 +2527,15 @@ static void sxg_link_event(struct adapter_t *adapter) mdelay(10); /* Now clear the alarm by reading the LASI status register. 
*/ - status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */ - LASI_STATUS, /* LASI status register */ + /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */ + status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, + /* LASI status register */ + LASI_STATUS, &Value); if (status != STATUS_SUCCESS) { DBG_ERROR("Error reading LASI Status MDIO register!\n"); sxg_link_state(adapter, SXG_LINK_DOWN); -/* ASSERT(0); */ + /* ASSERT(0); */ } ASSERT(Value & LASI_STATUS_LS_ALARM); @@ -2483,7 +2554,7 @@ static void sxg_link_event(struct adapter_t *adapter) */ DBG_ERROR("SXG: sxg_link_event: Can't get here!\n"); DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value); -/* ASSERT(0); */ + /* ASSERT(0); */ } DBG_ERROR("EXIT %s\n", __func__); @@ -2512,8 +2583,11 @@ static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter) * Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if * the following 3 bits (from 3 different MDIO registers) are all true. */ - status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */ - PHY_PMA_RCV_DET, /* PMA/PMD Receive Signal Detect register */ + + /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */ + status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, + /* PMA/PMD Receive Signal Detect register */ + PHY_PMA_RCV_DET, &Value); if (status != STATUS_SUCCESS) goto bad; @@ -2522,8 +2596,10 @@ static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter) if (!(Value & PMA_RCV_DETECT)) return (SXG_LINK_DOWN); - status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS, /* PHY PCS module */ - PHY_PCS_10G_STATUS1, /* PCS 10GBASE-R Status 1 register */ + /* MIIM_DEV_PHY_PCS - PHY PCS module */ + status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS, + /* PCS 10GBASE-R Status 1 register */ + PHY_PCS_10G_STATUS1, &Value); if (status != STATUS_SUCCESS) goto bad; @@ -2532,8 +2608,9 @@ static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter) if (!(Value & PCS_10B_BLOCK_LOCK)) return (SXG_LINK_DOWN); - status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS, /* PHY XS module */ - PHY_XS_LANE_STATUS, /* XS Lane Status register */ + status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,/* PHY XS module */ + /* XS Lane Status register */ + PHY_XS_LANE_STATUS, &Value); if (status != STATUS_SUCCESS) goto bad; @@ -2548,7 +2625,7 @@ static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter) return (SXG_LINK_UP); bad: - /* An error occurred reading an MDIO register. This shouldn't happen. */ + /* An error occurred reading an MDIO register. This shouldn't happen. */ DBG_ERROR("Error reading an MDIO register!\n"); ASSERT(0); return (SXG_LINK_DOWN); @@ -2581,7 +2658,8 @@ static void sxg_indicate_link_state(struct adapter_t *adapter, * Return * None */ -static void sxg_link_state(struct adapter_t *adapter, enum SXG_LINK_STATE LinkState) +static void sxg_link_state(struct adapter_t *adapter, + enum SXG_LINK_STATE LinkState) { SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT", adapter, LinkState, adapter->LinkState, adapter->State); @@ -2596,7 +2674,8 @@ static void sxg_link_state(struct adapter_t *adapter, enum SXG_LINK_STATE LinkSt if (LinkState == adapter->LinkState) { /* Nothing changed.. */ spin_unlock(&adapter->AdapterLock); - DBG_ERROR("EXIT #0 %s\n", __func__); + DBG_ERROR("EXIT #0 %s. 
Link status = %d\n", + __func__, LinkState); return; } /* Save the adapter state */ @@ -2625,13 +2704,15 @@ static int sxg_write_mdio_reg(struct adapter_t *adapter, u32 DevAddr, u32 RegAddr, u32 Value) { struct sxg_hw_regs *HwRegs = adapter->HwRegs; - u32 AddrOp; /* Address operation (written to MIIM field reg) */ - u32 WriteOp; /* Write operation (written to MIIM field reg) */ - u32 Cmd; /* Command (written to MIIM command reg) */ + /* Address operation (written to MIIM field reg) */ + u32 AddrOp; + /* Write operation (written to MIIM field reg) */ + u32 WriteOp; + u32 Cmd;/* Command (written to MIIM command reg) */ u32 ValueRead; u32 Timeout; -/* DBG_ERROR("ENTER %s\n", __func__); */ + /* DBG_ERROR("ENTER %s\n", __func__); */ SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO", adapter, 0, 0, 0); @@ -2694,7 +2775,7 @@ static int sxg_write_mdio_reg(struct adapter_t *adapter, } } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); -/* DBG_ERROR("EXIT %s\n", __func__); */ + /* DBG_ERROR("EXIT %s\n", __func__); */ return (STATUS_SUCCESS); } @@ -2706,7 +2787,7 @@ static int sxg_write_mdio_reg(struct adapter_t *adapter, * adapter - A pointer to our adapter structure * DevAddr - MDIO device number being addressed * RegAddr - register address for the specified MDIO device - * pValue - pointer to where to put data read from the MDIO register + * pValue - pointer to where to put data read from the MDIO register * * Return * status @@ -2715,15 +2796,15 @@ static int sxg_read_mdio_reg(struct adapter_t *adapter, u32 DevAddr, u32 RegAddr, u32 *pValue) { struct sxg_hw_regs *HwRegs = adapter->HwRegs; - u32 AddrOp; /* Address operation (written to MIIM field reg) */ - u32 ReadOp; /* Read operation (written to MIIM field reg) */ - u32 Cmd; /* Command (written to MIIM command reg) */ + u32 AddrOp; /* Address operation (written to MIIM field reg) */ + u32 ReadOp; /* Read operation (written to MIIM field reg) */ + u32 Cmd; /* Command (written to MIIM command reg) */ u32 ValueRead; u32 Timeout; SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO", adapter, 0, 0, 0); - DBG_ERROR("ENTER %s\n", __FUNCTION__); + DBG_ERROR("ENTER %s\n", __FUNCTION__); /* Ensure values don't exceed field width */ DevAddr &= 0x001F; /* 5-bit field */ @@ -2790,7 +2871,7 @@ static int sxg_read_mdio_reg(struct adapter_t *adapter, READ_REG(HwRegs->MacAmiimField, *pValue); *pValue &= 0xFFFF; /* data is in the lower 16 bits */ - DBG_ERROR("EXIT %s\n", __FUNCTION__); + DBG_ERROR("EXIT %s\n", __FUNCTION__); return (STATUS_SUCCESS); } @@ -2799,21 +2880,21 @@ static int sxg_read_mdio_reg(struct adapter_t *adapter, * Functions to obtain the CRC corresponding to the destination mac address. * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using * the polynomial: - * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1. + * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5 + * + x^4 + x^2 + x^1. * - * After the CRC for the 6 bytes is generated (but before the value is complemented), - * we must then transpose the value and return bits 30-23. + * After the CRC for the 6 bytes is generated (but before the value is + * complemented), we must then transpose the value and return bits 30-23. 
*/ -static u32 sxg_crc_table[256]; /* Table of CRC's for all possible byte values */ +static u32 sxg_crc_table[256];/* Table of CRC's for all possible byte values */ +static u32 sxg_crc_init; /* Is table initialized */ -/* - * Contruct the CRC32 table - */ +/* Contruct the CRC32 table */ static void sxg_mcast_init_crc32(void) { - u32 c; /* CRC shit reg */ - u32 e = 0; /* Poly X-or pattern */ - int i; /* counter */ + u32 c; /* CRC shit reg */ + u32 e = 0; /* Poly X-or pattern */ + int i; /* counter */ int k; /* byte being shifted into crc */ static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 }; @@ -2831,7 +2912,6 @@ static void sxg_mcast_init_crc32(void) } } -static u32 sxg_crc_init; /* Is table initialized */ /* * Return the MAC hast as described above. */ @@ -2870,19 +2950,23 @@ static void sxg_mcast_set_mask(struct adapter_t *adapter) if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) { /* - * Turn on all multicast addresses. We have to do this for promiscuous - * mode as well as ALLMCAST mode. It saves the Microcode from having - * to keep state about the MAC configuration. + * Turn on all multicast addresses. We have to do this for + * promiscuous mode as well as ALLMCAST mode. It saves the + * Microcode from having keep state about the MAC configuration + */ + /* DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n + * SLUT MODE!!!\n",__func__); */ -/* DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n SLUT MODE!!!\n",__func__); */ WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH); WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH); -/* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__func__, adapter->netdev->name); */ + /* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high \ + * 0xFFFFFFFF\n",__func__, adapter->netdev->name); + */ } else { /* - * Commit our multicast mast to the SLIC by writing to the multicast - * address mask registers + * Commit our multicast mast to the SLIC by writing to the + * multicast address mask registers */ DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n", __func__, adapter->netdev->name, @@ -3173,7 +3257,8 @@ static int sxg_allocate_buffer_memory(struct adapter_t *adapter, } /* - * sxg_allocate_rcvblock_complete - Complete a receive descriptor block allocation + * sxg_allocate_rcvblock_complete - Complete a receive descriptor + * block allocation * * Arguments - * adapter - A pointer to our adapter structure @@ -3223,16 +3308,18 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter, RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr*) (RcvDataBuffer + - SXG_RCV_DATA_BUFFER_HDR_OFFSET - (BufferSize)); + SXG_RCV_DATA_BUFFER_HDR_OFFSET + (BufferSize)); RcvDataBufferHdr->VirtualAddress = RcvDataBuffer; - RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; /* For FREE macro assertion */ + /* For FREE macro assertion */ + RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; RcvDataBufferHdr->Size = SXG_RCV_BUFFER_DATA_SIZE(BufferSize); SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr); /* ASK hardcoded 2048 */ - RcvDataBufferHdr->PhysicalAddress = pci_map_single(adapter->pcidev, + RcvDataBufferHdr->PhysicalAddress = + pci_map_single(adapter->pcidev, RcvDataBufferHdr->SxgDumbRcvPacket->data, 2048, PCI_DMA_FROMDEVICE); @@ -3255,13 +3342,14 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter, InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList); spin_unlock(&adapter->RcvQLock); - /* Now free the contained receive data buffers that we initialized above */ + /* 
Now free the contained receive data buffers that we + * initialized above */ RcvDataBuffer = RcvBlock; for (i = 0, Paddr = PhysicalAddress; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) { - RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr*) (RcvDataBuffer + - SXG_RCV_DATA_BUFFER_HDR_OFFSET + RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr*) + (RcvDataBuffer + SXG_RCV_DATA_BUFFER_HDR_OFFSET (BufferSize)); spin_lock(&adapter->RcvQLock); SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr); @@ -3285,7 +3373,7 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter, SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk", adapter, RcvBlock, Length, 0); return; - fail: +fail: /* Free any allocated resources */ if (RcvBlock) { RcvDataBuffer = RcvBlock; @@ -3293,7 +3381,7 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter, i++, RcvDataBuffer += BufferSize) { RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) (RcvDataBuffer + - SXG_RCV_DATA_BUFFER_HDR_OFFSET + SXG_RCV_DATA_BUFFER_HDR_OFFSET (BufferSize)); SXG_FREE_RCV_PACKET(RcvDataBufferHdr); } @@ -3328,8 +3416,10 @@ static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter, spin_lock(&adapter->SglQLock); adapter->AllSglBufferCount++; memset(SxgSgl, 0, sizeof(struct sxg_scatter_gather)); - SxgSgl->PhysicalAddress = PhysicalAddress; /* *PhysicalAddress; */ - SxgSgl->adapter = adapter; /* Initialize backpointer once */ + /* *PhysicalAddress; */ + SxgSgl->PhysicalAddress = PhysicalAddress; + /* Initialize backpointer once */ + SxgSgl->adapter = adapter; InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList); spin_unlock(&adapter->SglQLock); SxgSgl->State = SXG_BUFFER_BUSY; @@ -3341,15 +3431,21 @@ static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter, static void sxg_adapter_set_hwaddr(struct adapter_t *adapter) { -/* - * DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __func__, - * card->config_set, adapter->port, adapter->physport, adapter->functionnumber); - * sxg_dbg_macaddrs(adapter); - */ + /* + * DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] \ + * funct#[%d]\n", __func__, card->config_set, + * adapter->port, adapter->physport, adapter->functionnumber); + * + * sxg_dbg_macaddrs(adapter); + */ + memcpy(adapter->macaddr, temp_mac_address, + sizeof(struct sxg_config_mac)); + /* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", + * __FUNCTION__); + */ + + /* sxg_dbg_macaddrs(adapter); */ - memcpy(adapter->macaddr, temp_mac_address, sizeof(struct sxg_config_mac)); -/* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __func__); */ -/* sxg_dbg_macaddrs(adapter); */ if (!(adapter->currmacaddr[0] || adapter->currmacaddr[1] || adapter->currmacaddr[2] || @@ -3361,7 +3457,7 @@ static void sxg_adapter_set_hwaddr(struct adapter_t *adapter) memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6); memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6); } -/* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */ + /* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */ sxg_dbg_macaddrs(adapter); } @@ -3514,8 +3610,7 @@ static int sxg_initialize_adapter(struct adapter_t *adapter) * status */ static int sxg_fill_descriptor_block(struct adapter_t *adapter, - struct sxg_rcv_descriptor_block_hdr - *RcvDescriptorBlockHdr) + struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr) { u32 i; struct sxg_ring_info *RcvRingInfo = 
&adapter->RcvRingZeroInfo; @@ -3544,8 +3639,8 @@ static int sxg_fill_descriptor_block(struct adapter_t *adapter, RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr); ASSERT(RingDescriptorCmd); RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD; - RcvDescriptorBlock = - (struct sxg_rcv_descriptor_block *) RcvDescriptorBlockHdr->VirtualAddress; + RcvDescriptorBlock = (struct sxg_rcv_descriptor_block *) + RcvDescriptorBlockHdr->VirtualAddress; /* Fill in the descriptor block */ for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) { @@ -3555,11 +3650,13 @@ static int sxg_fill_descriptor_block(struct adapter_t *adapter, SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket); RcvDataBufferHdr->State = SXG_BUFFER_ONCARD; RcvDescriptorBlock->Descriptors[i].VirtualAddress = - (void *)RcvDataBufferHdr; + (void *)RcvDataBufferHdr; if (i == 0) - printk("ASK:sxg_fill_descriptor_block: first virt address %p\n", RcvDataBufferHdr); + printk("ASK:sxg_fill_descriptor_block: first virt \ + address %p\n", RcvDataBufferHdr); if (i == (SXG_RCV_DESCRIPTORS_PER_BLOCK - 1)) - printk("ASK:sxg_fill_descriptor_block: last virt address %p\n", RcvDataBufferHdr); + printk("ASK:sxg_fill_descriptor_block: last virt \ + address %p\n", RcvDataBufferHdr); RcvDescriptorBlock->Descriptors[i].PhysicalAddress = RcvDataBufferHdr->PhysicalAddress; @@ -3616,7 +3713,8 @@ static void sxg_stock_rcv_buffers(struct adapter_t *adapter) ReceiveBufferSize), SXG_BUFFER_TYPE_RCV); } - printk("ASK:sxg_stock_rcv_buffers: RcvBuffersOnCard %d\n", adapter->RcvBuffersOnCard); + printk("ASK:sxg_stock_rcv_buffers: RcvBuffersOnCard %d\n", + adapter->RcvBuffersOnCard); /* Now grab the RcvQLock lock and proceed */ spin_lock(&adapter->RcvQLock); while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) { @@ -3699,11 +3797,10 @@ static void sxg_complete_descriptor_blocks(struct adapter_t *adapter, * header. 
The card will be restocked later via the * RcvBuffersOnCard test */ - if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) == - STATUS_FAILURE) { + if (sxg_fill_descriptor_block(adapter, + RcvDescriptorBlockHdr) == STATUS_FAILURE) SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr); - } } spin_unlock(&adapter->RcvQLock); SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks", @@ -3719,7 +3816,7 @@ static struct pci_driver sxg_driver = { .suspend = sxgpm_suspend, .resume = sxgpm_resume, #endif -/* .shutdown = slic_shutdown, MOOK_INVESTIGATE */ + /* .shutdown = slic_shutdown, MOOK_INVESTIGATE */ }; static int __init sxg_module_init(void) diff --git a/drivers/staging/sxg/sxg.h b/drivers/staging/sxg/sxg.h index a6e5eb87e57..9fa1dac2324 100644 --- a/drivers/staging/sxg/sxg.h +++ b/drivers/staging/sxg/sxg.h @@ -62,13 +62,13 @@ struct sxg_stats { u64 DumbXmtUcastBytes; /* OID_GEN_DIRECTED_BYTES_XMIT */ u64 DumbXmtMcastBytes; /* OID_GEN_MULTICAST_BYTES_XMIT */ u64 DumbXmtBcastBytes; /* OID_GEN_BROADCAST_BYTES_XMIT */ - u64 XmtErrors; /* OID_GEN_XMIT_ERROR */ + u64 XmtErrors; /* OID_GEN_XMIT_ERROR */ u64 XmtDiscards; /* OID_GEN_XMIT_DISCARDS */ - u64 XmtOk; /* OID_GEN_XMIT_OK */ - u64 XmtQLen; /* OID_GEN_TRANSMIT_QUEUE_LENGTH */ + u64 XmtOk; /* OID_GEN_XMIT_OK */ + u64 XmtQLen; /* OID_GEN_TRANSMIT_QUEUE_LENGTH */ u64 XmtZeroFull; /* Transmit ring zero full */ /* Rcv */ - u32 RcvNBL; /* Offload recieve NBL count */ + u32 RcvNBL; /* Offload recieve NBL count */ u64 DumbRcvBytes; /* dumbnic recv bytes */ u64 DumbRcvUcastBytes; /* OID_GEN_DIRECTED_BYTES_RCV */ u64 DumbRcvMcastBytes; /* OID_GEN_MULTICAST_BYTES_RCV */ @@ -116,14 +116,14 @@ struct sxg_stats { /* DUMB-NIC Send path definitions */ -#define SXG_COMPLETE_DUMB_SEND(_pAdapt, _skb) { \ - ASSERT(_skb); \ - dev_kfree_skb_irq(_skb); \ +#define SXG_COMPLETE_DUMB_SEND(_pAdapt, _skb) { \ + ASSERT(_skb); \ + dev_kfree_skb_irq(_skb); \ } -#define SXG_DROP_DUMB_SEND(_pAdapt, _skb) { \ - ASSERT(_skb); \ - dev_kfree_skb(_skb); \ +#define SXG_DROP_DUMB_SEND(_pAdapt, _skb) { \ + ASSERT(_skb); \ + dev_kfree_skb(_skb); \ } /* @@ -139,21 +139,21 @@ struct sxg_stats { /* Indications array size */ #define SXG_RCV_ARRAYSIZE 64 -#define SXG_ALLOCATE_RCV_PACKET(_pAdapt, _RcvDataBufferHdr) { \ - struct sk_buff * skb; \ - skb = netdev_alloc_skb(_pAdapt->netdev, 2048); \ - if (skb) { \ - (_RcvDataBufferHdr)->skb = skb; \ - skb->next = NULL; \ - } else { \ - (_RcvDataBufferHdr)->skb = NULL; \ - } \ +#define SXG_ALLOCATE_RCV_PACKET(_pAdapt, _RcvDataBufferHdr) { \ + struct sk_buff * skb; \ + skb = netdev_alloc_skb(_pAdapt->netdev, 2048); \ + if (skb) { \ + (_RcvDataBufferHdr)->skb = skb; \ + skb->next = NULL; \ + } else { \ + (_RcvDataBufferHdr)->skb = NULL; \ + } \ } -#define SXG_FREE_RCV_PACKET(_RcvDataBufferHdr) { \ - if((_RcvDataBufferHdr)->skb) { \ - dev_kfree_skb((_RcvDataBufferHdr)->skb); \ - } \ +#define SXG_FREE_RCV_PACKET(_RcvDataBufferHdr) { \ + if((_RcvDataBufferHdr)->skb) { \ + dev_kfree_skb((_RcvDataBufferHdr)->skb); \ + } \ } /* @@ -161,54 +161,58 @@ struct sxg_stats { * If we fill up our array of packet pointers, then indicate this * block up now and start on a new one. 
*/ -#define SXG_ADD_RCV_PACKET(_pAdapt, _Packet, _PrevPacket, _IndicationList, _NumPackets) { \ - (_IndicationList)[_NumPackets] = (_Packet); \ - (_NumPackets)++; \ - if((_NumPackets) == SXG_RCV_ARRAYSIZE) { \ - SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "IndicRcv", \ - (_NumPackets), 0, 0, 0); \ - netif_rx((_IndicationList),(_NumPackets)); \ - (_NumPackets) = 0; \ - } \ +#define SXG_ADD_RCV_PACKET(_pAdapt, _Packet, _PrevPacket, _IndicationList, \ + _NumPackets) { \ + (_IndicationList)[_NumPackets] = (_Packet); \ + (_NumPackets)++; \ + if((_NumPackets) == SXG_RCV_ARRAYSIZE) { \ + SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "IndicRcv", \ + (_NumPackets), 0, 0, 0); \ + netif_rx((_IndicationList),(_NumPackets)); \ + (_NumPackets) = 0; \ + } \ } -#define SXG_INDICATE_PACKETS(_pAdapt, _IndicationList, _NumPackets) { \ - if(_NumPackets) { \ - SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "IndicRcv", \ - (_NumPackets), 0, 0, 0); \ - netif_rx((_IndicationList),(_NumPackets)); \ - (_NumPackets) = 0; \ - } \ +#define SXG_INDICATE_PACKETS(_pAdapt, _IndicationList, _NumPackets) { \ + if(_NumPackets) { \ + SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "IndicRcv", \ + (_NumPackets), 0, 0, 0); \ + netif_rx((_IndicationList),(_NumPackets)); \ + (_NumPackets) = 0; \ + } \ } -#define SXG_REINIATIALIZE_PACKET(_Packet) \ - {} /*_NdisReinitializePacket(_Packet)*/ /* this is not necessary with an skb */ +#define SXG_REINIATIALIZE_PACKET(_Packet) \ + {} /*_NdisReinitializePacket(_Packet)*/ + /* this is not necessary with an skb */ /* Definitions to initialize Dumb-nic Receive NBLs */ -#define SXG_RCV_PACKET_BUFFER_HDR(_Packet) (((struct sxg_rcv_nbl_reserved *)((_Packet)->MiniportReservedEx))->RcvDataBufferHdr) +#define SXG_RCV_PACKET_BUFFER_HDR(_Packet) (((struct sxg_rcv_nbl_reserved *)\ + ((_Packet)->MiniportReservedEx))->RcvDataBufferHdr) -#define SXG_RCV_SET_CHECKSUM_INFO(_Packet, _Cpi) \ - NDIS_PER_PACKET_INFO_FROM_PACKET((_Packet), TcpIpChecksumPacketInfo) = (PVOID)(_Cpi) +#define SXG_RCV_SET_CHECKSUM_INFO(_Packet, _Cpi) \ + NDIS_PER_PACKET_INFO_FROM_PACKET((_Packet), \ + TcpIpChecksumPacketInfo) = (PVOID)(_Cpi) #define SXG_RCV_SET_TOEPLITZ(_Packet, _Toeplitz, _Type, _Function) { \ - NDIS_PACKET_SET_HASH_VALUE((_Packet), (_Toeplitz)); \ - NDIS_PACKET_SET_HASH_TYPE((_Packet), (_Type)); \ - NDIS_PACKET_SET_HASH_FUNCTION((_Packet), (_Function)); \ + NDIS_PACKET_SET_HASH_VALUE((_Packet), (_Toeplitz)); \ + NDIS_PACKET_SET_HASH_TYPE((_Packet), (_Type)); \ + NDIS_PACKET_SET_HASH_FUNCTION((_Packet), (_Function)); \ } -#define SXG_RCV_SET_VLAN_INFO(_Packet, _VlanId, _Priority) { \ - NDIS_PACKET_8021Q_INFO _Packet8021qInfo; \ - _Packet8021qInfo.TagHeader.VlanId = (_VlanId); \ - _Packet8021qInfo.TagHeader.UserPriority = (_Priority); \ +#define SXG_RCV_SET_VLAN_INFO(_Packet, _VlanId, _Priority) { \ + NDIS_PACKET_8021Q_INFO _Packet8021qInfo; \ + _Packet8021qInfo.TagHeader.VlanId = (_VlanId); \ + _Packet8021qInfo.TagHeader.UserPriority = (_Priority); \ NDIS_PER_PACKET_INFO_FROM_PACKET((_Packet), Ieee8021QNetBufferListInfo) = \ - _Packet8021qInfo.Value; \ + _Packet8021qInfo.Value; \ } -#define SXG_ADJUST_RCV_PACKET(_Packet, _RcvDataBufferHdr, _Event) { \ - SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbRcv", \ - (_RcvDataBufferHdr), (_Packet), \ - (_Event)->Status, 0); \ - ASSERT((_Event)->Length <= (_RcvDataBufferHdr)->Size); \ +#define SXG_ADJUST_RCV_PACKET(_Packet, _RcvDataBufferHdr, _Event) { \ + SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbRcv", \ + (_RcvDataBufferHdr), 
(_Packet), \ + (_Event)->Status, 0); \ + ASSERT((_Event)->Length <= (_RcvDataBufferHdr)->Size); \ skb_put(Packet, (_Event)->Length); \ } @@ -216,47 +220,49 @@ struct sxg_stats { * Macros to free a receive data buffer and receive data descriptor block * NOTE - Lock must be held with RCV macros */ -#define SXG_GET_RCV_DATA_BUFFER(_pAdapt, _Hdr) { \ - struct list_entry *_ple; \ - _Hdr = NULL; \ - if((_pAdapt)->FreeRcvBufferCount) { \ - ASSERT(!(IsListEmpty(&(_pAdapt)->FreeRcvBuffers))); \ - _ple = RemoveHeadList(&(_pAdapt)->FreeRcvBuffers); \ - (_Hdr) = container_of(_ple, struct sxg_rcv_data_buffer_hdr, FreeList); \ - (_pAdapt)->FreeRcvBufferCount--; \ - ASSERT((_Hdr)->State == SXG_BUFFER_FREE); \ - } \ +#define SXG_GET_RCV_DATA_BUFFER(_pAdapt, _Hdr) { \ + struct list_entry *_ple; \ + _Hdr = NULL; \ + if((_pAdapt)->FreeRcvBufferCount) { \ + ASSERT(!(IsListEmpty(&(_pAdapt)->FreeRcvBuffers))); \ + _ple = RemoveHeadList(&(_pAdapt)->FreeRcvBuffers); \ + (_Hdr) = container_of(_ple, struct sxg_rcv_data_buffer_hdr, \ + FreeList); \ + (_pAdapt)->FreeRcvBufferCount--; \ + ASSERT((_Hdr)->State == SXG_BUFFER_FREE); \ + } \ } -#define SXG_FREE_RCV_DATA_BUFFER(_pAdapt, _Hdr) { \ - SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RtnDHdr", \ - (_Hdr), (_pAdapt)->FreeRcvBufferCount, \ - (_Hdr)->State, (_Hdr)->VirtualAddress); \ -/* SXG_RESTORE_MDL_OFFSET(_Hdr); */ \ - (_pAdapt)->FreeRcvBufferCount++; \ - ASSERT(((_pAdapt)->AllRcvBlockCount * SXG_RCV_DESCRIPTORS_PER_BLOCK) >= (_pAdapt)->FreeRcvBufferCount); \ - ASSERT((_Hdr)->State != SXG_BUFFER_FREE); \ - (_Hdr)->State = SXG_BUFFER_FREE; \ - InsertTailList(&(_pAdapt)->FreeRcvBuffers, &((_Hdr)->FreeList)); \ +#define SXG_FREE_RCV_DATA_BUFFER(_pAdapt, _Hdr) { \ + SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RtnDHdr", \ + (_Hdr), (_pAdapt)->FreeRcvBufferCount, \ + (_Hdr)->State, (_Hdr)->VirtualAddress); \ +/* SXG_RESTORE_MDL_OFFSET(_Hdr); */ \ + (_pAdapt)->FreeRcvBufferCount++; \ + ASSERT(((_pAdapt)->AllRcvBlockCount * SXG_RCV_DESCRIPTORS_PER_BLOCK) \ + >= (_pAdapt)->FreeRcvBufferCount); \ + ASSERT((_Hdr)->State != SXG_BUFFER_FREE); \ + (_Hdr)->State = SXG_BUFFER_FREE; \ + InsertTailList(&(_pAdapt)->FreeRcvBuffers, &((_Hdr)->FreeList)); \ } -#define SXG_FREE_RCV_DESCRIPTOR_BLOCK(_pAdapt, _Hdr) { \ - ASSERT((_Hdr)->State != SXG_BUFFER_FREE); \ - (_Hdr)->State = SXG_BUFFER_FREE; \ - (_pAdapt)->FreeRcvBlockCount++; \ +#define SXG_FREE_RCV_DESCRIPTOR_BLOCK(_pAdapt, _Hdr) { \ + ASSERT((_Hdr)->State != SXG_BUFFER_FREE); \ + (_Hdr)->State = SXG_BUFFER_FREE; \ + (_pAdapt)->FreeRcvBlockCount++; \ ASSERT((_pAdapt)->AllRcvBlockCount >= (_pAdapt)->FreeRcvBlockCount); \ - InsertTailList(&(_pAdapt)->FreeRcvBlocks, &(_Hdr)->FreeList); \ + InsertTailList(&(_pAdapt)->FreeRcvBlocks, &(_Hdr)->FreeList); \ } /* SGL macros */ -#define SXG_FREE_SGL_BUFFER(_pAdapt, _Sgl, _NB) { \ - spin_lock(&(_pAdapt)->SglQLock); \ - (_pAdapt)->FreeSglBufferCount++; \ - ASSERT((_pAdapt)->AllSglBufferCount >= (_pAdapt)->FreeSglBufferCount);\ - ASSERT(!((_Sgl)->State & SXG_BUFFER_FREE)); \ - (_Sgl)->State = SXG_BUFFER_FREE; \ - InsertTailList(&(_pAdapt)->FreeSglBuffers, &(_Sgl)->FreeList); \ - spin_unlock(&(_pAdapt)->SglQLock); \ +#define SXG_FREE_SGL_BUFFER(_pAdapt, _Sgl, _NB) { \ + spin_lock(&(_pAdapt)->SglQLock); \ + (_pAdapt)->FreeSglBufferCount++; \ + ASSERT((_pAdapt)->AllSglBufferCount >= (_pAdapt)->FreeSglBufferCount); \ + ASSERT(!((_Sgl)->State & SXG_BUFFER_FREE)); \ + (_Sgl)->State = SXG_BUFFER_FREE; \ + InsertTailList(&(_pAdapt)->FreeSglBuffers, &(_Sgl)->FreeList); \ + 
spin_unlock(&(_pAdapt)->SglQLock); \ } /* @@ -267,7 +273,7 @@ struct sxg_stats { * and not grabbing it avoids a possible double-trip. */ #define SXG_GET_SGL_BUFFER(_pAdapt, _Sgl) { \ - struct list_entry *_ple; \ + struct list_entry *_ple; \ if ((_pAdapt->FreeSglBufferCount < SXG_MIN_SGL_BUFFERS) && \ (_pAdapt->AllSglBufferCount < SXG_MAX_SGL_BUFFERS) && \ (_pAdapt->AllocationsPending == 0)) { \ @@ -280,7 +286,8 @@ struct sxg_stats { if((_pAdapt)->FreeSglBufferCount) { \ ASSERT(!(IsListEmpty(&(_pAdapt)->FreeSglBuffers))); \ _ple = RemoveHeadList(&(_pAdapt)->FreeSglBuffers); \ - (_Sgl) = container_of(_ple, struct sxg_scatter_gather, FreeList); \ + (_Sgl) = container_of(_ple, struct sxg_scatter_gather, \ + FreeList); \ (_pAdapt)->FreeSglBufferCount--; \ ASSERT((_Sgl)->State == SXG_BUFFER_FREE); \ (_Sgl)->State = SXG_BUFFER_BUSY; \ @@ -294,7 +301,7 @@ struct sxg_stats { * Linked list of multicast addresses. */ struct sxg_multicast_address { - unsigned char Address[6]; + unsigned char Address[6]; struct sxg_multicast_address *Next; }; @@ -319,20 +326,20 @@ struct sxg_buffer_queue { #define SXG_FAST_SEND_BUFFER 1 #define SXG_RECEIVE_BUFFER 2 -#define SXG_INIT_BUFFER(_Buffer, _Type) { \ - (_Buffer)->Type = (_Type); \ - if((_Type) == SXG_RECEIVE_BUFFER) { \ - (_Buffer)->Direction = 0; \ - } else { \ +#define SXG_INIT_BUFFER(_Buffer, _Type) { \ + (_Buffer)->Type = (_Type); \ + if((_Type) == SXG_RECEIVE_BUFFER) { \ + (_Buffer)->Direction = 0; \ + } else { \ (_Buffer)->Direction = NDIS_SG_LIST_WRITE_TO_DEVICE; \ - } \ - (_Buffer)->Bytes = 0; \ - (_Buffer)->Head = NULL; \ - (_Buffer)->Tail = NULL; \ + } \ + (_Buffer)->Bytes = 0; \ + (_Buffer)->Head = NULL; \ + (_Buffer)->Tail = NULL; \ } -#define SXG_RSS_CPU_COUNT(_pAdapt) \ +#define SXG_RSS_CPU_COUNT(_pAdapt) \ ((_pAdapt)->RssEnabled ? NR_CPUS : 1) /* DRIVER and ADAPTER structures */ @@ -367,9 +374,9 @@ enum SXG_LINK_STATE { /* Microcode file selection codes */ enum SXG_UCODE_SEL { - SXG_UCODE_SAHARA, /* Sahara ucode */ - SXG_UCODE_SDIAGCPU, /* Sahara CPU diagnostic ucode */ - SXG_UCODE_SDIAGSYS /* Sahara system diagnostic ucode */ + SXG_UCODE_SAHARA, /* Sahara ucode */ + SXG_UCODE_SDIAGCPU, /* Sahara CPU diagnostic ucode */ + SXG_UCODE_SDIAGSYS /* Sahara system diagnostic ucode */ }; @@ -378,8 +385,9 @@ enum SXG_UCODE_SEL { /* This probably lives in a proto.h file. Move later */ #define SXG_MULTICAST_PACKET(_pether) ((_pether)->ether_dhost[0] & 0x01) -#define SXG_BROADCAST_PACKET(_pether) ((*(u32 *)(_pether)->ether_dhost == 0xFFFFFFFF) && \ - (*(u16 *)&(_pether)->ether_dhost[4] == 0xFFFF)) +#define SXG_BROADCAST_PACKET(_pether) \ + ((*(u32 *)(_pether)->ether_dhost == 0xFFFFFFFF) && \ + (*(u16 *)&(_pether)->ether_dhost[4] == 0xFFFF)) /* For DbgPrints */ #define SXG_ID DPFLTR_IHVNETWORK_ID @@ -420,28 +428,28 @@ struct sxg_driver { * Mojave supports 16K, Oasis supports 16K-1, so * just set this at 15K, shouldnt make that much of a diff. */ -#define DUMP_BUF_SIZE 0x3C00 +#define DUMP_BUF_SIZE 0x3C00 #endif #define MIN(a, b) ((u32)(a) < (u32)(b) ? (a) : (b)) #define MAX(a, b) ((u32)(a) > (u32)(b) ? 
(a) : (b)) struct mcast_address { - unsigned char address[6]; + unsigned char address[6]; struct mcast_address *next; }; -#define CARD_DOWN 0x00000000 -#define CARD_UP 0x00000001 -#define CARD_FAIL 0x00000002 -#define CARD_DIAG 0x00000003 -#define CARD_SLEEP 0x00000004 +#define CARD_DOWN 0x00000000 +#define CARD_UP 0x00000001 +#define CARD_FAIL 0x00000002 +#define CARD_DIAG 0x00000003 +#define CARD_SLEEP 0x00000004 -#define ADAPT_DOWN 0x00 -#define ADAPT_UP 0x01 -#define ADAPT_FAIL 0x02 -#define ADAPT_RESET 0x03 -#define ADAPT_SLEEP 0x04 +#define ADAPT_DOWN 0x00 +#define ADAPT_UP 0x01 +#define ADAPT_FAIL 0x02 +#define ADAPT_RESET 0x03 +#define ADAPT_SLEEP 0x04 #define ADAPT_FLAGS_BOOTTIME 0x0001 #define ADAPT_FLAGS_IS64BIT 0x0002 @@ -453,29 +461,30 @@ struct mcast_address { #define ADAPT_FLAGS_STATS_TIMER_SET 0x0080 #define ADAPT_FLAGS_RESET_TIMER_SET 0x0100 -#define LINK_DOWN 0x00 -#define LINK_CONFIG 0x01 -#define LINK_UP 0x02 +#define LINK_DOWN 0x00 +#define LINK_CONFIG 0x01 +#define LINK_UP 0x02 -#define LINK_10MB 0x00 -#define LINK_100MB 0x01 -#define LINK_AUTOSPEED 0x02 -#define LINK_1000MB 0x03 -#define LINK_10000MB 0x04 +#define LINK_10MB 0x00 +#define LINK_100MB 0x01 +#define LINK_AUTOSPEED 0x02 +#define LINK_1000MB 0x03 +#define LINK_10000MB 0x04 -#define LINK_HALFD 0x00 -#define LINK_FULLD 0x01 -#define LINK_AUTOD 0x02 +#define LINK_HALFD 0x00 +#define LINK_FULLD 0x01 +#define LINK_AUTOD 0x02 -#define MAC_DIRECTED 0x00000001 -#define MAC_BCAST 0x00000002 -#define MAC_MCAST 0x00000004 -#define MAC_PROMISC 0x00000008 -#define MAC_LOOPBACK 0x00000010 -#define MAC_ALLMCAST 0x00000020 +#define MAC_DIRECTED 0x00000001 +#define MAC_BCAST 0x00000002 +#define MAC_MCAST 0x00000004 +#define MAC_PROMISC 0x00000008 +#define MAC_LOOPBACK 0x00000010 +#define MAC_ALLMCAST 0x00000020 #define SLIC_DUPLEX(x) ((x==LINK_FULLD) ? "FDX" : "HDX") -#define SLIC_SPEED(x) ((x==LINK_100MB) ? "100Mb" : ((x==LINK_1000MB) ? "1000Mb" : " 10Mb")) +#define SLIC_SPEED(x) ((x==LINK_100MB) ? "100Mb" : \ + ((x==LINK_1000MB) ? "1000Mb" : " 10Mb")) #define SLIC_LINKSTATE(x) ((x==LINK_DOWN) ? "Down" : "Up ") #define SLIC_ADAPTER_STATE(x) ((x==ADAPT_UP) ? "UP" : "Down") #define SLIC_CARD_STATE(x) ((x==CARD_UP) ? 
"UP" : "Down") @@ -492,8 +501,8 @@ struct ether_header { #define NUM_CFG_REGS 64 struct physcard { - struct adapter_t *adapter[SLIC_MAX_PORTS]; - struct physcard *next; + struct adapter_t *adapter[SLIC_MAX_PORTS]; + struct physcard *next; unsigned int adapters_allocd; }; @@ -687,7 +696,6 @@ struct adapter_t { /* PSXG_DUMP_CMD DumpBuffer; */ /* 68k - Cmd and Buffer */ /* dma_addr_t PDumpBuffer; */ /* Physical address */ /*#endif */ /* SXG_FAILURE_DUMP */ - }; #if SLIC_DUMP_ENABLED @@ -721,13 +729,13 @@ struct slic_crash_info { (largestat) += ((newstat) - (oldstat)); \ } -#define ETHER_EQ_ADDR(_AddrA, _AddrB, _Result) \ -{ \ - _Result = TRUE; \ - if (*(u32 *)(_AddrA) != *(u32 *)(_AddrB)) \ - _Result = FALSE; \ - if (*(u16 *)(&((_AddrA)[4])) != *(u16 *)(&((_AddrB)[4]))) \ - _Result = FALSE; \ +#define ETHER_EQ_ADDR(_AddrA, _AddrB, _Result) \ +{ \ + _Result = TRUE; \ + if (*(u32 *)(_AddrA) != *(u32 *)(_AddrB)) \ + _Result = FALSE; \ + if (*(u16 *)(&((_AddrA)[4])) != *(u16 *)(&((_AddrB)[4]))) \ + _Result = FALSE; \ } #define ETHERMAXFRAME 1514 @@ -735,7 +743,8 @@ struct slic_crash_info { #if defined(CONFIG_X86_64) || defined(CONFIG_IA64) #define SXG_GET_ADDR_LOW(_addr) (u32)((u64)(_addr) & 0x00000000FFFFFFFF) -#define SXG_GET_ADDR_HIGH(_addr) (u32)(((u64)(_addr) >> 32) & 0x00000000FFFFFFFF) +#define SXG_GET_ADDR_HIGH(_addr) \ + (u32)(((u64)(_addr) >> 32) & 0x00000000FFFFFFFF) #else #define SXG_GET_ADDR_LOW(_addr) (u32)_addr #define SXG_GET_ADDR_HIGH(_addr) (u32)0 @@ -744,8 +753,8 @@ struct slic_crash_info { #define FLUSH TRUE #define DONT_FLUSH FALSE -#define SIOCSLICDUMPCARD SIOCDEVPRIVATE+9 -#define SIOCSLICSETINTAGG SIOCDEVPRIVATE+10 -#define SIOCSLICTRACEDUMP SIOCDEVPRIVATE+11 +#define SIOCSLICDUMPCARD (SIOCDEVPRIVATE+9) +#define SIOCSLICSETINTAGG (SIOCDEVPRIVATE+10) +#define SIOCSLICTRACEDUMP (SIOCDEVPRIVATE+11) #endif /* __SXG_DRIVER_H__ */ diff --git a/drivers/staging/sxg/sxg_os.h b/drivers/staging/sxg/sxg_os.h index 55e4c382eba..68e1a04b61f 100644 --- a/drivers/staging/sxg/sxg_os.h +++ b/drivers/staging/sxg/sxg_os.h @@ -49,26 +49,26 @@ struct list_entry { struct list_entry *nle_blink; }; -#define InitializeListHead(l) \ +#define InitializeListHead(l) \ (l)->nle_flink = (l)->nle_blink = (l) -#define IsListEmpty(h) \ +#define IsListEmpty(h) \ ((h)->nle_flink == (h)) -#define RemoveEntryList(e) \ - do { \ - list_entry *b; \ - list_entry *f; \ - \ - f = (e)->nle_flink; \ - b = (e)->nle_blink; \ - b->nle_flink = f; \ - f->nle_blink = b; \ +#define RemoveEntryList(e) \ + do { \ + list_entry *b; \ + list_entry *f; \ + \ + f = (e)->nle_flink; \ + b = (e)->nle_blink; \ + b->nle_flink = f; \ + f->nle_blink = b; \ } while (0) /* These two have to be inlined since they return things. 
*/ -static __inline struct list_entry *RemoveHeadList(struct list_entry *l) +static inline struct list_entry *RemoveHeadList(struct list_entry *l) { struct list_entry *f; struct list_entry *e; @@ -81,7 +81,7 @@ static __inline struct list_entry *RemoveHeadList(struct list_entry *l) return (e); } -static __inline struct list_entry *RemoveTailList(struct list_entry *l) +static inline struct list_entry *RemoveTailList(struct list_entry *l) { struct list_entry *b; struct list_entry *e; @@ -94,35 +94,35 @@ static __inline struct list_entry *RemoveTailList(struct list_entry *l) return (e); } -#define InsertTailList(l, e) \ - do { \ - struct list_entry *b; \ - \ - b = (l)->nle_blink; \ - (e)->nle_flink = (l); \ - (e)->nle_blink = b; \ - b->nle_flink = (e); \ - (l)->nle_blink = (e); \ +#define InsertTailList(l, e) \ + do { \ + struct list_entry *b; \ + \ + b = (l)->nle_blink; \ + (e)->nle_flink = (l); \ + (e)->nle_blink = b; \ + b->nle_flink = (e); \ + (l)->nle_blink = (e); \ } while (0) -#define InsertHeadList(l, e) \ - do { \ - struct list_entry *f; \ - \ - f = (l)->nle_flink; \ - (e)->nle_flink = f; \ - (e)->nle_blink = l; \ - f->nle_blink = (e); \ - (l)->nle_flink = (e); \ +#define InsertHeadList(l, e) \ + do { \ + struct list_entry *f; \ + \ + f = (l)->nle_flink; \ + (e)->nle_flink = f; \ + (e)->nle_blink = l; \ + f->nle_blink = (e); \ + (l)->nle_flink = (e); \ } while (0) #define ATK_DEBUG 1 #if ATK_DEBUG -#define SLIC_TIMESTAMP(value) { \ - struct timeval timev; \ - do_gettimeofday(&timev); \ - value = timev.tv_sec*1000000 + timev.tv_usec; \ +#define SLIC_TIMESTAMP(value) { \ + struct timeval timev; \ + do_gettimeofday(&timev); \ + value = timev.tv_sec*1000000 + timev.tv_usec; \ } #else #define SLIC_TIMESTAMP(value) @@ -131,17 +131,19 @@ static __inline struct list_entry *RemoveTailList(struct list_entry *l) /* SXG DEFINES */ #ifdef ATKDBG -#define SXG_TIMESTAMP(value) { \ - struct timeval timev; \ - do_gettimeofday(&timev); \ - value = timev.tv_sec*1000000 + timev.tv_usec; \ +#define SXG_TIMESTAMP(value) { \ + struct timeval timev; \ + do_gettimeofday(&timev); \ + value = timev.tv_sec*1000000 + timev.tv_usec; \ } #else #define SXG_TIMESTAMP(value) #endif -#define WRITE_REG(reg,value,flush) sxg_reg32_write((®), (value), (flush)) -#define WRITE_REG64(a,reg,value,cpu) sxg_reg64_write((a),(®),(value),(cpu)) +#define WRITE_REG(reg,value,flush) \ + sxg_reg32_write((®), (value), (flush)) +#define WRITE_REG64(a,reg,value,cpu) \ + sxg_reg64_write((a),(®),(value),(cpu)) #define READ_REG(reg,value) (value) = readl((void __iomem *)(®)) #endif /* _SLIC_OS_SPECIFIC_H_ */ diff --git a/drivers/staging/sxg/sxgdbg.h b/drivers/staging/sxg/sxgdbg.h index 7c1dfae6c9a..3f7895c083d 100644 --- a/drivers/staging/sxg/sxgdbg.h +++ b/drivers/staging/sxg/sxgdbg.h @@ -55,12 +55,12 @@ #define SXG_ASSERT_ENABLED #ifdef SXG_ASSERT_ENABLED #ifndef ASSERT -#define ASSERT(a) \ - { \ - if (!(a)) { \ - DBG_ERROR("ASSERT() Failure: file %s, function %s line %d\n",\ - __FILE__, __func__, __LINE__); \ - } \ +#define ASSERT(a) \ + { \ + if (!(a)) { \ + DBG_ERROR("ASSERT() Failure: file %s, function %s line %d\n", \ + __FILE__, __func__, __LINE__); \ + } \ } #endif #else @@ -88,16 +88,17 @@ extern ulong ATKTimerDiv; * parameters. 
*/ struct trace_entry { - char name[8]; /* 8 character name - like 's'i'm'b'a'r'c'v' */ - u32 time; /* Current clock tic */ - unsigned char cpu; /* Current CPU */ - unsigned char irql; /* Current IRQL */ - unsigned char driver; /* The driver which added the trace call */ - unsigned char pad2; /* pad to 4 byte boundary - will probably get used */ - u32 arg1; /* Caller arg1 */ - u32 arg2; /* Caller arg2 */ - u32 arg3; /* Caller arg3 */ - u32 arg4; /* Caller arg4 */ + char name[8];/* 8 character name - like 's'i'm'b'a'r'c'v' */ + u32 time; /* Current clock tic */ + unsigned char cpu; /* Current CPU */ + unsigned char irql; /* Current IRQL */ + unsigned char driver;/* The driver which added the trace call */ + /* pad to 4 byte boundary - will probably get used */ + unsigned char pad2; + u32 arg1; /* Caller arg1 */ + u32 arg2; /* Caller arg2 */ + u32 arg3; /* Caller arg3 */ + u32 arg4; /* Caller arg4 */ }; /* Driver types for driver field in struct trace_entry */ @@ -108,11 +109,12 @@ struct trace_entry { #define TRACE_ENTRIES 1024 struct sxg_trace_buffer { - unsigned int size; /* aid for windbg extension */ - unsigned int in; /* Where to add */ - unsigned int level; /* Current Trace level */ - spinlock_t lock; /* For MP tracing */ - struct trace_entry entries[TRACE_ENTRIES];/* The circular buffer */ + /* aid for windbg extension */ + unsigned int size; + unsigned int in; /* Where to add */ + unsigned int level; /* Current Trace level */ + spinlock_t lock; /* For MP tracing */ + struct trace_entry entries[TRACE_ENTRIES];/* The circular buffer */ }; /* @@ -143,22 +145,22 @@ struct sxg_trace_buffer { /*The trace macro. This is active only if ATK_TRACE_ENABLED is set. */ #if ATK_TRACE_ENABLED #define SXG_TRACE(tdriver, buffer, tlevel, tname, a1, a2, a3, a4) { \ - if ((buffer) && ((buffer)->level >= (tlevel))) { \ - unsigned int trace_irql = 0; /* ?????? FIX THIS */ \ - unsigned int trace_len; \ - struct trace_entry *trace_entry; \ - struct timeval timev; \ - \ - spin_lock(&(buffer)->lock); \ - trace_entry = &(buffer)->entries[(buffer)->in]; \ - do_gettimeofday(&timev); \ - \ - memset(trace_entry->name, 0, 8); \ - trace_len = strlen(tname); \ - trace_len = trace_len > 8 ? 8 : trace_len; \ - memcpy(trace_entry->name, (tname), trace_len); \ - trace_entry->time = timev.tv_usec; \ - trace_entry->cpu = (unsigned char)(smp_processor_id() & 0xFF); \ + if ((buffer) && ((buffer)->level >= (tlevel))) { \ + unsigned int trace_irql = 0;/* ?????? FIX THIS */\ + unsigned int trace_len; \ + struct trace_entry *trace_entry; \ + struct timeval timev; \ + \ + spin_lock(&(buffer)->lock); \ + trace_entry = &(buffer)->entries[(buffer)->in]; \ + do_gettimeofday(&timev); \ + \ + memset(trace_entry->name, 0, 8); \ + trace_len = strlen(tname); \ + trace_len = trace_len > 8 ? 8 : trace_len; \ + memcpy(trace_entry->name, (tname), trace_len); \ + trace_entry->time = timev.tv_usec; \ + trace_entry->cpu = (unsigned char)(smp_processor_id() & 0xFF);\ trace_entry->driver = (tdriver); \ trace_entry->irql = trace_irql; \ trace_entry->arg1 = (ulong)(a1); \ diff --git a/drivers/staging/sxg/sxghif.h b/drivers/staging/sxg/sxghif.h index fe9a0808e89..bf057d4823a 100644 --- a/drivers/staging/sxg/sxghif.h +++ b/drivers/staging/sxg/sxghif.h @@ -12,82 +12,82 @@ /* UCODE Registers */ struct sxg_ucode_regs { /* Address 0 - 0x3F = Command codes 0-15 for TCB 0. 
Excode 0 */ - u32 Icr; /* Code = 0 (extended), ExCode = 0 - Int control */ - u32 RsvdReg1; /* Code = 1 - TOE -NA */ - u32 RsvdReg2; /* Code = 2 - TOE -NA */ - u32 RsvdReg3; /* Code = 3 - TOE -NA */ - u32 RsvdReg4; /* Code = 4 - TOE -NA */ - u32 RsvdReg5; /* Code = 5 - TOE -NA */ - u32 CardUp; /* Code = 6 - Microcode initialized when 1 */ - u32 RsvdReg7; /* Code = 7 - TOE -NA */ - u32 ConfigStat; /* Code = 8 - Configuration data load status */ - u32 RsvdReg9; /* Code = 9 - TOE -NA */ - u32 CodeNotUsed[6]; /* Codes 10-15 not used. ExCode = 0 */ + u32 Icr; /* Code = 0 (extended), ExCode = 0 - Int control */ + u32 RsvdReg1; /* Code = 1 - TOE -NA */ + u32 RsvdReg2; /* Code = 2 - TOE -NA */ + u32 RsvdReg3; /* Code = 3 - TOE -NA */ + u32 RsvdReg4; /* Code = 4 - TOE -NA */ + u32 RsvdReg5; /* Code = 5 - TOE -NA */ + u32 CardUp; /* Code = 6 - Microcode initialized when 1 */ + u32 RsvdReg7; /* Code = 7 - TOE -NA */ + u32 ConfigStat; /* Code = 8 - Configuration data load status */ + u32 RsvdReg9; /* Code = 9 - TOE -NA */ + u32 CodeNotUsed[6]; /* Codes 10-15 not used. ExCode = 0 */ /* This brings us to ExCode 1 at address 0x40 = Interrupt status pointer */ - u32 Isp; /* Code = 0 (extended), ExCode = 1 */ - u32 PadEx1[15]; /* Codes 1-15 not used with extended codes */ + u32 Isp; /* Code = 0 (extended), ExCode = 1 */ + u32 PadEx1[15]; /* Codes 1-15 not used with extended codes */ /* ExCode 2 = Interrupt Status Register */ - u32 Isr; /* Code = 0 (extended), ExCode = 2 */ + u32 Isr; /* Code = 0 (extended), ExCode = 2 */ u32 PadEx2[15]; /* ExCode 3 = Event base register. Location of event rings */ - u32 EventBase; /* Code = 0 (extended), ExCode = 3 */ + u32 EventBase; /* Code = 0 (extended), ExCode = 3 */ u32 PadEx3[15]; /* ExCode 4 = Event ring size */ - u32 EventSize; /* Code = 0 (extended), ExCode = 4 */ + u32 EventSize; /* Code = 0 (extended), ExCode = 4 */ u32 PadEx4[15]; /* ExCode 5 = TCB Buffers base address */ - u32 TcbBase; /* Code = 0 (extended), ExCode = 5 */ + u32 TcbBase; /* Code = 0 (extended), ExCode = 5 */ u32 PadEx5[15]; /* ExCode 6 = TCB Composite Buffers base address */ - u32 TcbCompBase; /* Code = 0 (extended), ExCode = 6 */ + u32 TcbCompBase; /* Code = 0 (extended), ExCode = 6 */ u32 PadEx6[15]; /* ExCode 7 = Transmit ring base address */ - u32 XmtBase; /* Code = 0 (extended), ExCode = 7 */ + u32 XmtBase; /* Code = 0 (extended), ExCode = 7 */ u32 PadEx7[15]; /* ExCode 8 = Transmit ring size */ - u32 XmtSize; /* Code = 0 (extended), ExCode = 8 */ + u32 XmtSize; /* Code = 0 (extended), ExCode = 8 */ u32 PadEx8[15]; /* ExCode 9 = Receive ring base address */ - u32 RcvBase; /* Code = 0 (extended), ExCode = 9 */ + u32 RcvBase; /* Code = 0 (extended), ExCode = 9 */ u32 PadEx9[15]; /* ExCode 10 = Receive ring size */ - u32 RcvSize; /* Code = 0 (extended), ExCode = 10 */ + u32 RcvSize; /* Code = 0 (extended), ExCode = 10 */ u32 PadEx10[15]; /* ExCode 11 = Read EEPROM/Flash Config */ - u32 Config; /* Code = 0 (extended), ExCode = 11 */ + u32 Config; /* Code = 0 (extended), ExCode = 11 */ u32 PadEx11[15]; /* ExCode 12 = Multicast bits 31:0 */ - u32 McastLow; /* Code = 0 (extended), ExCode = 12 */ + u32 McastLow; /* Code = 0 (extended), ExCode = 12 */ u32 PadEx12[15]; /* ExCode 13 = Multicast bits 63:32 */ - u32 McastHigh; /* Code = 0 (extended), ExCode = 13 */ + u32 McastHigh; /* Code = 0 (extended), ExCode = 13 */ u32 PadEx13[15]; /* ExCode 14 = Ping */ - u32 Ping; /* Code = 0 (extended), ExCode = 14 */ + u32 Ping; /* Code = 0 (extended), ExCode = 14 */ u32 PadEx14[15]; /* ExCode 15 = Link MTU 
*/ - u32 LinkMtu; /* Code = 0 (extended), ExCode = 15 */ + u32 LinkMtu; /* Code = 0 (extended), ExCode = 15 */ u32 PadEx15[15]; /* ExCode 16 = Download synchronization */ - u32 LoadSync; /* Code = 0 (extended), ExCode = 16 */ + u32 LoadSync; /* Code = 0 (extended), ExCode = 16 */ u32 PadEx16[15]; /* ExCode 17 = Upper DRAM address bits on 32-bit systems */ - u32 Upper; /* Code = 0 (extended), ExCode = 17 */ + u32 Upper; /* Code = 0 (extended), ExCode = 17 */ u32 PadEx17[15]; /* ExCode 18 = Slowpath Send Index Address */ - u32 SPSendIndex; /* Code = 0 (extended), ExCode = 18 */ + u32 SPSendIndex; /* Code = 0 (extended), ExCode = 18 */ u32 PadEx18[15]; /* ExCode 19 = Get ucode statistics */ - u32 GetUcodeStats; /* Code = 0 (extended), ExCode = 19 */ + u32 GetUcodeStats; /* Code = 0 (extended), ExCode = 19 */ u32 PadEx19[15]; /* ExCode 20 = Aggregation - See sxgmisc.c:SxgSetInterruptAggregation */ - u32 Aggregation; /* Code = 0 (extended), ExCode = 20 */ + u32 Aggregation; /* Code = 0 (extended), ExCode = 20 */ u32 PadEx20[15]; /* ExCode 21 = Receive MDL push timer */ - u32 PushTicks; /* Code = 0 (extended), ExCode = 21 */ + u32 PushTicks; /* Code = 0 (extended), ExCode = 21 */ u32 PadEx21[15]; /* ExCode 22 = ACK Frequency */ - u32 AckFrequency; /* Code = 0 (extended), ExCode = 22 */ + u32 AckFrequency; /* Code = 0 (extended), ExCode = 22 */ u32 PadEx22[15]; /* ExCode 23 = TOE NA */ u32 RsvdReg23; @@ -96,31 +96,31 @@ struct sxg_ucode_regs { u32 RsvdReg24; u32 PadEx24[15]; /* ExCode 25 = TOE NA */ - u32 RsvdReg25; /* Code = 0 (extended), ExCode = 25 */ + u32 RsvdReg25; /* Code = 0 (extended), ExCode = 25 */ u32 PadEx25[15]; /* ExCode 26 = Receive checksum requirements */ - u32 ReceiveChecksum; /* Code = 0 (extended), ExCode = 26 */ + u32 ReceiveChecksum; /* Code = 0 (extended), ExCode = 26 */ u32 PadEx26[15]; /* ExCode 27 = RSS Requirements */ - u32 Rss; /* Code = 0 (extended), ExCode = 27 */ + u32 Rss; /* Code = 0 (extended), ExCode = 27 */ u32 PadEx27[15]; /* ExCode 28 = RSS Table */ - u32 RssTable; /* Code = 0 (extended), ExCode = 28 */ + u32 RssTable; /* Code = 0 (extended), ExCode = 28 */ u32 PadEx28[15]; /* ExCode 29 = Event ring release entries */ - u32 EventRelease; /* Code = 0 (extended), ExCode = 29 */ + u32 EventRelease; /* Code = 0 (extended), ExCode = 29 */ u32 PadEx29[15]; /* ExCode 30 = Number of receive bufferlist commands on ring 0 */ - u32 RcvCmd; /* Code = 0 (extended), ExCode = 30 */ + u32 RcvCmd; /* Code = 0 (extended), ExCode = 30 */ u32 PadEx30[15]; /* ExCode 31 = slowpath transmit command - Data[31:0] = 1 */ - u32 XmtCmd; /* Code = 0 (extended), ExCode = 31 */ + u32 XmtCmd; /* Code = 0 (extended), ExCode = 31 */ u32 PadEx31[15]; /* ExCode 32 = Dump command */ - u32 DumpCmd; /* Code = 0 (extended), ExCode = 32 */ + u32 DumpCmd; /* Code = 0 (extended), ExCode = 32 */ u32 PadEx32[15]; /* ExCode 33 = Debug command */ - u32 DebugCmd; /* Code = 0 (extended), ExCode = 33 */ + u32 DebugCmd; /* Code = 0 (extended), ExCode = 33 */ u32 PadEx33[15]; /* * There are 128 possible extended commands - each of account for 16 @@ -129,7 +129,7 @@ struct sxg_ucode_regs { * base. 
As extended codes are added, reduce the first array value in * the following field */ - u32 PadToNextCpu[94][16]; /* 94 = 128 - 34 (34 = Excodes 0 - 33) */ + u32 PadToNextCpu[94][16]; /* 94 = 128 - 34 (34 = Excodes 0 - 33)*/ }; /* Interrupt control register (0) values */ @@ -142,10 +142,11 @@ struct sxg_ucode_regs { ((((_MessageId) << SXG_ICR_MSGID_SHIFT) & \ SXG_ICR_MSGID_MASK) | (_Data)) -#define SXG_MIN_AGG_DEFAULT 0x0010 /* Minimum aggregation default */ -#define SXG_MAX_AGG_DEFAULT 0x0040 /* Maximum aggregation default */ -#define SXG_MAX_AGG_SHIFT 16 /* Maximum in top 16 bits of register */ -#define SXG_AGG_XMT_DISABLE 0x80000000 /* Disable interrupt aggregation on xmt */ +#define SXG_MIN_AGG_DEFAULT 0x0010 /* Minimum aggregation default */ +#define SXG_MAX_AGG_DEFAULT 0x0040 /* Maximum aggregation default */ +#define SXG_MAX_AGG_SHIFT 16 /* Maximum in top 16 bits of register */ +/* Disable interrupt aggregation on xmt */ +#define SXG_AGG_XMT_DISABLE 0x80000000 /* The Microcode supports up to 8 RSS queues */ #define SXG_MAX_RSS 8 @@ -170,11 +171,11 @@ struct sxg_ucode_regs { * Status returned by ucode in the ConfigStat reg (see above) when attempted * to load configuration data from the EEPROM/Flash. */ -#define SXG_CFG_TIMEOUT 1 /* init value - timeout if unchanged */ -#define SXG_CFG_LOAD_EEPROM 2 /* config data loaded from EEPROM */ -#define SXG_CFG_LOAD_FLASH 3 /* config data loaded from flash */ -#define SXG_CFG_LOAD_INVALID 4 /* no valid config data found */ -#define SXG_CFG_LOAD_ERROR 5 /* hardware error */ +#define SXG_CFG_TIMEOUT 1 /* init value - timeout if unchanged */ +#define SXG_CFG_LOAD_EEPROM 2 /* config data loaded from EEPROM */ +#define SXG_CFG_LOAD_FLASH 3 /* config data loaded from flash */ +#define SXG_CFG_LOAD_INVALID 4 /* no valid config data found */ +#define SXG_CFG_LOAD_ERROR 5 /* hardware error */ #define SXG_CHECK_FOR_HANG_TIME 5 @@ -186,17 +187,17 @@ struct sxg_ucode_regs { * struct sxg_ucode_regs definition above */ struct sxg_tcb_regs { - u32 ExCode; /* Extended codes - see SXG_UCODE_REGS */ - u32 Xmt; /* Code = 1 - # of Xmt descriptors added to ring */ - u32 Rcv; /* Code = 2 - # of Rcv descriptors added to ring */ - u32 Rsvd1; /* Code = 3 - TOE NA */ - u32 Rsvd2; /* Code = 4 - TOE NA */ - u32 Rsvd3; /* Code = 5 - TOE NA */ - u32 Invalid1; /* Code = 6 - Reserved for "CardUp" see above */ - u32 Rsvd4; /* Code = 7 - TOE NA */ - u32 Invalid2; /* Code = 8 - Reserved for "ConfigStat" see above */ - u32 Rsvd5; /* Code = 9 - TOE NA */ - u32 Pad[6]; /* Codes 10-15 - Not used. */ + u32 ExCode; /* Extended codes - see SXG_UCODE_REGS */ + u32 Xmt; /* Code = 1 - # of Xmt descriptors added to ring */ + u32 Rcv; /* Code = 2 - # of Rcv descriptors added to ring */ + u32 Rsvd1; /* Code = 3 - TOE NA */ + u32 Rsvd2; /* Code = 4 - TOE NA */ + u32 Rsvd3; /* Code = 5 - TOE NA */ + u32 Invalid1; /* Code = 6 - Reserved for "CardUp" see above */ + u32 Rsvd4; /* Code = 7 - TOE NA */ + u32 Invalid2; /* Code = 8 - Reserved for "ConfigStat" see above */ + u32 Rsvd5; /* Code = 9 - TOE NA */ + u32 Pad[6]; /* Codes 10-15 - Not used. 
*/ }; /*************************************************************************** @@ -226,7 +227,7 @@ struct sxg_tcb_regs { #define SXG_ISR_ERR 0x80000000 /* Error */ #define SXG_ISR_EVENT 0x40000000 /* Event ring event */ #define SXG_ISR_NONE1 0x20000000 /* Not used */ -#define SXG_ISR_UPC 0x10000000 /* Dump/debug command complete */ +#define SXG_ISR_UPC 0x10000000 /* Dump/debug command complete*/ #define SXG_ISR_LINK 0x08000000 /* Link event */ #define SXG_ISR_PDQF 0x04000000 /* Processed data queue full */ #define SXG_ISR_RMISS 0x02000000 /* Drop - no host buf */ @@ -335,7 +336,8 @@ struct sxg_event { */ #define EVENT_RING_SIZE 4096 #define EVENT_RING_BATCH 16 /* Hand entries back 16 at a time. */ -#define EVENT_BATCH_LIMIT 256 /* Stop processing events after 4096 (256 * 16) */ +/* Stop processing events after 4096 (256 * 16) */ +#define EVENT_BATCH_LIMIT 256 struct sxg_event_ring { struct sxg_event Ring[EVENT_RING_SIZE]; @@ -352,34 +354,34 @@ struct sxg_event_ring { * offloaded connections, 10:4 if we support 2k and so on. */ #define SXG_TCB_BUCKET_SHIFT 4 -#define SXG_TCB_PER_BUCKET 16 -#define SXG_TCB_BUCKET_MASK 0xFF0 /* Bucket portion of TCB ID */ -#define SXG_TCB_ELEMENT_MASK 0x00F /* Element within bucket */ -#define SXG_TCB_BUCKETS 256 /* 256 * 16 = 4k */ +#define SXG_TCB_PER_BUCKET 16 +#define SXG_TCB_BUCKET_MASK 0xFF0 /* Bucket portion of TCB ID */ +#define SXG_TCB_ELEMENT_MASK 0x00F /* Element within bucket */ +#define SXG_TCB_BUCKETS 256 /* 256 * 16 = 4k */ #define SXG_TCB_BUFFER_SIZE 512 /* ASSERT format is correct */ -#define SXG_TCB_RCVQ_SIZE 736 +#define SXG_TCB_RCVQ_SIZE 736 #define SXG_TCB_COMPOSITE_BUFFER_SIZE 1024 -#define SXG_LOCATE_TCP_FRAME_HDR(_TcpObject, _IPv6) \ - (((_TcpObject)->VlanId) ? \ - ((_IPv6) ? /* Vlan frame header = yes */ \ - &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.SxgTcp : \ - &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp.SxgTcp) : \ - ((_IPv6) ? /* Vlan frame header = No */ \ - &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.SxgTcp : \ +#define SXG_LOCATE_TCP_FRAME_HDR(_TcpObject, _IPv6) \ + (((_TcpObject)->VlanId) ? \ + ((_IPv6) ? /* Vlan frame header = yes */ \ + &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.SxgTcp: \ + &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp.SxgTcp): \ + ((_IPv6) ? /* Vlan frame header = No */ \ + &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.SxgTcp : \ &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp.SxgTcp)) -#define SXG_LOCATE_IP_FRAME_HDR(_TcpObject) \ - (_TcpObject)->VlanId ? \ - &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp.Ip : \ +#define SXG_LOCATE_IP_FRAME_HDR(_TcpObject) \ + (_TcpObject)->VlanId ? \ + &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp.Ip: \ &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp.Ip -#define SXG_LOCATE_IP6_FRAME_HDR(_TcpObject) \ - (_TcpObject)->VlanId ? \ - &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.Ip : \ +#define SXG_LOCATE_IP6_FRAME_HDR(TcpObject) \ + (_TcpObject)->VlanId ? \ + &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.Ip: \ &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.Ip #if DBG @@ -391,16 +393,18 @@ struct sxg_event_ring { * Obviously this is DBG only. 
Maybe remove later, or #if 0 so we * can set it when needed */ -#define SXG_DBG_HOP_LIMIT(_TcpObject, _FastPath) { \ - PIPV6_HDR _Ip6FrameHdr; \ - if((_TcpObject)->IPv6) { \ - _Ip6FrameHdr = SXG_LOCATE_IP6_FRAME_HDR((_TcpObject)); \ - if(_FastPath) { \ - _Ip6FrameHdr->HopLimit = (_TcpObject)->Cached.TtlOrHopLimit - 2; \ - } else { \ - _Ip6FrameHdr->HopLimit = (_TcpObject)->Cached.TtlOrHopLimit - 1; \ - } \ - } \ +#define SXG_DBG_HOP_LIMIT(_TcpObject, _FastPath) { \ + PIPV6_HDR _Ip6FrameHdr; \ + if ((_TcpObject)->IPv6) { \ + _Ip6FrameHdr = SXG_LOCATE_IP6_FRAME_HDR((_TcpObject)); \ + if (_FastPath) { \ + _Ip6FrameHdr->HopLimit = \ + (_TcpObject)->Cached.TtlOrHopLimit - 2; \ + } else { \ + _Ip6FrameHdr->HopLimit = \ + (_TcpObject)->Cached.TtlOrHopLimit - 1; \ + } \ + } \ } #else /* Do nothing with free build */ @@ -415,41 +419,47 @@ struct sxg_event_ring { /* Structure and macros to manage a ring */ struct sxg_ring_info { - unsigned char Head; /* Where we add entries - Note unsigned char:RING_SIZE */ + /* Where we add entries - Note unsigned char:RING_SIZE */ + unsigned char Head; unsigned char Tail; /* Where we pull off completed entries */ ushort Size; /* Ring size - Must be multiple of 2 */ void * Context[SXG_MAX_RING_SIZE]; /* Shadow ring */ }; -#define SXG_INITIALIZE_RING(_ring, _size) { \ - (_ring).Head = 0; \ - (_ring).Tail = 0; \ - (_ring).Size = (_size); \ +#define SXG_INITIALIZE_RING(_ring, _size) { \ + (_ring).Head = 0; \ + (_ring).Tail = 0; \ + (_ring).Size = (_size); \ } -#define SXG_ADVANCE_INDEX(_index, _size) ((_index) = ((_index) + 1) & ((_size) - 1)) -#define SXG_PREVIOUS_INDEX(_index, _size) (((_index) - 1) &((_size) - 1)) + +#define SXG_ADVANCE_INDEX(_index, _size) \ + ((_index) = ((_index) + 1) & ((_size) - 1)) +#define SXG_PREVIOUS_INDEX(_index, _size) \ + (((_index) - 1) &((_size) - 1)) #define SXG_RING_EMPTY(_ring) ((_ring)->Head == (_ring)->Tail) -#define SXG_RING_FULL(_ring) ((((_ring)->Head + 1) & ((_ring)->Size - 1)) == (_ring)->Tail) -#define SXG_RING_ADVANCE_HEAD(_ring) SXG_ADVANCE_INDEX((_ring)->Head, ((_ring)->Size)) -#define SXG_RING_RETREAT_HEAD(_ring) ((_ring)->Head = \ - SXG_PREVIOUS_INDEX((_ring)->Head, (_ring)->Size)) -#define SXG_RING_ADVANCE_TAIL(_ring) { \ - ASSERT((_ring)->Tail != (_ring)->Head); \ - SXG_ADVANCE_INDEX((_ring)->Tail, ((_ring)->Size)); \ +#define SXG_RING_FULL(_ring) \ + ((((_ring)->Head + 1) & ((_ring)->Size - 1)) == (_ring)->Tail) +#define SXG_RING_ADVANCE_HEAD(_ring) \ + SXG_ADVANCE_INDEX((_ring)->Head, ((_ring)->Size)) +#define SXG_RING_RETREAT_HEAD(_ring) ((_ring)->Head = \ + SXG_PREVIOUS_INDEX((_ring)->Head, (_ring)->Size)) +#define SXG_RING_ADVANCE_TAIL(_ring) { \ + ASSERT((_ring)->Tail != (_ring)->Head); \ + SXG_ADVANCE_INDEX((_ring)->Tail, ((_ring)->Size)); \ } /* * Set cmd to the next available ring entry, set the shadow context * entry and advance the ring. 
* The appropriate lock must be held when calling this macro */ -#define SXG_GET_CMD(_ring, _ringinfo, _cmd, _context) { \ - if(SXG_RING_FULL(_ringinfo)) { \ - (_cmd) = NULL; \ - } else { \ - (_cmd) = &(_ring)->Descriptors[(_ringinfo)->Head]; \ +#define SXG_GET_CMD(_ring, _ringinfo, _cmd, _context) { \ + if(SXG_RING_FULL(_ringinfo)) { \ + (_cmd) = NULL; \ + } else { \ + (_cmd) = &(_ring)->Descriptors[(_ringinfo)->Head]; \ (_ringinfo)->Context[(_ringinfo)->Head] = (void *)(_context);\ - SXG_RING_ADVANCE_HEAD(_ringinfo); \ - } \ + SXG_RING_ADVANCE_HEAD(_ringinfo); \ + } \ } /* @@ -457,21 +467,21 @@ struct sxg_ring_info { * NOTE - The appopriate lock MUST NOT BE DROPPED between the SXG_GET_CMD * and SXG_ABORT_CMD calls. */ -#define SXG_ABORT_CMD(_ringinfo) { \ - ASSERT(!(SXG_RING_EMPTY(_ringinfo))); \ - SXG_RING_RETREAT_HEAD(_ringinfo); \ - (_ringinfo)->Context[(_ringinfo)->Head] = NULL; \ +#define SXG_ABORT_CMD(_ringinfo) { \ + ASSERT(!(SXG_RING_EMPTY(_ringinfo))); \ + SXG_RING_RETREAT_HEAD(_ringinfo); \ + (_ringinfo)->Context[(_ringinfo)->Head] = NULL; \ } /* * For the given ring, return a pointer to the tail cmd and context, * clear the context and advance the tail */ -#define SXG_RETURN_CMD(_ring, _ringinfo, _cmd, _context) { \ - (_cmd) = &(_ring)->Descriptors[(_ringinfo)->Tail]; \ +#define SXG_RETURN_CMD(_ring, _ringinfo, _cmd, _context) { \ + (_cmd) = &(_ring)->Descriptors[(_ringinfo)->Tail]; \ (_context) = (_ringinfo)->Context[(_ringinfo)->Tail]; \ - (_ringinfo)->Context[(_ringinfo)->Tail] = NULL; \ - SXG_RING_ADVANCE_TAIL(_ringinfo); \ + (_ringinfo)->Context[(_ringinfo)->Tail] = NULL; \ + SXG_RING_ADVANCE_TAIL(_ringinfo); \ } /*************************************************************** @@ -507,7 +517,8 @@ struct sxg_cmd { union { u32 Rsvd1; /* TOE NA */ u32 SgeOffset; /* Slowpath - 2nd SGE offset */ - u32 Resid; /* MDL completion - clobbers update */ + /* MDL completion - clobbers update */ + u32 Resid; }; union { u32 TotalLength; /* Total transfer length */ @@ -639,10 +650,10 @@ enum sxg_buffer_type { * Further complicating matters is the fact that the receive * buffers must be variable in length in order to accomodate * jumbo frame configurations. We configure the buffer - * length so that the buffer and it's corresponding struct sxg_rcv_data_buffer_hdr - * structure add up to an even boundary. Then we place the - * remaining data structures after 128 of them as shown in - * the following diagram: + * length so that the buffer and it's corresponding struct + * sxg_rcv_data_buffer_hdr structure add up to an even + * boundary. 
Then we place the remaining data structures after 128 + * of them as shown in the following diagram: * * _________________________________________ * | | @@ -683,7 +694,8 @@ enum sxg_buffer_type { */ #define SXG_RCV_DATA_BUFFERS 8192 /* Amount to give to the card */ #define SXG_INITIAL_RCV_DATA_BUFFERS 16384 /* Initial pool of buffers */ -#define SXG_MIN_RCV_DATA_BUFFERS 4096 /* Minimum amount and when to get more */ +/* Minimum amount and when to get more */ +#define SXG_MIN_RCV_DATA_BUFFERS 4096 #define SXG_MAX_RCV_BLOCKS 256 /* = 32k receive buffers */ /* Receive buffer header */ @@ -699,7 +711,7 @@ struct sxg_rcv_data_buffer_hdr { struct list_entry FreeList; /* Free queue of buffers */ unsigned char State; /* See SXG_BUFFER state above */ unsigned char Status; /* Event status (to log PUSH) */ - struct sk_buff * skb; /* Double mapped (nbl and pkt) */ + struct sk_buff * skb; /* Double mapped (nbl and pkt)*/ }; /* @@ -708,15 +720,17 @@ struct sxg_rcv_data_buffer_hdr { */ #define SxgDumbRcvPacket skb -#define SXG_RCV_DATA_HDR_SIZE 256 /* Space for struct sxg_rcv_data_buffer_hdr */ -#define SXG_RCV_DATA_BUFFER_SIZE 2048 /* Non jumbo = 2k including HDR */ -#define SXG_RCV_JUMBO_BUFFER_SIZE 10240 /* jumbo = 10k including HDR */ +/* Space for struct sxg_rcv_data_buffer_hdr */ +#define SXG_RCV_DATA_HDR_SIZE 256 +/* Non jumbo = 2k including HDR */ +#define SXG_RCV_DATA_BUFFER_SIZE 2048 +#define SXG_RCV_JUMBO_BUFFER_SIZE 10240 /* jumbo = 10k including HDR */ /* Receive data descriptor */ struct sxg_rcv_data_descriptor { union { struct sk_buff *VirtualAddress; /* Host handle */ - u64 ForceTo8Bytes; /* Force x86 to 8-byte boundary */ + u64 ForceTo8Bytes; /*Force x86 to 8-byte boundary*/ }; dma_addr_t PhysicalAddress; }; @@ -731,32 +745,32 @@ struct sxg_rcv_descriptor_block { /* Receive descriptor block header */ struct sxg_rcv_descriptor_block_hdr { - void *VirtualAddress; /* start of 2k buffer */ - dma_addr_t PhysicalAddress; /* ..and it's physical address */ - struct list_entry FreeList; /* free queue of descriptor blocks */ - unsigned char State; /* see sxg_buffer state above */ + void *VirtualAddress; /* start of 2k buffer */ + dma_addr_t PhysicalAddress; /* ..and it's physical address */ + struct list_entry FreeList;/* free queue of descriptor blocks */ + unsigned char State; /* see sxg_buffer state above */ }; /* Receive block header */ struct sxg_rcv_block_hdr { void *VirtualAddress; /* Start of virtual memory */ - dma_addr_t PhysicalAddress; /* ..and it's physical address */ - struct list_entry AllList; /* Queue of all SXG_RCV_BLOCKS */ + dma_addr_t PhysicalAddress; /* ..and it's physical address*/ + struct list_entry AllList; /* Queue of all SXG_RCV_BLOCKS*/ }; /* Macros to determine data structure offsets into receive block */ -#define SXG_RCV_BLOCK_SIZE(_Buffersize) \ +#define SXG_RCV_BLOCK_SIZE(_Buffersize) \ (((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \ - (sizeof(struct sxg_rcv_descriptor_block)) + \ - (sizeof(struct sxg_rcv_descriptor_block_hdr)) + \ + (sizeof(struct sxg_rcv_descriptor_block)) + \ + (sizeof(struct sxg_rcv_descriptor_block_hdr)) + \ (sizeof(struct sxg_rcv_block_hdr))) #define SXG_RCV_BUFFER_DATA_SIZE(_Buffersize) \ ((_Buffersize) - SXG_RCV_DATA_HDR_SIZE) #define SXG_RCV_DATA_BUFFER_HDR_OFFSET(_Buffersize) \ ((_Buffersize) - SXG_RCV_DATA_HDR_SIZE) -#define SXG_RCV_DESCRIPTOR_BLOCK_OFFSET(_Buffersize) \ +#define SXG_RCV_DESCRIPTOR_BLOCK_OFFSET(_Buffersize) \ ((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) -#define SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET(_Buffersize) \ 
+#define SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET(_Buffersize) \ (((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \ (sizeof(struct sxg_rcv_descriptor_block))) #define SXG_RCV_BLOCK_HDR_OFFSET(_Buffersize) \ @@ -766,12 +780,13 @@ struct sxg_rcv_block_hdr { /* Scatter gather list buffer */ #define SXG_INITIAL_SGL_BUFFERS 8192 /* Initial pool of SGL buffers */ -#define SXG_MIN_SGL_BUFFERS 2048 /* Minimum amount and when to get more */ -#define SXG_MAX_SGL_BUFFERS 16384 /* Maximum to allocate (note ADAPT:ushort) */ +#define SXG_MIN_SGL_BUFFERS 2048 /* Minimum amount and when to get more*/ +/* Maximum to allocate (note ADAPT:ushort) */ +#define SXG_MAX_SGL_BUFFERS 16384 /* - * SXG_SGL_POOL_PROPERTIES - This structure is used to define a pool of SGL buffers. - * These buffers are allocated out of shared memory and used to + * SXG_SGL_POOL_PROPERTIES - This structure is used to define a pool of SGL + * buffers. These buffers are allocated out of shared memory and used to * contain a physical scatter gather list structure that is shared * with the card. * @@ -801,39 +816,38 @@ struct sxg_sgl_pool_properties { /* * At the moment I'm going to statically initialize 4 pools: - * 100k buffer pool: The vast majority of the expected buffers are expected to - * be less than or equal to 100k. At 30 entries per and - * 8k initial buffers amounts to ~4MB of memory - * NOTE - This used to be 64K with 20 entries, but during - * WHQL NDIS 6.0 Testing (2c_mini6stress) MS does their - * best to send absurd NBL's with ridiculous SGLs, we - * have received 400byte sends contained in SGL's that - * have 28 entries - * 1M buffer pool: Buffers between 64k and 1M. Allocate 256 initial buffers - * with 300 entries each => ~2MB of memory - * 5M buffer pool: Not expected often, if at all. 32 initial buffers - * at 1500 entries each => ~1MB of memory + * 100k buffer pool: The vast majority of the expected buffers are expected + * to be less than or equal to 100k. At 30 entries per and + * 8k initial buffers amounts to ~4MB of memory + * NOTE - This used to be 64K with 20 entries, but during + * WHQL NDIS 6.0 Testing (2c_mini6stress) MS does their + * best to send absurd NBL's with ridiculous SGLs, we + * have received 400byte sends contained in SGL's that + * have 28 entries + * 1M buffer pool: Buffers between 64k and 1M. Allocate 256 initial + * buffers with 300 entries each => ~2MB of memory + * 5M buffer pool: Not expected often, if at all. 32 initial buffers + * at 1500 entries each => ~1MB of memory * 10M buffer pool: Not expected at all, except under pathelogical conditions. - * Allocate one at initialization time. - * Note - 10M is the current limit of what we can - * realistically support due to the sahara SGL - * bug described in the SAHARA SGL WORKAROUND below - * - * We will likely adjust the number of pools and/or pool properties over time.. + * Allocate one at initialization time. + * Note - 10M is the current limit of what we can realistically + * support due to the sahara SGL bug described in the + * SAHARA SGL WORKAROUND below. We will likely adjust the + * number of pools and/or pool properties over time. 
*/ #define SXG_NUM_SGL_POOLS 4 -#define INITIALIZE_SGL_POOL_PROPERTIES \ -struct sxg_sgl_pool_properties SxgSglPoolProperties[SXG_NUM_SGL_POOLS] = \ -{ \ - { 102400, 30, 8192, 2048, 16384, 256}, \ - { 1048576, 300, 256, 128, 1024, 16}, \ - { 5252880, 1500, 32, 16, 512, 0}, \ - {10485760, 2700, 2, 4, 32, 0}, \ +#define INITIALIZE_SGL_POOL_PROPERTIES \ +struct sxg_sgl_pool_properties SxgSglPoolProperties[SXG_NUM_SGL_POOLS] =\ +{ \ + { 102400, 30, 8192, 2048, 16384, 256}, \ + { 1048576, 300, 256, 128, 1024, 16}, \ + { 5252880, 1500, 32, 16, 512, 0}, \ + {10485760, 2700, 2, 4, 32, 0}, \ }; extern struct sxg_sgl_pool_properties SxgSglPoolProperties[]; -#define SXG_MAX_SGL_BUFFER_SIZE \ +#define SXG_MAX_SGL_BUFFER_SIZE \ SxgSglPoolProperties[SXG_NUM_SGL_POOLS - 1].NBSize /* @@ -847,9 +861,9 @@ extern struct sxg_sgl_pool_properties SxgSglPoolProperties[]; * We currently workaround this issue by allocating SGL buffers * in 64k blocks and skipping over buffers that straddle the boundary. */ -#define SXG_INVALID_SGL(_SxgSgl) \ - (((_SxgSgl)->PhysicalAddress.LowPart & 0xFFFF0000) != \ - (((_SxgSgl)->PhysicalAddress.LowPart + \ +#define SXG_INVALID_SGL(_SxgSgl) \ + (((_SxgSgl)->PhysicalAddress.LowPart & 0xFFFF0000) != \ + (((_SxgSgl)->PhysicalAddress.LowPart + \ SXG_SGL_SIZE((_SxgSgl)->Pool)) & 0xFFFF0000)) /* @@ -858,17 +872,19 @@ extern struct sxg_sgl_pool_properties SxgSglPoolProperties[]; * struct sxg_sgl_block_hdr, plus one for padding */ #define SXG_SGL_BLOCK_SIZE 65536 -#define SXG_SGL_ALLOCATION_SIZE(_Pool) SXG_SGL_BLOCK_SIZE + SXG_SGL_SIZE(_Pool) +#define SXG_SGL_ALLOCATION_SIZE(_Pool) \ + SXG_SGL_BLOCK_SIZE + SXG_SGL_SIZE(_Pool) struct sxg_sgl_block_hdr { - ushort Pool; /* Associated SGL pool */ - struct list_entry List; /* struct sxg_scatter_gather blocks */ - dma64_addr_t PhysicalAddress;/* physical address */ + ushort Pool; /* Associated SGL pool */ + /* struct sxg_scatter_gather blocks */ + struct list_entry List; + dma64_addr_t PhysicalAddress;/* physical address */ }; /* * The following definition denotes the maximum block of memory that the - * card can DMA to. It is specified in the call to NdisMRegisterScatterGatherDma. + * card can DMA to.It is specified in the call to NdisMRegisterScatterGatherDma. * For now, use the same value as used in the Slic/Oasis driver, which * is 128M. That should cover any expected MDL that I can think of. 
*/ @@ -876,9 +892,9 @@ struct sxg_sgl_block_hdr { /* Self identifying structure type */ enum SXG_SGL_TYPE { - SXG_SGL_DUMB, /* Dumb NIC SGL */ - SXG_SGL_SLOW, /* Slowpath protocol header - see below */ - SXG_SGL_CHIMNEY /* Chimney offload SGL */ + SXG_SGL_DUMB, /* Dumb NIC SGL */ + SXG_SGL_SLOW, /* Slowpath protocol header - see below */ + SXG_SGL_CHIMNEY /* Chimney offload SGL */ }; /* @@ -912,13 +928,16 @@ struct sxg_scatter_gather { ushort Pool; /* Associated SGL pool */ ushort Entries; /* SGL total entries */ void * adapter; /* Back pointer to adapter */ - struct list_entry FreeList; /* Free struct sxg_scatter_gather blocks */ - struct list_entry AllList; /* All struct sxg_scatter_gather blocks */ + /* Free struct sxg_scatter_gather blocks */ + struct list_entry FreeList; + /* All struct sxg_scatter_gather blocks */ + struct list_entry AllList; dma_addr_t PhysicalAddress;/* physical address */ unsigned char State; /* See SXG_BUFFER state above */ unsigned char CmdIndex; /* Command ring index */ struct sk_buff *DumbPacket; /* Associated Packet */ - u32 Direction; /* For asynchronous completions */ + /* For asynchronous completions */ + u32 Direction; u32 CurOffset; /* Current SGL offset */ u32 SglRef; /* SGL reference count */ struct vlan_hdr VlanTag; /* VLAN tag to be inserted into SGL */ @@ -926,7 +945,10 @@ struct sxg_scatter_gather { struct sxg_x64_sgl Sgl; /* SGL handed to card */ }; -/* Note - the "- 1" is because struct sxg_scatter_gather=>struct sxg_x64_sgl includes 1 SGE.. */ +/* + * Note - the "- 1" is because struct sxg_scatter_gather=>struct sxg_x64_sgl + * includes 1 SGE.. + */ #define SXG_SGL_SIZE(_Pool) \ (sizeof(struct sxg_scatter_gather) + \ ((SxgSglPoolProperties[_Pool].SGEntries - 1) * \ @@ -934,7 +956,8 @@ struct sxg_scatter_gather { #if defined(CONFIG_X86_64) #define SXG_SGL_BUFFER(_SxgSgl) (&_SxgSgl->Sgl) -#define SXG_SGL_BUFFER_LENGTH(_SxgSgl) ((_SxgSgl)->Entries * sizeof(struct sxg_x64_sge)) +#define SXG_SGL_BUFFER_LENGTH(_SxgSgl) ((_SxgSgl)->Entries * \ + sizeof(struct sxg_x64_sge)) #define SXG_SGL_BUF_SIZE sizeof(struct sxg_x64_sgl) #elif defined(CONFIG_X86) /* Force NDIS to give us it's own buffer so we can reformat to our own */ @@ -952,7 +975,8 @@ struct sxg_ucode_stats { u32 ERDrops; /* Rcv drops due to ER full */ u32 NBDrops; /* Rcv drops due to out of host buffers */ u32 PQDrops; /* Rcv drops due to PDQ full */ - u32 BFDrops; /* Rcv drops due to bad frame: no link addr match, frlen > max */ + /* Rcv drops due to bad frame: no link addr match, frlen > max */ + u32 BFDrops; u32 UPDrops; /* Rcv drops due to UPFq full */ u32 XNoBufs; /* Xmt drop due to no DRAM Xmit buffer or PxyBuf */ }; diff --git a/drivers/staging/sxg/sxghw.h b/drivers/staging/sxg/sxghw.h index 3ce1947080a..efcbd453e16 100644 --- a/drivers/staging/sxg/sxghw.h +++ b/drivers/staging/sxg/sxghw.h @@ -33,14 +33,18 @@ #define SSID_FUNC_MASK 0xF000 /* Subsystem function mask */ /* Base SSID's */ -#define SSID_SAHARA_PROTO 0x0018 /* 100022 Sahara prototype (XenPak) board */ +/* 100022 Sahara prototype (XenPak) board */ +#define SSID_SAHARA_PROTO 0x0018 #define SSID_SAHARA_FIBER 0x0019 /* 100023 Sahara 1-port fiber board */ #define SSID_SAHARA_COPPER 0x001A /* 100024 Sahara 1-port copper board */ /* Useful SSID macros */ -#define SSID_BASE(ssid) ((ssid) & SSID_BASE_MASK) /* isolate base SSID bits */ -#define SSID_OEM(ssid) ((ssid) & SSID_OEM_MASK) /* isolate SSID OEM bits */ -#define SSID_FUNC(ssid) ((ssid) & SSID_FUNC_MASK) /* isolate SSID function bits */ +/* isolate base SSID bits */ +#define 
SSID_BASE(ssid) ((ssid) & SSID_BASE_MASK) +/* isolate SSID OEM bits */ +#define SSID_OEM(ssid) ((ssid) & SSID_OEM_MASK) +/* isolate SSID function bits */ +#define SSID_FUNC(ssid) ((ssid) & SSID_FUNC_MASK) /* HW Register Space */ @@ -48,34 +52,34 @@ #pragma pack(push, 1) struct sxg_hw_regs { - u32 Reset; /* Write 0xdead to invoke soft reset */ - u32 Pad1; /* No register defined at offset 4 */ - u32 InterruptMask0; /* Deassert legacy interrupt on function 0 */ - u32 InterruptMask1; /* Deassert legacy interrupt on function 1 */ - u32 UcodeDataLow; /* Store microcode instruction bits 31-0 */ + u32 Reset; /* Write 0xdead to invoke soft reset */ + u32 Pad1; /* No register defined at offset 4 */ + u32 InterruptMask0; /* Deassert legacy interrupt on function 0 */ + u32 InterruptMask1; /* Deassert legacy interrupt on function 1 */ + u32 UcodeDataLow; /* Store microcode instruction bits 31-0 */ u32 UcodeDataMiddle; /* Store microcode instruction bits 63-32 */ - u32 UcodeDataHigh; /* Store microcode instruction bits 95-64 */ - u32 UcodeAddr; /* Store microcode address - See flags below */ - u32 PadTo0x80[24]; /* Pad to Xcv configuration registers */ - u32 MacConfig0; /* 0x80 - AXGMAC Configuration Register 0 */ - u32 MacConfig1; /* 0x84 - AXGMAC Configuration Register 1 */ - u32 MacConfig2; /* 0x88 - AXGMAC Configuration Register 2 */ - u32 MacConfig3; /* 0x8C - AXGMAC Configuration Register 3 */ - u32 MacAddressLow; /* 0x90 - AXGMAC MAC Station Address - octets 1-4 */ - u32 MacAddressHigh; /* 0x94 - AXGMAC MAC Station Address - octets 5-6 */ + u32 UcodeDataHigh; /* Store microcode instruction bits 95-64 */ + u32 UcodeAddr; /* Store microcode address - See flags below */ + u32 PadTo0x80[24]; /* Pad to Xcv configuration registers */ + u32 MacConfig0; /* 0x80 - AXGMAC Configuration Register 0 */ + u32 MacConfig1; /* 0x84 - AXGMAC Configuration Register 1 */ + u32 MacConfig2; /* 0x88 - AXGMAC Configuration Register 2 */ + u32 MacConfig3; /* 0x8C - AXGMAC Configuration Register 3 */ + u32 MacAddressLow; /* 0x90 - AXGMAC MAC Station Address - octets 1-4 */ + u32 MacAddressHigh; /* 0x94 - AXGMAC MAC Station Address - octets 5-6 */ u32 MacReserved1[2]; /* 0x98 - AXGMAC Reserved */ - u32 MacMaxFrameLen; /* 0xA0 - AXGMAC Maximum Frame Length */ + u32 MacMaxFrameLen; /* 0xA0 - AXGMAC Maximum Frame Length */ u32 MacReserved2[2]; /* 0xA4 - AXGMAC Reserved */ - u32 MacRevision; /* 0xAC - AXGMAC Revision Level Register */ + u32 MacRevision; /* 0xAC - AXGMAC Revision Level Register */ u32 MacReserved3[4]; /* 0xB0 - AXGMAC Reserved */ - u32 MacAmiimCmd; /* 0xC0 - AXGMAC AMIIM Command Register */ - u32 MacAmiimField; /* 0xC4 - AXGMAC AMIIM Field Register */ - u32 MacAmiimConfig; /* 0xC8 - AXGMAC AMIIM Configuration Register */ - u32 MacAmiimLink; /* 0xCC - AXGMAC AMIIM Link Fail Vector Register */ + u32 MacAmiimCmd; /* 0xC0 - AXGMAC AMIIM Command Register */ + u32 MacAmiimField; /* 0xC4 - AXGMAC AMIIM Field Register */ + u32 MacAmiimConfig; /* 0xC8 - AXGMAC AMIIM Configuration Register */ + u32 MacAmiimLink; /* 0xCC - AXGMAC AMIIM Link Fail Vector Register */ u32 MacAmiimIndicator; /* 0xD0 - AXGMAC AMIIM Indicator Registor */ u32 PadTo0x100[11]; /* 0xD4 - 0x100 - Pad */ - u32 XmtConfig; /* 0x100 - Transmit Configuration Register */ - u32 RcvConfig; /* 0x104 - Receive Configuration Register 1 */ + u32 XmtConfig; /* 0x100 - Transmit Configuration Register */ + u32 RcvConfig; /* 0x104 - Receive Configuration Register 1 */ u32 LinkAddress0Low; /* 0x108 - Link address 0 */ u32 LinkAddress0High; /* 0x10C - Link address 0 
*/ u32 LinkAddress1Low; /* 0x110 - Link address 1 */ @@ -90,10 +94,10 @@ struct sxg_hw_regs { u32 ClearStats; /* 0x17C - Clear Stats */ u32 XmtErrorsLow; /* 0x180 - Transmit stats - errors */ u32 XmtErrorsHigh; /* 0x184 - Transmit stats - errors */ - u32 XmtFramesLow; /* 0x188 - Transmit stats - frame count */ - u32 XmtFramesHigh; /* 0x18C - Transmit stats - frame count */ - u32 XmtBytesLow; /* 0x190 - Transmit stats - byte count */ - u32 XmtBytesHigh; /* 0x194 - Transmit stats - byte count */ + u32 XmtFramesLow; /* 0x188 - Transmit stats - frame count */ + u32 XmtFramesHigh; /* 0x18C - Transmit stats - frame count */ + u32 XmtBytesLow; /* 0x190 - Transmit stats - byte count */ + u32 XmtBytesHigh; /* 0x194 - Transmit stats - byte count */ u32 XmtTcpSegmentsLow; /* 0x198 - Transmit stats - TCP segments */ u32 XmtTcpSegmentsHigh; /* 0x19C - Transmit stats - TCP segments */ u32 XmtTcpBytesLow; /* 0x1A0 - Transmit stats - TCP bytes */ @@ -119,11 +123,12 @@ struct sxg_hw_regs { #define MICROCODE_ADDRESS_GO 0x80000000 /* Start microcode */ #define MICROCODE_ADDRESS_WRITE 0x40000000 /* Store microcode */ #define MICROCODE_ADDRESS_READ 0x20000000 /* Read microcode */ -#define MICROCODE_ADDRESS_PARITY 0x10000000 /* Parity error detected */ +#define MICROCODE_ADDRESS_PARITY 0x10000000/* Parity error detected */ #define MICROCODE_ADDRESS_MASK 0x00001FFF /* Address bits */ /* Link Address Registers */ -#define LINK_ADDRESS_ENABLE 0x80000000 /* Applied to link address high */ +/* Applied to link address high */ +#define LINK_ADDRESS_ENABLE 0x80000000 /* Microsoft register space size */ #define SXG_UCODEREG_MEMSIZE 0x40000 /* 256k */ @@ -135,110 +140,153 @@ struct sxg_hw_regs { */ #define SXG_ADDRESS_CODE_SHIFT 2 /* Base command code */ #define SXG_ADDRESS_CODE_MASK 0x0000003C -#define SXG_ADDRESS_EXCODE_SHIFT 6 /* Extended (or sub) command code */ +/* Extended (or sub) command code */ +#define SXG_ADDRESS_EXCODE_SHIFT 6 #define SXG_ADDRESS_EXCODE_MASK 0x00001FC0 #define SXG_ADDRESS_CPUID_SHIFT 13 /* CPU */ #define SXG_ADDRESS_CPUID_MASK 0x0003E000 -#define SXG_REGISTER_SIZE_PER_CPU 0x00002000 /* Used to sanity check UCODE_REGS structure */ +/* Used to sanity check UCODE_REGS structure */ +#define SXG_REGISTER_SIZE_PER_CPU 0x00002000 /* Sahara receive sequencer status values */ -#define SXG_RCV_STATUS_ATTN 0x80000000 /* Attention */ -#define SXG_RCV_STATUS_TRANSPORT_MASK 0x3F000000 /* Transport mask */ -#define SXG_RCV_STATUS_TRANSPORT_ERROR 0x20000000 /* Transport error */ -#define SXG_RCV_STATUS_TRANSPORT_CSUM 0x23000000 /* Transport cksum error */ -#define SXG_RCV_STATUS_TRANSPORT_UFLOW 0x22000000 /* Transport underflow */ -#define SXG_RCV_STATUS_TRANSPORT_HDRLEN 0x20000000 /* Transport header length */ -#define SXG_RCV_STATUS_TRANSPORT_FLAGS 0x10000000 /* Transport flags detected */ -#define SXG_RCV_STATUS_TRANSPORT_OPTS 0x08000000 /* Transport options detected */ -#define SXG_RCV_STATUS_TRANSPORT_SESS_MASK 0x07000000 /* Transport DDP */ -#define SXG_RCV_STATUS_TRANSPORT_DDP 0x06000000 /* Transport DDP */ -#define SXG_RCV_STATUS_TRANSPORT_iSCSI 0x05000000 /* Transport iSCSI */ -#define SXG_RCV_STATUS_TRANSPORT_NFS 0x04000000 /* Transport NFS */ -#define SXG_RCV_STATUS_TRANSPORT_FTP 0x03000000 /* Transport FTP */ -#define SXG_RCV_STATUS_TRANSPORT_HTTP 0x02000000 /* Transport HTTP */ -#define SXG_RCV_STATUS_TRANSPORT_SMB 0x01000000 /* Transport SMB */ -#define SXG_RCV_STATUS_NETWORK_MASK 0x00FF0000 /* Network mask */ -#define SXG_RCV_STATUS_NETWORK_ERROR 0x00800000 /* Network error */ -#define 
SXG_RCV_STATUS_NETWORK_CSUM 0x00830000 /* Network cksum error */ -#define SXG_RCV_STATUS_NETWORK_UFLOW 0x00820000 /* Network underflow error */ -#define SXG_RCV_STATUS_NETWORK_HDRLEN 0x00800000 /* Network header length */ -#define SXG_RCV_STATUS_NETWORK_OFLOW 0x00400000 /* Network overflow detected */ -#define SXG_RCV_STATUS_NETWORK_MCAST 0x00200000 /* Network multicast detected */ -#define SXG_RCV_STATUS_NETWORK_OPTIONS 0x00100000 /* Network options detected */ -#define SXG_RCV_STATUS_NETWORK_OFFSET 0x00080000 /* Network offset detected */ -#define SXG_RCV_STATUS_NETWORK_FRAGMENT 0x00040000 /* Network fragment detected */ -#define SXG_RCV_STATUS_NETWORK_TRANS_MASK 0x00030000 /* Network transport type mask */ -#define SXG_RCV_STATUS_NETWORK_UDP 0x00020000 /* UDP */ -#define SXG_RCV_STATUS_NETWORK_TCP 0x00010000 /* TCP */ -#define SXG_RCV_STATUS_IPONLY 0x00008000 /* IP-only not TCP */ -#define SXG_RCV_STATUS_PKT_PRI 0x00006000 /* Receive priority */ -#define SXG_RCV_STATUS_PKT_PRI_SHFT 13 /* Receive priority shift */ -#define SXG_RCV_STATUS_PARITY 0x00001000 /* MAC Receive RAM parity error */ -#define SXG_RCV_STATUS_ADDRESS_MASK 0x00000F00 /* Link address detection mask */ -#define SXG_RCV_STATUS_ADDRESS_D 0x00000B00 /* Link address D */ -#define SXG_RCV_STATUS_ADDRESS_C 0x00000A00 /* Link address C */ -#define SXG_RCV_STATUS_ADDRESS_B 0x00000900 /* Link address B */ -#define SXG_RCV_STATUS_ADDRESS_A 0x00000800 /* Link address A */ -#define SXG_RCV_STATUS_ADDRESS_BCAST 0x00000300 /* Link address broadcast */ -#define SXG_RCV_STATUS_ADDRESS_MCAST 0x00000200 /* Link address multicast */ -#define SXG_RCV_STATUS_ADDRESS_CMCAST 0x00000100 /* Link control multicast */ -#define SXG_RCV_STATUS_LINK_MASK 0x000000FF /* Link status mask */ -#define SXG_RCV_STATUS_LINK_ERROR 0x00000080 /* Link error */ -#define SXG_RCV_STATUS_LINK_MASK 0x000000FF /* Link status mask */ -#define SXG_RCV_STATUS_LINK_PARITY 0x00000087 /* RcvMacQ parity error */ -#define SXG_RCV_STATUS_LINK_EARLY 0x00000086 /* Data early */ -#define SXG_RCV_STATUS_LINK_BUFOFLOW 0x00000085 /* Buffer overflow */ -#define SXG_RCV_STATUS_LINK_CODE 0x00000084 /* Link code error */ -#define SXG_RCV_STATUS_LINK_DRIBBLE 0x00000083 /* Dribble nibble */ -#define SXG_RCV_STATUS_LINK_CRC 0x00000082 /* CRC error */ -#define SXG_RCV_STATUS_LINK_OFLOW 0x00000081 /* Link overflow */ -#define SXG_RCV_STATUS_LINK_UFLOW 0x00000080 /* Link underflow */ -#define SXG_RCV_STATUS_LINK_8023 0x00000020 /* 802.3 */ -#define SXG_RCV_STATUS_LINK_SNAP 0x00000010 /* Snap */ -#define SXG_RCV_STATUS_LINK_VLAN 0x00000008 /* VLAN */ -#define SXG_RCV_STATUS_LINK_TYPE_MASK 0x00000007 /* Network type mask */ -#define SXG_RCV_STATUS_LINK_CONTROL 0x00000003 /* Control packet */ -#define SXG_RCV_STATUS_LINK_IPV6 0x00000002 /* IPv6 packet */ -#define SXG_RCV_STATUS_LINK_IPV4 0x00000001 /* IPv4 packet */ +#define SXG_RCV_STATUS_ATTN 0x80000000 /* Attention */ +#define SXG_RCV_STATUS_TRANSPORT_MASK 0x3F000000 /* Transport mask */ +#define SXG_RCV_STATUS_TRANSPORT_ERROR 0x20000000 /* Transport error */ +/* Transport cksum error */ +#define SXG_RCV_STATUS_TRANSPORT_CSUM 0x23000000 +/* Transport underflow */ +#define SXG_RCV_STATUS_TRANSPORT_UFLOW 0x22000000 + /* Transport header length */ +#define SXG_RCV_STATUS_TRANSPORT_HDRLEN 0x20000000 +/* Transport flags detected */ +#define SXG_RCV_STATUS_TRANSPORT_FLAGS 0x10000000 + /* Transport options detected */ +#define SXG_RCV_STATUS_TRANSPORT_OPTS 0x08000000 +#define SXG_RCV_STATUS_TRANSPORT_SESS_MASK 0x07000000 /* Transport DDP */ 
+#define SXG_RCV_STATUS_TRANSPORT_DDP 0x06000000 /* Transport DDP */ +#define SXG_RCV_STATUS_TRANSPORT_iSCSI 0x05000000 /* Transport iSCSI */ +#define SXG_RCV_STATUS_TRANSPORT_NFS 0x04000000 /* Transport NFS */ +#define SXG_RCV_STATUS_TRANSPORT_FTP 0x03000000 /* Transport FTP */ +#define SXG_RCV_STATUS_TRANSPORT_HTTP 0x02000000 /* Transport HTTP */ +#define SXG_RCV_STATUS_TRANSPORT_SMB 0x01000000 /* Transport SMB */ +#define SXG_RCV_STATUS_NETWORK_MASK 0x00FF0000 /* Network mask */ +#define SXG_RCV_STATUS_NETWORK_ERROR 0x00800000 /* Network error */ +/* Network cksum error */ +#define SXG_RCV_STATUS_NETWORK_CSUM 0x00830000 +/* Network underflow error */ +#define SXG_RCV_STATUS_NETWORK_UFLOW 0x00820000 + /* Network header length */ +#define SXG_RCV_STATUS_NETWORK_HDRLEN 0x00800000 + /* Network overflow detected */ +#define SXG_RCV_STATUS_NETWORK_OFLOW 0x00400000 +/* Network multicast detected */ +#define SXG_RCV_STATUS_NETWORK_MCAST 0x00200000 +/* Network options detected */ +#define SXG_RCV_STATUS_NETWORK_OPTIONS 0x00100000 +/* Network offset detected */ +#define SXG_RCV_STATUS_NETWORK_OFFSET 0x00080000 +/* Network fragment detected */ +#define SXG_RCV_STATUS_NETWORK_FRAGMENT 0x00040000 +/* Network transport type mask */ +#define SXG_RCV_STATUS_NETWORK_TRANS_MASK 0x00030000 +#define SXG_RCV_STATUS_NETWORK_UDP 0x00020000 /* UDP */ +#define SXG_RCV_STATUS_NETWORK_TCP 0x00010000 /* TCP */ +#define SXG_RCV_STATUS_IPONLY 0x00008000 /* IP-only not TCP */ +/* Receive priority */ +#define SXG_RCV_STATUS_PKT_PRI 0x00006000 +/* Receive priority shift */ +#define SXG_RCV_STATUS_PKT_PRI_SHFT 13 +/* MAC Receive RAM parity error */ +#define SXG_RCV_STATUS_PARITY 0x00001000 +/* Link address detection mask */ +#define SXG_RCV_STATUS_ADDRESS_MASK 0x00000F00 + +#define SXG_RCV_STATUS_ADDRESS_D 0x00000B00 /* Link address D */ +#define SXG_RCV_STATUS_ADDRESS_C 0x00000A00 /* Link address C */ +#define SXG_RCV_STATUS_ADDRESS_B 0x00000900 /* Link address B */ +#define SXG_RCV_STATUS_ADDRESS_A 0x00000800 /* Link address A */ +/* Link address broadcast */ +#define SXG_RCV_STATUS_ADDRESS_BCAST 0x00000300 + /* Link address multicast */ +#define SXG_RCV_STATUS_ADDRESS_MCAST 0x00000200 +/* Link control multicast */ +#define SXG_RCV_STATUS_ADDRESS_CMCAST 0x00000100 +/* Link status mask */ +#define SXG_RCV_STATUS_LINK_MASK 0x000000FF +#define SXG_RCV_STATUS_LINK_ERROR 0x00000080 /* Link error */ +/* Link status mask */ +#define SXG_RCV_STATUS_LINK_MASK 0x000000FF +/* RcvMacQ parity error */ +#define SXG_RCV_STATUS_LINK_PARITY 0x00000087 +#define SXG_RCV_STATUS_LINK_EARLY 0x00000086 /* Data early */ +#define SXG_RCV_STATUS_LINK_BUFOFLOW 0x00000085 /* Buffer overflow */ +#define SXG_RCV_STATUS_LINK_CODE 0x00000084 /* Link code error */ +#define SXG_RCV_STATUS_LINK_DRIBBLE 0x00000083 /* Dribble nibble */ +#define SXG_RCV_STATUS_LINK_CRC 0x00000082 /* CRC error */ +#define SXG_RCV_STATUS_LINK_OFLOW 0x00000081 /* Link overflow */ +#define SXG_RCV_STATUS_LINK_UFLOW 0x00000080 /* Link underflow */ +#define SXG_RCV_STATUS_LINK_8023 0x00000020 /* 802.3 */ +#define SXG_RCV_STATUS_LINK_SNAP 0x00000010 /* Snap */ +#define SXG_RCV_STATUS_LINK_VLAN 0x00000008 /* VLAN */ +/* Network type mask */ +#define SXG_RCV_STATUS_LINK_TYPE_MASK 0x00000007 +#define SXG_RCV_STATUS_LINK_CONTROL 0x00000003 /* Control packet */ +#define SXG_RCV_STATUS_LINK_IPV6 0x00000002 /* IPv6 packet */ +#define SXG_RCV_STATUS_LINK_IPV4 0x00000001 /* IPv4 packet */ /* Sahara receive and transmit configuration registers */ -#define RCV_CONFIG_RESET 0x80000000 /* 
-#define RCV_CONFIG_ENABLE	0x40000000	/* Enable the receive logic */
-#define RCV_CONFIG_ENPARSE	0x20000000	/* Enable the receive parser */
-#define RCV_CONFIG_SOCKET	0x10000000	/* Enable the socket detector */
-#define RCV_CONFIG_RCVBAD	0x08000000	/* Receive all bad frames */
-#define RCV_CONFIG_CONTROL	0x04000000	/* Receive all control frames */
-#define RCV_CONFIG_RCVPAUSE	0x02000000	/* Enable pause transmit when attn */
-#define RCV_CONFIG_TZIPV6	0x01000000	/* Include TCP port w/ IPv6 toeplitz */
-#define RCV_CONFIG_TZIPV4	0x00800000	/* Include TCP port w/ IPv4 toeplitz */
-#define RCV_CONFIG_FLUSH	0x00400000	/* Flush buffers */
-#define RCV_CONFIG_PRIORITY_MASK	0x00300000	/* Priority level */
-#define RCV_CONFIG_CONN_MASK	0x000C0000	/* Number of connections */
-#define RCV_CONFIG_CONN_4K	0x00000000	/* 4k connections */
-#define RCV_CONFIG_CONN_2K	0x00040000	/* 2k connections */
-#define RCV_CONFIG_CONN_1K	0x00080000	/* 1k connections */
-#define RCV_CONFIG_CONN_512	0x000C0000	/* 512 connections */
-#define RCV_CONFIG_HASH_MASK	0x00030000	/* Hash depth */
-#define RCV_CONFIG_HASH_8	0x00000000	/* Hash depth 8 */
-#define RCV_CONFIG_HASH_16	0x00010000	/* Hash depth 16 */
-#define RCV_CONFIG_HASH_4	0x00020000	/* Hash depth 4 */
-#define RCV_CONFIG_HASH_2	0x00030000	/* Hash depth 2 */
-#define RCV_CONFIG_BUFLEN_MASK	0x0000FFF0	/* Buffer length bits 15:4. ie multiple of 16. */
-#define RCV_CONFIG_SKT_DIS	0x00000008	/* Disable socket detection on attn */
+/* RcvConfig register reset */
+#define RCV_CONFIG_RESET	0x80000000
+/* Enable the receive logic */
+#define RCV_CONFIG_ENABLE	0x40000000
+/* Enable the receive parser */
+#define RCV_CONFIG_ENPARSE	0x20000000
+/* Enable the socket detector */
+#define RCV_CONFIG_SOCKET	0x10000000
+#define RCV_CONFIG_RCVBAD	0x08000000	/* Receive all bad frames */
+/* Receive all control frames */
+#define RCV_CONFIG_CONTROL	0x04000000
+/* Enable pause transmit when attn */
+#define RCV_CONFIG_RCVPAUSE	0x02000000
+/* Include TCP port w/ IPv6 toeplitz */
+#define RCV_CONFIG_TZIPV6	0x01000000
+/* Include TCP port w/ IPv4 toeplitz */
+#define RCV_CONFIG_TZIPV4	0x00800000
+#define RCV_CONFIG_FLUSH	0x00400000	/* Flush buffers */
+#define RCV_CONFIG_PRIORITY_MASK	0x00300000	/* Priority level */
+#define RCV_CONFIG_CONN_MASK	0x000C0000	/* Number of connections */
+#define RCV_CONFIG_CONN_4K	0x00000000	/* 4k connections */
+#define RCV_CONFIG_CONN_2K	0x00040000	/* 2k connections */
+#define RCV_CONFIG_CONN_1K	0x00080000	/* 1k connections */
+#define RCV_CONFIG_CONN_512	0x000C0000	/* 512 connections */
+#define RCV_CONFIG_HASH_MASK	0x00030000	/* Hash depth */
+#define RCV_CONFIG_HASH_8	0x00000000	/* Hash depth 8 */
+#define RCV_CONFIG_HASH_16	0x00010000	/* Hash depth 16 */
+#define RCV_CONFIG_HASH_4	0x00020000	/* Hash depth 4 */
+#define RCV_CONFIG_HASH_2	0x00030000	/* Hash depth 2 */
+/* Buffer length bits 15:4. ie multiple of 16. */
+#define RCV_CONFIG_BUFLEN_MASK	0x0000FFF0
+/* Disable socket detection on attn */
+#define RCV_CONFIG_SKT_DIS	0x00000008
 /*
  * Macro to determine RCV_CONFIG_BUFLEN based on maximum frame size.
  * We add 18 bytes for Sahara receive status and padding, plus 4 bytes for CRC,
  * and round up to nearest 16 byte boundary
  */
-#define RCV_CONFIG_BUFSIZE(_MaxFrame)	((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK)
-
-#define XMT_CONFIG_RESET	0x80000000	/* XmtConfig register reset */
-#define XMT_CONFIG_ENABLE	0x40000000	/* Enable transmit logic */
-#define XMT_CONFIG_MAC_PARITY	0x20000000	/* Inhibit MAC RAM parity error */
-#define XMT_CONFIG_BUF_PARITY	0x10000000	/* Inhibit D2F buffer parity error */
-#define XMT_CONFIG_MEM_PARITY	0x08000000	/* Inhibit 1T SRAM parity error */
-#define XMT_CONFIG_INVERT_PARITY	0x04000000	/* Invert MAC RAM parity */
-#define XMT_CONFIG_INITIAL_IPID	0x0000FFFF	/* Initial IPID */
+#define RCV_CONFIG_BUFSIZE(_MaxFrame) \
+	((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK)
+
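
Editor's note, not part of the patch: a minimal sketch of what the RCV_CONFIG_BUFSIZE rounding works out to. The frame sizes below are illustrative only; the value the driver actually passes depends on the configured MTU.

	/* Standalone illustration of the RCV_CONFIG_BUFSIZE arithmetic. */
	#include <stdio.h>

	#define RCV_CONFIG_BUFLEN_MASK	0x0000FFF0
	#define RCV_CONFIG_BUFSIZE(_MaxFrame) \
		((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK)

	int main(void)
	{
		/* 1514 = 1500-byte MTU + 14-byte Ethernet header (illustrative) */
		printf("%u\n", RCV_CONFIG_BUFSIZE(1514)); /* 1536: 1514 + 22 is already 16-aligned */
		printf("%u\n", RCV_CONFIG_BUFSIZE(1518)); /* 1552: 1540 rounded up to a multiple of 16 */
		return 0;
	}
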
+/* XmtConfig register reset */
+#define XMT_CONFIG_RESET	0x80000000
+#define XMT_CONFIG_ENABLE	0x40000000	/* Enable transmit logic */
+/* Inhibit MAC RAM parity error */
+#define XMT_CONFIG_MAC_PARITY	0x20000000
+/* Inhibit D2F buffer parity error */
+#define XMT_CONFIG_BUF_PARITY	0x10000000
+/* Inhibit 1T SRAM parity error */
+#define XMT_CONFIG_MEM_PARITY	0x08000000
+#define XMT_CONFIG_INVERT_PARITY	0x04000000	/* Invert MAC RAM parity */
+#define XMT_CONFIG_INITIAL_IPID	0x0000FFFF	/* Initial IPID */
 /*
  * A-XGMAC Registers - Occupy 0x80 - 0xD4 of the struct sxg_hw_regs
@@ -246,68 +294,93 @@ struct sxg_hw_regs {
  * Full register descriptions can be found in axgmac.pdf
  */
 /* A-XGMAC Configuration Register 0 */
-#define AXGMAC_CFG0_SUB_RESET	0x80000000	/* Sub module reset */
-#define AXGMAC_CFG0_RCNTRL_RESET	0x00400000	/* Receive control reset */
-#define AXGMAC_CFG0_RFUNC_RESET	0x00200000	/* Receive function reset */
-#define AXGMAC_CFG0_TCNTRL_RESET	0x00040000	/* Transmit control reset */
-#define AXGMAC_CFG0_TFUNC_RESET	0x00020000	/* Transmit function reset */
-#define AXGMAC_CFG0_MII_RESET	0x00010000	/* MII Management reset */
+#define AXGMAC_CFG0_SUB_RESET	0x80000000	/* Sub module reset */
+#define AXGMAC_CFG0_RCNTRL_RESET	0x00400000	/* Receive control reset */
+#define AXGMAC_CFG0_RFUNC_RESET	0x00200000	/* Receive function reset */
+#define AXGMAC_CFG0_TCNTRL_RESET	0x00040000	/* Transmit control reset */
+#define AXGMAC_CFG0_TFUNC_RESET	0x00020000	/* Transmit function reset */
+#define AXGMAC_CFG0_MII_RESET	0x00010000	/* MII Management reset */
 /* A-XGMAC Configuration Register 1 */
-#define AXGMAC_CFG1_XMT_PAUSE	0x80000000	/* Allow the sending of Pause frames */
-#define AXGMAC_CFG1_XMT_EN	0x40000000	/* Enable transmit */
-#define AXGMAC_CFG1_RCV_PAUSE	0x20000000	/* Allow the detection of Pause frames */
-#define AXGMAC_CFG1_RCV_EN	0x10000000	/* Enable receive */
-#define AXGMAC_CFG1_XMT_STATE	0x04000000	/* Current transmit state - READ ONLY */
-#define AXGMAC_CFG1_RCV_STATE	0x01000000	/* Current receive state - READ ONLY */
-#define AXGMAC_CFG1_XOFF_SHORT	0x00001000	/* Only pause for 64 slot on XOFF */
-#define AXGMAC_CFG1_XMG_FCS1	0x00000400	/* Delay transmit FCS 1 4-byte word */
-#define AXGMAC_CFG1_XMG_FCS2	0x00000800	/* Delay transmit FCS 2 4-byte words */
-#define AXGMAC_CFG1_XMG_FCS3	0x00000C00	/* Delay transmit FCS 3 4-byte words */
-#define AXGMAC_CFG1_RCV_FCS1	0x00000100	/* Delay receive FCS 1 4-byte word */
-#define AXGMAC_CFG1_RCV_FCS2	0x00000200	/* Delay receive FCS 2 4-byte words */
-#define AXGMAC_CFG1_RCV_FCS3	0x00000300	/* Delay receive FCS 3 4-byte words */
-#define AXGMAC_CFG1_PKT_OVERRIDE	0x00000080	/* Per-packet override enable */
-#define AXGMAC_CFG1_SWAP	0x00000040	/* Byte swap enable */
-#define AXGMAC_CFG1_SHORT_ASSERT	0x00000020	/* ASSERT srdrpfrm on short frame (<64) */
-#define AXGMAC_CFG1_RCV_STRICT	0x00000010	/* RCV only 802.3AE when CLEAR */
-#define AXGMAC_CFG1_CHECK_LEN	0x00000008	/* Verify frame length */
-#define AXGMAC_CFG1_GEN_FCS	0x00000004	/* Generate FCS */
-#define AXGMAC_CFG1_PAD_MASK	0x00000003	/* Mask for pad bits */
-#define AXGMAC_CFG1_PAD_64	0x00000001	/* Pad frames to 64 bytes */
-#define AXGMAC_CFG1_PAD_VLAN	0x00000002	/* Detect VLAN and pad to 68 bytes */
-#define AXGMAC_CFG1_PAD_68	0x00000003	/* Pad to 68 bytes */
+/* Allow the sending of Pause frames */
+#define AXGMAC_CFG1_XMT_PAUSE	0x80000000
+#define AXGMAC_CFG1_XMT_EN	0x40000000	/* Enable transmit */
+/* Allow the detection of Pause frames */
+#define AXGMAC_CFG1_RCV_PAUSE	0x20000000
+#define AXGMAC_CFG1_RCV_EN	0x10000000	/* Enable receive */
+/* Current transmit state - READ ONLY */
+#define AXGMAC_CFG1_XMT_STATE	0x04000000
+/* Current receive state - READ ONLY */
+#define AXGMAC_CFG1_RCV_STATE	0x01000000
+/* Only pause for 64 slot on XOFF */
+#define AXGMAC_CFG1_XOFF_SHORT	0x00001000
+/* Delay transmit FCS 1 4-byte word */
+#define AXGMAC_CFG1_XMG_FCS1	0x00000400
+/* Delay transmit FCS 2 4-byte words */
+#define AXGMAC_CFG1_XMG_FCS2	0x00000800
+/* Delay transmit FCS 3 4-byte words */
+#define AXGMAC_CFG1_XMG_FCS3	0x00000C00
+/* Delay receive FCS 1 4-byte word */
+#define AXGMAC_CFG1_RCV_FCS1	0x00000100
+/* Delay receive FCS 2 4-byte words */
+#define AXGMAC_CFG1_RCV_FCS2	0x00000200
+/* Delay receive FCS 3 4-byte words */
+#define AXGMAC_CFG1_RCV_FCS3	0x00000300
+/* Per-packet override enable */
+#define AXGMAC_CFG1_PKT_OVERRIDE	0x00000080
+#define AXGMAC_CFG1_SWAP	0x00000040	/* Byte swap enable */
+/* ASSERT srdrpfrm on short frame (<64) */
+#define AXGMAC_CFG1_SHORT_ASSERT	0x00000020
+/* RCV only 802.3AE when CLEAR */
+#define AXGMAC_CFG1_RCV_STRICT	0x00000010
+#define AXGMAC_CFG1_CHECK_LEN	0x00000008	/* Verify frame length */
+#define AXGMAC_CFG1_GEN_FCS	0x00000004	/* Generate FCS */
+#define AXGMAC_CFG1_PAD_MASK	0x00000003	/* Mask for pad bits */
+#define AXGMAC_CFG1_PAD_64	0x00000001	/* Pad frames to 64 bytes */
+/* Detect VLAN and pad to 68 bytes */
+#define AXGMAC_CFG1_PAD_VLAN	0x00000002
+#define AXGMAC_CFG1_PAD_68	0x00000003	/* Pad to 68 bytes */
 /* A-XGMAC Configuration Register 2 */
-#define AXGMAC_CFG2_GEN_PAUSE	0x80000000	/* Generate single pause frame (test) */
-#define AXGMAC_CFG2_LF_MANUAL	0x08000000	/* Manual link fault sequence */
-#define AXGMAC_CFG2_LF_AUTO	0x04000000	/* Auto link fault sequence */
-#define AXGMAC_CFG2_LF_REMOTE	0x02000000	/* Remote link fault (READ ONLY) */
-#define AXGMAC_CFG2_LF_LOCAL	0x01000000	/* Local link fault (READ ONLY) */
-#define AXGMAC_CFG2_IPG_MASK	0x001F0000	/* Inter packet gap */
+/* Generate single pause frame (test) */
+#define AXGMAC_CFG2_GEN_PAUSE	0x80000000
+/* Manual link fault sequence */
+#define AXGMAC_CFG2_LF_MANUAL	0x08000000
+/* Auto link fault sequence */
+#define AXGMAC_CFG2_LF_AUTO	0x04000000
+/* Remote link fault (READ ONLY) */
+#define AXGMAC_CFG2_LF_REMOTE	0x02000000
+/* Local link fault (READ ONLY) */
+#define AXGMAC_CFG2_LF_LOCAL	0x01000000
+#define AXGMAC_CFG2_IPG_MASK	0x001F0000	/* Inter packet gap */
 #define AXGMAC_CFG2_IPG_SHIFT	16
-#define AXGMAC_CFG2_PAUSE_XMT	0x00008000	/* Pause transmit module */
-#define AXGMAC_CFG2_IPG_EXTEN	0x00000020	/* Enable IPG extension algorithm */
-#define AXGMAC_CFG2_IPGEX_MASK	0x0000001F	/* IPG extension */
+#define AXGMAC_CFG2_PAUSE_XMT	0x00008000	/* Pause transmit module */
+/* Enable IPG extension algorithm */
+#define AXGMAC_CFG2_IPG_EXTEN	0x00000020
+#define AXGMAC_CFG2_IPGEX_MASK	0x0000001F	/* IPG extension */
 /* A-XGMAC Configuration Register 3 */
-#define AXGMAC_CFG3_RCV_DROP	0xFFFF0000	/* Receive frame drop filter */
-#define AXGMAC_CFG3_RCV_DONT_CARE	0x0000FFFF	/* Receive frame don't care filter */
+/* Receive frame drop filter */
+#define AXGMAC_CFG3_RCV_DROP	0xFFFF0000
+/* Receive frame don't care filter */
+#define AXGMAC_CFG3_RCV_DONT_CARE	0x0000FFFF
 /* A-XGMAC Station Address Register - Octets 1-4 */
-#define AXGMAC_SARLOW_OCTET_ONE	0xFF000000	/* First octet */
-#define AXGMAC_SARLOW_OCTET_TWO	0x00FF0000	/* Second octet */
-#define AXGMAC_SARLOW_OCTET_THREE	0x0000FF00	/* Third octet */
-#define AXGMAC_SARLOW_OCTET_FOUR	0x000000FF	/* Fourth octet */
+#define AXGMAC_SARLOW_OCTET_ONE	0xFF000000	/* First octet */
+#define AXGMAC_SARLOW_OCTET_TWO	0x00FF0000	/* Second octet */
+#define AXGMAC_SARLOW_OCTET_THREE	0x0000FF00	/* Third octet */
+#define AXGMAC_SARLOW_OCTET_FOUR	0x000000FF	/* Fourth octet */
 /* A-XGMAC Station Address Register - Octets 5-6 */
-#define AXGMAC_SARHIGH_OCTET_FIVE	0xFF000000	/* Fifth octet */
-#define AXGMAC_SARHIGH_OCTET_SIX	0x00FF0000	/* Sixth octet */
+#define AXGMAC_SARHIGH_OCTET_FIVE	0xFF000000	/* Fifth octet */
+#define AXGMAC_SARHIGH_OCTET_SIX	0x00FF0000	/* Sixth octet */
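
Editor's note, not part of the patch: a minimal sketch of how a 6-byte MAC address maps onto the two station-address register values according to the octet masks above. The helper name is illustrative, not a driver function.

	static inline void sxg_pack_station_addr(const unsigned char mac[6],
						 unsigned int *sar_low,
						 unsigned int *sar_high)
	{
		*sar_low  = ((unsigned int)mac[0] << 24) |	/* AXGMAC_SARLOW_OCTET_ONE */
			    ((unsigned int)mac[1] << 16) |	/* AXGMAC_SARLOW_OCTET_TWO */
			    ((unsigned int)mac[2] << 8)  |	/* AXGMAC_SARLOW_OCTET_THREE */
			     (unsigned int)mac[3];		/* AXGMAC_SARLOW_OCTET_FOUR */
		*sar_high = ((unsigned int)mac[4] << 24) |	/* AXGMAC_SARHIGH_OCTET_FIVE */
			    ((unsigned int)mac[5] << 16);	/* AXGMAC_SARHIGH_OCTET_SIX */
	}
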
 /* A-XGMAC Maximum frame length register */
-#define AXGMAC_MAXFRAME_XMT	0x3FFF0000	/* Maximum transmit frame length */
+/* Maximum transmit frame length */
+#define AXGMAC_MAXFRAME_XMT	0x3FFF0000
 #define AXGMAC_MAXFRAME_XMT_SHIFT	16
-#define AXGMAC_MAXFRAME_RCV	0x0000FFFF	/* Maximum receive frame length */
+/* Maximum receive frame length */
+#define AXGMAC_MAXFRAME_RCV	0x0000FFFF
 /*
  * This register doesn't need to be written for standard MTU.
  * For jumbo, I'll just statically define the value here.  This
@@ -323,25 +396,32 @@ struct sxg_hw_regs {
 /* A-XGMAC AMIIM Command Register */
 #define AXGMAC_AMIIM_CMD_START	0x00000008	/* Command start */
 #define AXGMAC_AMIIM_CMD_MASK	0x00000007	/* Command */
-#define AXGMAC_AMIIM_CMD_LEGACY_WRITE	1	/* 10/100/1000 Mbps Phy Write */
-#define AXGMAC_AMIIM_CMD_LEGACY_READ	2	/* 10/100/1000 Mbps Phy Read */
+/* 10/100/1000 Mbps Phy Write */
+#define AXGMAC_AMIIM_CMD_LEGACY_WRITE	1
+/* 10/100/1000 Mbps Phy Read */
+#define AXGMAC_AMIIM_CMD_LEGACY_READ	2
 #define AXGMAC_AMIIM_CMD_MONITOR_SINGLE	3	/* Monitor single PHY */
-#define AXGMAC_AMIIM_CMD_MONITOR_MULTIPLE	4	/* Monitor multiple contiguous PHYs */
-#define AXGMAC_AMIIM_CMD_10G_OPERATION	5	/* Present AMIIM Field Reg */
-#define AXGMAC_AMIIM_CMD_CLEAR_LINK_FAIL	6	/* Clear Link Fail Bit in MIIM */
+/* Monitor multiple contiguous PHYs */
+#define AXGMAC_AMIIM_CMD_MONITOR_MULTIPLE	4
+/* Present AMIIM Field Reg */
+#define AXGMAC_AMIIM_CMD_10G_OPERATION	5
+/* Clear Link Fail Bit in MIIM */
+#define AXGMAC_AMIIM_CMD_CLEAR_LINK_FAIL	6
 /* A-XGMAC AMIIM Field Register */
 #define AXGMAC_AMIIM_FIELD_ST	0xC0000000	/* 2-bit ST field */
 #define AXGMAC_AMIIM_FIELD_ST_SHIFT	30
 #define AXGMAC_AMIIM_FIELD_OP	0x30000000	/* 2-bit OP field */
 #define AXGMAC_AMIIM_FIELD_OP_SHIFT	28
-#define AXGMAC_AMIIM_FIELD_PORT_ADDR	0x0F800000	/* Port address field (hstphyadx in spec) */
+/* Port address field (hstphyadx in spec) */
+#define AXGMAC_AMIIM_FIELD_PORT_ADDR	0x0F800000
 #define AXGMAC_AMIIM_FIELD_PORT_SHIFT	23
-#define AXGMAC_AMIIM_FIELD_DEV_ADDR	0x007C0000	/* Device address field (hstregadx in spec) */
+/* Device address field (hstregadx in spec) */
+#define AXGMAC_AMIIM_FIELD_DEV_ADDR	0x007C0000
 #define AXGMAC_AMIIM_FIELD_DEV_SHIFT	18
 #define AXGMAC_AMIIM_FIELD_TA	0x00030000	/* 2-bit TA field */
 #define AXGMAC_AMIIM_FIELD_TA_SHIFT	16
-#define AXGMAC_AMIIM_FIELD_DATA	0x0000FFFF	// Data field
+#define AXGMAC_AMIIM_FIELD_DATA	0x0000FFFF	/* Data field */
 /* Values for the AXGMAC_AMIIM_FIELD_OP field in the A-XGMAC AMIIM Field Register */
 #define MIIM_OP_ADDR	0	/* MIIM Address set operation */
@@ -349,49 +429,76 @@ struct sxg_hw_regs {
 #define MIIM_OP_READ	2	/* MIIM Read register operation */
 #define MIIM_OP_ADDR_SHIFT	(MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT)
-/* Values for the AXGMAC_AMIIM_FIELD_PORT_ADDR field in the A-XGMAC AMIIM Field Register */
+/*
+ * Values for the AXGMAC_AMIIM_FIELD_PORT_ADDR field in the A-XGMAC AMIIM
+ * Field Register
+ */
 #define MIIM_PORT_NUM	1	/* All Sahara MIIM modules use port 1 */
-/* Values for the AXGMAC_AMIIM_FIELD_DEV_ADDR field in the A-XGMAC AMIIM Field Register */
-#define MIIM_DEV_PHY_PMA	1	/* PHY PMA/PMD module MIIM device number */
-#define MIIM_DEV_PHY_PCS	3	/* PHY PCS module MIIM device number */
-#define MIIM_DEV_PHY_XS	4	/* PHY XS module MIIM device number */
+/*
+ * Values for the AXGMAC_AMIIM_FIELD_DEV_ADDR field in the A-XGMAC AMIIM
+ * Field Register
+ */
+/* PHY PMA/PMD module MIIM device number */
+#define MIIM_DEV_PHY_PMA	1
+/* PHY PCS module MIIM device number */
+#define MIIM_DEV_PHY_PCS	3
+/* PHY XS module MIIM device number */
+#define MIIM_DEV_PHY_XS	4
 #define MIIM_DEV_XGXS	5	/* XGXS MIIM device number */
-/* Values for the AXGMAC_AMIIM_FIELD_TA field in the A-XGMAC AMIIM Field Register */
+/*
+ * Values for the AXGMAC_AMIIM_FIELD_TA field in the A-XGMAC AMIIM Field
+ * Register
+ */
 #define MIIM_TA_10GB	2	/* set to 2 for 10 GB operation */
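
Editor's note, not part of the patch: a sketch of how the AMIIM Field register fields might be assembled for an MDIO address-set cycle, using only the shifts and values defined above. The helper name is illustrative, and leaving the ST field at zero is an assumption; consult the hardware spec before relying on it.

	static inline unsigned int sxg_amiim_field(unsigned int op,
						   unsigned int dev_addr,
						   unsigned int data)
	{
		/* ST field left at 0; OP, port, device, TA and data packed per the masks above */
		return (op << AXGMAC_AMIIM_FIELD_OP_SHIFT) |
		       (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
		       (dev_addr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
		       (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
		       (data & AXGMAC_AMIIM_FIELD_DATA);
	}

	/* e.g. an address-set cycle targeting a register in the PMA/PMD MMD:
	 *	sxg_amiim_field(MIIM_OP_ADDR, MIIM_DEV_PHY_PMA, reg_addr);
	 */
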
 /* A-XGMAC AMIIM Configuration Register */
-#define AXGMAC_AMIIM_CFG_NOPREAM	0x00000080	/* Bypass preamble of mngmt frame */
-#define AXGMAC_AMIIM_CFG_HALF_CLOCK	0x0000007F	/* half-clock duration of MDC output */
+/* Bypass preamble of mngmt frame */
+#define AXGMAC_AMIIM_CFG_NOPREAM	0x00000080
+/* half-clock duration of MDC output */
+#define AXGMAC_AMIIM_CFG_HALF_CLOCK	0x0000007F
 /* A-XGMAC AMIIM Indicator Register */
-#define AXGMAC_AMIIM_INDC_LINK	0x00000010	/* Link status from legacy PHY or MMD */
-#define AXGMAC_AMIIM_INDC_MPHY	0x00000008	/* Multiple phy operation in progress */
-#define AXGMAC_AMIIM_INDC_SPHY	0x00000004	/* Single phy operation in progress */
-#define AXGMAC_AMIIM_INDC_MON	0x00000002	/* Single or multiple monitor cmd */
-#define AXGMAC_AMIIM_INDC_BUSY	0x00000001	/* Set until cmd operation complete */
+/* Link status from legacy PHY or MMD */
+#define AXGMAC_AMIIM_INDC_LINK	0x00000010
+/* Multiple phy operation in progress */
+#define AXGMAC_AMIIM_INDC_MPHY	0x00000008
+/* Single phy operation in progress */
+#define AXGMAC_AMIIM_INDC_SPHY	0x00000004
+/* Single or multiple monitor cmd */
+#define AXGMAC_AMIIM_INDC_MON	0x00000002
+/* Set until cmd operation complete */
+#define AXGMAC_AMIIM_INDC_BUSY	0x00000001
 /* Link Status and Control Register */
 #define LS_PHY_CLR_RESET	0x80000000	/* Clear reset signal to PHY */
 #define LS_SERDES_POWER_DOWN	0x40000000	/* Power down the Sahara Serdes */
 #define LS_XGXS_ENABLE	0x20000000	/* Enable the XAUI XGXS logic */
-#define LS_XGXS_CTL	0x10000000	/* Hold XAUI XGXS logic reset until Serdes is up */
-#define LS_SERDES_DOWN	0x08000000	/* When 0, XAUI Serdes is up and initialization is complete */
-#define LS_TRACE_DOWN	0x04000000	/* When 0, Trace Serdes is up and initialization is complete */
-#define LS_PHY_CLK_25MHZ	0x02000000	/* Set PHY clock to 25 MHz (else 156.125 MHz) */
+/* Hold XAUI XGXS logic reset until Serdes is up */
+#define LS_XGXS_CTL	0x10000000
+/* When 0, XAUI Serdes is up and initialization is complete */
+#define LS_SERDES_DOWN	0x08000000
+/* When 0, Trace Serdes is up and initialization is complete */
+#define LS_TRACE_DOWN	0x04000000
+/* Set PHY clock to 25 MHz (else 156.125 MHz) */
+#define LS_PHY_CLK_25MHZ	0x02000000
 #define LS_PHY_CLK_EN	0x01000000	/* Enable clock to PHY */
 #define LS_XAUI_LINK_UP	0x00000010	/* XAUI link is up */
-#define LS_XAUI_LINK_CHNG	0x00000008	/* XAUI link status has changed */
+/* XAUI link status has changed */
+#define LS_XAUI_LINK_CHNG	0x00000008
 #define LS_LINK_ALARM	0x00000004	/* Link alarm pin */
-#define LS_ATTN_CTRL_MASK	0x00000003	/* Mask link attention control bits */
+/* Mask link attention control bits */
+#define LS_ATTN_CTRL_MASK	0x00000003
 #define LS_ATTN_ALARM	0x00000000	/* 00 => Attn on link alarm */
-#define LS_ATTN_ALARM_OR_STAT_CHNG	0x00000001	/* 01 => Attn on link alarm or status change */
-#define LS_ATTN_STAT_CHNG	0x00000002	/* 10 => Attn on link status change */
+/* 01 => Attn on link alarm or status change */
+#define LS_ATTN_ALARM_OR_STAT_CHNG	0x00000001
+/* 10 => Attn on link status change */
+#define LS_ATTN_STAT_CHNG	0x00000002
 #define LS_ATTN_NONE	0x00000003	/* 11 => no Attn */
 /* Link Address High Registers */
-#define LINK_ADDR_ENABLE	0x80000000	/* Enable this link address */
+#define LINK_ADDR_ENABLE	0x80000000	/* Enable this link address */
 /*
@@ -404,12 +511,12 @@ struct sxg_hw_regs {
 #define XGXS_ADDRESS_STATUS1	0x0001	/* XS Status 1 */
 #define XGXS_ADDRESS_DEVID_LOW	0x0002	/* XS Device ID (low) */
 #define XGXS_ADDRESS_DEVID_HIGH	0x0003	/* XS Device ID (high) */
-#define XGXS_ADDRESS_SPEED	0x0004	/* XS Speed ability */
+#define XGXS_ADDRESS_SPEED	0x0004	/* XS Speed ability */
 #define XGXS_ADDRESS_DEV_LOW	0x0005	/* XS Devices in package */
 #define XGXS_ADDRESS_DEV_HIGH	0x0006	/* XS Devices in package */
 #define XGXS_ADDRESS_STATUS2	0x0008	/* XS Status 2 */
 #define XGXS_ADDRESS_PKGID_lOW	0x000E	/* XS Package Identifier */
-#define XGXS_ADDRESS_PKGID_HIGH	0x000F	/* XS Package Identifier */
+#define XGXS_ADDRESS_PKGID_HIGH	0x000F	/* XS Package Identifier */
 #define XGXS_ADDRESS_LANE_STATUS	0x0018	/* 10G XGXS Lane Status */
 #define XGXS_ADDRESS_TEST_CTRL	0x0019	/* 10G XGXS Test Control */
 #define XGXS_ADDRESS_RESET_LO1	0x8000	/* Vendor-Specific Reset Lo 1 */
@@ -423,7 +530,8 @@ struct sxg_hw_regs {
 #define XGXS_CONTROL1_SPEED1	0x2000	/* 0 = unspecified, 1 = 10Gb+ */
 #define XGXS_CONTROL1_LOWPOWER	0x0400	/* 1 = Low power mode */
 #define XGXS_CONTROL1_SPEED2	0x0040	/* Same as SPEED1 (?) */
-#define XGXS_CONTROL1_SPEED	0x003C	/* Everything reserved except zero (?) */
+/* Everything reserved except zero (?) */
+#define XGXS_CONTROL1_SPEED	0x003C
 /* XS Status 1 register bit definitions */
 #define XGXS_STATUS1_FAULT	0x0080	/* Fault detected */
@@ -439,7 +547,7 @@ struct sxg_hw_regs {
 #define XGXS_DEVICES_PCS	0x0008	/* PCS Present */
 #define XGXS_DEVICES_WIS	0x0004	/* WIS Present */
 #define XGXS_DEVICES_PMD	0x0002	/* PMD/PMA Present */
-#define XGXS_DEVICES_CLAUSE22	0x0001	/* Clause 22 registers present */
+#define XGXS_DEVICES_CLAUSE22	0x0001	/* Clause 22 registers present*/
 /* XS Devices High register bit definitions */
 #define XGXS_DEVICES_VENDOR2	0x8000	/* Vendor specific device 2 */
@@ -457,7 +565,7 @@ struct sxg_hw_regs {
 #define XGXS_PKGID_HIGH_REV	0x000F	/* Revision Number */
 /* XS Lane Status register bit definitions */
-#define XGXS_LANE_PHY	0x1000	/* PHY/DTE lane alignment status */
+#define XGXS_LANE_PHY	0x1000	/* PHY/DTE lane alignment status */
 #define XGXS_LANE_PATTERN	0x0800	/* Pattern testing ability */
 #define XGXS_LANE_LOOPBACK	0x0400	/* PHY loopback ability */
 #define XGXS_LANE_SYNC3	0x0008	/* Lane 3 sync */
@@ -478,7 +586,10 @@ struct sxg_hw_regs {
 *
 * Full register descriptions can be found in PHY/XENPAK/IEEE specs
 */
-/* LASI (Link Alarm Status Interrupt) Registers (located in MIIM_DEV_PHY_PMA device) */
+/*
+ * LASI (Link Alarm Status Interrupt) Registers (located in
+ * MIIM_DEV_PHY_PMA device)
+ */
 #define LASI_RX_ALARM_CONTROL	0x9000	/* LASI RX_ALARM Control */
 #define LASI_TX_ALARM_CONTROL	0x9001	/* LASI TX_ALARM Control */
 #define LASI_CONTROL	0x9002	/* LASI Control */
@@ -487,9 +598,12 @@ struct sxg_hw_regs {
 #define LASI_STATUS	0x9005	/* LASI Status */
 /* LASI_CONTROL bit definitions */
-#define LASI_CTL_RX_ALARM_ENABLE	0x0004	/* Enable RX_ALARM interrupts */
-#define LASI_CTL_TX_ALARM_ENABLE	0x0002	/* Enable TX_ALARM interrupts */
-#define LASI_CTL_LS_ALARM_ENABLE	0x0001	/* Enable Link Status interrupts */
+/* Enable RX_ALARM interrupts */
+#define LASI_CTL_RX_ALARM_ENABLE	0x0004
+/* Enable TX_ALARM interrupts */
+#define LASI_CTL_TX_ALARM_ENABLE	0x0002
+/* Enable Link Status interrupts */
+#define LASI_CTL_LS_ALARM_ENABLE	0x0001
 /* LASI_STATUS bit definitions */
 #define LASI_STATUS_RX_ALARM	0x0004	/* RX_ALARM status */
@@ -499,7 +613,7 @@ struct sxg_hw_regs {
 /* PHY registers - PMA/PMD (device 1) */
 #define PHY_PMA_CONTROL1	0x0000	/* PMA/PMD Control 1 */
 #define PHY_PMA_STATUS1	0x0001	/* PMA/PMD Status 1 */
-#define PHY_PMA_RCV_DET	0x000A	/* PMA/PMD Receive Signal Detect */
+#define PHY_PMA_RCV_DET	0x000A	/* PMA/PMD Receive Signal Detect */
 /* other PMA/PMD registers exist and can be defined as needed */
 /* PHY registers - PCS (device 3) */
@@ -518,10 +632,10 @@ struct sxg_hw_regs {
 #define PMA_CONTROL1_RESET	0x8000	/* PMA/PMD reset */
 /* PHY_PMA_RCV_DET register bit definitions */
-#define PMA_RCV_DETECT	0x0001	/* PMA/PMD receive signal detect */
+#define PMA_RCV_DETECT	0x0001	/* PMA/PMD receive signal detect */
 /* PHY_PCS_10G_STATUS1 register bit definitions */
-#define PCS_10B_BLOCK_LOCK	0x0001	/* PCS 10GBASE-R locked to receive blocks */
+#define PCS_10B_BLOCK_LOCK	0x0001	/* PCS 10GBASE-R locked to receive blocks */
 /* PHY_XS_LANE_STATUS register bit definitions */
 #define XS_LANE_ALIGN	0x1000	/* XS transmit lanes aligned */
@@ -559,13 +673,20 @@ struct phy_ucode {
 #pragma pack(push, 1)
 struct xmt_desc {
	ushort XmtLen;		/* word 0, bits [15:0] - transmit length */
-	unsigned char XmtCtl;	/* word 0, bits [23:16] - transmit control byte */
-	unsigned char Cmd;	/* word 0, bits [31:24] - transmit command plus misc. */
-	u32 XmtBufId;		/* word 1, bits [31:0] - transmit buffer ID */
-	unsigned char TcpStrt;	/* word 2, bits [7:0] - byte address of TCP header */
-	unsigned char IpStrt;	/* word 2, bits [15:8] - byte address of IP header */
-	ushort IpCkSum;		/* word 2, bits [31:16] - partial IP checksum */
-	ushort TcpCkSum;	/* word 3, bits [15:0] - partial TCP checksum */
+	/* word 0, bits [23:16] - transmit control byte */
+	unsigned char XmtCtl;
+	/* word 0, bits [31:24] - transmit command plus misc. */
+	unsigned char Cmd;
+	/* word 1, bits [31:0] - transmit buffer ID */
+	u32 XmtBufId;
+	/* word 2, bits [7:0] - byte address of TCP header */
+	unsigned char TcpStrt;
+	/* word 2, bits [15:8] - byte address of IP header */
+	unsigned char IpStrt;
+	/* word 2, bits [31:16] - partial IP checksum */
+	ushort IpCkSum;
+	/* word 3, bits [15:0] - partial TCP checksum */
+	ushort TcpCkSum;
	ushort Rsvd1;		/* word 3, bits [31:16] - PAD */
	u32 Rsvd2;		/* word 4, bits [31:0] - PAD */
	u32 Rsvd3;		/* word 5, bits [31:0] - PAD */
@@ -580,42 +701,58 @@ struct xmt_desc {
 #define XMT_DESC_CMD_CSUM_INSERT	1	/* checksum insert descriptor */
 #define XMT_DESC_CMD_FORMAT	2	/* format descriptor */
 #define XMT_DESC_CMD_PRIME	3	/* prime descriptor */
-#define XMT_DESC_CMD_CODE_SHFT	6	/* comand code shift (shift to bits [31:30] in word 0) */
+/* comand code shift (shift to bits [31:30] in word 0) */
+#define XMT_DESC_CMD_CODE_SHFT	6
 /* shifted command codes */
-#define XMT_RAW_SEND	(XMT_DESC_CMD_RAW_SEND << XMT_DESC_CMD_CODE_SHFT)
-#define XMT_CSUM_INSERT	(XMT_DESC_CMD_CSUM_INSERT << XMT_DESC_CMD_CODE_SHFT)
-#define XMT_FORMAT	(XMT_DESC_CMD_FORMAT << XMT_DESC_CMD_CODE_SHFT)
-#define XMT_PRIME	(XMT_DESC_CMD_PRIME << XMT_DESC_CMD_CODE_SHFT)
+#define XMT_RAW_SEND	(XMT_DESC_CMD_RAW_SEND << XMT_DESC_CMD_CODE_SHFT)
+#define XMT_CSUM_INSERT	(XMT_DESC_CMD_CSUM_INSERT << XMT_DESC_CMD_CODE_SHFT)
+#define XMT_FORMAT	(XMT_DESC_CMD_FORMAT << XMT_DESC_CMD_CODE_SHFT)
+#define XMT_PRIME	(XMT_DESC_CMD_PRIME << XMT_DESC_CMD_CODE_SHFT)
 /*
  * struct xmt_desc Control Byte (XmtCtl) definitions
  * NOTE: These bits do not work on Sahara (Rev A)!
  */
-#define XMT_CTL_PAUSE_FRAME	0x80	/* current frame is a pause control frame (for statistics) */
-#define XMT_CTL_CONTROL_FRAME	0x40	/* current frame is a control frame (for statistics) */
+/* current frame is a pause control frame (for statistics) */
+#define XMT_CTL_PAUSE_FRAME	0x80
+/* current frame is a control frame (for statistics) */
+#define XMT_CTL_CONTROL_FRAME	0x40
 #define XMT_CTL_PER_PKT_QUAL	0x20	/* per packet qualifier */
 #define XMT_CTL_PAD_MODE_NONE	0x00	/* do not pad frame */
 #define XMT_CTL_PAD_MODE_64	0x08	/* pad frame to 64 bytes */
-#define XMT_CTL_PAD_MODE_VLAN_68	0x10	/* pad frame to 64 bytes, and VLAN frames to 68 bytes */
+/* pad frame to 64 bytes, and VLAN frames to 68 bytes */
+#define XMT_CTL_PAD_MODE_VLAN_68	0x10
 #define XMT_CTL_PAD_MODE_68	0x18	/* pad frame to 68 bytes */
-#define XMT_CTL_GEN_FCS	0x04	/* generate FCS (CRC) for this frame */
+/* generate FCS (CRC) for this frame */
+#define XMT_CTL_GEN_FCS	0x04
 #define XMT_CTL_DELAY_FCS_0	0x00	/* do not delay FCS calcution */
-#define XMT_CTL_DELAY_FCS_1	0x01	/* delay FCS calculation by 1 (4-byte) word */
-#define XMT_CTL_DELAY_FCS_2	0x02	/* delay FCS calculation by 2 (4-byte) words */
-#define XMT_CTL_DELAY_FCS_3	0x03	/* delay FCS calculation by 3 (4-byte) words */
+/* delay FCS calculation by 1 (4-byte) word */
+#define XMT_CTL_DELAY_FCS_1	0x01
+/* delay FCS calculation by 2 (4-byte) words */
+#define XMT_CTL_DELAY_FCS_2	0x02
+/* delay FCS calculation by 3 (4-byte) words */
+#define XMT_CTL_DELAY_FCS_3	0x03
 /* struct xmt_desc XmtBufId definition */
-#define XMT_BUF_ID_SHFT	8	/* The Xmt buffer ID is formed by dividing */
-			/* the buffer (DRAM) address by 256 (or << 8) */
+/*
+ * The Xmt buffer ID is formed by dividing the buffer (DRAM) address
+ * by 256 (or << 8)
+ */
+
+#define XMT_BUF_ID_SHFT	8
 /* Receiver Sequencer Definitions */
 /* Receive Event Queue (queues 3 - 6) bit definitions */
-#define RCV_EVTQ_RBFID_MASK	0x0000FFFF	/* bit mask for the Receive Buffer ID */
+/* bit mask for the Receive Buffer ID */
+#define RCV_EVTQ_RBFID_MASK	0x0000FFFF
 /* Receive Buffer ID definition */
-#define RCV_BUF_ID_SHFT	5	/* The Rcv buffer ID is formed by dividing */
-			/* the buffer (DRAM) address by 32 (or << 5) */
+/*
+ * The Rcv buffer ID is formed by dividing the buffer (DRAM) address
+ * by 32 (or << 5)
+ */
+#define RCV_BUF_ID_SHFT	5
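
Editor's note, not part of the patch: a small sketch of the address/ID relationship the two *_BUF_ID_SHFT values describe. Addresses here are card-local DRAM offsets and the helper names are illustrative.

	static inline unsigned int xmt_buf_id(unsigned int dram_addr)
	{
		return dram_addr >> XMT_BUF_ID_SHFT;	/* divide by 256 */
	}

	static inline unsigned int rcv_buf_addr(unsigned int buf_id)
	{
		/* a receive event carries the ID in its low bits: buf_id = event & RCV_EVTQ_RBFID_MASK */
		return buf_id << RCV_BUF_ID_SHFT;	/* multiply by 32 */
	}
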
 /*
  * Format of the 18 byte Receive Buffer returned by the
@@ -628,73 +765,86 @@ struct rcv_buf_hdr {
	union {
		ushort TcpCsum;	/* TCP checksum */
		struct {
-			unsigned char TcpCsumL;	/* lower 8 bits of the TCP checksum */
-			unsigned char LinkHash;	/* Link hash (multicast frames only) */
+			/* lower 8 bits of the TCP checksum */
+			unsigned char TcpCsumL;
+			/* Link hash (multicast frames only) */
+			unsigned char LinkHash;
		};
	};
-	ushort SktHash;	/* Socket hash */
-	unsigned char TcpHdrOffset;	/* TCP header offset into packet */
-	unsigned char IpHdrOffset;	/* IP header offset into packet */
-	u32 TpzHash;	/* Toeplitz hash */
-	ushort Reserved;	/* Reserved */
+	ushort SktHash;			/* Socket hash */
+	unsigned char TcpHdrOffset;	/* TCP header offset into packet */
+	unsigned char IpHdrOffset;	/* IP header offset into packet */
+	u32 TpzHash;			/* Toeplitz hash */
+	ushort Reserved;		/* Reserved */
 };
 #pragma pack(pop)
 /* Queue definitions */
 /* Ingress (read only) queue numbers */
-#define PXY_BUF_Q	0	/* Proxy Buffer Queue */
-#define HST_EVT_Q	1	/* Host Event Queue */
-#define XMT_BUF_Q	2	/* Transmit Buffer Queue */
-#define SKT_EVL_Q	3	/* RcvSqr Socket Event Low Priority Queue */
-#define RCV_EVL_Q	4	/* RcvSqr Rcv Event Low Priority Queue */
-#define SKT_EVH_Q	5	/* RcvSqr Socket Event High Priority Queue */
-#define RCV_EVH_Q	6	/* RcvSqr Rcv Event High Priority Queue */
-#define DMA_RSP_Q	7	/* Dma Response Queue - one per CPU context */
+#define PXY_BUF_Q	0	/* Proxy Buffer Queue */
+#define HST_EVT_Q	1	/* Host Event Queue */
+#define XMT_BUF_Q	2	/* Transmit Buffer Queue */
+#define SKT_EVL_Q	3	/* RcvSqr Socket Event Low Priority Queue */
+#define RCV_EVL_Q	4	/* RcvSqr Rcv Event Low Priority Queue */
+#define SKT_EVH_Q	5	/* RcvSqr Socket Event High Priority Queue */
+#define RCV_EVH_Q	6	/* RcvSqr Rcv Event High Priority Queue */
+#define DMA_RSP_Q	7	/* Dma Response Queue - one per CPU context */
 /* Local (read/write) queue numbers */
-#define LOCAL_A_Q	8	/* Spare local Queue */
-#define LOCAL_B_Q	9	/* Spare local Queue */
-#define LOCAL_C_Q	10	/* Spare local Queue */
-#define FSM_EVT_Q	11	/* Finite-State-Machine Event Queue */
-#define SBF_PAL_Q	12	/* System Buffer Physical Address (low) Queue */
-#define SBF_PAH_Q	13	/* System Buffer Physical Address (high) Queue */
-#define SBF_VAL_Q	14	/* System Buffer Virtual Address (low) Queue */
-#define SBF_VAH_Q	15	/* System Buffer Virtual Address (high) Queue */
+#define LOCAL_A_Q	8	/* Spare local Queue */
+#define LOCAL_B_Q	9	/* Spare local Queue */
+#define LOCAL_C_Q	10	/* Spare local Queue */
+#define FSM_EVT_Q	11	/* Finite-State-Machine Event Queue */
+#define SBF_PAL_Q	12	/* System Buffer Physical Address (low) Queue */
+#define SBF_PAH_Q	13	/* System Buffer Physical Address (high) Queue*/
+#define SBF_VAL_Q	14	/* System Buffer Virtual Address (low) Queue */
+#define SBF_VAH_Q	15	/* System Buffer Virtual Address (high) Queue */
 /* Egress (write only) queue numbers */
-#define H2G_CMD_Q	16	/* Host to GlbRam DMA Command Queue */
-#define H2D_CMD_Q	17	/* Host to DRAM DMA Command Queue */
-#define G2H_CMD_Q	18	/* GlbRam to Host DMA Command Queue */
-#define G2D_CMD_Q	19	/* GlbRam to DRAM DMA Command Queue */
-#define D2H_CMD_Q	20	/* DRAM to Host DMA Command Queue */
-#define D2G_CMD_Q	21	/* DRAM to GlbRam DMA Command Queue */
-#define D2D_CMD_Q	22	/* DRAM to DRAM DMA Command Queue */
-#define PXL_CMD_Q	23	/* Low Priority Proxy Command Queue */
-#define PXH_CMD_Q	24	/* High Priority Proxy Command Queue */
-#define RSQ_CMD_Q	25	/* Receive Sequencer Command Queue */
-#define RCV_BUF_Q	26	/* Receive Buffer Queue */
+#define H2G_CMD_Q	16	/* Host to GlbRam DMA Command Queue */
+#define H2D_CMD_Q	17	/* Host to DRAM DMA Command Queue */
+#define G2H_CMD_Q	18	/* GlbRam to Host DMA Command Queue */
+#define G2D_CMD_Q	19	/* GlbRam to DRAM DMA Command Queue */
+#define D2H_CMD_Q	20	/* DRAM to Host DMA Command Queue */
+#define D2G_CMD_Q	21	/* DRAM to GlbRam DMA Command Queue */
+#define D2D_CMD_Q	22	/* DRAM to DRAM DMA Command Queue */
+#define PXL_CMD_Q	23	/* Low Priority Proxy Command Queue */
+#define PXH_CMD_Q	24	/* High Priority Proxy Command Queue */
+#define RSQ_CMD_Q	25	/* Receive Sequencer Command Queue */
+#define RCV_BUF_Q	26	/* Receive Buffer Queue */
 /* Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q) */
-#define PXY_COPY_EN	0x00200000	/* enable copy of xmt descriptor to xmt command queue */
-#define PXY_SIZE_16	0x00000000	/* copy 16 bytes */
-#define PXY_SIZE_32	0x00100000	/* copy 32 bytes */
+/* enable copy of xmt descriptor to xmt command queue */
+#define PXY_COPY_EN	0x00200000
+#define PXY_SIZE_16	0x00000000	/* copy 16 bytes */
+#define PXY_SIZE_32	0x00100000	/* copy 32 bytes */
 /* SXG EEPROM/Flash Configuration Definitions */
 /* Location of configuration data in EEPROM or Flash */
-#define EEPROM_CONFIG_START_ADDR	0x00	/* start addr for config info in EEPROM */
-#define FLASH_CONFIG_START_ADDR	0x80	/* start addr for config info in Flash */
+/* start addr for config info in EEPROM */
+#define EEPROM_CONFIG_START_ADDR	0x00
+/* start addr for config info in Flash */
+#define FLASH_CONFIG_START_ADDR	0x80
 /* Configuration data section defines */
 #define HW_CFG_SECTION_SIZE	512	/* size of H/W section */
 #define HW_CFG_SECTION_SIZE_A	256	/* size of H/W section (Sahara rev A) */
-#define SW_CFG_SECTION_START	512	/* starting location (offset) of S/W section */
-#define SW_CFG_SECTION_START_A	256	/* starting location (offset) of S/W section (Sahara rev A) */
+/* starting location (offset) of S/W section */
+#define SW_CFG_SECTION_START	512
+/* starting location (offset) of S/W section (Sahara rev A) */
+#define SW_CFG_SECTION_START_A	256
 #define SW_CFG_SECTION_SIZE	128	/* size of S/W section */
+/*
+ * H/W configuration data magic word Goes in Addr field of first
+ * struct hw_cfg_data entry
+ */
+#define HW_CFG_MAGIC_WORD	0xA5A5
+/*
+ * H/W configuration data terminator Goes in Addr field of last
+ * struct hw_cfg_data entry
+ */
+#define HW_CFG_TERMINATOR	0xFFFF
-#define HW_CFG_MAGIC_WORD	0xA5A5	/* H/W configuration data magic word */
-				/* Goes in Addr field of first struct hw_cfg_data entry */
-#define HW_CFG_TERMINATOR	0xFFFF	/* H/W configuration data terminator */
-				/* Goes in Addr field of last struct hw_cfg_data entry */
 #define SW_CFG_MAGIC_WORD	0x5A5A	/* S/W configuration data magic word */
 #pragma pack(push, 1)
@@ -703,21 +853,23 @@ struct rcv_buf_hdr {
 * Read by the Sahara hardware
 */
 struct hw_cfg_data {
-	ushort Addr;
-	ushort Data;
+	ushort	Addr;
+	ushort	Data;
 };
 /*
  * Number of struct hw_cfg_data structures to put in the configuration data
- * data structure (struct sxg_config or struct sxg_config_a). The number is computed
- * to fill the entire H/W config section of the structure.
+ * data structure (struct sxg_config or struct sxg_config_a). The number is
+ * computed to fill the entire H/W config section of the structure.
 */
-#define NUM_HW_CFG_ENTRIES	(HW_CFG_SECTION_SIZE / sizeof(struct hw_cfg_data))
-#define NUM_HW_CFG_ENTRIES_A	(HW_CFG_SECTION_SIZE_A / sizeof(struct hw_cfg_data))
+#define NUM_HW_CFG_ENTRIES \
+	(HW_CFG_SECTION_SIZE / sizeof(struct hw_cfg_data))
+#define NUM_HW_CFG_ENTRIES_A \
+	(HW_CFG_SECTION_SIZE_A / sizeof(struct hw_cfg_data))
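
Editor's note, not part of the patch: a sketch of the table shape implied by the defines above (it relies on the struct hw_cfg_data and macros defined just before this point). The first entry carries the magic word in its Addr field and the last carries the terminator; the address/data pairs in between are purely illustrative.

	static struct hw_cfg_data example_hw_cfg[NUM_HW_CFG_ENTRIES] = {
		{ HW_CFG_MAGIC_WORD, 0x0000 },	/* magic word in the Addr field of the first entry */
		{ 0x0010, 0x1234 },		/* hypothetical register address/data pair */
		{ HW_CFG_TERMINATOR, 0x0000 },	/* terminator; any remaining entries are unused */
	};
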
 /* MAC address structure */
 struct sxg_config_mac {
-	unsigned char MacAddr[6];	/* MAC Address */
+	unsigned char	MacAddr[6];	/* MAC Address */
 };
 /* FRU data structure */
@@ -800,10 +952,13 @@ struct sxg_config_a {
 * at compile time.
 */
 compile_time_assert (offsetof(struct sxg_config, SwCfg) == SW_CFG_SECTION_START);
-compile_time_assert (sizeof(struct sxg_config) == HW_CFG_SECTION_SIZE + SW_CFG_SECTION_SIZE);
+compile_time_assert (sizeof(struct sxg_config) == HW_CFG_SECTION_SIZE +
+				SW_CFG_SECTION_SIZE);
-compile_time_assert (offsetof(struct sxg_config_a, SwCfg) == SW_CFG_SECTION_START_A);
-compile_time_assert (sizeof(struct sxg_config_a) == HW_CFG_SECTION_SIZE_A + SW_CFG_SECTION_SIZE);
+compile_time_assert (offsetof(struct sxg_config_a, SwCfg)
+				== SW_CFG_SECTION_START_A);
+compile_time_assert (sizeof(struct sxg_config_a) == HW_CFG_SECTION_SIZE_A +
+				SW_CFG_SECTION_SIZE);
 #endif
 /*
 * Structure used to pass information between driver and user-mode
@@ -811,10 +966,11 @@ compile_time_assert (sizeof(struct sxg_config_a) == HW_CFG_SECTION_SIZE_A + SW_C
 */
 struct adapt_userinfo {
	bool LinkUp;
-	/* u32 LinkState;	 * use LinkUp - any need for other states? */
+	/* use LinkUp - any need for other states? */
+	/* u32 LinkState; */
	u32 LinkSpeed;	/* not currently needed */
	u32 LinkDuplex;	/* not currently needed */
-	u32 Port;	/* not currently needed */
+	u32 Port;	/* not currently needed */
	u32 PhysPort;	/* not currently needed */
	ushort PciLanes;
	unsigned char MacAddr[6];
@@ -837,11 +993,16 @@ enum ASIC_TYPE{
 /* Sahara (ASIC level) defines */
 #define SAHARA_GRAM_SIZE	0x020000	/* GRAM size - 128 KB */
 #define SAHARA_DRAM_SIZE	0x200000	/* DRAM size - 2 MB */
-#define SAHARA_QRAM_SIZE	0x004000	/* QRAM size - 16K entries (64 KB) */
-#define SAHARA_WCS_SIZE	0x002000	/* WCS - 8K instructions (x 108 bits) */
+/* QRAM size - 16K entries (64 KB) */
+#define SAHARA_QRAM_SIZE	0x004000
+/* WCS - 8K instructions (x 108 bits) */
+#define SAHARA_WCS_SIZE	0x002000
 /* Arabia (board level) defines */
 #define FLASH_SIZE	0x080000	/* 512 KB (4 Mb) */
-#define EEPROM_SIZE_XFMR	1024	/* EEPROM size (bytes), including xfmr area */
-#define EEPROM_SIZE_NO_XFMR	640	/* EEPROM size excluding xfmr area (512 + 128) */
-#define EEPROM_SIZE_REV_A	512	/* EEPROM size for Sahara rev A */
+/* EEPROM size (bytes), including xfmr area */
+#define EEPROM_SIZE_XFMR	1024
+/* EEPROM size excluding xfmr area (512 + 128) */
+#define EEPROM_SIZE_NO_XFMR	640
+/* EEPROM size for Sahara rev A */
+#define EEPROM_SIZE_REV_A	512
diff --git a/drivers/staging/sxg/sxgphycode.h b/drivers/staging/sxg/sxgphycode.h
index f7ad3895453..adfb6744b71 100644
--- a/drivers/staging/sxg/sxgphycode.h
+++ b/drivers/staging/sxg/sxgphycode.h
@@ -14,7 +14,10 @@
 * type of transceiver.
 */
-/* Download for AEL2005C PHY with SR/LR transceiver (10GBASE-SR or 10GBASE-LR) */
+/*
+ * Download for AEL2005C PHY with SR/LR transceiver
+ * (10GBASE-SR or 10GBASE-LR)
+ */
 static struct phy_ucode PhyUcode[] = {
	/*
	 * NOTE: An address of 0 is a special case. When the download routine