path: root/drivers/staging/sxg
Diffstat (limited to 'drivers/staging/sxg')
-rw-r--r--  drivers/staging/sxg/README        |    1
-rw-r--r--  drivers/staging/sxg/sxg.c         | 1379
-rw-r--r--  drivers/staging/sxg/sxg_os.h      |   41
-rw-r--r--  drivers/staging/sxg/sxgdbg.h      |    2
-rw-r--r--  drivers/staging/sxg/sxghif.h      |  410
-rw-r--r--  drivers/staging/sxg/sxghw.h       |  404
-rw-r--r--  drivers/staging/sxg/sxgphycode.h  |   12
7 files changed, 1120 insertions, 1129 deletions
diff --git a/drivers/staging/sxg/README b/drivers/staging/sxg/README
index 4d1ddbe4c33..d514d184880 100644
--- a/drivers/staging/sxg/README
+++ b/drivers/staging/sxg/README
@@ -7,6 +7,7 @@ TODO:
- remove wrappers
- checkpatch.pl cleanups
- new functionality that the card needs
+ - remove reliance on x86
Please send patches to:
Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/drivers/staging/sxg/sxg.c b/drivers/staging/sxg/sxg.c
index 6ccbee875ab..5272a18e204 100644
--- a/drivers/staging/sxg/sxg.c
+++ b/drivers/staging/sxg/sxg.c
@@ -112,12 +112,16 @@ static bool sxg_mac_filter(p_adapter_t adapter,
static struct net_device_stats *sxg_get_stats(p_net_device dev);
#endif
+#define XXXTODO 0
+
+#if XXXTODO
static int sxg_mac_set_address(p_net_device dev, void *ptr);
+static void sxg_mcast_set_list(p_net_device dev);
+#endif
static void sxg_adapter_set_hwaddr(p_adapter_t adapter);
static void sxg_unmap_mmio_space(p_adapter_t adapter);
-static void sxg_mcast_set_mask(p_adapter_t adapter);
static int sxg_initialize_adapter(p_adapter_t adapter);
static void sxg_stock_rcv_buffers(p_adapter_t adapter);
@@ -132,9 +136,6 @@ static int sxg_write_mdio_reg(p_adapter_t adapter,
u32 DevAddr, u32 RegAddr, u32 Value);
static int sxg_read_mdio_reg(p_adapter_t adapter,
u32 DevAddr, u32 RegAddr, u32 *pValue);
-static void sxg_mcast_set_list(p_net_device dev);
-
-#define XXXTODO 0
static unsigned int sxg_first_init = 1;
static char *sxg_banner =
@@ -202,7 +203,7 @@ static void sxg_init_driver(void)
{
if (sxg_first_init) {
DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
- __FUNCTION__, jiffies);
+ __func__, jiffies);
sxg_first_init = 0;
spin_lock_init(&sxg_global.driver_lock);
}
@@ -223,7 +224,7 @@ static void sxg_dbg_macaddrs(p_adapter_t adapter)
return;
}
-// SXG Globals
+/* SXG Globals */
static SXG_DRIVER SxgDriver;
#ifdef ATKDBG
@@ -250,7 +251,7 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
u32 ThisSectionSize;
u32 *Instruction = NULL;
u32 BaseAddress, AddressOffset, Address;
-// u32 Failure;
+/* u32 Failure; */
u32 ValueRead;
u32 i;
u32 numSections = 0;
@@ -259,10 +260,10 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
adapter, 0, 0, 0);
- DBG_ERROR("sxg: %s ENTER\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s ENTER\n", __func__);
switch (UcodeSel) {
- case SXG_UCODE_SAHARA: // Sahara operational ucode
+ case SXG_UCODE_SAHARA: /* Sahara operational ucode */
numSections = SNumSections;
for (i = 0; i < numSections; i++) {
sectionSize[i] = SSectionSize[i];
@@ -276,13 +277,13 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
}
DBG_ERROR("sxg: RESET THE CARD\n");
- // First, reset the card
+ /* First, reset the card */
WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
- // Download each section of the microcode as specified in
- // its download file. The *download.c file is generated using
- // the saharaobjtoc facility which converts the metastep .obj
- // file to a .c file which contains a two dimentional array.
+ /* Download each section of the microcode as specified in */
+ /* its download file. The *download.c file is generated using */
+ /* the saharaobjtoc facility which converts the metastep .obj */
+ /* file to a .c file which contains a two dimensional array. */
for (Section = 0; Section < numSections; Section++) {
DBG_ERROR("sxg: SECTION # %d\n", Section);
switch (UcodeSel) {
@@ -294,35 +295,35 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
break;
}
BaseAddress = sectionStart[Section];
- ThisSectionSize = sectionSize[Section] / 12; // Size in instructions
+ ThisSectionSize = sectionSize[Section] / 12; /* Size in instructions */
for (AddressOffset = 0; AddressOffset < ThisSectionSize;
AddressOffset++) {
Address = BaseAddress + AddressOffset;
ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
- // Write instruction bits 31 - 0
+ /* Write instruction bits 31 - 0 */
WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH);
- // Write instruction bits 63-32
+ /* Write instruction bits 63-32 */
WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1),
FLUSH);
- // Write instruction bits 95-64
+ /* Write instruction bits 95-64 */
WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2),
FLUSH);
- // Write instruction address with the WRITE bit set
+ /* Write instruction address with the WRITE bit set */
WRITE_REG(HwRegs->UcodeAddr,
(Address | MICROCODE_ADDRESS_WRITE), FLUSH);
- // Sahara bug in the ucode download logic - the write to DataLow
- // for the next instruction could get corrupted. To avoid this,
- // write to DataLow again for this instruction (which may get
- // corrupted, but it doesn't matter), then increment the address
- // and write the data for the next instruction to DataLow. That
- // write should succeed.
+ /* Sahara bug in the ucode download logic - the write to DataLow */
+ /* for the next instruction could get corrupted. To avoid this, */
+ /* write to DataLow again for this instruction (which may get */
+ /* corrupted, but it doesn't matter), then increment the address */
+ /* and write the data for the next instruction to DataLow. That */
+ /* write should succeed. */
WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE);
- // Advance 3 u32S to start of next instruction
+ /* Advance 3 u32s to start of next instruction */
Instruction += 3;
}
}
- // Now repeat the entire operation reading the instruction back and
- // checking for parity errors
+ /* Now repeat the entire operation reading the instruction back and */
+ /* checking for parity errors */
for (Section = 0; Section < numSections; Section++) {
DBG_ERROR("sxg: check SECTION # %d\n", Section);
switch (UcodeSel) {
@@ -334,74 +335,74 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
break;
}
BaseAddress = sectionStart[Section];
- ThisSectionSize = sectionSize[Section] / 12; // Size in instructions
+ ThisSectionSize = sectionSize[Section] / 12; /* Size in instructions */
for (AddressOffset = 0; AddressOffset < ThisSectionSize;
AddressOffset++) {
Address = BaseAddress + AddressOffset;
- // Write the address with the READ bit set
+ /* Write the address with the READ bit set */
WRITE_REG(HwRegs->UcodeAddr,
(Address | MICROCODE_ADDRESS_READ), FLUSH);
- // Read it back and check parity bit.
+ /* Read it back and check parity bit. */
READ_REG(HwRegs->UcodeAddr, ValueRead);
if (ValueRead & MICROCODE_ADDRESS_PARITY) {
DBG_ERROR("sxg: %s PARITY ERROR\n",
- __FUNCTION__);
+ __func__);
- return (FALSE); // Parity error
+ return (FALSE); /* Parity error */
}
ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
- // Read the instruction back and compare
+ /* Read the instruction back and compare */
READ_REG(HwRegs->UcodeDataLow, ValueRead);
if (ValueRead != *Instruction) {
DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
- __FUNCTION__);
- return (FALSE); // Miscompare
+ __func__);
+ return (FALSE); /* Miscompare */
}
READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
if (ValueRead != *(Instruction + 1)) {
DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
- __FUNCTION__);
- return (FALSE); // Miscompare
+ __func__);
+ return (FALSE); /* Miscompare */
}
READ_REG(HwRegs->UcodeDataHigh, ValueRead);
if (ValueRead != *(Instruction + 2)) {
DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
- __FUNCTION__);
- return (FALSE); // Miscompare
+ __func__);
+ return (FALSE); /* Miscompare */
}
- // Advance 3 u32S to start of next instruction
+ /* Advance 3 u32s to start of next instruction */
Instruction += 3;
}
}
- // Everything OK, Go.
+ /* Everything OK, Go. */
WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);
- // Poll the CardUp register to wait for microcode to initialize
- // Give up after 10,000 attemps (500ms).
+ /* Poll the CardUp register to wait for microcode to initialize */
+ /* Give up after 10,000 attempts (500ms). */
for (i = 0; i < 10000; i++) {
udelay(50);
READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
if (ValueRead == 0xCAFE) {
- DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __func__);
break;
}
}
if (i == 10000) {
- DBG_ERROR("sxg: %s TIMEOUT\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s TIMEOUT\n", __func__);
- return (FALSE); // Timeout
+ return (FALSE); /* Timeout */
}
- // Now write the LoadSync register. This is used to
- // synchronize with the card so it can scribble on the memory
- // that contained 0xCAFE from the "CardUp" step above
+ /* Now write the LoadSync register. This is used to */
+ /* synchronize with the card so it can scribble on the memory */
+ /* that contained 0xCAFE from the "CardUp" step above */
if (UcodeSel == SXG_UCODE_SAHARA) {
WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
}
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
adapter, 0, 0, 0);
- DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s EXIT\n", __func__);
return (TRUE);
}
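
The hunk above ends with the post-download handshake: after writing MICROCODE_ADDRESS_GO the host polls the CardUp mailbox up to 10,000 times with a 50 microsecond delay (roughly 500 ms) until the microcode posts 0xCAFE, then optionally writes LoadSync. A minimal sketch of that bounded poll is below; sxg_wait_card_up() and read_card_up() are hypothetical helpers standing in for the READ_REG(adapter->UcodeRegs[0].CardUp, ...) loop in the patch, not driver code.

/* Sketch of the bounded CardUp poll performed after the GO write.
 * read_card_up() is an illustrative stand-in for the register read. */
#include <linux/delay.h>
#include <linux/types.h>

#define SXG_CARD_UP_MAGIC 0xCAFE /* value the ucode posts when initialized */
#define SXG_CARD_UP_TRIES 10000  /* 10,000 * 50us is about a 500ms budget */

static bool sxg_wait_card_up(u32 (*read_card_up)(void))
{
	u32 i;

	for (i = 0; i < SXG_CARD_UP_TRIES; i++) {
		udelay(50);
		if (read_card_up() == SXG_CARD_UP_MAGIC)
			return true;	/* microcode is up */
	}
	return false;			/* timeout, download failed */
}
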
@@ -420,29 +421,29 @@ static int sxg_allocate_resources(p_adapter_t adapter)
int status;
u32 i;
u32 RssIds, IsrCount;
-// PSXG_XMT_RING XmtRing;
-// PSXG_RCV_RING RcvRing;
+/* PSXG_XMT_RING XmtRing; */
+/* PSXG_RCV_RING RcvRing; */
- DBG_ERROR("%s ENTER\n", __FUNCTION__);
+ DBG_ERROR("%s ENTER\n", __func__);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
adapter, 0, 0, 0);
- // Windows tells us how many CPUs it plans to use for
- // RSS
+ /* Windows tells us how many CPUs it plans to use for */
+ /* RSS */
RssIds = SXG_RSS_CPU_COUNT(adapter);
IsrCount = adapter->MsiEnabled ? RssIds : 1;
- DBG_ERROR("%s Setup the spinlocks\n", __FUNCTION__);
+ DBG_ERROR("%s Setup the spinlocks\n", __func__);
- // Allocate spinlocks and initialize listheads first.
+ /* Allocate spinlocks and initialize listheads first. */
spin_lock_init(&adapter->RcvQLock);
spin_lock_init(&adapter->SglQLock);
spin_lock_init(&adapter->XmtZeroLock);
spin_lock_init(&adapter->Bit64RegLock);
spin_lock_init(&adapter->AdapterLock);
- DBG_ERROR("%s Setup the lists\n", __FUNCTION__);
+ DBG_ERROR("%s Setup the lists\n", __func__);
InitializeListHead(&adapter->FreeRcvBuffers);
InitializeListHead(&adapter->FreeRcvBlocks);
@@ -450,39 +451,39 @@ static int sxg_allocate_resources(p_adapter_t adapter)
InitializeListHead(&adapter->FreeSglBuffers);
InitializeListHead(&adapter->AllSglBuffers);
- // Mark these basic allocations done. This flags essentially
- // tells the SxgFreeResources routine that it can grab spinlocks
- // and reference listheads.
+ /* Mark these basic allocations done. This flags essentially */
+ /* tells the SxgFreeResources routine that it can grab spinlocks */
+ /* and reference listheads. */
adapter->BasicAllocations = TRUE;
- // Main allocation loop. Start with the maximum supported by
- // the microcode and back off if memory allocation
- // fails. If we hit a minimum, fail.
+ /* Main allocation loop. Start with the maximum supported by */
+ /* the microcode and back off if memory allocation */
+ /* fails. If we hit a minimum, fail. */
for (;;) {
- DBG_ERROR("%s Allocate XmtRings size[%lx]\n", __FUNCTION__,
- (sizeof(SXG_XMT_RING) * 1));
+ DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
+ (unsigned int)(sizeof(SXG_XMT_RING) * 1));
- // Start with big items first - receive and transmit rings. At the moment
- // I'm going to keep the ring size fixed and adjust the number of
- // TCBs if we fail. Later we might consider reducing the ring size as well..
+ /* Start with big items first - receive and transmit rings. At the moment */
+ /* I'm going to keep the ring size fixed and adjust the number of */
+ /* TCBs if we fail. Later we might consider reducing the ring size as well.. */
adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
sizeof(SXG_XMT_RING) *
1,
&adapter->PXmtRings);
- DBG_ERROR("%s XmtRings[%p]\n", __FUNCTION__, adapter->XmtRings);
+ DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);
if (!adapter->XmtRings) {
goto per_tcb_allocation_failed;
}
memset(adapter->XmtRings, 0, sizeof(SXG_XMT_RING) * 1);
- DBG_ERROR("%s Allocate RcvRings size[%lx]\n", __FUNCTION__,
- (sizeof(SXG_RCV_RING) * 1));
+ DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__,
+ (unsigned int)(sizeof(SXG_RCV_RING) * 1));
adapter->RcvRings =
pci_alloc_consistent(adapter->pcidev,
sizeof(SXG_RCV_RING) * 1,
&adapter->PRcvRings);
- DBG_ERROR("%s RcvRings[%p]\n", __FUNCTION__, adapter->RcvRings);
+ DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings);
if (!adapter->RcvRings) {
goto per_tcb_allocation_failed;
}
@@ -490,7 +491,7 @@ static int sxg_allocate_resources(p_adapter_t adapter)
break;
per_tcb_allocation_failed:
- // an allocation failed. Free any successful allocations.
+ /* an allocation failed. Free any successful allocations. */
if (adapter->XmtRings) {
pci_free_consistent(adapter->pcidev,
sizeof(SXG_XMT_RING) * 4096,
@@ -505,22 +506,22 @@ static int sxg_allocate_resources(p_adapter_t adapter)
adapter->PRcvRings);
adapter->RcvRings = NULL;
}
- // Loop around and try again....
+ /* Loop around and try again.... */
}
- DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __FUNCTION__);
- // Initialize rcv zero and xmt zero rings
+ DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
+ /* Initialize rcv zero and xmt zero rings */
SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
- // Sanity check receive data structure format
+ /* Sanity check receive data structure format */
ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
(adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
ASSERT(sizeof(SXG_RCV_DESCRIPTOR_BLOCK) ==
SXG_RCV_DESCRIPTOR_BLOCK_SIZE);
- // Allocate receive data buffers. We allocate a block of buffers and
- // a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK
+ /* Allocate receive data buffers. We allocate a block of buffers and */
+ /* a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK */
for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
sxg_allocate_buffer_memory(adapter,
@@ -528,8 +529,8 @@ static int sxg_allocate_resources(p_adapter_t adapter)
ReceiveBufferSize),
SXG_BUFFER_TYPE_RCV);
}
- // NBL resource allocation can fail in the 'AllocateComplete' routine, which
- // doesn't return status. Make sure we got the number of buffers we requested
+ /* NBL resource allocation can fail in the 'AllocateComplete' routine, which */
+ /* doesn't return status. Make sure we got the number of buffers we requested */
if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
@@ -537,17 +538,17 @@ static int sxg_allocate_resources(p_adapter_t adapter)
return (STATUS_RESOURCES);
}
- DBG_ERROR("%s Allocate EventRings size[%lx]\n", __FUNCTION__,
- (sizeof(SXG_EVENT_RING) * RssIds));
+ DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__,
+ (unsigned int)(sizeof(SXG_EVENT_RING) * RssIds));
- // Allocate event queues.
+ /* Allocate event queues. */
adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
sizeof(SXG_EVENT_RING) *
RssIds,
&adapter->PEventRings);
if (!adapter->EventRings) {
- // Caller will call SxgFreeAdapter to clean up above allocations
+ /* Caller will call SxgFreeAdapter to clean up above allocations */
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
adapter, SXG_MAX_ENTRIES, 0, 0);
status = STATUS_RESOURCES;
@@ -555,12 +556,12 @@ static int sxg_allocate_resources(p_adapter_t adapter)
}
memset(adapter->EventRings, 0, sizeof(SXG_EVENT_RING) * RssIds);
- DBG_ERROR("%s Allocate ISR size[%x]\n", __FUNCTION__, IsrCount);
- // Allocate ISR
+ DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
+ /* Allocate ISR */
adapter->Isr = pci_alloc_consistent(adapter->pcidev,
IsrCount, &adapter->PIsr);
if (!adapter->Isr) {
- // Caller will call SxgFreeAdapter to clean up above allocations
+ /* Caller will call SxgFreeAdapter to clean up above allocations */
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
adapter, SXG_MAX_ENTRIES, 0, 0);
status = STATUS_RESOURCES;
@@ -568,10 +569,10 @@ static int sxg_allocate_resources(p_adapter_t adapter)
}
memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
- DBG_ERROR("%s Allocate shared XMT ring zero index location size[%lx]\n",
- __FUNCTION__, sizeof(u32));
+ DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
+ __func__, (unsigned int)sizeof(u32));
- // Allocate shared XMT ring zero index location
+ /* Allocate shared XMT ring zero index location */
adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
sizeof(u32),
&adapter->
@@ -587,7 +588,7 @@ static int sxg_allocate_resources(p_adapter_t adapter)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
adapter, SXG_MAX_ENTRIES, 0, 0);
- DBG_ERROR("%s EXIT\n", __FUNCTION__);
+ DBG_ERROR("%s EXIT\n", __func__);
return (STATUS_SUCCESS);
}
@@ -606,17 +607,17 @@ static void sxg_config_pci(struct pci_dev *pcidev)
u16 new_command;
pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
- DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __FUNCTION__, pci_command);
- // Set the command register
- new_command = pci_command | (PCI_COMMAND_MEMORY | // Memory Space Enable
- PCI_COMMAND_MASTER | // Bus master enable
- PCI_COMMAND_INVALIDATE | // Memory write and invalidate
- PCI_COMMAND_PARITY | // Parity error response
- PCI_COMMAND_SERR | // System ERR
- PCI_COMMAND_FAST_BACK); // Fast back-to-back
+ DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __func__, pci_command);
+ /* Set the command register */
+ new_command = pci_command | (PCI_COMMAND_MEMORY | /* Memory Space Enable */
+ PCI_COMMAND_MASTER | /* Bus master enable */
+ PCI_COMMAND_INVALIDATE | /* Memory write and invalidate */
+ PCI_COMMAND_PARITY | /* Parity error response */
+ PCI_COMMAND_SERR | /* System ERR */
+ PCI_COMMAND_FAST_BACK); /* Fast back-to-back */
if (pci_command != new_command) {
DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
- __FUNCTION__, pci_command, new_command);
+ __func__, pci_command, new_command);
pci_write_config_word(pcidev, PCI_COMMAND, new_command);
}
}
@@ -634,9 +635,9 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
ulong mmio_len = 0;
DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
- __FUNCTION__, jiffies, smp_processor_id());
+ __func__, jiffies, smp_processor_id());
- // Initialize trace buffer
+ /* Initialize trace buffer */
#ifdef ATKDBG
SxgTraceBuffer = &LSxgTraceBuffer;
SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
@@ -701,11 +702,11 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
mmio_start, mmio_len);
memmapped_ioaddr = ioremap(mmio_start, mmio_len);
- DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
memmapped_ioaddr);
if (!memmapped_ioaddr) {
DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
- __FUNCTION__, mmio_len, mmio_start);
+ __func__, mmio_len, mmio_start);
goto err_out_free_mmio_region;
}
@@ -727,7 +728,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
memmapped_ioaddr);
if (!memmapped_ioaddr) {
DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
- __FUNCTION__, mmio_len, mmio_start);
+ __func__, mmio_len, mmio_start);
goto err_out_free_mmio_region;
}
@@ -738,13 +739,13 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
adapter->UcodeRegs = (void *)memmapped_ioaddr;
adapter->State = SXG_STATE_INITIALIZING;
- // Maintain a list of all adapters anchored by
- // the global SxgDriver structure.
+ /* Maintain a list of all adapters anchored by */
+ /* the global SxgDriver structure. */
adapter->Next = SxgDriver.Adapters;
SxgDriver.Adapters = adapter;
adapter->AdapterID = ++SxgDriver.AdapterID;
- // Initialize CRC table used to determine multicast hash
+ /* Initialize CRC table used to determine multicast hash */
sxg_mcast_init_crc32();
adapter->JumboEnabled = FALSE;
@@ -757,18 +758,18 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
}
-// status = SXG_READ_EEPROM(adapter);
-// if (!status) {
-// goto sxg_init_bad;
-// }
+/* status = SXG_READ_EEPROM(adapter); */
+/* if (!status) { */
+/* goto sxg_init_bad; */
+/* } */
- DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
sxg_config_pci(pcidev);
- DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);
- DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
sxg_init_driver();
- DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);
adapter->vendid = pci_tbl_entry->vendor;
adapter->devid = pci_tbl_entry->device;
@@ -780,23 +781,23 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
adapter->irq = pcidev->irq;
adapter->next_netdevice = head_netdevice;
head_netdevice = netdev;
-// adapter->chipid = chip_idx;
- adapter->port = 0; //adapter->functionnumber;
+/* adapter->chipid = chip_idx; */
+ adapter->port = 0; /*adapter->functionnumber; */
adapter->cardindex = adapter->port;
- // Allocate memory and other resources
- DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __FUNCTION__);
+ /* Allocate memory and other resources */
+ DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
status = sxg_allocate_resources(adapter);
DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
- __FUNCTION__, status);
+ __func__, status);
if (status != STATUS_SUCCESS) {
goto err_out_unmap;
}
- DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
- __FUNCTION__);
+ __func__);
sxg_adapter_set_hwaddr(adapter);
} else {
adapter->state = ADAPT_FAIL;
@@ -819,7 +820,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
#endif
strcpy(netdev->name, "eth%d");
-// strcpy(netdev->name, pci_name(pcidev));
+/* strcpy(netdev->name, pci_name(pcidev)); */
if ((err = register_netdev(netdev))) {
DBG_ERROR("Cannot register net device, aborting. %s\n",
netdev->name);
@@ -832,11 +833,11 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
netdev->dev_addr[4], netdev->dev_addr[5]);
-//sxg_init_bad:
+/*sxg_init_bad: */
ASSERT(status == FALSE);
-// sxg_free_adapter(adapter);
+/* sxg_free_adapter(adapter); */
- DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
status, jiffies, smp_processor_id());
return status;
@@ -848,7 +849,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
err_out_exit_sxg_probe:
- DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __FUNCTION__, jiffies,
+ DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
smp_processor_id());
return -ENODEV;
@@ -874,12 +875,12 @@ static void sxg_disable_interrupt(p_adapter_t adapter)
{
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
adapter, adapter->InterruptsEnabled, 0, 0);
- // For now, RSS is disabled with line based interrupts
+ /* For now, RSS is disabled with line based interrupts */
ASSERT(adapter->RssEnabled == FALSE);
ASSERT(adapter->MsiEnabled == FALSE);
- //
- // Turn off interrupts by writing to the icr register.
- //
+ /* */
+ /* Turn off interrupts by writing to the icr register. */
+ /* */
WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);
adapter->InterruptsEnabled = 0;
@@ -905,12 +906,12 @@ static void sxg_enable_interrupt(p_adapter_t adapter)
{
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
adapter, adapter->InterruptsEnabled, 0, 0);
- // For now, RSS is disabled with line based interrupts
+ /* For now, RSS is disabled with line based interrupts */
ASSERT(adapter->RssEnabled == FALSE);
ASSERT(adapter->MsiEnabled == FALSE);
- //
- // Turn on interrupts by writing to the icr register.
- //
+ /* */
+ /* Turn on interrupts by writing to the icr register. */
+ /* */
WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);
adapter->InterruptsEnabled = 1;
@@ -935,29 +936,29 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
{
p_net_device dev = (p_net_device) dev_id;
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
-// u32 CpuMask = 0, i;
+/* u32 CpuMask = 0, i; */
adapter->Stats.NumInts++;
if (adapter->Isr[0] == 0) {
- // The SLIC driver used to experience a number of spurious interrupts
- // due to the delay associated with the masking of the interrupt
- // (we'd bounce back in here). If we see that again with Sahara,
- // add a READ_REG of the Icr register after the WRITE_REG below.
+ /* The SLIC driver used to experience a number of spurious interrupts */
+ /* due to the delay associated with the masking of the interrupt */
+ /* (we'd bounce back in here). If we see that again with Sahara, */
+ /* add a READ_REG of the Icr register after the WRITE_REG below. */
adapter->Stats.FalseInts++;
return IRQ_NONE;
}
- //
- // Move the Isr contents and clear the value in
- // shared memory, and mask interrupts
- //
+ /* */
+ /* Move the Isr contents and clear the value in */
+ /* shared memory, and mask interrupts */
+ /* */
adapter->IsrCopy[0] = adapter->Isr[0];
adapter->Isr[0] = 0;
WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
-// ASSERT(adapter->IsrDpcsPending == 0);
-#if XXXTODO // RSS Stuff
- // If RSS is enabled and the ISR specifies
- // SXG_ISR_EVENT, then schedule DPC's
- // based on event queues.
+/* ASSERT(adapter->IsrDpcsPending == 0); */
+#if XXXTODO /* RSS Stuff */
+ /* If RSS is enabled and the ISR specifies */
+ /* SXG_ISR_EVENT, then schedule DPC's */
+ /* based on event queues. */
if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
for (i = 0;
i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
@@ -973,8 +974,8 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
}
}
}
- // Now, either schedule the CPUs specified by the CpuMask,
- // or queue default
+ /* Now, either schedule the CPUs specified by the CpuMask, */
+ /* or queue default */
if (CpuMask) {
*QueueDefault = FALSE;
} else {
@@ -983,9 +984,9 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
}
*TargetCpus = CpuMask;
#endif
- //
- // There are no DPCs in Linux, so call the handler now
- //
+ /* */
+ /* There are no DPCs in Linux, so call the handler now */
+ /* */
sxg_handle_interrupt(adapter);
return IRQ_HANDLED;
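
The interrupt handler converted above uses a simple shared-memory handshake: the card posts status into Isr[0]; a zero value means a spurious interrupt and the handler returns IRQ_NONE; otherwise the host snapshots the word into IsrCopy[0], clears it, masks further interrupts through the Icr register, and processes events before re-enabling. The sketch below compresses that handshake into a few lines; the context structure and write_icr() callback are hypothetical stand-ins for the adapter fields and WRITE_REG call, kept only to show the ordering.

/* Sketch of the snapshot-clear-mask ordering in the line-based ISR.
 * All names here are illustrative, not the driver's own. */
#include <linux/interrupt.h>
#include <linux/types.h>

struct sxg_isr_ctx {
	volatile u32 *isr;		/* word the card writes status into */
	u32 isr_copy;			/* host-side snapshot */
	void (*write_icr)(u32 value);	/* masks/unmasks card interrupts */
};

static irqreturn_t sxg_isr_sketch(struct sxg_isr_ctx *ctx, u32 icr_mask)
{
	if (*ctx->isr == 0)
		return IRQ_NONE;	/* nothing posted: spurious interrupt */

	ctx->isr_copy = *ctx->isr;	/* snapshot the status word */
	*ctx->isr = 0;			/* clear shared memory for the card */
	ctx->write_icr(icr_mask);	/* mask until processing is done */

	/* ... process the event queue from isr_copy, then re-enable ... */
	return IRQ_HANDLED;
}
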
@@ -993,7 +994,7 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
static void sxg_handle_interrupt(p_adapter_t adapter)
{
-// unsigned char RssId = 0;
+/* unsigned char RssId = 0; */
u32 NewIsr;
if (adapter->Stats.RcvNoBuffer < 5) {
@@ -1002,32 +1003,32 @@ static void sxg_handle_interrupt(p_adapter_t adapter)
}
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
adapter, adapter->IsrCopy[0], 0, 0);
- // For now, RSS is disabled with line based interrupts
+ /* For now, RSS is disabled with line based interrupts */
ASSERT(adapter->RssEnabled == FALSE);
ASSERT(adapter->MsiEnabled == FALSE);
ASSERT(adapter->IsrCopy[0]);
-/////////////////////////////
+/*/////////////////////////// */
- // Always process the event queue.
+ /* Always process the event queue. */
sxg_process_event_queue(adapter,
(adapter->RssEnabled ? /*RssId */ 0 : 0));
-#if XXXTODO // RSS stuff
+#if XXXTODO /* RSS stuff */
if (--adapter->IsrDpcsPending) {
- // We're done.
+ /* We're done. */
ASSERT(adapter->RssEnabled);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
adapter, 0, 0, 0);
return;
}
#endif
- //
- // Last (or only) DPC processes the ISR and clears the interrupt.
- //
+ /* */
+ /* Last (or only) DPC processes the ISR and clears the interrupt. */
+ /* */
NewIsr = sxg_process_isr(adapter, 0);
- //
- // Reenable interrupts
- //
+ /* */
+ /* Reenable interrupts */
+ /* */
adapter->IsrCopy[0] = 0;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
adapter, NewIsr, 0, 0);
@@ -1063,75 +1064,75 @@ static int sxg_process_isr(p_adapter_t adapter, u32 MessageId)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
adapter, Isr, 0, 0);
- // Error
+ /* Error */
if (Isr & SXG_ISR_ERR) {
if (Isr & SXG_ISR_PDQF) {
adapter->Stats.PdqFull++;
- DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __FUNCTION__);
+ DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __func__);
}
- // No host buffer
+ /* No host buffer */
if (Isr & SXG_ISR_RMISS) {
- // There is a bunch of code in the SLIC driver which
- // attempts to process more receive events per DPC
- // if we start to fall behind. We'll probably
- // need to do something similar here, but hold
- // off for now. I don't want to make the code more
- // complicated than strictly needed.
+ /* There is a bunch of code in the SLIC driver which */
+ /* attempts to process more receive events per DPC */
+ /* if we start to fall behind. We'll probably */
+ /* need to do something similar here, but hold */
+ /* off for now. I don't want to make the code more */
+ /* complicated than strictly needed. */
adapter->Stats.RcvNoBuffer++;
if (adapter->Stats.RcvNoBuffer < 5) {
DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
- __FUNCTION__);
+ __func__);
}
}
- // Card crash
+ /* Card crash */
if (Isr & SXG_ISR_DEAD) {
- // Set aside the crash info and set the adapter state to RESET
+ /* Set aside the crash info and set the adapter state to RESET */
adapter->CrashCpu =
(unsigned char)((Isr & SXG_ISR_CPU) >>
SXG_ISR_CPU_SHIFT);
adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
adapter->Dead = TRUE;
- DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __FUNCTION__,
+ DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
adapter->CrashLocation, adapter->CrashCpu);
}
- // Event ring full
+ /* Event ring full */
if (Isr & SXG_ISR_ERFULL) {
- // Same issue as RMISS, really. This means the
- // host is falling behind the card. Need to increase
- // event ring size, process more events per interrupt,
- // and/or reduce/remove interrupt aggregation.
+ /* Same issue as RMISS, really. This means the */
+ /* host is falling behind the card. Need to increase */
+ /* event ring size, process more events per interrupt, */
+ /* and/or reduce/remove interrupt aggregation. */
adapter->Stats.EventRingFull++;
DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n",
- __FUNCTION__);
+ __func__);
}
- // Transmit drop - no DRAM buffers or XMT error
+ /* Transmit drop - no DRAM buffers or XMT error */
if (Isr & SXG_ISR_XDROP) {
adapter->Stats.XmtDrops++;
adapter->Stats.XmtErrors++;
- DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __FUNCTION__);
+ DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __func__);
}
}
- // Slowpath send completions
+ /* Slowpath send completions */
if (Isr & SXG_ISR_SPSEND) {
sxg_complete_slow_send(adapter);
}
- // Dump
+ /* Dump */
if (Isr & SXG_ISR_UPC) {
- ASSERT(adapter->DumpCmdRunning); // Maybe change when debug is added..
+ ASSERT(adapter->DumpCmdRunning); /* Maybe change when debug is added.. */
adapter->DumpCmdRunning = FALSE;
}
- // Link event
+ /* Link event */
if (Isr & SXG_ISR_LINK) {
sxg_link_event(adapter);
}
- // Debug - breakpoint hit
+ /* Debug - breakpoint hit */
if (Isr & SXG_ISR_BREAK) {
- // At the moment AGDB isn't written to support interactive
- // debug sessions. When it is, this interrupt will be used
- // to signal AGDB that it has hit a breakpoint. For now, ASSERT.
+ /* At the moment AGDB isn't written to support interactive */
+ /* debug sessions. When it is, this interrupt will be used */
+ /* to signal AGDB that it has hit a breakpoint. For now, ASSERT. */
ASSERT(0);
}
- // Heartbeat response
+ /* Heartbeat response */
if (Isr & SXG_ISR_PING) {
adapter->PingOutstanding = FALSE;
}
@@ -1171,39 +1172,39 @@ static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
(adapter->State == SXG_STATE_PAUSING) ||
(adapter->State == SXG_STATE_PAUSED) ||
(adapter->State == SXG_STATE_HALTING));
- // We may still have unprocessed events on the queue if
- // the card crashed. Don't process them.
+ /* We may still have unprocessed events on the queue if */
+ /* the card crashed. Don't process them. */
if (adapter->Dead) {
return (0);
}
- // In theory there should only be a single processor that
- // accesses this queue, and only at interrupt-DPC time. So
- // we shouldn't need a lock for any of this.
+ /* In theory there should only be a single processor that */
+ /* accesses this queue, and only at interrupt-DPC time. So */
+ /* we shouldn't need a lock for any of this. */
while (Event->Status & EVENT_STATUS_VALID) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
Event, Event->Code, Event->Status,
adapter->NextEvent);
switch (Event->Code) {
case EVENT_CODE_BUFFERS:
- ASSERT(!(Event->CommandIndex & 0xFF00)); // SXG_RING_INFO Head & Tail == unsigned char
- //
+ ASSERT(!(Event->CommandIndex & 0xFF00)); /* SXG_RING_INFO Head & Tail == unsigned char */
+ /* */
sxg_complete_descriptor_blocks(adapter,
Event->CommandIndex);
- //
+ /* */
break;
case EVENT_CODE_SLOWRCV:
--adapter->RcvBuffersOnCard;
if ((skb = sxg_slow_receive(adapter, Event))) {
u32 rx_bytes;
#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
- // Add it to our indication list
+ /* Add it to our indication list */
SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
IndicationList, num_skbs);
- // In Linux, we just pass up each skb to the protocol above at this point,
- // there is no capability of an indication list.
+ /* In Linux, we just pass up each skb to the protocol above at this point, */
+ /* there is no capability of an indication list. */
#else
-// CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE);
- rx_bytes = Event->Length; // (rcvbuf->length & IRHDDR_FLEN_MSK);
+/* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
+ rx_bytes = Event->Length; /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
skb_put(skb, rx_bytes);
adapter->stats.rx_packets++;
adapter->stats.rx_bytes += rx_bytes;
@@ -1218,43 +1219,43 @@ static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
break;
default:
DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
- __FUNCTION__, Event->Code);
-// ASSERT(0);
+ __func__, Event->Code);
+/* ASSERT(0); */
}
- // See if we need to restock card receive buffers.
- // There are two things to note here:
- // First - This test is not SMP safe. The
- // adapter->BuffersOnCard field is protected via atomic interlocked calls, but
- // we do not protect it with respect to these tests. The only way to do that
- // is with a lock, and I don't want to grab a lock every time we adjust the
- // BuffersOnCard count. Instead, we allow the buffer replenishment to be off
- // once in a while. The worst that can happen is the card is given one
- // more-or-less descriptor block than the arbitrary value we've chosen.
- // No big deal
- // In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard is adjusted.
- // Second - We expect this test to rarely evaluate to true. We attempt to
- // refill descriptor blocks as they are returned to us
- // (sxg_complete_descriptor_blocks), so The only time this should evaluate
- // to true is when sxg_complete_descriptor_blocks failed to allocate
- // receive buffers.
+ /* See if we need to restock card receive buffers. */
+ /* There are two things to note here: */
+ /* First - This test is not SMP safe. The */
+ /* adapter->BuffersOnCard field is protected via atomic interlocked calls, but */
+ /* we do not protect it with respect to these tests. The only way to do that */
+ /* is with a lock, and I don't want to grab a lock every time we adjust the */
+ /* BuffersOnCard count. Instead, we allow the buffer replenishment to be off */
+ /* once in a while. The worst that can happen is the card is given one */
+ /* more-or-less descriptor block than the arbitrary value we've chosen. */
+ /* No big deal */
+ /* In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard is adjusted. */
+ /* Second - We expect this test to rarely evaluate to true. We attempt to */
+ /* refill descriptor blocks as they are returned to us */
+ /* (sxg_complete_descriptor_blocks), so the only time this should evaluate */
+ /* to true is when sxg_complete_descriptor_blocks failed to allocate */
+ /* receive buffers. */
if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
sxg_stock_rcv_buffers(adapter);
}
- // It's more efficient to just set this to zero.
- // But clearing the top bit saves potential debug info...
+ /* It's more efficient to just set this to zero. */
+ /* But clearing the top bit saves potential debug info... */
Event->Status &= ~EVENT_STATUS_VALID;
- // Advanct to the next event
+ /* Advance to the next event */
SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
Event = &EventRing->Ring[adapter->NextEvent[RssId]];
EventsProcessed++;
if (EventsProcessed == EVENT_RING_BATCH) {
- // Release a batch of events back to the card
+ /* Release a batch of events back to the card */
WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
EVENT_RING_BATCH, FALSE);
EventsProcessed = 0;
- // If we've processed our batch limit, break out of the
- // loop and return SXG_ISR_EVENT to arrange for us to
- // be called again
+ /* If we've processed our batch limit, break out of the */
+ /* loop and return SXG_ISR_EVENT to arrange for us to */
+ /* be called again */
if (Batches++ == EVENT_BATCH_LIMIT) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
TRACE_NOISY, "EvtLimit", Batches,
@@ -1265,14 +1266,14 @@ static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
}
}
#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
- //
- // Indicate any received dumb-nic frames
- //
+ /* */
+ /* Indicate any received dumb-nic frames */
+ /* */
SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
#endif
- //
- // Release events back to the card.
- //
+ /* */
+ /* Release events back to the card. */
+ /* */
if (EventsProcessed) {
WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
EventsProcessed, FALSE);
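
The loop rewritten above is a single-consumer ring walk: each event slot carries a VALID bit set by the card; the host handles the slot, clears the bit, advances its index modulo the ring size, and releases slots back to the card in batches through the EventRelease register. The generic sketch below shows that shape; the event layout, ring size, and callbacks are invented for illustration, only the batched-release idea follows the patch.

/* Generic valid-bit ring consumer with batched release (illustrative names). */
#include <linux/types.h>

#define EV_VALID	0x80
#define EV_RING_SIZE	4096
#define EV_BATCH	16

struct ev { u8 status; u8 code; };

static u32 consume_events(struct ev *ring, u32 *next,
			  void (*handle)(struct ev *),
			  void (*release)(u32 count))
{
	u32 done = 0, batch = 0;

	while (ring[*next].status & EV_VALID) {
		handle(&ring[*next]);
		ring[*next].status &= ~EV_VALID;	/* hand the slot back */
		*next = (*next + 1) % EV_RING_SIZE;	/* advance consumer index */
		done++;
		if (++batch == EV_BATCH) {
			release(batch);			/* tell the card about a batch */
			batch = 0;
		}
	}
	if (batch)
		release(batch);				/* release any remainder */
	return done;
}
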
@@ -1299,43 +1300,43 @@ static void sxg_complete_slow_send(p_adapter_t adapter)
u32 *ContextType;
PSXG_CMD XmtCmd;
- // NOTE - This lock is dropped and regrabbed in this loop.
- // This means two different processors can both be running
- // through this loop. Be *very* careful.
+ /* NOTE - This lock is dropped and regrabbed in this loop. */
+ /* This means two different processors can both be running */
+ /* through this loop. Be *very* careful. */
spin_lock(&adapter->XmtZeroLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
while (XmtRingInfo->Tail != *adapter->XmtRingZeroIndex) {
- // Locate the current Cmd (ring descriptor entry), and
- // associated SGL, and advance the tail
+ /* Locate the current Cmd (ring descriptor entry), and */
+ /* associated SGL, and advance the tail */
SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
ASSERT(ContextType);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
- // Clear the SGL field.
+ /* Clear the SGL field. */
XmtCmd->Sgl = 0;
switch (*ContextType) {
case SXG_SGL_DUMB:
{
struct sk_buff *skb;
- // Dumb-nic send. Command context is the dumb-nic SGL
+ /* Dumb-nic send. Command context is the dumb-nic SGL */
skb = (struct sk_buff *)ContextType;
- // Complete the send
+ /* Complete the send */
SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
TRACE_IMPORTANT, "DmSndCmp", skb, 0,
0, 0);
ASSERT(adapter->Stats.XmtQLen);
- adapter->Stats.XmtQLen--; // within XmtZeroLock
+ adapter->Stats.XmtQLen--; /* within XmtZeroLock */
adapter->Stats.XmtOk++;
- // Now drop the lock and complete the send back to
- // Microsoft. We need to drop the lock because
- // Microsoft can come back with a chimney send, which
- // results in a double trip in SxgTcpOuput
+ /* Now drop the lock and complete the send back to */
+ /* Microsoft. We need to drop the lock because */
+ /* Microsoft can come back with a chimney send, which */
+ /* results in a double trip in SxgTcpOutput */
spin_unlock(&adapter->XmtZeroLock);
SXG_COMPLETE_DUMB_SEND(adapter, skb);
- // and reacquire..
+ /* and reacquire.. */
spin_lock(&adapter->XmtZeroLock);
}
break;
@@ -1371,7 +1372,7 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
RcvDataBufferHdr, RcvDataBufferHdr->State,
RcvDataBufferHdr->VirtualAddress);
- // Drop rcv frames in non-running state
+ /* Drop rcv frames in non-running state */
switch (adapter->State) {
case SXG_STATE_RUNNING:
break;
@@ -1384,12 +1385,12 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
goto drop;
}
- // Change buffer state to UPSTREAM
+ /* Change buffer state to UPSTREAM */
RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
if (Event->Status & EVENT_STATUS_RCVERR) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
Event, Event->Status, Event->HostHandle, 0);
- // XXXTODO - Remove this print later
+ /* XXXTODO - Remove this print later */
DBG_ERROR("SXG: Receive error %x\n", *(u32 *)
SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr));
sxg_process_rcv_error(adapter, *(u32 *)
@@ -1397,8 +1398,8 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
(RcvDataBufferHdr));
goto drop;
}
-#if XXXTODO // VLAN stuff
- // If there's a VLAN tag, extract it and validate it
+#if XXXTODO /* VLAN stuff */
+ /* If there's a VLAN tag, extract it and validate it */
if (((p_ether_header) (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->
EtherType == ETHERTYPE_VLAN) {
if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
@@ -1411,9 +1412,9 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
}
}
#endif
- //
- // Dumb-nic frame. See if it passes our mac filter and update stats
- //
+ /* */
+ /* Dumb-nic frame. See if it passes our mac filter and update stats */
+ /* */
if (!sxg_mac_filter(adapter, (p_ether_header)
SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
Event->Length)) {
@@ -1427,9 +1428,9 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
RcvDataBufferHdr, Packet, Event->Length, 0);
- //
- // Lastly adjust the receive packet length.
- //
+ /* */
+ /* Lastly adjust the receive packet length. */
+ /* */
SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
return (Packet);
@@ -1541,7 +1542,7 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
if (SXG_MULTICAST_PACKET(EtherHdr)) {
if (SXG_BROADCAST_PACKET(EtherHdr)) {
- // broadcast
+ /* broadcast */
if (adapter->MacFilter & MAC_BCAST) {
adapter->Stats.DumbRcvBcastPkts++;
adapter->Stats.DumbRcvBcastBytes += length;
@@ -1550,7 +1551,7 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
return (TRUE);
}
} else {
- // multicast
+ /* multicast */
if (adapter->MacFilter & MAC_ALLMCAST) {
adapter->Stats.DumbRcvMcastPkts++;
adapter->Stats.DumbRcvMcastBytes += length;
@@ -1580,9 +1581,9 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
}
}
} else if (adapter->MacFilter & MAC_DIRECTED) {
- // Not broadcast or multicast. Must be directed at us or
- // the card is in promiscuous mode. Either way, consider it
- // ours if MAC_DIRECTED is set
+ /* Not broadcast or multicast. Must be directed at us or */
+ /* the card is in promiscuous mode. Either way, consider it */
+ /* ours if MAC_DIRECTED is set */
adapter->Stats.DumbRcvUcastPkts++;
adapter->Stats.DumbRcvUcastBytes += length;
adapter->Stats.DumbRcvPkts++;
@@ -1590,7 +1591,7 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
return (TRUE);
}
if (adapter->MacFilter & MAC_PROMISC) {
- // Whatever it is, keep it.
+ /* Whatever it is, keep it. */
adapter->Stats.DumbRcvPkts++;
adapter->Stats.DumbRcvBytes += length;
return (TRUE);
@@ -1606,7 +1607,7 @@ static int sxg_register_interrupt(p_adapter_t adapter)
DBG_ERROR
("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
- __FUNCTION__, adapter, adapter->netdev->irq, NR_IRQS);
+ __func__, adapter, adapter->netdev->irq, NR_IRQS);
spin_unlock_irqrestore(&sxg_global.driver_lock,
sxg_global.flags);
@@ -1625,18 +1626,18 @@ static int sxg_register_interrupt(p_adapter_t adapter)
}
adapter->intrregistered = 1;
adapter->IntRegistered = TRUE;
- // Disable RSS with line-based interrupts
+ /* Disable RSS with line-based interrupts */
adapter->MsiEnabled = FALSE;
adapter->RssEnabled = FALSE;
DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
- __FUNCTION__, adapter, adapter->netdev->irq);
+ __func__, adapter, adapter->netdev->irq);
}
return (STATUS_SUCCESS);
}
static void sxg_deregister_interrupt(p_adapter_t adapter)
{
- DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __FUNCTION__, adapter);
+ DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
#if XXXTODO
slic_init_cleanup(adapter);
#endif
@@ -1651,7 +1652,7 @@ static void sxg_deregister_interrupt(p_adapter_t adapter)
adapter->rcv_broadcasts = 0;
adapter->rcv_multicasts = 0;
adapter->rcv_unicasts = 0;
- DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s EXIT\n", __func__);
}
/*
@@ -1666,7 +1667,7 @@ static int sxg_if_init(p_adapter_t adapter)
int status = 0;
DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d:%d] flags[%x]\n",
- __FUNCTION__, adapter->netdev->name,
+ __func__, adapter->netdev->name,
adapter->queues_initialized, adapter->state,
adapter->linkstate, dev->flags);
@@ -1680,7 +1681,7 @@ static int sxg_if_init(p_adapter_t adapter)
adapter->devflags_prev = dev->flags;
adapter->macopts = MAC_DIRECTED;
if (dev->flags) {
- DBG_ERROR("sxg: %s (%s) Set MAC options: ", __FUNCTION__,
+ DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
adapter->netdev->name);
if (dev->flags & IFF_BROADCAST) {
adapter->macopts |= MAC_BCAST;
@@ -1713,7 +1714,7 @@ static int sxg_if_init(p_adapter_t adapter)
/*
* clear any pending events, then enable interrupts
*/
- DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);
return (STATUS_SUCCESS);
}
@@ -1724,11 +1725,11 @@ static int sxg_entry_open(p_net_device dev)
int status;
ASSERT(adapter);
- DBG_ERROR("sxg: %s adapter->activated[%d]\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
adapter->activated);
DBG_ERROR
("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
- __FUNCTION__, adapter->netdev->name, jiffies, smp_processor_id(),
+ __func__, adapter->netdev->name, jiffies, smp_processor_id(),
adapter->netdev, adapter, adapter->port);
netif_stop_queue(adapter->netdev);
@@ -1738,16 +1739,16 @@ static int sxg_entry_open(p_net_device dev)
sxg_global.num_sxg_ports_active++;
adapter->activated = 1;
}
- // Initialize the adapter
- DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __FUNCTION__);
+ /* Initialize the adapter */
+ DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
status = sxg_initialize_adapter(adapter);
DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
- __FUNCTION__, status);
+ __func__, status);
if (status == STATUS_SUCCESS) {
- DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
status = sxg_if_init(adapter);
- DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
status);
}
@@ -1760,12 +1761,12 @@ static int sxg_entry_open(p_net_device dev)
sxg_global.flags);
return (status);
}
- DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
- // Enable interrupts
+ /* Enable interrupts */
SXG_ENABLE_ALL_INTERRUPTS(adapter);
- DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s EXIT\n", __func__);
spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
return STATUS_SUCCESS;
@@ -1779,27 +1780,27 @@ static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
ASSERT(adapter);
- DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __FUNCTION__, dev,
+ DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __func__, dev,
adapter);
sxg_deregister_interrupt(adapter);
sxg_unmap_mmio_space(adapter);
- DBG_ERROR("sxg: %s unregister_netdev\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s unregister_netdev\n", __func__);
unregister_netdev(dev);
mmio_start = pci_resource_start(pcidev, 0);
mmio_len = pci_resource_len(pcidev, 0);
- DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __func__,
mmio_start, mmio_len);
release_mem_region(mmio_start, mmio_len);
- DBG_ERROR("sxg: %s iounmap dev->base_addr[%x]\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s iounmap dev->base_addr[%x]\n", __func__,
(unsigned int)dev->base_addr);
iounmap((char *)dev->base_addr);
- DBG_ERROR("sxg: %s deallocate device\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s deallocate device\n", __func__);
kfree(dev);
- DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s EXIT\n", __func__);
}
static int sxg_entry_halt(p_net_device dev)
@@ -1807,17 +1808,17 @@ static int sxg_entry_halt(p_net_device dev)
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
- DBG_ERROR("sxg: %s (%s) ENTER\n", __FUNCTION__, dev->name);
+ DBG_ERROR("sxg: %s (%s) ENTER\n", __func__, dev->name);
netif_stop_queue(adapter->netdev);
adapter->state = ADAPT_DOWN;
adapter->linkstate = LINK_DOWN;
adapter->devflags_prev = 0;
DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
- __FUNCTION__, dev->name, adapter, adapter->state);
+ __func__, dev->name, adapter, adapter->state);
- DBG_ERROR("sxg: %s (%s) EXIT\n", __FUNCTION__, dev->name);
- DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s (%s) EXIT\n", __func__, dev->name);
+ DBG_ERROR("sxg: %s EXIT\n", __func__);
spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
return (STATUS_SUCCESS);
}
@@ -1825,11 +1826,11 @@ static int sxg_entry_halt(p_net_device dev)
static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd)
{
ASSERT(rq);
-// DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __FUNCTION__, cmd, rq, dev);
+/* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev); */
switch (cmd) {
case SIOCSLICSETINTAGG:
{
-// p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
+/* p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); */
u32 data[7];
u32 intagg;
@@ -1841,12 +1842,12 @@ static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd)
intagg = data[0];
printk(KERN_EMERG
"%s: set interrupt aggregation to %d\n",
- __FUNCTION__, intagg);
+ __func__, intagg);
return 0;
}
default:
-// DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __FUNCTION__, cmd);
+/* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
return -EOPNOTSUPP;
}
return 0;
@@ -1870,15 +1871,15 @@ static int sxg_send_packets(struct sk_buff *skb, p_net_device dev)
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
u32 status = STATUS_SUCCESS;
- DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __func__,
skb);
- // Check the adapter state
+ /* Check the adapter state */
switch (adapter->State) {
case SXG_STATE_INITIALIZING:
case SXG_STATE_HALTED:
case SXG_STATE_SHUTDOWN:
- ASSERT(0); // unexpected
- // fall through
+ ASSERT(0); /* unexpected */
+ /* fall through */
case SXG_STATE_RESETTING:
case SXG_STATE_SLEEP:
case SXG_STATE_BOOTDIAG:
@@ -1898,23 +1899,23 @@ static int sxg_send_packets(struct sk_buff *skb, p_net_device dev)
if (status != STATUS_SUCCESS) {
goto xmit_fail;
}
- // send a packet
+ /* send a packet */
status = sxg_transmit_packet(adapter, skb);
if (status == STATUS_SUCCESS) {
goto xmit_done;
}
xmit_fail:
- // reject & complete all the packets if they cant be sent
+ /* reject & complete all the packets if they can't be sent */
if (status != STATUS_SUCCESS) {
#if XXXTODO
-// sxg_send_packets_fail(adapter, skb, status);
+/* sxg_send_packets_fail(adapter, skb, status); */
#else
SXG_DROP_DUMB_SEND(adapter, skb);
adapter->stats.tx_dropped++;
#endif
}
- DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
status);
xmit_done:
@@ -1940,12 +1941,12 @@ static int sxg_transmit_packet(p_adapter_t adapter, struct sk_buff *skb)
void *SglBuffer;
u32 SglBufferLength;
- // The vast majority of work is done in the shared
- // sxg_dumb_sgl routine.
+ /* The vast majority of work is done in the shared */
+ /* sxg_dumb_sgl routine. */
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
adapter, skb, 0, 0);
- // Allocate a SGL buffer
+ /* Allocate a SGL buffer */
SXG_GET_SGL_BUFFER(adapter, SxgSgl);
if (!SxgSgl) {
adapter->Stats.NoSglBuf++;
@@ -1963,9 +1964,9 @@ static int sxg_transmit_packet(p_adapter_t adapter, struct sk_buff *skb)
SxgSgl->DumbPacket = skb;
pSgl = NULL;
- // Call the common sxg_dumb_sgl routine to complete the send.
+ /* Call the common sxg_dumb_sgl routine to complete the send. */
sxg_dumb_sgl(pSgl, SxgSgl);
- // Return success sxg_dumb_sgl (or something later) will complete it.
+ /* Return success sxg_dumb_sgl (or something later) will complete it. */
return (STATUS_SUCCESS);
}
@@ -1983,39 +1984,39 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
{
p_adapter_t adapter = SxgSgl->adapter;
struct sk_buff *skb = SxgSgl->DumbPacket;
- // For now, all dumb-nic sends go on RSS queue zero
+ /* For now, all dumb-nic sends go on RSS queue zero */
PSXG_XMT_RING XmtRing = &adapter->XmtRings[0];
PSXG_RING_INFO XmtRingInfo = &adapter->XmtRingZeroInfo;
PSXG_CMD XmtCmd = NULL;
-// u32 Index = 0;
+/* u32 Index = 0; */
u32 DataLength = skb->len;
-// unsigned int BufLen;
-// u32 SglOffset;
+/* unsigned int BufLen; */
+/* u32 SglOffset; */
u64 phys_addr;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
pSgl, SxgSgl, 0, 0);
- // Set aside a pointer to the sgl
+ /* Set aside a pointer to the sgl */
SxgSgl->pSgl = pSgl;
- // Sanity check that our SGL format is as we expect.
+ /* Sanity check that our SGL format is as we expect. */
ASSERT(sizeof(SXG_X64_SGE) == sizeof(SCATTER_GATHER_ELEMENT));
- // Shouldn't be a vlan tag on this frame
+ /* Shouldn't be a vlan tag on this frame */
ASSERT(SxgSgl->VlanTag.VlanTci == 0);
ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
- // From here below we work with the SGL placed in our
- // buffer.
+ /* From here below we work with the SGL placed in our */
+ /* buffer. */
SxgSgl->Sgl.NumberOfElements = 1;
- // Grab the spinlock and acquire a command
+ /* Grab the spinlock and acquire a command */
spin_lock(&adapter->XmtZeroLock);
SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
if (XmtCmd == NULL) {
- // Call sxg_complete_slow_send to see if we can
- // free up any XmtRingZero entries and then try again
+ /* Call sxg_complete_slow_send to see if we can */
+ /* free up any XmtRingZero entries and then try again */
spin_unlock(&adapter->XmtZeroLock);
sxg_complete_slow_send(adapter);
spin_lock(&adapter->XmtZeroLock);
@@ -2027,10 +2028,10 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
}
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
- // Update stats
+ /* Update stats */
adapter->Stats.DumbXmtPkts++;
adapter->Stats.DumbXmtBytes += DataLength;
-#if XXXTODO // Stats stuff
+#if XXXTODO /* Stats stuff */
if (SXG_MULTICAST_PACKET(EtherHdr)) {
if (SXG_BROADCAST_PACKET(EtherHdr)) {
adapter->Stats.DumbXmtBcastPkts++;
@@ -2044,8 +2045,8 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
adapter->Stats.DumbXmtUcastBytes += DataLength;
}
#endif
- // Fill in the command
- // Copy out the first SGE to the command and adjust for offset
+ /* Fill in the command */
+ /* Copy out the first SGE to the command and adjust for offset */
phys_addr =
pci_map_single(adapter->pcidev, skb->data, skb->len,
PCI_DMA_TODEVICE);
@@ -2053,54 +2054,54 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
XmtCmd->Buffer.FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress << 32;
XmtCmd->Buffer.FirstSgeAddress =
XmtCmd->Buffer.FirstSgeAddress | SXG_GET_ADDR_LOW(phys_addr);
-// XmtCmd->Buffer.FirstSgeAddress = SxgSgl->Sgl.Elements[Index].Address;
-// XmtCmd->Buffer.FirstSgeAddress.LowPart += MdlOffset;
+/* XmtCmd->Buffer.FirstSgeAddress = SxgSgl->Sgl.Elements[Index].Address; */
+/* XmtCmd->Buffer.FirstSgeAddress.LowPart += MdlOffset; */
XmtCmd->Buffer.FirstSgeLength = DataLength;
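The assignments above pack the 64-bit DMA address returned by pci_map_single() into FirstSgeAddress: the high half (stored just before this hunk) is shifted up 32 bits and the low half is OR-ed in through SXG_GET_ADDR_LOW. A standalone sketch of that packing, assuming SXG_GET_ADDR_LOW/HIGH simply select the low and high 32 bits (their definitions are not shown in this hunk):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ADDR_LOW(a)	((uint32_t)((a) & 0xFFFFFFFFu))
#define ADDR_HIGH(a)	((uint32_t)((a) >> 32))

int main(void)
{
	uint64_t phys = 0x0000001234567890ULL;	/* example DMA address */
	uint64_t sge;

	sge = ADDR_HIGH(phys);	/* high 32 bits first   */
	sge <<= 32;		/* move them into place */
	sge |= ADDR_LOW(phys);	/* OR in the low half   */

	printf("phys=0x%016" PRIx64 " sge=0x%016" PRIx64 "\n", phys, sge);
	return 0;
}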
- // Set a pointer to the remaining SGL entries
-// XmtCmd->Sgl = SxgSgl->PhysicalAddress;
- // Advance the physical address of the SxgSgl structure to
- // the second SGE
-// SglOffset = (u32)((u32 *)(&SxgSgl->Sgl.Elements[Index+1]) -
-// (u32 *)SxgSgl);
-// XmtCmd->Sgl.LowPart += SglOffset;
+ /* Set a pointer to the remaining SGL entries */
+/* XmtCmd->Sgl = SxgSgl->PhysicalAddress; */
+ /* Advance the physical address of the SxgSgl structure to */
+ /* the second SGE */
+/* SglOffset = (u32)((u32 *)(&SxgSgl->Sgl.Elements[Index+1]) - */
+/* (u32 *)SxgSgl); */
+/* XmtCmd->Sgl.LowPart += SglOffset; */
XmtCmd->Buffer.SgeOffset = 0;
- // Note - TotalLength might be overwritten with MSS below..
+ /* Note - TotalLength might be overwritten with MSS below.. */
XmtCmd->Buffer.TotalLength = DataLength;
- XmtCmd->SgEntries = 1; //(ushort)(SxgSgl->Sgl.NumberOfElements - Index);
+ XmtCmd->SgEntries = 1; /*(ushort)(SxgSgl->Sgl.NumberOfElements - Index); */
XmtCmd->Flags = 0;
- //
- // Advance transmit cmd descripter by 1.
- // NOTE - See comments in SxgTcpOutput where we write
- // to the XmtCmd register regarding CPU ID values and/or
- // multiple commands.
- //
- //
+ /* */
+	/* Advance transmit cmd descriptor by 1. */
+ /* NOTE - See comments in SxgTcpOutput where we write */
+ /* to the XmtCmd register regarding CPU ID values and/or */
+ /* multiple commands. */
+ /* */
+ /* */
WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE);
- //
- //
- adapter->Stats.XmtQLen++; // Stats within lock
+ /* */
+ /* */
+ adapter->Stats.XmtQLen++; /* Stats within lock */
spin_unlock(&adapter->XmtZeroLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
XmtCmd, pSgl, SxgSgl, 0);
return;
abortcmd:
- // NOTE - Only jump to this label AFTER grabbing the
- // XmtZeroLock, and DO NOT DROP IT between the
- // command allocation and the following abort.
+ /* NOTE - Only jump to this label AFTER grabbing the */
+ /* XmtZeroLock, and DO NOT DROP IT between the */
+ /* command allocation and the following abort. */
if (XmtCmd) {
SXG_ABORT_CMD(XmtRingInfo);
}
spin_unlock(&adapter->XmtZeroLock);
-// failsgl:
- // Jump to this label if failure occurs before the
- // XmtZeroLock is grabbed
+/* failsgl: */
+ /* Jump to this label if failure occurs before the */
+ /* XmtZeroLock is grabbed */
adapter->Stats.XmtErrors++;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
- SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); // SxgSgl->DumbPacket is the skb
+ SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); /* SxgSgl->DumbPacket is the skb */
}
/***************************************************************
@@ -2127,122 +2128,122 @@ static int sxg_initialize_link(p_adapter_t adapter)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
adapter, 0, 0, 0);
- // Reset PHY and XGXS module
+ /* Reset PHY and XGXS module */
WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
- // Reset transmit configuration register
+ /* Reset transmit configuration register */
WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
- // Reset receive configuration register
+ /* Reset receive configuration register */
WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
- // Reset all MAC modules
+ /* Reset all MAC modules */
WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
- // Link address 0
- // XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
- // is stored with the first nibble (0a) in the byte 0
- // of the Mac address. Possibly reverse?
+ /* Link address 0 */
+ /* XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f) */
+	/* is stored with the first byte (0a) in byte 0 */
+	/* of the MAC address. Possibly reverse? */
Value = *(u32 *) adapter->MacAddr;
WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
- // also write the MAC address to the MAC. Endian is reversed.
+ /* also write the MAC address to the MAC. Endian is reversed. */
WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
Value = (*(u16 *) & adapter->MacAddr[4] & 0x0000FFFF);
WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
- // endian swap for the MAC (put high bytes in bits [31:16], swapped)
+ /* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
Value = ntohl(Value);
WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
- // Link address 1
+ /* Link address 1 */
WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
- // Link address 2
+ /* Link address 2 */
WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
- // Link address 3
+ /* Link address 3 */
WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
- // Enable MAC modules
+ /* Enable MAC modules */
WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
- // Configure MAC
- WRITE_REG(HwRegs->MacConfig1, (AXGMAC_CFG1_XMT_PAUSE | // Allow sending of pause
- AXGMAC_CFG1_XMT_EN | // Enable XMT
- AXGMAC_CFG1_RCV_PAUSE | // Enable detection of pause
- AXGMAC_CFG1_RCV_EN | // Enable receive
- AXGMAC_CFG1_SHORT_ASSERT | // short frame detection
- AXGMAC_CFG1_CHECK_LEN | // Verify frame length
- AXGMAC_CFG1_GEN_FCS | // Generate FCS
- AXGMAC_CFG1_PAD_64), // Pad frames to 64 bytes
+ /* Configure MAC */
+ WRITE_REG(HwRegs->MacConfig1, (AXGMAC_CFG1_XMT_PAUSE | /* Allow sending of pause */
+ AXGMAC_CFG1_XMT_EN | /* Enable XMT */
+ AXGMAC_CFG1_RCV_PAUSE | /* Enable detection of pause */
+ AXGMAC_CFG1_RCV_EN | /* Enable receive */
+ AXGMAC_CFG1_SHORT_ASSERT | /* short frame detection */
+ AXGMAC_CFG1_CHECK_LEN | /* Verify frame length */
+ AXGMAC_CFG1_GEN_FCS | /* Generate FCS */
+ AXGMAC_CFG1_PAD_64), /* Pad frames to 64 bytes */
TRUE);
- // Set AXGMAC max frame length if jumbo. Not needed for standard MTU
+ /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */
if (adapter->JumboEnabled) {
WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
}
- // AMIIM Configuration Register -
- // The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
- // (bottom bits) of this register is used to determine the
- // MDC frequency as specified in the A-XGMAC Design Document.
- // This value must not be zero. The following value (62 or 0x3E)
- // is based on our MAC transmit clock frequency (MTCLK) of 312.5 MHz.
- // Given a maximum MDIO clock frequency of 2.5 MHz (see the PHY spec),
- // we get: 312.5/(2*(X+1)) < 2.5 ==> X = 62.
- // This value happens to be the default value for this register,
- // so we really don't have to do this.
+ /* AMIIM Configuration Register - */
+ /* The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion */
+ /* (bottom bits) of this register is used to determine the */
+ /* MDC frequency as specified in the A-XGMAC Design Document. */
+ /* This value must not be zero. The following value (62 or 0x3E) */
+ /* is based on our MAC transmit clock frequency (MTCLK) of 312.5 MHz. */
+ /* Given a maximum MDIO clock frequency of 2.5 MHz (see the PHY spec), */
+ /* we get: 312.5/(2*(X+1)) < 2.5 ==> X = 62. */
+ /* This value happens to be the default value for this register, */
+ /* so we really don't have to do this. */
WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
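The 0x3E (62) written above follows from the constraint in the comment: with MTCLK = 312.5 MHz and an MDC ceiling of 2.5 MHz, MDC = MTCLK / (2 * (X + 1)), so the smallest legal X is 62, giving roughly 2.48 MHz. A standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	const double mtclk_mhz = 312.5;	/* MAC transmit clock       */
	const double mdc_max   = 2.5;	/* PHY's MDC ceiling in MHz */
	unsigned x = 0;

	/* Smallest X with MTCLK / (2 * (X + 1)) <= MDC ceiling */
	while (mtclk_mhz / (2.0 * (x + 1)) > mdc_max)
		x++;

	printf("X = %u (0x%02X), MDC = %.3f MHz\n",
	       x, x, mtclk_mhz / (2.0 * (x + 1)));
	return 0;
}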
- // Power up and enable PHY and XAUI/XGXS/Serdes logic
+ /* Power up and enable PHY and XAUI/XGXS/Serdes logic */
WRITE_REG(HwRegs->LinkStatus,
(LS_PHY_CLR_RESET |
LS_XGXS_ENABLE |
LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
- // Per information given by Aeluros, wait 100 ms after removing reset.
- // It's not enough to wait for the self-clearing reset bit in reg 0 to clear.
+ /* Per information given by Aeluros, wait 100 ms after removing reset. */
+ /* It's not enough to wait for the self-clearing reset bit in reg 0 to clear. */
mdelay(100);
- // Verify the PHY has come up by checking that the Reset bit has cleared.
- status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
- PHY_PMA_CONTROL1, // PMA/PMD control register
+ /* Verify the PHY has come up by checking that the Reset bit has cleared. */
+ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
+ PHY_PMA_CONTROL1, /* PMA/PMD control register */
&Value);
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
- if (Value & PMA_CONTROL1_RESET) // reset complete if bit is 0
+ if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */
return (STATUS_FAILURE);
- // The SERDES should be initialized by now - confirm
+ /* The SERDES should be initialized by now - confirm */
READ_REG(HwRegs->LinkStatus, Value);
- if (Value & LS_SERDES_DOWN) // verify SERDES is initialized
+ if (Value & LS_SERDES_DOWN) /* verify SERDES is initialized */
return (STATUS_FAILURE);
- // The XAUI link should also be up - confirm
- if (!(Value & LS_XAUI_LINK_UP)) // verify XAUI link is up
+ /* The XAUI link should also be up - confirm */
+ if (!(Value & LS_XAUI_LINK_UP)) /* verify XAUI link is up */
return (STATUS_FAILURE);
- // Initialize the PHY
+ /* Initialize the PHY */
status = sxg_phy_init(adapter);
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
- // Enable the Link Alarm
- status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
- LASI_CONTROL, // LASI control register
- LASI_CTL_LS_ALARM_ENABLE); // enable link alarm bit
+ /* Enable the Link Alarm */
+ status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
+ LASI_CONTROL, /* LASI control register */
+ LASI_CTL_LS_ALARM_ENABLE); /* enable link alarm bit */
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
- // XXXTODO - temporary - verify bit is set
- status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
- LASI_CONTROL, // LASI control register
+ /* XXXTODO - temporary - verify bit is set */
+ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
+ LASI_CONTROL, /* LASI control register */
&Value);
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
}
- // Enable receive
+ /* Enable receive */
MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
ConfigData = (RCV_CONFIG_ENABLE |
RCV_CONFIG_ENPARSE |
@@ -2256,7 +2257,7 @@ static int sxg_initialize_link(p_adapter_t adapter)
WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
- // Mark the link as down. We'll get a link event when it comes up.
+ /* Mark the link as down. We'll get a link event when it comes up. */
sxg_link_state(adapter, SXG_LINK_DOWN);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
@@ -2279,35 +2280,35 @@ static int sxg_phy_init(p_adapter_t adapter)
PPHY_UCODE p;
int status;
- DBG_ERROR("ENTER %s\n", __FUNCTION__);
+ DBG_ERROR("ENTER %s\n", __func__);
- // Read a register to identify the PHY type
- status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
- 0xC205, // PHY ID register (?)
- &Value); // XXXTODO - add def
+ /* Read a register to identify the PHY type */
+ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
+ 0xC205, /* PHY ID register (?) */
+ &Value); /* XXXTODO - add def */
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
- if (Value == 0x0012) { // 0x0012 == AEL2005C PHY(?) - XXXTODO - add def
+ if (Value == 0x0012) { /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
DBG_ERROR
("AEL2005C PHY detected. Downloading PHY microcode.\n");
- // Initialize AEL2005C PHY and download PHY microcode
+ /* Initialize AEL2005C PHY and download PHY microcode */
for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
if (p->Addr == 0) {
- // if address == 0, data == sleep time in ms
+ /* if address == 0, data == sleep time in ms */
mdelay(p->Data);
} else {
- // write the given data to the specified address
- status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
- p->Addr, // PHY address
- p->Data); // PHY data
+ /* write the given data to the specified address */
+ status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
+ p->Addr, /* PHY address */
+ p->Data); /* PHY data */
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
}
}
}
- DBG_ERROR("EXIT %s\n", __FUNCTION__);
+ DBG_ERROR("EXIT %s\n", __func__);
return (STATUS_SUCCESS);
}
@@ -2330,42 +2331,42 @@ static void sxg_link_event(p_adapter_t adapter)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
adapter, 0, 0, 0);
- DBG_ERROR("ENTER %s\n", __FUNCTION__);
+ DBG_ERROR("ENTER %s\n", __func__);
- // Check the Link Status register. We should have a Link Alarm.
+ /* Check the Link Status register. We should have a Link Alarm. */
READ_REG(HwRegs->LinkStatus, Value);
if (Value & LS_LINK_ALARM) {
- // We got a Link Status alarm. First, pause to let the
- // link state settle (it can bounce a number of times)
+ /* We got a Link Status alarm. First, pause to let the */
+ /* link state settle (it can bounce a number of times) */
mdelay(10);
- // Now clear the alarm by reading the LASI status register.
- status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
- LASI_STATUS, // LASI status register
+ /* Now clear the alarm by reading the LASI status register. */
+ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
+ LASI_STATUS, /* LASI status register */
&Value);
if (status != STATUS_SUCCESS) {
DBG_ERROR("Error reading LASI Status MDIO register!\n");
sxg_link_state(adapter, SXG_LINK_DOWN);
-// ASSERT(0);
+/* ASSERT(0); */
}
ASSERT(Value & LASI_STATUS_LS_ALARM);
- // Now get and set the link state
+ /* Now get and set the link state */
LinkState = sxg_get_link_state(adapter);
sxg_link_state(adapter, LinkState);
DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
} else {
- // XXXTODO - Assuming Link Attention is only being generated for the
- // Link Alarm pin (and not for a XAUI Link Status change), then it's
- // impossible to get here. Yet we've gotten here twice (under extreme
- // conditions - bouncing the link up and down many times a second).
- // Needs further investigation.
+ /* XXXTODO - Assuming Link Attention is only being generated for the */
+ /* Link Alarm pin (and not for a XAUI Link Status change), then it's */
+ /* impossible to get here. Yet we've gotten here twice (under extreme */
+ /* conditions - bouncing the link up and down many times a second). */
+ /* Needs further investigation. */
DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
-// ASSERT(0);
+/* ASSERT(0); */
}
- DBG_ERROR("EXIT %s\n", __FUNCTION__);
+ DBG_ERROR("EXIT %s\n", __func__);
}
@@ -2383,50 +2384,50 @@ static SXG_LINK_STATE sxg_get_link_state(p_adapter_t adapter)
int status;
u32 Value;
- DBG_ERROR("ENTER %s\n", __FUNCTION__);
+ DBG_ERROR("ENTER %s\n", __func__);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
adapter, 0, 0, 0);
- // Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
- // the following 3 bits (from 3 different MDIO registers) are all true.
- status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
- PHY_PMA_RCV_DET, // PMA/PMD Receive Signal Detect register
+ /* Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if */
+ /* the following 3 bits (from 3 different MDIO registers) are all true. */
+ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
+ PHY_PMA_RCV_DET, /* PMA/PMD Receive Signal Detect register */
&Value);
if (status != STATUS_SUCCESS)
goto bad;
- // If PMA/PMD receive signal detect is 0, then the link is down
+ /* If PMA/PMD receive signal detect is 0, then the link is down */
if (!(Value & PMA_RCV_DETECT))
return (SXG_LINK_DOWN);
- status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS, // PHY PCS module
- PHY_PCS_10G_STATUS1, // PCS 10GBASE-R Status 1 register
+ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS, /* PHY PCS module */
+ PHY_PCS_10G_STATUS1, /* PCS 10GBASE-R Status 1 register */
&Value);
if (status != STATUS_SUCCESS)
goto bad;
- // If PCS is not locked to receive blocks, then the link is down
+ /* If PCS is not locked to receive blocks, then the link is down */
if (!(Value & PCS_10B_BLOCK_LOCK))
return (SXG_LINK_DOWN);
- status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS, // PHY XS module
- PHY_XS_LANE_STATUS, // XS Lane Status register
+ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS, /* PHY XS module */
+ PHY_XS_LANE_STATUS, /* XS Lane Status register */
&Value);
if (status != STATUS_SUCCESS)
goto bad;
- // If XS transmit lanes are not aligned, then the link is down
+ /* If XS transmit lanes are not aligned, then the link is down */
if (!(Value & XS_LANE_ALIGN))
return (SXG_LINK_DOWN);
- // All 3 bits are true, so the link is up
- DBG_ERROR("EXIT %s\n", __FUNCTION__);
+ /* All 3 bits are true, so the link is up */
+ DBG_ERROR("EXIT %s\n", __func__);
return (SXG_LINK_UP);
bad:
- // An error occurred reading an MDIO register. This shouldn't happen.
+ /* An error occurred reading an MDIO register. This shouldn't happen. */
DBG_ERROR("Error reading an MDIO register!\n");
ASSERT(0);
return (SXG_LINK_DOWN);
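As the comments above note, sxg_get_link_state() declares the link up only when three bits from three different MDIO devices (PMA receive detect, PCS block lock, XS lane alignment) are all set, and returns down as soon as any one is clear. A condensed sketch of that decision; the three predicate functions are stand-ins for the sxg_read_mdio_reg() calls:

#include <stdbool.h>
#include <stdio.h>

static bool pma_rcv_detect(void)   { return true; }
static bool pcs_block_lock(void)   { return true; }
static bool xs_lanes_aligned(void) { return true; }

static const char *link_state(void)
{
	/* Link is up only if all three bits, from three MDIO devices, are set. */
	if (!pma_rcv_detect())
		return "DOWN (no PMA receive signal)";
	if (!pcs_block_lock())
		return "DOWN (PCS not block-locked)";
	if (!xs_lanes_aligned())
		return "DOWN (XS lanes not aligned)";
	return "UP";
}

int main(void)
{
	printf("link is %s\n", link_state());
	return 0;
}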
@@ -2437,11 +2438,11 @@ static void sxg_indicate_link_state(p_adapter_t adapter,
{
if (adapter->LinkState == SXG_LINK_UP) {
DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
- __FUNCTION__);
+ __func__);
netif_start_queue(adapter->netdev);
} else {
DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
- __FUNCTION__);
+ __func__);
netif_stop_queue(adapter->netdev);
}
}
@@ -2464,23 +2465,23 @@ static void sxg_link_state(p_adapter_t adapter, SXG_LINK_STATE LinkState)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
adapter, LinkState, adapter->LinkState, adapter->State);
- DBG_ERROR("ENTER %s\n", __FUNCTION__);
+ DBG_ERROR("ENTER %s\n", __func__);
- // Hold the adapter lock during this routine. Maybe move
- // the lock to the caller.
+ /* Hold the adapter lock during this routine. Maybe move */
+ /* the lock to the caller. */
spin_lock(&adapter->AdapterLock);
if (LinkState == adapter->LinkState) {
- // Nothing changed..
+ /* Nothing changed.. */
spin_unlock(&adapter->AdapterLock);
- DBG_ERROR("EXIT #0 %s\n", __FUNCTION__);
+ DBG_ERROR("EXIT #0 %s\n", __func__);
return;
}
- // Save the adapter state
+ /* Save the adapter state */
adapter->LinkState = LinkState;
- // Drop the lock and indicate link state
+ /* Drop the lock and indicate link state */
spin_unlock(&adapter->AdapterLock);
- DBG_ERROR("EXIT #1 %s\n", __FUNCTION__);
+ DBG_ERROR("EXIT #1 %s\n", __func__);
sxg_indicate_link_state(adapter, LinkState);
}
@@ -2501,76 +2502,76 @@ static int sxg_write_mdio_reg(p_adapter_t adapter,
u32 DevAddr, u32 RegAddr, u32 Value)
{
PSXG_HW_REGS HwRegs = adapter->HwRegs;
- u32 AddrOp; // Address operation (written to MIIM field reg)
- u32 WriteOp; // Write operation (written to MIIM field reg)
- u32 Cmd; // Command (written to MIIM command reg)
+ u32 AddrOp; /* Address operation (written to MIIM field reg) */
+ u32 WriteOp; /* Write operation (written to MIIM field reg) */
+ u32 Cmd; /* Command (written to MIIM command reg) */
u32 ValueRead;
u32 Timeout;
-// DBG_ERROR("ENTER %s\n", __FUNCTION__);
+/* DBG_ERROR("ENTER %s\n", __func__); */
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
adapter, 0, 0, 0);
- // Ensure values don't exceed field width
- DevAddr &= 0x001F; // 5-bit field
- RegAddr &= 0xFFFF; // 16-bit field
- Value &= 0xFFFF; // 16-bit field
+ /* Ensure values don't exceed field width */
+ DevAddr &= 0x001F; /* 5-bit field */
+ RegAddr &= 0xFFFF; /* 16-bit field */
+ Value &= 0xFFFF; /* 16-bit field */
- // Set MIIM field register bits for an MIIM address operation
+ /* Set MIIM field register bits for an MIIM address operation */
AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
(DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
(MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
(MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
- // Set MIIM field register bits for an MIIM write operation
+ /* Set MIIM field register bits for an MIIM write operation */
WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
(DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
(MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
(MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
- // Set MIIM command register bits to execute an MIIM command
+ /* Set MIIM command register bits to execute an MIIM command */
Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
- // Reset the command register command bit (in case it's not 0)
+ /* Reset the command register command bit (in case it's not 0) */
WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
- // MIIM write to set the address of the specified MDIO register
+ /* MIIM write to set the address of the specified MDIO register */
WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
- // Write to MIIM Command Register to execute to address operation
+	/* Write to MIIM Command Register to execute the address operation */
WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
- // Poll AMIIM Indicator register to wait for completion
+ /* Poll AMIIM Indicator register to wait for completion */
Timeout = SXG_LINK_TIMEOUT;
do {
- udelay(100); // Timeout in 100us units
+ udelay(100); /* Timeout in 100us units */
READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
if (--Timeout == 0) {
return (STATUS_FAILURE);
}
} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
- // Reset the command register command bit
+ /* Reset the command register command bit */
WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
- // MIIM write to set up an MDIO write operation
+ /* MIIM write to set up an MDIO write operation */
WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
- // Write to MIIM Command Register to execute the write operation
+ /* Write to MIIM Command Register to execute the write operation */
WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
- // Poll AMIIM Indicator register to wait for completion
+ /* Poll AMIIM Indicator register to wait for completion */
Timeout = SXG_LINK_TIMEOUT;
do {
- udelay(100); // Timeout in 100us units
+ udelay(100); /* Timeout in 100us units */
READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
if (--Timeout == 0) {
return (STATUS_FAILURE);
}
} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
-// DBG_ERROR("EXIT %s\n", __FUNCTION__);
+/* DBG_ERROR("EXIT %s\n", __func__); */
return (STATUS_SUCCESS);
}
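Both MDIO accessors use the same two-phase AMIIM sequence: write the address operation to the field register, kick the command register, poll the indicator's BUSY bit with a bounded number of 100 us waits, then repeat for the data operation. A standalone sketch of the bounded busy-poll; the simulated indicator and the poll limit are placeholders for READ_REG and SXG_LINK_TIMEOUT, whose values are not shown here:

#include <stdbool.h>
#include <stdio.h>

#define INDC_BUSY	0x1u
#define POLL_LIMIT	30000u	/* placeholder for SXG_LINK_TIMEOUT */

/* Simulated indicator register: reports BUSY for the first few polls. */
static unsigned polls;
static unsigned read_indicator(void)
{
	return (++polls < 5) ? INDC_BUSY : 0;
}

/* Bounded busy-poll, shaped like the loops in the MDIO accessors. */
static bool wait_not_busy(void)
{
	unsigned timeout = POLL_LIMIT;

	do {
		/* udelay(100) sits here in the driver */
		if (--timeout == 0)
			return false;	/* STATUS_FAILURE */
	} while (read_indicator() & INDC_BUSY);

	return true;			/* STATUS_SUCCESS */
}

int main(void)
{
	printf("MDIO operation %s after %u polls\n",
	       wait_not_busy() ? "completed" : "timed out", polls);
	return 0;
}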
@@ -2591,110 +2592,78 @@ static int sxg_read_mdio_reg(p_adapter_t adapter,
u32 DevAddr, u32 RegAddr, u32 *pValue)
{
PSXG_HW_REGS HwRegs = adapter->HwRegs;
- u32 AddrOp; // Address operation (written to MIIM field reg)
- u32 ReadOp; // Read operation (written to MIIM field reg)
- u32 Cmd; // Command (written to MIIM command reg)
+ u32 AddrOp; /* Address operation (written to MIIM field reg) */
+ u32 ReadOp; /* Read operation (written to MIIM field reg) */
+ u32 Cmd; /* Command (written to MIIM command reg) */
u32 ValueRead;
u32 Timeout;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
adapter, 0, 0, 0);
-// DBG_ERROR("ENTER %s\n", __FUNCTION__);
+/* DBG_ERROR("ENTER %s\n", __func__); */
- // Ensure values don't exceed field width
- DevAddr &= 0x001F; // 5-bit field
- RegAddr &= 0xFFFF; // 16-bit field
+ /* Ensure values don't exceed field width */
+ DevAddr &= 0x001F; /* 5-bit field */
+ RegAddr &= 0xFFFF; /* 16-bit field */
- // Set MIIM field register bits for an MIIM address operation
+ /* Set MIIM field register bits for an MIIM address operation */
AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
(DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
(MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
(MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
- // Set MIIM field register bits for an MIIM read operation
+ /* Set MIIM field register bits for an MIIM read operation */
ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
(DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
(MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
(MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
- // Set MIIM command register bits to execute an MIIM command
+ /* Set MIIM command register bits to execute an MIIM command */
Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
- // Reset the command register command bit (in case it's not 0)
+ /* Reset the command register command bit (in case it's not 0) */
WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
- // MIIM write to set the address of the specified MDIO register
+ /* MIIM write to set the address of the specified MDIO register */
WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
- // Write to MIIM Command Register to execute to address operation
+	/* Write to MIIM Command Register to execute the address operation */
WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
- // Poll AMIIM Indicator register to wait for completion
+ /* Poll AMIIM Indicator register to wait for completion */
Timeout = SXG_LINK_TIMEOUT;
do {
- udelay(100); // Timeout in 100us units
+ udelay(100); /* Timeout in 100us units */
READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
if (--Timeout == 0) {
return (STATUS_FAILURE);
}
} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
- // Reset the command register command bit
+ /* Reset the command register command bit */
WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
- // MIIM write to set up an MDIO register read operation
+ /* MIIM write to set up an MDIO register read operation */
WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
- // Write to MIIM Command Register to execute the read operation
+ /* Write to MIIM Command Register to execute the read operation */
WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
- // Poll AMIIM Indicator register to wait for completion
+ /* Poll AMIIM Indicator register to wait for completion */
Timeout = SXG_LINK_TIMEOUT;
do {
- udelay(100); // Timeout in 100us units
+ udelay(100); /* Timeout in 100us units */
READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
if (--Timeout == 0) {
return (STATUS_FAILURE);
}
} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
- // Read the MDIO register data back from the field register
+ /* Read the MDIO register data back from the field register */
READ_REG(HwRegs->MacAmiimField, *pValue);
- *pValue &= 0xFFFF; // data is in the lower 16 bits
+ *pValue &= 0xFFFF; /* data is in the lower 16 bits */
-// DBG_ERROR("EXIT %s\n", __FUNCTION__);
-
- return (STATUS_SUCCESS);
-}
-
-/*
- * Allocate a mcast_address structure to hold the multicast address.
- * Link it in.
- */
-static int sxg_mcast_add_list(p_adapter_t adapter, char *address)
-{
- p_mcast_address_t mcaddr, mlist;
- bool equaladdr;
-
- /* Check to see if it already exists */
- mlist = adapter->mcastaddrs;
- while (mlist) {
- ETHER_EQ_ADDR(mlist->address, address, equaladdr);
- if (equaladdr) {
- return (STATUS_SUCCESS);
- }
- mlist = mlist->next;
- }
-
- /* Doesn't already exist. Allocate a structure to hold it */
- mcaddr = kmalloc(sizeof(mcast_address_t), GFP_ATOMIC);
- if (mcaddr == NULL)
- return 1;
-
- memcpy(mcaddr->address, address, 6);
-
- mcaddr->next = adapter->mcastaddrs;
- adapter->mcastaddrs = mcaddr;
+/* DBG_ERROR("EXIT %s\n", __func__); */
return (STATUS_SUCCESS);
}
@@ -2710,7 +2679,6 @@ static int sxg_mcast_add_list(p_adapter_t adapter, char *address)
*
*/
static u32 sxg_crc_table[256]; /* Table of CRC's for all possible byte values */
-static u32 sxg_crc_init; /* Is table initialized */
/*
* Contruct the CRC32 table
@@ -2737,6 +2705,8 @@ static void sxg_mcast_init_crc32(void)
}
}
+#if XXXTODO
+static u32 sxg_crc_init; /* Is table initialized */
/*
* Return the MAC hast as described above.
*/
@@ -2765,6 +2735,74 @@ static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
return (machash);
}
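sxg_mcast_get_mac_hash() above reduces a 6-byte MAC address to a small hash that selects one bit of the 64-bit multicast mask. The driver's exact CRC polynomial and bit selection are not visible in this hunk; as a generic illustration only, a reflected CRC-32 over the address with the low 6 bits used as the bit index looks like this:

#include <stdint.h>
#include <stdio.h>

/* Reflected CRC-32 (poly 0xEDB88320) over the 6-byte MAC address. */
static uint32_t crc32_mac(const uint8_t mac[6])
{
	uint32_t crc = 0xFFFFFFFFu;
	int i, b;

	for (i = 0; i < 6; i++) {
		crc ^= mac[i];
		for (b = 0; b < 8; b++)
			crc = (crc >> 1) ^ ((crc & 1u) ? 0xEDB88320u : 0u);
	}
	return ~crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };
	unsigned hash = crc32_mac(mac) & 0x3F;	/* 6 bits -> 0..63 */

	printf("hash %u -> set bit %u of the 64-bit multicast mask\n",
	       hash, hash);
	return 0;
}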
+static void sxg_mcast_set_mask(p_adapter_t adapter)
+{
+ PSXG_UCODE_REGS sxg_regs = adapter->UcodeRegs;
+
+ DBG_ERROR("%s ENTER (%s) macopts[%x] mask[%llx]\n", __func__,
+ adapter->netdev->name, (unsigned int)adapter->MacFilter,
+ adapter->MulticastMask);
+
+ if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
+ /* Turn on all multicast addresses. We have to do this for promiscuous
+ * mode as well as ALLMCAST mode. It saves the Microcode from having
+ * to keep state about the MAC configuration.
+ */
+/* DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n SLUT MODE!!!\n",__func__); */
+ WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
+ WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
+/* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__func__, adapter->netdev->name); */
+
+ } else {
+		/* Commit our multicast mask to the SLIC by writing to the multicast
+ * address mask registers
+ */
+ DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
+ __func__, adapter->netdev->name,
+ ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
+ ((ulong)
+ ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
+
+ WRITE_REG(sxg_regs->McastLow,
+ (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
+ WRITE_REG(sxg_regs->McastHigh,
+ (u32) ((adapter->
+ MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
+ }
+}
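sxg_mcast_set_mask() above either opens the filter completely (ALLMCAST/PROMISC) or writes the 64-bit MulticastMask out as two 32-bit halves to McastLow and McastHigh. A standalone sketch of setting one hash bit and splitting the mask for those two writes (the hash value 37 is just an example):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = 0;
	unsigned hash = 37;			/* from the MAC hash    */

	mask |= 1ULL << hash;			/* sxg_mcast_set_bit()  */

	uint32_t low  = (uint32_t)(mask & 0xFFFFFFFFu);		/* McastLow  */
	uint32_t high = (uint32_t)((mask >> 32) & 0xFFFFFFFFu);	/* McastHigh */

	printf("mask=0x%016" PRIx64 " low=0x%08" PRIX32 " high=0x%08" PRIX32 "\n",
	       mask, low, high);
	return 0;
}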
+
+/*
+ * Allocate a mcast_address structure to hold the multicast address.
+ * Link it in.
+ */
+static int sxg_mcast_add_list(p_adapter_t adapter, char *address)
+{
+ p_mcast_address_t mcaddr, mlist;
+ bool equaladdr;
+
+ /* Check to see if it already exists */
+ mlist = adapter->mcastaddrs;
+ while (mlist) {
+ ETHER_EQ_ADDR(mlist->address, address, equaladdr);
+ if (equaladdr) {
+ return (STATUS_SUCCESS);
+ }
+ mlist = mlist->next;
+ }
+
+ /* Doesn't already exist. Allocate a structure to hold it */
+ mcaddr = kmalloc(sizeof(mcast_address_t), GFP_ATOMIC);
+ if (mcaddr == NULL)
+ return 1;
+
+ memcpy(mcaddr->address, address, 6);
+
+ mcaddr->next = adapter->mcastaddrs;
+ adapter->mcastaddrs = mcaddr;
+
+ return (STATUS_SUCCESS);
+}
+
static void sxg_mcast_set_bit(p_adapter_t adapter, char *address)
{
unsigned char crcpoly;
@@ -2783,7 +2821,6 @@ static void sxg_mcast_set_bit(p_adapter_t adapter, char *address)
static void sxg_mcast_set_list(p_net_device dev)
{
-#if XXXTODO
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
int status = STATUS_SUCCESS;
int i;
@@ -2809,7 +2846,7 @@ static void sxg_mcast_set_list(p_net_device dev)
}
DBG_ERROR("%s a->devflags_prev[%x] dev->flags[%x] status[%x]\n",
- __FUNCTION__, adapter->devflags_prev, dev->flags, status);
+ __func__, adapter->devflags_prev, dev->flags, status);
if (adapter->devflags_prev != dev->flags) {
adapter->macopts = MAC_DIRECTED;
if (dev->flags) {
@@ -2828,60 +2865,24 @@ static void sxg_mcast_set_list(p_net_device dev)
}
adapter->devflags_prev = dev->flags;
DBG_ERROR("%s call sxg_config_set adapter->macopts[%x]\n",
- __FUNCTION__, adapter->macopts);
+ __func__, adapter->macopts);
sxg_config_set(adapter, TRUE);
} else {
if (status == STATUS_SUCCESS) {
sxg_mcast_set_mask(adapter);
}
}
-#endif
return;
}
-
-static void sxg_mcast_set_mask(p_adapter_t adapter)
-{
- PSXG_UCODE_REGS sxg_regs = adapter->UcodeRegs;
-
- DBG_ERROR("%s ENTER (%s) macopts[%x] mask[%llx]\n", __FUNCTION__,
- adapter->netdev->name, (unsigned int)adapter->MacFilter,
- adapter->MulticastMask);
-
- if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
- /* Turn on all multicast addresses. We have to do this for promiscuous
- * mode as well as ALLMCAST mode. It saves the Microcode from having
- * to keep state about the MAC configuration.
- */
-// DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n SLUT MODE!!!\n",__FUNCTION__);
- WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
- WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
-// DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__FUNCTION__, adapter->netdev->name);
-
- } else {
- /* Commit our multicast mast to the SLIC by writing to the multicast
- * address mask registers
- */
- DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
- __FUNCTION__, adapter->netdev->name,
- ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
- ((ulong)
- ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
-
- WRITE_REG(sxg_regs->McastLow,
- (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
- WRITE_REG(sxg_regs->McastHigh,
- (u32) ((adapter->
- MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
- }
-}
+#endif
static void sxg_unmap_mmio_space(p_adapter_t adapter)
{
#if LINUX_FREES_ADAPTER_RESOURCES
-// if (adapter->Regs) {
-// iounmap(adapter->Regs);
-// }
-// adapter->slic_regs = NULL;
+/* if (adapter->Regs) { */
+/* iounmap(adapter->Regs); */
+/* } */
+/* adapter->slic_regs = NULL; */
#endif
}
@@ -2909,8 +2910,8 @@ void SxgFreeResources(p_adapter_t adapter)
IsrCount = adapter->MsiEnabled ? RssIds : 1;
if (adapter->BasicAllocations == FALSE) {
- // No allocations have been made, including spinlocks,
- // or listhead initializations. Return.
+ /* No allocations have been made, including spinlocks, */
+ /* or listhead initializations. Return. */
return;
}
@@ -2920,7 +2921,7 @@ void SxgFreeResources(p_adapter_t adapter)
if (!(IsListEmpty(&adapter->AllSglBuffers))) {
SxgFreeSglBuffers(adapter);
}
- // Free event queues.
+ /* Free event queues. */
if (adapter->EventRings) {
pci_free_consistent(adapter->pcidev,
sizeof(SXG_EVENT_RING) * RssIds,
@@ -2947,17 +2948,17 @@ void SxgFreeResources(p_adapter_t adapter)
SXG_FREE_PACKET_POOL(adapter->PacketPoolHandle);
SXG_FREE_BUFFER_POOL(adapter->BufferPoolHandle);
- // Unmap register spaces
+ /* Unmap register spaces */
SxgUnmapResources(adapter);
- // Deregister DMA
+ /* Deregister DMA */
if (adapter->DmaHandle) {
SXG_DEREGISTER_DMA(adapter->DmaHandle);
}
- // Deregister interrupt
+ /* Deregister interrupt */
SxgDeregisterInterrupt(adapter);
- // Possibly free system info (5.2 only)
+ /* Possibly free system info (5.2 only) */
SXG_RELEASE_SYSTEM_INFO(adapter);
SxgDiagFreeResources(adapter);
@@ -3047,23 +3048,23 @@ static int sxg_allocate_buffer_memory(p_adapter_t adapter,
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
adapter, Size, BufferType, 0);
- // Grab the adapter lock and check the state.
- // If we're in anything other than INITIALIZING or
- // RUNNING state, fail. This is to prevent
- // allocations in an improper driver state
+ /* Grab the adapter lock and check the state. */
+ /* If we're in anything other than INITIALIZING or */
+ /* RUNNING state, fail. This is to prevent */
+ /* allocations in an improper driver state */
spin_lock(&adapter->AdapterLock);
- // Increment the AllocationsPending count while holding
- // the lock. Pause processing relies on this
+ /* Increment the AllocationsPending count while holding */
+ /* the lock. Pause processing relies on this */
++adapter->AllocationsPending;
spin_unlock(&adapter->AdapterLock);
- // At initialization time allocate resources synchronously.
+ /* At initialization time allocate resources synchronously. */
Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
if (Buffer == NULL) {
spin_lock(&adapter->AdapterLock);
- // Decrement the AllocationsPending count while holding
- // the lock. Pause processing relies on this
+ /* Decrement the AllocationsPending count while holding */
+ /* the lock. Pause processing relies on this */
--adapter->AllocationsPending;
spin_unlock(&adapter->AdapterLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
@@ -3113,10 +3114,10 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
(BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
ASSERT(Length == SXG_RCV_BLOCK_SIZE(BufferSize));
- // First, initialize the contained pool of receive data
- // buffers. This initialization requires NBL/NB/MDL allocations,
- // If any of them fail, free the block and return without
- // queueing the shared memory
+ /* First, initialize the contained pool of receive data */
+	/* buffers. This initialization requires NBL/NB/MDL allocations. */
+ /* If any of them fail, free the block and return without */
+ /* queueing the shared memory */
RcvDataBuffer = RcvBlock;
#if 0
for (i = 0, Paddr = *PhysicalAddress;
@@ -3126,14 +3127,14 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
for (i = 0, Paddr = PhysicalAddress;
i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) {
- //
+ /* */
RcvDataBufferHdr =
(PSXG_RCV_DATA_BUFFER_HDR) (RcvDataBuffer +
SXG_RCV_DATA_BUFFER_HDR_OFFSET
(BufferSize));
RcvDataBufferHdr->VirtualAddress = RcvDataBuffer;
RcvDataBufferHdr->PhysicalAddress = Paddr;
- RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; // For FREE macro assertion
+ RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; /* For FREE macro assertion */
RcvDataBufferHdr->Size =
SXG_RCV_BUFFER_DATA_SIZE(BufferSize);
@@ -3143,8 +3144,8 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
}
- // Place this entire block of memory on the AllRcvBlocks queue so it can be
- // free later
+ /* Place this entire block of memory on the AllRcvBlocks queue so it can be */
+	/* freed later */
RcvBlockHdr =
(PSXG_RCV_BLOCK_HDR) ((unsigned char *)RcvBlock +
SXG_RCV_BLOCK_HDR_OFFSET(BufferSize));
@@ -3155,7 +3156,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
spin_unlock(&adapter->RcvQLock);
- // Now free the contained receive data buffers that we initialized above
+ /* Now free the contained receive data buffers that we initialized above */
RcvDataBuffer = RcvBlock;
for (i = 0, Paddr = PhysicalAddress;
i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
@@ -3168,7 +3169,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
spin_unlock(&adapter->RcvQLock);
}
- // Locate the descriptor block and put it on a separate free queue
+ /* Locate the descriptor block and put it on a separate free queue */
RcvDescriptorBlock =
(PSXG_RCV_DESCRIPTOR_BLOCK) ((unsigned char *)RcvBlock +
SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
@@ -3186,7 +3187,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
adapter, RcvBlock, Length, 0);
return;
fail:
- // Free any allocated resources
+ /* Free any allocated resources */
if (RcvBlock) {
RcvDataBuffer = RcvBlock;
for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
@@ -3200,7 +3201,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
pci_free_consistent(adapter->pcidev,
Length, RcvBlock, PhysicalAddress);
}
- DBG_ERROR("%s: OUT OF RESOURCES\n", __FUNCTION__);
+ DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
adapter, adapter->FreeRcvBufferCount,
adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
@@ -3230,7 +3231,7 @@ static void sxg_allocate_sgl_buffer_complete(p_adapter_t adapter,
adapter->AllSglBufferCount++;
memset(SxgSgl, 0, sizeof(SXG_SCATTER_GATHER));
SxgSgl->PhysicalAddress = PhysicalAddress; /* *PhysicalAddress; */
- SxgSgl->adapter = adapter; // Initialize backpointer once
+ SxgSgl->adapter = adapter; /* Initialize backpointer once */
InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
spin_unlock(&adapter->SglQLock);
SxgSgl->State = SXG_BUFFER_BUSY;
@@ -3244,14 +3245,14 @@ static unsigned char temp_mac_address[6] =
static void sxg_adapter_set_hwaddr(p_adapter_t adapter)
{
-// DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __FUNCTION__,
-// card->config_set, adapter->port, adapter->physport, adapter->functionnumber);
-//
-// sxg_dbg_macaddrs(adapter);
+/* DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __func__, */
+/* card->config_set, adapter->port, adapter->physport, adapter->functionnumber); */
+/* */
+/* sxg_dbg_macaddrs(adapter); */
memcpy(adapter->macaddr, temp_mac_address, sizeof(SXG_CONFIG_MAC));
-// DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __FUNCTION__);
-// sxg_dbg_macaddrs(adapter);
+/* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __func__); */
+/* sxg_dbg_macaddrs(adapter); */
if (!(adapter->currmacaddr[0] ||
adapter->currmacaddr[1] ||
adapter->currmacaddr[2] ||
@@ -3262,18 +3263,18 @@ static void sxg_adapter_set_hwaddr(p_adapter_t adapter)
if (adapter->netdev) {
memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
}
-// DBG_ERROR ("%s EXIT port %d\n", __FUNCTION__, adapter->port);
+/* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
sxg_dbg_macaddrs(adapter);
}
+#if XXXTODO
static int sxg_mac_set_address(p_net_device dev, void *ptr)
{
-#if XXXTODO
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
struct sockaddr *addr = ptr;
- DBG_ERROR("%s ENTER (%s)\n", __FUNCTION__, adapter->netdev->name);
+ DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);
if (netif_running(dev)) {
return -EBUSY;
@@ -3282,22 +3283,22 @@ static int sxg_mac_set_address(p_net_device dev, void *ptr)
return -EBUSY;
}
DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- __FUNCTION__, adapter->netdev->name, adapter->currmacaddr[0],
+ __func__, adapter->netdev->name, adapter->currmacaddr[0],
adapter->currmacaddr[1], adapter->currmacaddr[2],
adapter->currmacaddr[3], adapter->currmacaddr[4],
adapter->currmacaddr[5]);
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- __FUNCTION__, adapter->netdev->name, adapter->currmacaddr[0],
+ __func__, adapter->netdev->name, adapter->currmacaddr[0],
adapter->currmacaddr[1], adapter->currmacaddr[2],
adapter->currmacaddr[3], adapter->currmacaddr[4],
adapter->currmacaddr[5]);
sxg_config_set(adapter, TRUE);
-#endif
return 0;
}
+#endif
/*****************************************************************************/
/************* SXG DRIVER FUNCTIONS (below) ********************************/
@@ -3321,77 +3322,77 @@ static int sxg_initialize_adapter(p_adapter_t adapter)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
adapter, 0, 0, 0);
- RssIds = 1; // XXXTODO SXG_RSS_CPU_COUNT(adapter);
+ RssIds = 1; /* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
IsrCount = adapter->MsiEnabled ? RssIds : 1;
- // Sanity check SXG_UCODE_REGS structure definition to
- // make sure the length is correct
+ /* Sanity check SXG_UCODE_REGS structure definition to */
+ /* make sure the length is correct */
ASSERT(sizeof(SXG_UCODE_REGS) == SXG_REGISTER_SIZE_PER_CPU);
- // Disable interrupts
+ /* Disable interrupts */
SXG_DISABLE_ALL_INTERRUPTS(adapter);
- // Set MTU
+ /* Set MTU */
ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
(adapter->FrameSize == JUMBOMAXFRAME));
WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
- // Set event ring base address and size
+ /* Set event ring base address and size */
WRITE_REG64(adapter,
adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
- // Per-ISR initialization
+ /* Per-ISR initialization */
for (i = 0; i < IsrCount; i++) {
u64 Addr;
- // Set interrupt status pointer
+ /* Set interrupt status pointer */
Addr = adapter->PIsr + (i * sizeof(u32));
WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
}
- // XMT ring zero index
+ /* XMT ring zero index */
WRITE_REG64(adapter,
adapter->UcodeRegs[0].SPSendIndex,
adapter->PXmtRingZeroIndex, 0);
- // Per-RSS initialization
+ /* Per-RSS initialization */
for (i = 0; i < RssIds; i++) {
- // Release all event ring entries to the Microcode
+ /* Release all event ring entries to the Microcode */
WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
TRUE);
}
- // Transmit ring base and size
+ /* Transmit ring base and size */
WRITE_REG64(adapter,
adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
- // Receive ring base and size
+ /* Receive ring base and size */
WRITE_REG64(adapter,
adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
WRITE_REG(adapter->UcodeRegs[0].RcvSize, SXG_RCV_RING_SIZE, TRUE);
- // Populate the card with receive buffers
+ /* Populate the card with receive buffers */
sxg_stock_rcv_buffers(adapter);
- // Initialize checksum offload capabilities. At the moment
- // we always enable IP and TCP receive checksums on the card.
- // Depending on the checksum configuration specified by the
- // user, we can choose to report or ignore the checksum
- // information provided by the card.
+ /* Initialize checksum offload capabilities. At the moment */
+ /* we always enable IP and TCP receive checksums on the card. */
+ /* Depending on the checksum configuration specified by the */
+ /* user, we can choose to report or ignore the checksum */
+ /* information provided by the card. */
WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
- // Initialize the MAC, XAUI
- DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __FUNCTION__);
+ /* Initialize the MAC, XAUI */
+ DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
status = sxg_initialize_link(adapter);
- DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
status);
if (status != STATUS_SUCCESS) {
return (status);
}
- // Initialize Dead to FALSE.
- // SlicCheckForHang or SlicDumpThread will take it from here.
+ /* Initialize Dead to FALSE. */
+ /* SlicCheckForHang or SlicDumpThread will take it from here. */
adapter->Dead = FALSE;
adapter->PingOutstanding = FALSE;
@@ -3428,14 +3429,14 @@ static int sxg_fill_descriptor_block(p_adapter_t adapter,
ASSERT(RcvDescriptorBlockHdr);
- // If we don't have the resources to fill the descriptor block,
- // return failure
+ /* If we don't have the resources to fill the descriptor block, */
+ /* return failure */
if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
SXG_RING_FULL(RcvRingInfo)) {
adapter->Stats.NoMem++;
return (STATUS_FAILURE);
}
- // Get a ring descriptor command
+ /* Get a ring descriptor command */
SXG_GET_CMD(RingZero,
RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
ASSERT(RingDescriptorCmd);
@@ -3443,7 +3444,7 @@ static int sxg_fill_descriptor_block(p_adapter_t adapter,
RcvDescriptorBlock =
(PSXG_RCV_DESCRIPTOR_BLOCK) RcvDescriptorBlockHdr->VirtualAddress;
- // Fill in the descriptor block
+ /* Fill in the descriptor block */
for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
ASSERT(RcvDataBufferHdr);
@@ -3454,13 +3455,13 @@ static int sxg_fill_descriptor_block(p_adapter_t adapter,
RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
RcvDataBufferHdr->PhysicalAddress;
}
- // Add the descriptor block to receive descriptor ring 0
+ /* Add the descriptor block to receive descriptor ring 0 */
RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
- // RcvBuffersOnCard is not protected via the receive lock (see
- // sxg_process_event_queue) We don't want to grap a lock every time a
- // buffer is returned to us, so we use atomic interlocked functions
- // instead.
+ /* RcvBuffersOnCard is not protected via the receive lock (see */
+	/* sxg_process_event_queue). We don't want to grab a lock every time a */
+ /* buffer is returned to us, so we use atomic interlocked functions */
+ /* instead. */
adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
@@ -3490,10 +3491,10 @@ static void sxg_stock_rcv_buffers(p_adapter_t adapter)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
adapter, adapter->RcvBuffersOnCard,
adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
- // First, see if we've got less than our minimum threshold of
- // receive buffers, there isn't an allocation in progress, and
- // we haven't exceeded our maximum.. get another block of buffers
- // None of this needs to be SMP safe. It's round numbers.
+ /* First, see if we've got less than our minimum threshold of */
+ /* receive buffers, there isn't an allocation in progress, and */
+ /* we haven't exceeded our maximum.. get another block of buffers */
+ /* None of this needs to be SMP safe. It's round numbers. */
if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) &&
(adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
(adapter->AllocationsPending == 0)) {
@@ -3502,12 +3503,12 @@ static void sxg_stock_rcv_buffers(p_adapter_t adapter)
ReceiveBufferSize),
SXG_BUFFER_TYPE_RCV);
}
- // Now grab the RcvQLock lock and proceed
+ /* Now grab the RcvQLock lock and proceed */
spin_lock(&adapter->RcvQLock);
while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
PLIST_ENTRY _ple;
- // Get a descriptor block
+ /* Get a descriptor block */
RcvDescriptorBlockHdr = NULL;
if (adapter->FreeRcvBlockCount) {
_ple = RemoveHeadList(&adapter->FreeRcvBlocks);
@@ -3519,14 +3520,14 @@ static void sxg_stock_rcv_buffers(p_adapter_t adapter)
}
if (RcvDescriptorBlockHdr == NULL) {
- // Bail out..
+ /* Bail out.. */
adapter->Stats.NoMem++;
break;
}
- // Fill in the descriptor block and give it to the card
+ /* Fill in the descriptor block and give it to the card */
if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
STATUS_FAILURE) {
- // Free the descriptor block
+ /* Free the descriptor block */
SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
RcvDescriptorBlockHdr);
break;
@@ -3560,15 +3561,15 @@ static void sxg_complete_descriptor_blocks(p_adapter_t adapter,
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
- // Now grab the RcvQLock lock and proceed
+ /* Now grab the RcvQLock lock and proceed */
spin_lock(&adapter->RcvQLock);
ASSERT(Index != RcvRingInfo->Tail);
while (RcvRingInfo->Tail != Index) {
- //
- // Locate the current Cmd (ring descriptor entry), and
- // associated receive descriptor block, and advance
- // the tail
- //
+ /* */
+ /* Locate the current Cmd (ring descriptor entry), and */
+ /* associated receive descriptor block, and advance */
+ /* the tail */
+ /* */
SXG_RETURN_CMD(RingZero,
RcvRingInfo,
RingDescriptorCmd, RcvDescriptorBlockHdr);
@@ -3576,12 +3577,12 @@ static void sxg_complete_descriptor_blocks(p_adapter_t adapter,
RcvRingInfo->Head, RcvRingInfo->Tail,
RingDescriptorCmd, RcvDescriptorBlockHdr);
- // Clear the SGL field
+ /* Clear the SGL field */
RingDescriptorCmd->Sgl = 0;
- // Attempt to refill it and hand it right back to the
- // card. If we fail to refill it, free the descriptor block
- // header. The card will be restocked later via the
- // RcvBuffersOnCard test
+ /* Attempt to refill it and hand it right back to the */
+ /* card. If we fail to refill it, free the descriptor block */
+ /* header. The card will be restocked later via the */
+ /* RcvBuffersOnCard test */
if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
STATUS_FAILURE) {
SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
diff --git a/drivers/staging/sxg/sxg_os.h b/drivers/staging/sxg/sxg_os.h
index 26fb0ffafa5..01182689aab 100644
--- a/drivers/staging/sxg/sxg_os.h
+++ b/drivers/staging/sxg/sxg_os.h
@@ -44,7 +44,6 @@
#define FALSE (0)
#define TRUE (1)
-
typedef struct _LIST_ENTRY {
struct _LIST_ENTRY *nle_flink;
struct _LIST_ENTRY *nle_blink;
@@ -69,35 +68,32 @@ typedef struct _LIST_ENTRY {
/* These two have to be inlined since they return things. */
-static __inline PLIST_ENTRY
-RemoveHeadList(list_entry *l)
+static __inline PLIST_ENTRY RemoveHeadList(list_entry * l)
{
- list_entry *f;
- list_entry *e;
+ list_entry *f;
+ list_entry *e;
- e = l->nle_flink;
- f = e->nle_flink;
- l->nle_flink = f;
- f->nle_blink = l;
+ e = l->nle_flink;
+ f = e->nle_flink;
+ l->nle_flink = f;
+ f->nle_blink = l;
- return (e);
+ return (e);
}
-static __inline PLIST_ENTRY
-RemoveTailList(list_entry *l)
+static __inline PLIST_ENTRY RemoveTailList(list_entry * l)
{
- list_entry *b;
- list_entry *e;
+ list_entry *b;
+ list_entry *e;
- e = l->nle_blink;
- b = e->nle_blink;
- l->nle_blink = b;
- b->nle_flink = l;
+ e = l->nle_blink;
+ b = e->nle_blink;
+ l->nle_blink = b;
+ b->nle_flink = l;
- return (e);
+ return (e);
}
-
#define InsertTailList(l, e) \
do { \
list_entry *b; \
@@ -120,7 +116,6 @@ RemoveTailList(list_entry *l)
(l)->nle_flink = (e); \
} while (0)
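RemoveHeadList()/RemoveTailList() above, together with the Insert*List macros, implement a circular doubly linked list with a sentinel head (an empty list points nle_flink/nle_blink back at itself). A self-contained sketch of the same structure used as a FIFO; the struct and helper names here only mirror the header and are not its actual types:

#include <stdio.h>

struct node { struct node *flink, *blink; int val; };

static void list_init(struct node *l)        { l->flink = l->blink = l; }
static int  list_empty(const struct node *l) { return l->flink == l; }

static void insert_tail(struct node *l, struct node *e)
{
	e->blink = l->blink;	/* old tail             */
	e->flink = l;		/* back to the sentinel */
	l->blink->flink = e;
	l->blink = e;
}

static struct node *remove_head(struct node *l)
{
	struct node *e = l->flink;
	struct node *f = e->flink;

	l->flink = f;		/* unlink e, as RemoveHeadList() does */
	f->blink = l;
	return e;
}

int main(void)
{
	struct node head, a = { .val = 1 }, b = { .val = 2 };

	list_init(&head);
	insert_tail(&head, &a);
	insert_tail(&head, &b);		/* FIFO: a first, then b */

	while (!list_empty(&head))
		printf("%d\n", remove_head(&head)->val);
	return 0;
}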
-
#define ATK_DEBUG 1
#if ATK_DEBUG
@@ -133,7 +128,6 @@ RemoveTailList(list_entry *l)
#define SLIC_TIMESTAMP(value)
#endif
-
/****************** SXG DEFINES *****************************************/
#ifdef ATKDBG
@@ -150,5 +144,4 @@ RemoveTailList(list_entry *l)
#define WRITE_REG64(a,reg,value,cpu) sxg_reg64_write((a),(&reg),(value),(cpu))
#define READ_REG(reg,value) (value) = readl((void __iomem *)(&reg))
-#endif /* _SLIC_OS_SPECIFIC_H_ */
-
+#endif /* _SLIC_OS_SPECIFIC_H_ */
diff --git a/drivers/staging/sxg/sxgdbg.h b/drivers/staging/sxg/sxgdbg.h
index cfb6c7c77a9..4522b8d7149 100644
--- a/drivers/staging/sxg/sxgdbg.h
+++ b/drivers/staging/sxg/sxgdbg.h
@@ -58,7 +58,7 @@
{ \
if (!(a)) { \
DBG_ERROR("ASSERT() Failure: file %s, function %s line %d\n",\
- __FILE__, __FUNCTION__, __LINE__); \
+ __FILE__, __func__, __LINE__); \
} \
}
#endif
diff --git a/drivers/staging/sxg/sxghif.h b/drivers/staging/sxg/sxghif.h
index ed26ceaa131..88bffbaa3be 100644
--- a/drivers/staging/sxg/sxghif.h
+++ b/drivers/staging/sxg/sxghif.h
@@ -14,119 +14,119 @@
*******************************************************************************/
typedef struct _SXG_UCODE_REGS {
// Address 0 - 0x3F = Command codes 0-15 for TCB 0. Excode 0
- u32 Icr; // Code = 0 (extended), ExCode = 0 - Int control
- u32 RsvdReg1; // Code = 1 - TOE -NA
- u32 RsvdReg2; // Code = 2 - TOE -NA
- u32 RsvdReg3; // Code = 3 - TOE -NA
- u32 RsvdReg4; // Code = 4 - TOE -NA
- u32 RsvdReg5; // Code = 5 - TOE -NA
- u32 CardUp; // Code = 6 - Microcode initialized when 1
- u32 RsvdReg7; // Code = 7 - TOE -NA
- u32 CodeNotUsed[8]; // Codes 8-15 not used. ExCode = 0
+ u32 Icr; // Code = 0 (extended), ExCode = 0 - Int control
+ u32 RsvdReg1; // Code = 1 - TOE -NA
+ u32 RsvdReg2; // Code = 2 - TOE -NA
+ u32 RsvdReg3; // Code = 3 - TOE -NA
+ u32 RsvdReg4; // Code = 4 - TOE -NA
+ u32 RsvdReg5; // Code = 5 - TOE -NA
+ u32 CardUp; // Code = 6 - Microcode initialized when 1
+ u32 RsvdReg7; // Code = 7 - TOE -NA
+ u32 CodeNotUsed[8]; // Codes 8-15 not used. ExCode = 0
// This brings us to ExCode 1 at address 0x40 = Interrupt status pointer
- u32 Isp; // Code = 0 (extended), ExCode = 1
- u32 PadEx1[15]; // Codes 1-15 not used with extended codes
+ u32 Isp; // Code = 0 (extended), ExCode = 1
+ u32 PadEx1[15]; // Codes 1-15 not used with extended codes
// ExCode 2 = Interrupt Status Register
- u32 Isr; // Code = 0 (extended), ExCode = 2
- u32 PadEx2[15];
+ u32 Isr; // Code = 0 (extended), ExCode = 2
+ u32 PadEx2[15];
// ExCode 3 = Event base register. Location of event rings
- u32 EventBase; // Code = 0 (extended), ExCode = 3
- u32 PadEx3[15];
+ u32 EventBase; // Code = 0 (extended), ExCode = 3
+ u32 PadEx3[15];
// ExCode 4 = Event ring size
- u32 EventSize; // Code = 0 (extended), ExCode = 4
- u32 PadEx4[15];
+ u32 EventSize; // Code = 0 (extended), ExCode = 4
+ u32 PadEx4[15];
// ExCode 5 = TCB Buffers base address
- u32 TcbBase; // Code = 0 (extended), ExCode = 5
- u32 PadEx5[15];
+ u32 TcbBase; // Code = 0 (extended), ExCode = 5
+ u32 PadEx5[15];
// ExCode 6 = TCB Composite Buffers base address
- u32 TcbCompBase; // Code = 0 (extended), ExCode = 6
- u32 PadEx6[15];
+ u32 TcbCompBase; // Code = 0 (extended), ExCode = 6
+ u32 PadEx6[15];
// ExCode 7 = Transmit ring base address
- u32 XmtBase; // Code = 0 (extended), ExCode = 7
- u32 PadEx7[15];
+ u32 XmtBase; // Code = 0 (extended), ExCode = 7
+ u32 PadEx7[15];
// ExCode 8 = Transmit ring size
- u32 XmtSize; // Code = 0 (extended), ExCode = 8
- u32 PadEx8[15];
+ u32 XmtSize; // Code = 0 (extended), ExCode = 8
+ u32 PadEx8[15];
// ExCode 9 = Receive ring base address
- u32 RcvBase; // Code = 0 (extended), ExCode = 9
- u32 PadEx9[15];
+ u32 RcvBase; // Code = 0 (extended), ExCode = 9
+ u32 PadEx9[15];
// ExCode 10 = Receive ring size
- u32 RcvSize; // Code = 0 (extended), ExCode = 10
- u32 PadEx10[15];
+ u32 RcvSize; // Code = 0 (extended), ExCode = 10
+ u32 PadEx10[15];
// ExCode 11 = Read EEPROM Config
- u32 Config; // Code = 0 (extended), ExCode = 11
- u32 PadEx11[15];
+ u32 Config; // Code = 0 (extended), ExCode = 11
+ u32 PadEx11[15];
// ExCode 12 = Multicast bits 31:0
- u32 McastLow; // Code = 0 (extended), ExCode = 12
- u32 PadEx12[15];
+ u32 McastLow; // Code = 0 (extended), ExCode = 12
+ u32 PadEx12[15];
// ExCode 13 = Multicast bits 63:32
- u32 McastHigh; // Code = 0 (extended), ExCode = 13
- u32 PadEx13[15];
+ u32 McastHigh; // Code = 0 (extended), ExCode = 13
+ u32 PadEx13[15];
// ExCode 14 = Ping
- u32 Ping; // Code = 0 (extended), ExCode = 14
- u32 PadEx14[15];
+ u32 Ping; // Code = 0 (extended), ExCode = 14
+ u32 PadEx14[15];
// ExCode 15 = Link MTU
- u32 LinkMtu; // Code = 0 (extended), ExCode = 15
- u32 PadEx15[15];
+ u32 LinkMtu; // Code = 0 (extended), ExCode = 15
+ u32 PadEx15[15];
// ExCode 16 = Download synchronization
- u32 LoadSync; // Code = 0 (extended), ExCode = 16
- u32 PadEx16[15];
+ u32 LoadSync; // Code = 0 (extended), ExCode = 16
+ u32 PadEx16[15];
// ExCode 17 = Upper DRAM address bits on 32-bit systems
- u32 Upper; // Code = 0 (extended), ExCode = 17
- u32 PadEx17[15];
+ u32 Upper; // Code = 0 (extended), ExCode = 17
+ u32 PadEx17[15];
// ExCode 18 = Slowpath Send Index Address
- u32 SPSendIndex; // Code = 0 (extended), ExCode = 18
- u32 PadEx18[15];
- u32 RsvdXF; // Code = 0 (extended), ExCode = 19
- u32 PadEx19[15];
+ u32 SPSendIndex; // Code = 0 (extended), ExCode = 18
+ u32 PadEx18[15];
+ u32 RsvdXF; // Code = 0 (extended), ExCode = 19
+ u32 PadEx19[15];
// ExCode 20 = Aggregation
- u32 Aggregation; // Code = 0 (extended), ExCode = 20
- u32 PadEx20[15];
+ u32 Aggregation; // Code = 0 (extended), ExCode = 20
+ u32 PadEx20[15];
// ExCode 21 = Receive MDL push timer
- u32 PushTicks; // Code = 0 (extended), ExCode = 21
- u32 PadEx21[15];
+ u32 PushTicks; // Code = 0 (extended), ExCode = 21
+ u32 PadEx21[15];
// ExCode 22 = TOE NA
- u32 AckFrequency; // Code = 0 (extended), ExCode = 22
- u32 PadEx22[15];
+ u32 AckFrequency; // Code = 0 (extended), ExCode = 22
+ u32 PadEx22[15];
// ExCode 23 = TOE NA
- u32 RsvdReg23;
- u32 PadEx23[15];
+ u32 RsvdReg23;
+ u32 PadEx23[15];
// ExCode 24 = TOE NA
- u32 RsvdReg24;
- u32 PadEx24[15];
+ u32 RsvdReg24;
+ u32 PadEx24[15];
// ExCode 25 = TOE NA
- u32 RsvdReg25; // Code = 0 (extended), ExCode = 25
- u32 PadEx25[15];
+ u32 RsvdReg25; // Code = 0 (extended), ExCode = 25
+ u32 PadEx25[15];
// ExCode 26 = Receive checksum requirements
- u32 ReceiveChecksum; // Code = 0 (extended), ExCode = 26
- u32 PadEx26[15];
+ u32 ReceiveChecksum; // Code = 0 (extended), ExCode = 26
+ u32 PadEx26[15];
// ExCode 27 = RSS Requirements
- u32 Rss; // Code = 0 (extended), ExCode = 27
- u32 PadEx27[15];
+ u32 Rss; // Code = 0 (extended), ExCode = 27
+ u32 PadEx27[15];
// ExCode 28 = RSS Table
- u32 RssTable; // Code = 0 (extended), ExCode = 28
- u32 PadEx28[15];
+ u32 RssTable; // Code = 0 (extended), ExCode = 28
+ u32 PadEx28[15];
// ExCode 29 = Event ring release entries
- u32 EventRelease; // Code = 0 (extended), ExCode = 29
- u32 PadEx29[15];
+ u32 EventRelease; // Code = 0 (extended), ExCode = 29
+ u32 PadEx29[15];
// ExCode 30 = Number of receive bufferlist commands on ring 0
- u32 RcvCmd; // Code = 0 (extended), ExCode = 30
- u32 PadEx30[15];
+ u32 RcvCmd; // Code = 0 (extended), ExCode = 30
+ u32 PadEx30[15];
// ExCode 31 = slowpath transmit command - Data[31:0] = 1
- u32 XmtCmd; // Code = 0 (extended), ExCode = 31
- u32 PadEx31[15];
+ u32 XmtCmd; // Code = 0 (extended), ExCode = 31
+ u32 PadEx31[15];
// ExCode 32 = Dump command
- u32 DumpCmd; // Code = 0 (extended), ExCode = 32
- u32 PadEx32[15];
+ u32 DumpCmd; // Code = 0 (extended), ExCode = 32
+ u32 PadEx32[15];
// ExCode 33 = Debug command
- u32 DebugCmd; // Code = 0 (extended), ExCode = 33
- u32 PadEx33[15];
+ u32 DebugCmd; // Code = 0 (extended), ExCode = 33
+ u32 PadEx33[15];
// There are 128 possible extended commands - each of which accounts for 16
// words (including the non-relevant base command codes 1-15).
// Pad for the remainder of these here to bring us to the next CPU
// base. As extended codes are added, reduce the first array value in
// the following field
- u32 PadToNextCpu[94][16]; // 94 = 128 - 34 (34 = Excodes 0 - 33)
+ u32 PadToNextCpu[94][16]; // 94 = 128 - 34 (34 = Excodes 0 - 33)
} SXG_UCODE_REGS, *PSXG_UCODE_REGS;
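The 94 x 16 pad above makes the whole register window add up to 128 extended codes of 16 u32 words each, i.e. 128 x 16 x 4 = 0x2000 bytes, matching the per-CPU register size defined in sxghw.h. A minimal compile-time check of that arithmetic (a sketch only, to be placed inside any function):

	/* Sketch: verify the per-CPU ucode register window size.
	 * (34 + 94) excodes x 16 words x 4 bytes = 8192 = 0x2000 */
	BUILD_BUG_ON(sizeof(SXG_UCODE_REGS) != 128 * 16 * sizeof(u32));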
// Interrupt control register (0) values
@@ -141,7 +141,7 @@ typedef struct _SXG_UCODE_REGS {
// The Microcode supports up to 16 RSS queues
#define SXG_MAX_RSS 16
-#define SXG_MAX_RSS_TABLE_SIZE 256 // 256-byte max
+#define SXG_MAX_RSS_TABLE_SIZE 256 // 256-byte max
#define SXG_RSS_TCP6 0x00000001 // RSS TCP over IPv6
#define SXG_RSS_TCP4 0x00000002 // RSS TCP over IPv4
@@ -170,16 +170,16 @@ typedef struct _SXG_UCODE_REGS {
* SXG_UCODE_REGS definition above
*/
typedef struct _SXG_TCB_REGS {
- u32 ExCode; /* Extended codes - see SXG_UCODE_REGS */
- u32 Xmt; /* Code = 1 - # of Xmt descriptors added to ring */
- u32 Rcv; /* Code = 2 - # of Rcv descriptors added to ring */
- u32 Rsvd1; /* Code = 3 - TOE NA */
- u32 Rsvd2; /* Code = 4 - TOE NA */
- u32 Rsvd3; /* Code = 5 - TOE NA */
- u32 Invalid; /* Code = 6 - Reserved for "CardUp" see above */
- u32 Rsvd4; /* Code = 7 - TOE NA */
- u32 Rsvd5; /* Code = 8 - TOE NA */
- u32 Pad[7]; /* Codes 8-15 - Not used. */
+ u32 ExCode; /* Extended codes - see SXG_UCODE_REGS */
+ u32 Xmt; /* Code = 1 - # of Xmt descriptors added to ring */
+ u32 Rcv; /* Code = 2 - # of Rcv descriptors added to ring */
+ u32 Rsvd1; /* Code = 3 - TOE NA */
+ u32 Rsvd2; /* Code = 4 - TOE NA */
+ u32 Rsvd3; /* Code = 5 - TOE NA */
+ u32 Invalid; /* Code = 6 - Reserved for "CardUp" see above */
+ u32 Rsvd4; /* Code = 7 - TOE NA */
+ u32 Rsvd5; /* Code = 8 - TOE NA */
+ u32 Pad[7]; /* Codes 8-15 - Not used. */
} SXG_TCB_REGS, *PSXG_TCB_REGS;
/***************************************************************************
@@ -273,27 +273,27 @@ typedef struct _SXG_TCB_REGS {
*/
#pragma pack(push, 1)
typedef struct _SXG_EVENT {
- u32 Pad[1]; // not used
- u32 SndUna; // SndUna value
- u32 Resid; // receive MDL resid
+ u32 Pad[1]; // not used
+ u32 SndUna; // SndUna value
+ u32 Resid; // receive MDL resid
union {
- void * HostHandle; // Receive host handle
- u32 Rsvd1; // TOE NA
+ void *HostHandle; // Receive host handle
+ u32 Rsvd1; // TOE NA
struct {
- u32 NotUsed;
- u32 Rsvd2; // TOE NA
+ u32 NotUsed;
+ u32 Rsvd2; // TOE NA
} Flush;
};
- u32 Toeplitz; // RSS Toeplitz hash
+ u32 Toeplitz; // RSS Toeplitz hash
union {
- ushort Rsvd3; // TOE NA
- ushort HdrOffset; // Slowpath
+ ushort Rsvd3; // TOE NA
+ ushort HdrOffset; // Slowpath
};
- ushort Length; //
- unsigned char Rsvd4; // TOE NA
- unsigned char Code; // Event code
- unsigned char CommandIndex; // New ring index
- unsigned char Status; // Event status
+ ushort Length; //
+ unsigned char Rsvd4; // TOE NA
+ unsigned char Code; // Event code
+ unsigned char CommandIndex; // New ring index
+ unsigned char Status; // Event status
} SXG_EVENT, *PSXG_EVENT;
#pragma pack(pop)
@@ -318,12 +318,12 @@ typedef struct _SXG_EVENT {
// Event ring
// Size must be power of 2, between 128 and 16k
#define EVENT_RING_SIZE 4096 // ??
-#define EVENT_RING_BATCH 16 // Hand entries back 16 at a time.
-#define EVENT_BATCH_LIMIT 256 // Stop processing events after 256 (16 * 16)
+#define EVENT_RING_BATCH 16 // Hand entries back 16 at a time.
+#define EVENT_BATCH_LIMIT 256 // Stop processing events after 256 (16 * 16)
typedef struct _SXG_EVENT_RING {
- SXG_EVENT Ring[EVENT_RING_SIZE];
-}SXG_EVENT_RING, *PSXG_EVENT_RING;
+ SXG_EVENT Ring[EVENT_RING_SIZE];
+} SXG_EVENT_RING, *PSXG_EVENT_RING;
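The three defines above describe a batching scheme: entries are handed back to the card 16 at a time, and processing bails out after 256 events so one interrupt cannot monopolize the CPU. A minimal sketch of that loop, assuming a ready/valid convention in Status and an ioremapped register block (all names other than the defines and types above are hypothetical):

static void example_release_events(PSXG_UCODE_REGS regs, u32 count)
{
	/* hand entries back via the EventRelease ucode register (ExCode 29) */
	writel(count, (void __iomem *)&regs->EventRelease);
}

static void example_process_events(PSXG_EVENT_RING ring, PSXG_UCODE_REGS regs,
				   u32 *index)
{
	u32 batch = 0, done = 0;

	while (done < EVENT_BATCH_LIMIT) {
		PSXG_EVENT ev = &ring->Ring[*index];

		if (!ev->Status)		/* assumed "nothing new" marker */
			break;
		/* ... dispatch on ev->Code here ... */
		ev->Status = 0;
		*index = (*index + 1) % EVENT_RING_SIZE;
		done++;
		if (++batch == EVENT_RING_BATCH) {
			example_release_events(regs, batch);
			batch = 0;
		}
	}
	if (batch)
		example_release_events(regs, batch);
}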
/***************************************************************************
*
@@ -341,7 +341,7 @@ typedef struct _SXG_EVENT_RING {
#define SXG_TCB_PER_BUCKET 16
#define SXG_TCB_BUCKET_MASK 0xFF0 // Bucket portion of TCB ID
#define SXG_TCB_ELEMENT_MASK 0x00F // Element within bucket
-#define SXG_TCB_BUCKETS 256 // 256 * 16 = 4k
+#define SXG_TCB_BUCKETS 256 // 256 * 16 = 4k
#define SXG_TCB_BUFFER_SIZE 512 // ASSERT format is correct
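Given the masks above, a TCB id splits into a bucket index (bits 11:4) and an element within the bucket (bits 3:0), for 256 x 16 = 4096 TCBs. Illustrative helpers with hypothetical names:

static inline u32 example_tcb_bucket(u32 tcb_id)
{
	return (tcb_id & SXG_TCB_BUCKET_MASK) >> 4;	/* bits 11:4 */
}

static inline u32 example_tcb_element(u32 tcb_id)
{
	return tcb_id & SXG_TCB_ELEMENT_MASK;		/* bits 3:0 */
}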
@@ -368,7 +368,6 @@ typedef struct _SXG_EVENT_RING {
&(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.Ip : \
&(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.Ip
-
#if DBG
// Horrible kludge to distinguish dumb-nic, slowpath, and
// fastpath traffic. Decrement the HopLimit by one
@@ -396,16 +395,16 @@ typedef struct _SXG_EVENT_RING {
* Receive and transmit rings
***************************************************************************/
#define SXG_MAX_RING_SIZE 256
-#define SXG_XMT_RING_SIZE 128 // Start with 128
-#define SXG_RCV_RING_SIZE 128 // Start with 128
+#define SXG_XMT_RING_SIZE 128 // Start with 128
+#define SXG_RCV_RING_SIZE 128 // Start with 128
#define SXG_MAX_ENTRIES 4096
// Structure and macros to manage a ring
typedef struct _SXG_RING_INFO {
- unsigned char Head; // Where we add entries - Note unsigned char:RING_SIZE
- unsigned char Tail; // Where we pull off completed entries
- ushort Size; // Ring size - Must be multiple of 2
- void * Context[SXG_MAX_RING_SIZE]; // Shadow ring
+ unsigned char Head; // Where we add entries - Note unsigned char:RING_SIZE
+ unsigned char Tail; // Where we pull off completed entries
+ ushort Size; // Ring size - Must be multiple of 2
+ void *Context[SXG_MAX_RING_SIZE]; // Shadow ring
} SXG_RING_INFO, *PSXG_RING_INFO;
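The struct describes a classic producer/consumer ring: Head is where new entries are queued, Tail chases it as completions arrive, and Context[] shadows each slot with the host object (for example an skb). A sketch of that bookkeeping, assuming modulo wrap-around (the driver's own ring macros are not shown in full here):

static inline bool example_ring_full(PSXG_RING_INFO ri)
{
	return ((ri->Head + 1) % ri->Size) == ri->Tail;
}

static inline void example_ring_queue(PSXG_RING_INFO ri, void *ctx)
{
	ri->Context[ri->Head] = ctx;			/* shadow entry */
	ri->Head = (ri->Head + 1) % ri->Size;		/* advance producer */
}

static inline void *example_ring_complete(PSXG_RING_INFO ri)
{
	void *ctx = ri->Context[ri->Tail];

	ri->Context[ri->Tail] = NULL;
	ri->Tail = (ri->Tail + 1) % ri->Size;		/* advance consumer */
	return ctx;
}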
#define SXG_INITIALIZE_RING(_ring, _size) { \
@@ -483,40 +482,40 @@ typedef struct _SXG_RING_INFO {
*/
#pragma pack(push, 1)
typedef struct _SXG_CMD {
- dma_addr_t Sgl; // Physical address of SGL
+ dma_addr_t Sgl; // Physical address of SGL
union {
struct {
- dma64_addr_t FirstSgeAddress;// Address of first SGE
- u32 FirstSgeLength; // Length of first SGE
+ dma64_addr_t FirstSgeAddress; // Address of first SGE
+ u32 FirstSgeLength; // Length of first SGE
union {
- u32 Rsvd1; // TOE NA
- u32 SgeOffset; // Slowpath - 2nd SGE offset
- u32 Resid; // MDL completion - clobbers update
+ u32 Rsvd1; // TOE NA
+ u32 SgeOffset; // Slowpath - 2nd SGE offset
+ u32 Resid; // MDL completion - clobbers update
};
union {
- u32 TotalLength; // Total transfer length
- u32 Mss; // LSO MSS
+ u32 TotalLength; // Total transfer length
+ u32 Mss; // LSO MSS
};
} Buffer;
};
union {
struct {
- unsigned char Flags:4; // slowpath flags
- unsigned char IpHl:4; // Ip header length (>>2)
- unsigned char MacLen; // Mac header len
+ unsigned char Flags:4; // slowpath flags
+ unsigned char IpHl:4; // Ip header length (>>2)
+ unsigned char MacLen; // Mac header len
} CsumFlags;
struct {
- ushort Flags:4; // slowpath flags
- ushort TcpHdrOff:7; // TCP
- ushort MacLen:5; // Mac header len
+ ushort Flags:4; // slowpath flags
+ ushort TcpHdrOff:7; // TCP
+ ushort MacLen:5; // Mac header len
} LsoFlags;
- ushort Flags; // flags
+ ushort Flags; // flags
};
union {
- ushort SgEntries; // SG entry count including first sge
+ ushort SgEntries; // SG entry count including first sge
struct {
- unsigned char Status; // Copied from event status
- unsigned char NotUsed;
+ unsigned char Status; // Copied from event status
+ unsigned char NotUsed;
} Status;
};
} SXG_CMD, *PSXG_CMD;
@@ -524,8 +523,8 @@ typedef struct _SXG_CMD {
#pragma pack(push, 1)
typedef struct _VLAN_HDR {
- ushort VlanTci;
- ushort VlanTpid;
+ ushort VlanTci;
+ ushort VlanTpid;
} VLAN_HDR, *PVLAN_HDR;
#pragma pack(pop)
@@ -561,16 +560,16 @@ typedef struct _VLAN_HDR {
*
*/
// Slowpath CMD flags
-#define SXG_SLOWCMD_CSUM_IP 0x01 // Checksum IP
-#define SXG_SLOWCMD_CSUM_TCP 0x02 // Checksum TCP
-#define SXG_SLOWCMD_LSO 0x04 // Large segment send
+#define SXG_SLOWCMD_CSUM_IP 0x01 // Checksum IP
+#define SXG_SLOWCMD_CSUM_TCP 0x02 // Checksum TCP
+#define SXG_SLOWCMD_LSO 0x04 // Large segment send
typedef struct _SXG_XMT_RING {
- SXG_CMD Descriptors[SXG_XMT_RING_SIZE];
+ SXG_CMD Descriptors[SXG_XMT_RING_SIZE];
} SXG_XMT_RING, *PSXG_XMT_RING;
typedef struct _SXG_RCV_RING {
- SXG_CMD Descriptors[SXG_RCV_RING_SIZE];
+ SXG_CMD Descriptors[SXG_RCV_RING_SIZE];
} SXG_RCV_RING, *PSXG_RCV_RING;
/***************************************************************************
@@ -578,8 +577,8 @@ typedef struct _SXG_RCV_RING {
* shared memory allocation
***************************************************************************/
typedef enum {
- SXG_BUFFER_TYPE_RCV, // Receive buffer
- SXG_BUFFER_TYPE_SGL // SGL buffer
+ SXG_BUFFER_TYPE_RCV, // Receive buffer
+ SXG_BUFFER_TYPE_SGL // SGL buffer
} SXG_BUFFER_TYPE;
// State for SXG buffers
@@ -668,60 +667,60 @@ typedef enum {
#define SXG_RCV_DATA_BUFFERS 4096 // Amount to give to the card
#define SXG_INITIAL_RCV_DATA_BUFFERS 8192 // Initial pool of buffers
#define SXG_MIN_RCV_DATA_BUFFERS 2048 // Minimum amount and when to get more
-#define SXG_MAX_RCV_BLOCKS 128 // = 16384 receive buffers
+#define SXG_MAX_RCV_BLOCKS 128 // = 16384 receive buffers
// Receive buffer header
typedef struct _SXG_RCV_DATA_BUFFER_HDR {
- dma_addr_t PhysicalAddress; // Buffer physical address
+ dma_addr_t PhysicalAddress; // Buffer physical address
// Note - DO NOT USE the VirtualAddress field to locate data.
// Use the sxg.h:SXG_RECEIVE_DATA_LOCATION macro instead.
- void *VirtualAddress; // Start of buffer
- LIST_ENTRY FreeList; // Free queue of buffers
- struct _SXG_RCV_DATA_BUFFER_HDR *Next; // Fastpath data buffer queue
- u32 Size; // Buffer size
- u32 ByteOffset; // See SXG_RESTORE_MDL_OFFSET
- unsigned char State; // See SXG_BUFFER state above
- unsigned char Status; // Event status (to log PUSH)
- struct sk_buff * skb; // Double mapped (nbl and pkt)
+ void *VirtualAddress; // Start of buffer
+ LIST_ENTRY FreeList; // Free queue of buffers
+ struct _SXG_RCV_DATA_BUFFER_HDR *Next; // Fastpath data buffer queue
+ u32 Size; // Buffer size
+ u32 ByteOffset; // See SXG_RESTORE_MDL_OFFSET
+ unsigned char State; // See SXG_BUFFER state above
+ unsigned char Status; // Event status (to log PUSH)
+ struct sk_buff *skb; // Double mapped (nbl and pkt)
} SXG_RCV_DATA_BUFFER_HDR, *PSXG_RCV_DATA_BUFFER_HDR;
// SxgSlowReceive uses the PACKET (skb) contained
// in the SXG_RCV_DATA_BUFFER_HDR when indicating dumb-nic data
#define SxgDumbRcvPacket skb
-#define SXG_RCV_DATA_HDR_SIZE 256 // Space for SXG_RCV_DATA_BUFFER_HDR
+#define SXG_RCV_DATA_HDR_SIZE 256 // Space for SXG_RCV_DATA_BUFFER_HDR
#define SXG_RCV_DATA_BUFFER_SIZE 2048 // Non jumbo = 2k including HDR
#define SXG_RCV_JUMBO_BUFFER_SIZE 10240 // jumbo = 10k including HDR
// Receive data descriptor
typedef struct _SXG_RCV_DATA_DESCRIPTOR {
union {
- struct sk_buff * VirtualAddress; // Host handle
- u64 ForceTo8Bytes; // Force x86 to 8-byte boundary
+ struct sk_buff *VirtualAddress; // Host handle
+ u64 ForceTo8Bytes; // Force x86 to 8-byte boundary
};
- dma_addr_t PhysicalAddress;
+ dma_addr_t PhysicalAddress;
} SXG_RCV_DATA_DESCRIPTOR, *PSXG_RCV_DATA_DESCRIPTOR;
// Receive descriptor block
#define SXG_RCV_DESCRIPTORS_PER_BLOCK 128
#define SXG_RCV_DESCRIPTOR_BLOCK_SIZE 2048 // For sanity check
typedef struct _SXG_RCV_DESCRIPTOR_BLOCK {
- SXG_RCV_DATA_DESCRIPTOR Descriptors[SXG_RCV_DESCRIPTORS_PER_BLOCK];
+ SXG_RCV_DATA_DESCRIPTOR Descriptors[SXG_RCV_DESCRIPTORS_PER_BLOCK];
} SXG_RCV_DESCRIPTOR_BLOCK, *PSXG_RCV_DESCRIPTOR_BLOCK;
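The 2048-byte figure is the sanity check the comment hints at: assuming an 8-byte dma_addr_t, each descriptor is 8 bytes (union forced to u64) plus 8 bytes of address, so 128 x 16 = 2048, and SXG_MAX_RCV_BLOCKS (128) x 128 descriptors gives the 16384 receive buffers mentioned earlier. A hedged compile-time form of that check:

	/* Sketch only, assumes 8-byte dma_addr_t */
	BUILD_BUG_ON(sizeof(SXG_RCV_DESCRIPTOR_BLOCK) != SXG_RCV_DESCRIPTOR_BLOCK_SIZE);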
// Receive descriptor block header
typedef struct _SXG_RCV_DESCRIPTOR_BLOCK_HDR {
- void * VirtualAddress; // Start of 2k buffer
- dma_addr_t PhysicalAddress; // ..and it's physical address
- LIST_ENTRY FreeList; // Free queue of descriptor blocks
- unsigned char State; // See SXG_BUFFER state above
+ void *VirtualAddress; // Start of 2k buffer
+	dma_addr_t	PhysicalAddress;	// ..and its physical address
+ LIST_ENTRY FreeList; // Free queue of descriptor blocks
+ unsigned char State; // See SXG_BUFFER state above
} SXG_RCV_DESCRIPTOR_BLOCK_HDR, *PSXG_RCV_DESCRIPTOR_BLOCK_HDR;
// Receive block header
typedef struct _SXG_RCV_BLOCK_HDR {
- void * VirtualAddress; // Start of virtual memory
- dma_addr_t PhysicalAddress; // ..and it's physical address
- LIST_ENTRY AllList; // Queue of all SXG_RCV_BLOCKS
+ void *VirtualAddress; // Start of virtual memory
+	dma_addr_t	PhysicalAddress;	// ..and its physical address
+ LIST_ENTRY AllList; // Queue of all SXG_RCV_BLOCKS
} SXG_RCV_BLOCK_HDR, *PSXG_RCV_BLOCK_HDR;
// Macros to determine data structure offsets into receive block
@@ -747,8 +746,8 @@ typedef struct _SXG_RCV_BLOCK_HDR {
// Use the miniport reserved portion of the NBL to locate
// our SXG_RCV_DATA_BUFFER_HDR structure.
typedef struct _SXG_RCV_NBL_RESERVED {
- PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr;
- void * Available;
+ PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr;
+ void *Available;
} SXG_RCV_NBL_RESERVED, *PSXG_RCV_NBL_RESERVED;
#define SXG_RCV_NBL_BUFFER_HDR(_NBL) (((PSXG_RCV_NBL_RESERVED)NET_BUFFER_LIST_MINIPORT_RESERVED(_NBL))->RcvDataBufferHdr)
@@ -760,12 +759,11 @@ typedef struct _SXG_RCV_NBL_RESERVED {
#define SXG_MIN_SGL_BUFFERS 2048 // Minimum amount and when to get more
#define SXG_MAX_SGL_BUFFERS 16384 // Maximum to allocate (note ADAPT:ushort)
-
// Self identifying structure type
typedef enum _SXG_SGL_TYPE {
- SXG_SGL_DUMB, // Dumb NIC SGL
- SXG_SGL_SLOW, // Slowpath protocol header - see below
- SXG_SGL_CHIMNEY // Chimney offload SGL
+ SXG_SGL_DUMB, // Dumb NIC SGL
+ SXG_SGL_SLOW, // Slowpath protocol header - see below
+ SXG_SGL_CHIMNEY // Chimney offload SGL
} SXG_SGL_TYPE, PSXG_SGL_TYPE;
// Note - the description below is Microsoft specific
@@ -774,14 +772,14 @@ typedef enum _SXG_SGL_TYPE {
// for the SCATTER_GATHER_LIST portion of the SXG_SCATTER_GATHER data structure.
// The following considerations apply when setting this value:
// - First, the Sahara card is designed to read the Microsoft SGL structure
-// straight out of host memory. This means that the SGL must reside in
-// shared memory. If the length here is smaller than the SGL for the
-// NET_BUFFER, then NDIS will allocate its own buffer. The buffer
-// that NDIS allocates is not in shared memory, so when this happens,
-// the SGL will need to be copied to a set of SXG_SCATTER_GATHER buffers.
-// In other words.. we don't want this value to be too small.
+// straight out of host memory. This means that the SGL must reside in
+// shared memory. If the length here is smaller than the SGL for the
+// NET_BUFFER, then NDIS will allocate its own buffer. The buffer
+// that NDIS allocates is not in shared memory, so when this happens,
+// the SGL will need to be copied to a set of SXG_SCATTER_GATHER buffers.
+// In other words.. we don't want this value to be too small.
// - On the other hand.. we're allocating up to 16k of these things. If
-// we make this too big, we start to consume a ton of memory..
+// we make this too big, we start to consume a ton of memory..
// At the moment, I'm going to limit the number of SG entries to 150.
// If each entry maps roughly 4k, then this should cover roughly 600kB
// NET_BUFFERs. Furthermore, since each entry is 24 bytes, the total
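Rough arithmetic behind that 150-entry limit (the exact totals the original comment goes on to give are not reproduced here):

	/*   sizeof(SXG_X64_SGE)             =  8 + 4 + 4 + 8 = 24 bytes    */
	/*   150 entries x 24 bytes          =  3600 bytes of SGEs per SGL  */
	/*   150 entries x ~4 KB per entry  ~=  600 KB per NET_BUFFER       */
	/*   16384 SGLs x ~3.6 KB           ~=  56 MB if fully allocated    */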
@@ -801,24 +799,23 @@ typedef enum _SXG_SGL_TYPE {
// the SGL. The following structure defines an x64
// formatted SGL entry
typedef struct _SXG_X64_SGE {
- dma64_addr_t Address; // same as wdm.h
- u32 Length; // same as wdm.h
- u32 CompilerPad;// The compiler pads to 8-bytes
- u64 Reserved; // u32 * in wdm.h. Force to 8 bytes
+ dma64_addr_t Address; // same as wdm.h
+ u32 Length; // same as wdm.h
+ u32 CompilerPad; // The compiler pads to 8-bytes
+ u64 Reserved; // u32 * in wdm.h. Force to 8 bytes
} SXG_X64_SGE, *PSXG_X64_SGE;
typedef struct _SCATTER_GATHER_ELEMENT {
- dma64_addr_t Address; // same as wdm.h
- u32 Length; // same as wdm.h
- u32 CompilerPad;// The compiler pads to 8-bytes
- u64 Reserved; // u32 * in wdm.h. Force to 8 bytes
+ dma64_addr_t Address; // same as wdm.h
+ u32 Length; // same as wdm.h
+ u32 CompilerPad; // The compiler pads to 8-bytes
+ u64 Reserved; // u32 * in wdm.h. Force to 8 bytes
} SCATTER_GATHER_ELEMENT, *PSCATTER_GATHER_ELEMENT;
-
typedef struct _SCATTER_GATHER_LIST {
- u32 NumberOfElements;
- u32 * Reserved;
- SCATTER_GATHER_ELEMENT Elements[];
+ u32 NumberOfElements;
+ u32 *Reserved;
+ SCATTER_GATHER_ELEMENT Elements[];
} SCATTER_GATHER_LIST, *PSCATTER_GATHER_LIST;
// The card doesn't care about anything except elements, so
@@ -826,26 +823,26 @@ typedef struct _SCATTER_GATHER_LIST {
// SGL structure. But redefine from wdm.h:SCATTER_GATHER_LIST so
// we can specify SXG_X64_SGE and define a fixed number of elements
typedef struct _SXG_X64_SGL {
- u32 NumberOfElements;
- u32 * Reserved;
- SXG_X64_SGE Elements[SXG_SGL_ENTRIES];
+ u32 NumberOfElements;
+ u32 *Reserved;
+ SXG_X64_SGE Elements[SXG_SGL_ENTRIES];
} SXG_X64_SGL, *PSXG_X64_SGL;
typedef struct _SXG_SCATTER_GATHER {
- SXG_SGL_TYPE Type; // FIRST! Dumb-nic or offload
- void * adapter; // Back pointer to adapter
- LIST_ENTRY FreeList; // Free SXG_SCATTER_GATHER blocks
- LIST_ENTRY AllList; // All SXG_SCATTER_GATHER blocks
- dma_addr_t PhysicalAddress;// physical address
- unsigned char State; // See SXG_BUFFER state above
- unsigned char CmdIndex; // Command ring index
- struct sk_buff * DumbPacket; // Associated Packet
- u32 Direction; // For asynchronous completions
- u32 CurOffset; // Current SGL offset
- u32 SglRef; // SGL reference count
- VLAN_HDR VlanTag; // VLAN tag to be inserted into SGL
- PSCATTER_GATHER_LIST pSgl; // SGL Addr. Possibly &Sgl
- SXG_X64_SGL Sgl; // SGL handed to card
+ SXG_SGL_TYPE Type; // FIRST! Dumb-nic or offload
+ void *adapter; // Back pointer to adapter
+ LIST_ENTRY FreeList; // Free SXG_SCATTER_GATHER blocks
+ LIST_ENTRY AllList; // All SXG_SCATTER_GATHER blocks
+ dma_addr_t PhysicalAddress; // physical address
+ unsigned char State; // See SXG_BUFFER state above
+ unsigned char CmdIndex; // Command ring index
+ struct sk_buff *DumbPacket; // Associated Packet
+ u32 Direction; // For asynchronous completions
+ u32 CurOffset; // Current SGL offset
+ u32 SglRef; // SGL reference count
+ VLAN_HDR VlanTag; // VLAN tag to be inserted into SGL
+ PSCATTER_GATHER_LIST pSgl; // SGL Addr. Possibly &Sgl
+ SXG_X64_SGL Sgl; // SGL handed to card
} SXG_SCATTER_GATHER, *PSXG_SCATTER_GATHER;
#if defined(CONFIG_X86_64)
@@ -856,6 +853,5 @@ typedef struct _SXG_SCATTER_GATHER {
#define SXG_SGL_BUFFER(_SxgSgl) NULL
#define SXG_SGL_BUF_SIZE 0
#else
- Stop Compilation;
+Stop Compilation;
#endif
-
diff --git a/drivers/staging/sxg/sxghw.h b/drivers/staging/sxg/sxghw.h
index 8f4f6effdd9..2222ae91fd9 100644
--- a/drivers/staging/sxg/sxghw.h
+++ b/drivers/staging/sxg/sxghw.h
@@ -13,11 +13,11 @@
/*******************************************************************************
* Configuration space
*******************************************************************************/
-// PCI Vendor ID
-#define SXG_VENDOR_ID 0x139A // Alacritech's Vendor ID
+/* PCI Vendor ID */
+#define SXG_VENDOR_ID 0x139A /* Alacritech's Vendor ID */
// PCI Device ID
-#define SXG_DEVICE_ID 0x0009 // Sahara Device ID
+#define SXG_DEVICE_ID 0x0009 /* Sahara Device ID */
//
// Subsystem IDs.
@@ -141,7 +141,7 @@ typedef struct _SXG_HW_REGS {
#define SXG_REGISTER_SIZE_PER_CPU 0x00002000 // Used to sanity check UCODE_REGS structure
// Sahara receive sequencer status values
-#define SXG_RCV_STATUS_ATTN 0x80000000 // Attention
+#define SXG_RCV_STATUS_ATTN 0x80000000 // Attention
#define SXG_RCV_STATUS_TRANSPORT_MASK 0x3F000000 // Transport mask
#define SXG_RCV_STATUS_TRANSPORT_ERROR 0x20000000 // Transport error
#define SXG_RCV_STATUS_TRANSPORT_CSUM 0x23000000 // Transport cksum error
@@ -156,9 +156,9 @@ typedef struct _SXG_HW_REGS {
#define SXG_RCV_STATUS_TRANSPORT_FTP 0x03000000 // Transport FTP
#define SXG_RCV_STATUS_TRANSPORT_HTTP 0x02000000 // Transport HTTP
#define SXG_RCV_STATUS_TRANSPORT_SMB 0x01000000 // Transport SMB
-#define SXG_RCV_STATUS_NETWORK_MASK 0x00FF0000 // Network mask
+#define SXG_RCV_STATUS_NETWORK_MASK 0x00FF0000 // Network mask
#define SXG_RCV_STATUS_NETWORK_ERROR 0x00800000 // Network error
-#define SXG_RCV_STATUS_NETWORK_CSUM 0x00830000 // Network cksum error
+#define SXG_RCV_STATUS_NETWORK_CSUM 0x00830000 // Network cksum error
#define SXG_RCV_STATUS_NETWORK_UFLOW 0x00820000 // Network underflow error
#define SXG_RCV_STATUS_NETWORK_HDRLEN 0x00800000 // Network header length
#define SXG_RCV_STATUS_NETWORK_OFLOW 0x00400000 // Network overflow detected
@@ -167,67 +167,67 @@ typedef struct _SXG_HW_REGS {
#define SXG_RCV_STATUS_NETWORK_OFFSET 0x00080000 // Network offset detected
#define SXG_RCV_STATUS_NETWORK_FRAGMENT 0x00040000 // Network fragment detected
#define SXG_RCV_STATUS_NETWORK_TRANS_MASK 0x00030000 // Network transport type mask
-#define SXG_RCV_STATUS_NETWORK_UDP 0x00020000 // UDP
-#define SXG_RCV_STATUS_NETWORK_TCP 0x00010000 // TCP
-#define SXG_RCV_STATUS_IPONLY 0x00008000 // IP-only not TCP
-#define SXG_RCV_STATUS_PKT_PRI 0x00006000 // Receive priority
-#define SXG_RCV_STATUS_PKT_PRI_SHFT 13 // Receive priority shift
-#define SXG_RCV_STATUS_PARITY 0x00001000 // MAC Receive RAM parity error
-#define SXG_RCV_STATUS_ADDRESS_MASK 0x00000F00 // Link address detection mask
-#define SXG_RCV_STATUS_ADDRESS_D 0x00000B00 // Link address D
-#define SXG_RCV_STATUS_ADDRESS_C 0x00000A00 // Link address C
-#define SXG_RCV_STATUS_ADDRESS_B 0x00000900 // Link address B
-#define SXG_RCV_STATUS_ADDRESS_A 0x00000800 // Link address A
+#define SXG_RCV_STATUS_NETWORK_UDP 0x00020000 // UDP
+#define SXG_RCV_STATUS_NETWORK_TCP 0x00010000 // TCP
+#define SXG_RCV_STATUS_IPONLY 0x00008000 // IP-only not TCP
+#define SXG_RCV_STATUS_PKT_PRI 0x00006000 // Receive priority
+#define SXG_RCV_STATUS_PKT_PRI_SHFT 13 // Receive priority shift
+#define SXG_RCV_STATUS_PARITY 0x00001000 // MAC Receive RAM parity error
+#define SXG_RCV_STATUS_ADDRESS_MASK 0x00000F00 // Link address detection mask
+#define SXG_RCV_STATUS_ADDRESS_D 0x00000B00 // Link address D
+#define SXG_RCV_STATUS_ADDRESS_C 0x00000A00 // Link address C
+#define SXG_RCV_STATUS_ADDRESS_B 0x00000900 // Link address B
+#define SXG_RCV_STATUS_ADDRESS_A 0x00000800 // Link address A
#define SXG_RCV_STATUS_ADDRESS_BCAST 0x00000300 // Link address broadcast
#define SXG_RCV_STATUS_ADDRESS_MCAST 0x00000200 // Link address multicast
#define SXG_RCV_STATUS_ADDRESS_CMCAST 0x00000100 // Link control multicast
-#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask
-#define SXG_RCV_STATUS_LINK_ERROR 0x00000080 // Link error
-#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask
-#define SXG_RCV_STATUS_LINK_PARITY 0x00000087 // RcvMacQ parity error
-#define SXG_RCV_STATUS_LINK_EARLY 0x00000086 // Data early
+#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask
+#define SXG_RCV_STATUS_LINK_ERROR 0x00000080 // Link error
+#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask
+#define SXG_RCV_STATUS_LINK_PARITY 0x00000087 // RcvMacQ parity error
+#define SXG_RCV_STATUS_LINK_EARLY 0x00000086 // Data early
#define SXG_RCV_STATUS_LINK_BUFOFLOW 0x00000085 // Buffer overflow
-#define SXG_RCV_STATUS_LINK_CODE 0x00000084 // Link code error
-#define SXG_RCV_STATUS_LINK_DRIBBLE 0x00000083 // Dribble nibble
-#define SXG_RCV_STATUS_LINK_CRC 0x00000082 // CRC error
-#define SXG_RCV_STATUS_LINK_OFLOW 0x00000081 // Link overflow
-#define SXG_RCV_STATUS_LINK_UFLOW 0x00000080 // Link underflow
-#define SXG_RCV_STATUS_LINK_8023 0x00000020 // 802.3
-#define SXG_RCV_STATUS_LINK_SNAP 0x00000010 // Snap
-#define SXG_RCV_STATUS_LINK_VLAN 0x00000008 // VLAN
+#define SXG_RCV_STATUS_LINK_CODE 0x00000084 // Link code error
+#define SXG_RCV_STATUS_LINK_DRIBBLE 0x00000083 // Dribble nibble
+#define SXG_RCV_STATUS_LINK_CRC 0x00000082 // CRC error
+#define SXG_RCV_STATUS_LINK_OFLOW 0x00000081 // Link overflow
+#define SXG_RCV_STATUS_LINK_UFLOW 0x00000080 // Link underflow
+#define SXG_RCV_STATUS_LINK_8023 0x00000020 // 802.3
+#define SXG_RCV_STATUS_LINK_SNAP 0x00000010 // Snap
+#define SXG_RCV_STATUS_LINK_VLAN 0x00000008 // VLAN
#define SXG_RCV_STATUS_LINK_TYPE_MASK 0x00000007 // Network type mask
-#define SXG_RCV_STATUS_LINK_CONTROL 0x00000003 // Control packet
-#define SXG_RCV_STATUS_LINK_IPV6 0x00000002 // IPv6 packet
-#define SXG_RCV_STATUS_LINK_IPV4 0x00000001 // IPv4 packet
+#define SXG_RCV_STATUS_LINK_CONTROL 0x00000003 // Control packet
+#define SXG_RCV_STATUS_LINK_IPV6 0x00000002 // IPv6 packet
+#define SXG_RCV_STATUS_LINK_IPV4 0x00000001 // IPv4 packet
/***************************************************************************
* Sahara receive and transmit configuration registers
***************************************************************************/
-#define RCV_CONFIG_RESET 0x80000000 // RcvConfig register reset
-#define RCV_CONFIG_ENABLE 0x40000000 // Enable the receive logic
-#define RCV_CONFIG_ENPARSE 0x20000000 // Enable the receive parser
-#define RCV_CONFIG_SOCKET 0x10000000 // Enable the socket detector
-#define RCV_CONFIG_RCVBAD 0x08000000 // Receive all bad frames
-#define RCV_CONFIG_CONTROL 0x04000000 // Receive all control frames
-#define RCV_CONFIG_RCVPAUSE 0x02000000 // Enable pause transmit when attn
-#define RCV_CONFIG_TZIPV6 0x01000000 // Include TCP port w/ IPv6 toeplitz
-#define RCV_CONFIG_TZIPV4 0x00800000 // Include TCP port w/ IPv4 toeplitz
-#define RCV_CONFIG_FLUSH 0x00400000 // Flush buffers
+#define RCV_CONFIG_RESET 0x80000000 // RcvConfig register reset
+#define RCV_CONFIG_ENABLE 0x40000000 // Enable the receive logic
+#define RCV_CONFIG_ENPARSE 0x20000000 // Enable the receive parser
+#define RCV_CONFIG_SOCKET 0x10000000 // Enable the socket detector
+#define RCV_CONFIG_RCVBAD 0x08000000 // Receive all bad frames
+#define RCV_CONFIG_CONTROL 0x04000000 // Receive all control frames
+#define RCV_CONFIG_RCVPAUSE 0x02000000 // Enable pause transmit when attn
+#define RCV_CONFIG_TZIPV6 0x01000000 // Include TCP port w/ IPv6 toeplitz
+#define RCV_CONFIG_TZIPV4 0x00800000 // Include TCP port w/ IPv4 toeplitz
+#define RCV_CONFIG_FLUSH 0x00400000 // Flush buffers
#define RCV_CONFIG_PRIORITY_MASK 0x00300000 // Priority level
#define RCV_CONFIG_HASH_MASK 0x00030000 // Hash depth
-#define RCV_CONFIG_HASH_8 0x00000000 // Hash depth 8
-#define RCV_CONFIG_HASH_16 0x00010000 // Hash depth 16
-#define RCV_CONFIG_HASH_4 0x00020000 // Hash depth 4
-#define RCV_CONFIG_HASH_2 0x00030000 // Hash depth 2
+#define RCV_CONFIG_HASH_8 0x00000000 // Hash depth 8
+#define RCV_CONFIG_HASH_16 0x00010000 // Hash depth 16
+#define RCV_CONFIG_HASH_4 0x00020000 // Hash depth 4
+#define RCV_CONFIG_HASH_2 0x00030000 // Hash depth 2
#define RCV_CONFIG_BUFLEN_MASK 0x0000FFF0 // Buffer length bits 15:4. ie multiple of 16.
-#define RCV_CONFIG_SKT_DIS 0x00000008 // Disable socket detection on attn
+#define RCV_CONFIG_SKT_DIS 0x00000008 // Disable socket detection on attn
// Macro to determine RCV_CONFIG_BUFLEN based on maximum frame size.
// We add 18 bytes for Sahara receive status and padding, plus 4 bytes for CRC,
// and round up to nearest 16 byte boundary
#define RCV_CONFIG_BUFSIZE(_MaxFrame) ((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK)
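Worked example of the macro above, assuming a standard 1514-byte maximum frame:

	/*   1514 + 18 (status/pad) + 4 (CRC)      = 1536                        */
	/*   (1536 + 15) & RCV_CONFIG_BUFLEN_MASK  = 1536 (already 16-aligned)   */
	/*   so RCV_CONFIG_BUFSIZE(1514)          == 1536 bytes per buffer       */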
-#define XMT_CONFIG_RESET 0x80000000 // XmtConfig register reset
-#define XMT_CONFIG_ENABLE 0x40000000 // Enable transmit logic
+#define XMT_CONFIG_RESET 0x80000000 // XmtConfig register reset
+#define XMT_CONFIG_ENABLE 0x40000000 // Enable transmit logic
#define XMT_CONFIG_MAC_PARITY 0x20000000 // Inhibit MAC RAM parity error
#define XMT_CONFIG_BUF_PARITY 0x10000000 // Inhibit D2F buffer parity error
#define XMT_CONFIG_MEM_PARITY 0x08000000 // Inhibit 1T SRAM parity error
@@ -249,9 +249,9 @@ typedef struct _SXG_HW_REGS {
// A-XGMAC Configuration Register 1
#define AXGMAC_CFG1_XMT_PAUSE 0x80000000 // Allow the sending of Pause frames
-#define AXGMAC_CFG1_XMT_EN 0x40000000 // Enable transmit
+#define AXGMAC_CFG1_XMT_EN 0x40000000 // Enable transmit
#define AXGMAC_CFG1_RCV_PAUSE 0x20000000 // Allow the detection of Pause frames
-#define AXGMAC_CFG1_RCV_EN 0x10000000 // Enable receive
+#define AXGMAC_CFG1_RCV_EN 0x10000000 // Enable receive
#define AXGMAC_CFG1_XMT_STATE 0x04000000 // Current transmit state - READ ONLY
#define AXGMAC_CFG1_RCV_STATE 0x01000000 // Current receive state - READ ONLY
#define AXGMAC_CFG1_XOFF_SHORT 0x00001000 // Only pause for 64 slot on XOFF
@@ -262,24 +262,24 @@ typedef struct _SXG_HW_REGS {
#define AXGMAC_CFG1_RCV_FCS2 0x00000200 // Delay receive FCS 2 4-byte words
#define AXGMAC_CFG1_RCV_FCS3 0x00000300 // Delay receive FCS 3 4-byte words
#define AXGMAC_CFG1_PKT_OVERRIDE 0x00000080 // Per-packet override enable
-#define AXGMAC_CFG1_SWAP 0x00000040 // Byte swap enable
+#define AXGMAC_CFG1_SWAP 0x00000040 // Byte swap enable
#define AXGMAC_CFG1_SHORT_ASSERT 0x00000020 // ASSERT srdrpfrm on short frame (<64)
#define AXGMAC_CFG1_RCV_STRICT 0x00000010 // RCV only 802.3AE when CLEAR
#define AXGMAC_CFG1_CHECK_LEN 0x00000008 // Verify frame length
-#define AXGMAC_CFG1_GEN_FCS 0x00000004 // Generate FCS
+#define AXGMAC_CFG1_GEN_FCS 0x00000004 // Generate FCS
#define AXGMAC_CFG1_PAD_MASK 0x00000003 // Mask for pad bits
-#define AXGMAC_CFG1_PAD_64 0x00000001 // Pad frames to 64 bytes
+#define AXGMAC_CFG1_PAD_64 0x00000001 // Pad frames to 64 bytes
#define AXGMAC_CFG1_PAD_VLAN 0x00000002 // Detect VLAN and pad to 68 bytes
-#define AXGMAC_CFG1_PAD_68 0x00000003 // Pad to 68 bytes
+#define AXGMAC_CFG1_PAD_68 0x00000003 // Pad to 68 bytes
// A-XGMAC Configuration Register 2
#define AXGMAC_CFG2_GEN_PAUSE 0x80000000 // Generate single pause frame (test)
#define AXGMAC_CFG2_LF_MANUAL 0x08000000 // Manual link fault sequence
-#define AXGMAC_CFG2_LF_AUTO 0x04000000 // Auto link fault sequence
+#define AXGMAC_CFG2_LF_AUTO 0x04000000 // Auto link fault sequence
#define AXGMAC_CFG2_LF_REMOTE 0x02000000 // Remote link fault (READ ONLY)
#define AXGMAC_CFG2_LF_LOCAL 0x01000000 // Local link fault (READ ONLY)
#define AXGMAC_CFG2_IPG_MASK 0x001F0000 // Inter packet gap
-#define AXGMAC_CFG2_IPG_SHIFT 16
+#define AXGMAC_CFG2_IPG_SHIFT 16
#define AXGMAC_CFG2_PAUSE_XMT 0x00008000 // Pause transmit module
#define AXGMAC_CFG2_IPG_EXTEN 0x00000020 // Enable IPG extension algorithm
#define AXGMAC_CFG2_IPGEX_MASK 0x0000001F // IPG extension
@@ -299,9 +299,9 @@ typedef struct _SXG_HW_REGS {
#define AXGMAC_SARHIGH_OCTET_SIX 0x00FF0000 // Sixth octet
// A-XGMAC Maximum frame length register
-#define AXGMAC_MAXFRAME_XMT 0x3FFF0000 // Maximum transmit frame length
+#define AXGMAC_MAXFRAME_XMT 0x3FFF0000 // Maximum transmit frame length
#define AXGMAC_MAXFRAME_XMT_SHIFT 16
-#define AXGMAC_MAXFRAME_RCV 0x0000FFFF // Maximum receive frame length
+#define AXGMAC_MAXFRAME_RCV 0x0000FFFF // Maximum receive frame length
// This register doesn't need to be written for standard MTU.
// For jumbo, I'll just statically define the value here. This
// value sets the receive byte count to 9036 (0x234C) and the
@@ -324,34 +324,34 @@ typedef struct _SXG_HW_REGS {
// A-XGMAC AMIIM Field Register
#define AXGMAC_AMIIM_FIELD_ST 0xC0000000 // 2-bit ST field
-#define AXGMAC_AMIIM_FIELD_ST_SHIFT 30
+#define AXGMAC_AMIIM_FIELD_ST_SHIFT 30
#define AXGMAC_AMIIM_FIELD_OP 0x30000000 // 2-bit OP field
-#define AXGMAC_AMIIM_FIELD_OP_SHIFT 28
-#define AXGMAC_AMIIM_FIELD_PORT_ADDR 0x0F800000 // Port address field (hstphyadx in spec)
+#define AXGMAC_AMIIM_FIELD_OP_SHIFT 28
+#define AXGMAC_AMIIM_FIELD_PORT_ADDR 0x0F800000 // Port address field (hstphyadx in spec)
#define AXGMAC_AMIIM_FIELD_PORT_SHIFT 23
#define AXGMAC_AMIIM_FIELD_DEV_ADDR 0x007C0000 // Device address field (hstregadx in spec)
#define AXGMAC_AMIIM_FIELD_DEV_SHIFT 18
#define AXGMAC_AMIIM_FIELD_TA 0x00030000 // 2-bit TA field
-#define AXGMAC_AMIIM_FIELD_TA_SHIFT 16
+#define AXGMAC_AMIIM_FIELD_TA_SHIFT 16
#define AXGMAC_AMIIM_FIELD_DATA 0x0000FFFF // Data field
// Values for the AXGMAC_AMIIM_FIELD_OP field in the A-XGMAC AMIIM Field Register
-#define MIIM_OP_ADDR 0 // MIIM Address set operation
-#define MIIM_OP_WRITE 1 // MIIM Write register operation
-#define MIIM_OP_READ 2 // MIIM Read register operation
+#define MIIM_OP_ADDR 0 // MIIM Address set operation
+#define MIIM_OP_WRITE 1 // MIIM Write register operation
+#define MIIM_OP_READ 2 // MIIM Read register operation
#define MIIM_OP_ADDR_SHIFT (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT)
// Values for the AXGMAC_AMIIM_FIELD_PORT_ADDR field in the A-XGMAC AMIIM Field Register
-#define MIIM_PORT_NUM 1 // All Sahara MIIM modules use port 1
+#define MIIM_PORT_NUM 1 // All Sahara MIIM modules use port 1
// Values for the AXGMAC_AMIIM_FIELD_DEV_ADDR field in the A-XGMAC AMIIM Field Register
-#define MIIM_DEV_PHY_PMA 1 // PHY PMA/PMD module MIIM device number
-#define MIIM_DEV_PHY_PCS 3 // PHY PCS module MIIM device number
-#define MIIM_DEV_PHY_XS 4 // PHY XS module MIIM device number
-#define MIIM_DEV_XGXS 5 // XGXS MIIM device number
+#define MIIM_DEV_PHY_PMA 1 // PHY PMA/PMD module MIIM device number
+#define MIIM_DEV_PHY_PCS 3 // PHY PCS module MIIM device number
+#define MIIM_DEV_PHY_XS 4 // PHY XS module MIIM device number
+#define MIIM_DEV_XGXS 5 // XGXS MIIM device number
// Values for the AXGMAC_AMIIM_FIELD_TA field in the A-XGMAC AMIIM Field Register
-#define MIIM_TA_10GB 2 // set to 2 for 10 GB operation
+#define MIIM_TA_10GB 2 // set to 2 for 10 GB operation
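As an illustration of how these fields compose (this is not the driver's MDIO routine; the address-cycle sequencing and leaving ST at zero are assumptions), a clause-45 address cycle to the PHY XS device could be encoded as:

static u32 example_amiim_addr_cycle(u32 reg_addr)
{
	return (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) |
	       (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
	       (MIIM_DEV_PHY_XS << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
	       (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
	       (reg_addr & AXGMAC_AMIIM_FIELD_DATA);
}

A subsequent cycle using MIIM_OP_READ or MIIM_OP_WRITE with the same port and device fields would then carry the data.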
// A-XGMAC AMIIM Configuration Register
#define AXGMAC_AMIIM_CFG_NOPREAM 0x00000080 // Bypass preamble of mngmt frame
@@ -365,25 +365,25 @@ typedef struct _SXG_HW_REGS {
#define AXGMAC_AMIIM_INDC_BUSY 0x00000001 // Set until cmd operation complete
// Link Status and Control Register
-#define LS_PHY_CLR_RESET 0x80000000 // Clear reset signal to PHY
+#define LS_PHY_CLR_RESET 0x80000000 // Clear reset signal to PHY
#define LS_SERDES_POWER_DOWN 0x40000000 // Power down the Sahara Serdes
-#define LS_XGXS_ENABLE 0x20000000 // Enable the XAUI XGXS logic
-#define LS_XGXS_CTL 0x10000000 // Hold XAUI XGXS logic reset until Serdes is up
-#define LS_SERDES_DOWN 0x08000000 // When 0, XAUI Serdes is up and initialization is complete
-#define LS_TRACE_DOWN 0x04000000 // When 0, Trace Serdes is up and initialization is complete
-#define LS_PHY_CLK_25MHZ 0x02000000 // Set PHY clock to 25 MHz (else 156.125 MHz)
-#define LS_PHY_CLK_EN 0x01000000 // Enable clock to PHY
-#define LS_XAUI_LINK_UP 0x00000010 // XAUI link is up
-#define LS_XAUI_LINK_CHNG 0x00000008 // XAUI link status has changed
-#define LS_LINK_ALARM 0x00000004 // Link alarm pin
-#define LS_ATTN_CTRL_MASK 0x00000003 // Mask link attention control bits
-#define LS_ATTN_ALARM 0x00000000 // 00 => Attn on link alarm
+#define LS_XGXS_ENABLE 0x20000000 // Enable the XAUI XGXS logic
+#define LS_XGXS_CTL 0x10000000 // Hold XAUI XGXS logic reset until Serdes is up
+#define LS_SERDES_DOWN 0x08000000 // When 0, XAUI Serdes is up and initialization is complete
+#define LS_TRACE_DOWN 0x04000000 // When 0, Trace Serdes is up and initialization is complete
+#define LS_PHY_CLK_25MHZ 0x02000000 // Set PHY clock to 25 MHz (else 156.125 MHz)
+#define LS_PHY_CLK_EN 0x01000000 // Enable clock to PHY
+#define LS_XAUI_LINK_UP 0x00000010 // XAUI link is up
+#define LS_XAUI_LINK_CHNG 0x00000008 // XAUI link status has changed
+#define LS_LINK_ALARM 0x00000004 // Link alarm pin
+#define LS_ATTN_CTRL_MASK 0x00000003 // Mask link attention control bits
+#define LS_ATTN_ALARM 0x00000000 // 00 => Attn on link alarm
#define LS_ATTN_ALARM_OR_STAT_CHNG 0x00000001 // 01 => Attn on link alarm or status change
-#define LS_ATTN_STAT_CHNG 0x00000002 // 10 => Attn on link status change
-#define LS_ATTN_NONE 0x00000003 // 11 => no Attn
+#define LS_ATTN_STAT_CHNG 0x00000002 // 10 => Attn on link status change
+#define LS_ATTN_NONE 0x00000003 // 11 => no Attn
// Link Address High Registers
-#define LINK_ADDR_ENABLE 0x80000000 // Enable this link address
+#define LINK_ADDR_ENABLE 0x80000000 // Enable this link address
/***************************************************************************
@@ -396,7 +396,7 @@ typedef struct _SXG_HW_REGS {
#define XGXS_ADDRESS_STATUS1 0x0001 // XS Status 1
#define XGXS_ADDRESS_DEVID_LOW 0x0002 // XS Device ID (low)
#define XGXS_ADDRESS_DEVID_HIGH 0x0003 // XS Device ID (high)
-#define XGXS_ADDRESS_SPEED 0x0004 // XS Speed ability
+#define XGXS_ADDRESS_SPEED 0x0004 // XS Speed ability
#define XGXS_ADDRESS_DEV_LOW 0x0005 // XS Devices in package
#define XGXS_ADDRESS_DEV_HIGH 0x0006 // XS Devices in package
#define XGXS_ADDRESS_STATUS2 0x0008 // XS Status 2
@@ -410,27 +410,27 @@ typedef struct _SXG_HW_REGS {
#define XGXS_ADDRESS_RESET_HI2 0x8003 // Vendor-Specific Reset Hi 2
// XS Control 1 register bit definitions
-#define XGXS_CONTROL1_RESET 0x8000 // Reset - self clearing
+#define XGXS_CONTROL1_RESET 0x8000 // Reset - self clearing
#define XGXS_CONTROL1_LOOPBACK 0x4000 // Enable loopback
#define XGXS_CONTROL1_SPEED1 0x2000 // 0 = unspecified, 1 = 10Gb+
#define XGXS_CONTROL1_LOWPOWER 0x0400 // 1 = Low power mode
#define XGXS_CONTROL1_SPEED2 0x0040 // Same as SPEED1 (?)
-#define XGXS_CONTROL1_SPEED 0x003C // Everything reserved except zero (?)
+#define XGXS_CONTROL1_SPEED 0x003C // Everything reserved except zero (?)
// XS Status 1 register bit definitions
-#define XGXS_STATUS1_FAULT 0x0080 // Fault detected
-#define XGXS_STATUS1_LINK 0x0004 // 1 = Link up
+#define XGXS_STATUS1_FAULT 0x0080 // Fault detected
+#define XGXS_STATUS1_LINK 0x0004 // 1 = Link up
#define XGXS_STATUS1_LOWPOWER 0x0002 // 1 = Low power supported
// XS Speed register bit definitions
-#define XGXS_SPEED_10G 0x0001 // 1 = 10G capable
+#define XGXS_SPEED_10G 0x0001 // 1 = 10G capable
// XS Devices register bit definitions
-#define XGXS_DEVICES_DTE 0x0020 // DTE XS Present
-#define XGXS_DEVICES_PHY 0x0010 // PHY XS Present
-#define XGXS_DEVICES_PCS 0x0008 // PCS Present
-#define XGXS_DEVICES_WIS 0x0004 // WIS Present
-#define XGXS_DEVICES_PMD 0x0002 // PMD/PMA Present
+#define XGXS_DEVICES_DTE 0x0020 // DTE XS Present
+#define XGXS_DEVICES_PHY 0x0010 // PHY XS Present
+#define XGXS_DEVICES_PCS 0x0008 // PCS Present
+#define XGXS_DEVICES_WIS 0x0004 // WIS Present
+#define XGXS_DEVICES_PMD 0x0002 // PMD/PMA Present
#define XGXS_DEVICES_CLAUSE22 0x0001 // Clause 22 registers present
// XS Devices High register bit definitions
@@ -444,18 +444,18 @@ typedef struct _SXG_HW_REGS {
#define XGXS_STATUS2_RCV_FAULT 0x0400 // Receive fault
// XS Package ID High register bit definitions
-#define XGXS_PKGID_HIGH_ORG 0xFC00 // Organizationally Unique
-#define XGXS_PKGID_HIGH_MFG 0x03F0 // Manufacturer Model
-#define XGXS_PKGID_HIGH_REV 0x000F // Revision Number
+#define XGXS_PKGID_HIGH_ORG 0xFC00 // Organizationally Unique
+#define XGXS_PKGID_HIGH_MFG 0x03F0 // Manufacturer Model
+#define XGXS_PKGID_HIGH_REV 0x000F // Revision Number
// XS Lane Status register bit definitions
-#define XGXS_LANE_PHY 0x1000 // PHY/DTE lane alignment status
-#define XGXS_LANE_PATTERN 0x0800 // Pattern testing ability
-#define XGXS_LANE_LOOPBACK 0x0400 // PHY loopback ability
-#define XGXS_LANE_SYNC3 0x0008 // Lane 3 sync
-#define XGXS_LANE_SYNC2 0x0004 // Lane 2 sync
-#define XGXS_LANE_SYNC1 0x0002 // Lane 1 sync
-#define XGXS_LANE_SYNC0 0x0001 // Lane 0 sync
+#define XGXS_LANE_PHY 0x1000 // PHY/DTE lane alignment status
+#define XGXS_LANE_PATTERN 0x0800 // Pattern testing ability
+#define XGXS_LANE_LOOPBACK 0x0400 // PHY loopback ability
+#define XGXS_LANE_SYNC3 0x0008 // Lane 3 sync
+#define XGXS_LANE_SYNC2 0x0004 // Lane 2 sync
+#define XGXS_LANE_SYNC1 0x0002 // Lane 1 sync
+#define XGXS_LANE_SYNC0 0x0001 // Lane 0 sync
// XS Test Control register bit definitions
#define XGXS_TEST_PATTERN_ENABLE 0x0004 // Test pattern enabled
@@ -473,10 +473,10 @@ typedef struct _SXG_HW_REGS {
// LASI (Link Alarm Status Interrupt) Registers (located in MIIM_DEV_PHY_PMA device)
#define LASI_RX_ALARM_CONTROL 0x9000 // LASI RX_ALARM Control
#define LASI_TX_ALARM_CONTROL 0x9001 // LASI TX_ALARM Control
-#define LASI_CONTROL 0x9002 // LASI Control
+#define LASI_CONTROL 0x9002 // LASI Control
#define LASI_RX_ALARM_STATUS 0x9003 // LASI RX_ALARM Status
#define LASI_TX_ALARM_STATUS 0x9004 // LASI TX_ALARM Status
-#define LASI_STATUS 0x9005 // LASI Status
+#define LASI_STATUS 0x9005 // LASI Status
// LASI_CONTROL bit definitions
#define LASI_CTL_RX_ALARM_ENABLE 0x0004 // Enable RX_ALARM interrupts
@@ -489,34 +489,34 @@ typedef struct _SXG_HW_REGS {
#define LASI_STATUS_LS_ALARM 0x0001 // Link Status
// PHY registers - PMA/PMD (device 1)
-#define PHY_PMA_CONTROL1 0x0000 // PMA/PMD Control 1
-#define PHY_PMA_STATUS1 0x0001 // PMA/PMD Status 1
-#define PHY_PMA_RCV_DET 0x000A // PMA/PMD Receive Signal Detect
+#define PHY_PMA_CONTROL1 0x0000 // PMA/PMD Control 1
+#define PHY_PMA_STATUS1 0x0001 // PMA/PMD Status 1
+#define PHY_PMA_RCV_DET 0x000A // PMA/PMD Receive Signal Detect
// other PMA/PMD registers exist and can be defined as needed
// PHY registers - PCS (device 3)
-#define PHY_PCS_CONTROL1 0x0000 // PCS Control 1
-#define PHY_PCS_STATUS1 0x0001 // PCS Status 1
-#define PHY_PCS_10G_STATUS1 0x0020 // PCS 10GBASE-R Status 1
+#define PHY_PCS_CONTROL1 0x0000 // PCS Control 1
+#define PHY_PCS_STATUS1 0x0001 // PCS Status 1
+#define PHY_PCS_10G_STATUS1 0x0020 // PCS 10GBASE-R Status 1
// other PCS registers exist and can be defined as needed
// PHY registers - XS (device 4)
-#define PHY_XS_CONTROL1 0x0000 // XS Control 1
-#define PHY_XS_STATUS1 0x0001 // XS Status 1
-#define PHY_XS_LANE_STATUS 0x0018 // XS Lane Status
+#define PHY_XS_CONTROL1 0x0000 // XS Control 1
+#define PHY_XS_STATUS1 0x0001 // XS Status 1
+#define PHY_XS_LANE_STATUS 0x0018 // XS Lane Status
// other XS registers exist and can be defined as needed
// PHY_PMA_CONTROL1 register bit definitions
-#define PMA_CONTROL1_RESET 0x8000 // PMA/PMD reset
+#define PMA_CONTROL1_RESET 0x8000 // PMA/PMD reset
// PHY_PMA_RCV_DET register bit definitions
-#define PMA_RCV_DETECT 0x0001 // PMA/PMD receive signal detect
+#define PMA_RCV_DETECT 0x0001 // PMA/PMD receive signal detect
// PHY_PCS_10G_STATUS1 register bit definitions
-#define PCS_10B_BLOCK_LOCK 0x0001 // PCS 10GBASE-R locked to receive blocks
+#define PCS_10B_BLOCK_LOCK 0x0001 // PCS 10GBASE-R locked to receive blocks
// PHY_XS_LANE_STATUS register bit definitions
-#define XS_LANE_ALIGN 0x1000 // XS transmit lanes aligned
+#define XS_LANE_ALIGN 0x1000 // XS transmit lanes aligned
// PHY Microcode download data structure
typedef struct _PHY_UCODE {
@@ -558,8 +558,8 @@ typedef struct _XMT_DESC {
// command codes
#define XMT_DESC_CMD_RAW_SEND 0 // raw send descriptor
#define XMT_DESC_CMD_CSUM_INSERT 1 // checksum insert descriptor
-#define XMT_DESC_CMD_FORMAT 2 // format descriptor
-#define XMT_DESC_CMD_PRIME 3 // prime descriptor
+#define XMT_DESC_CMD_FORMAT 2 // format descriptor
+#define XMT_DESC_CMD_PRIME 3 // prime descriptor
#define XMT_DESC_CMD_CODE_SHFT	6	// command code shift (shift to bits [31:30] in word 0)
// shifted command codes
#define XMT_RAW_SEND (XMT_DESC_CMD_RAW_SEND << XMT_DESC_CMD_CODE_SHFT)
@@ -569,22 +569,22 @@ typedef struct _XMT_DESC {
// XMT_DESC Control Byte (XmtCtl) definitions
// NOTE: These bits do not work on Sahara (Rev A)!
-#define XMT_CTL_PAUSE_FRAME 0x80 // current frame is a pause control frame (for statistics)
+#define XMT_CTL_PAUSE_FRAME 0x80 // current frame is a pause control frame (for statistics)
#define XMT_CTL_CONTROL_FRAME 0x40 // current frame is a control frame (for statistics)
#define XMT_CTL_PER_PKT_QUAL 0x20 // per packet qualifier
#define XMT_CTL_PAD_MODE_NONE 0x00 // do not pad frame
-#define XMT_CTL_PAD_MODE_64 0x08 // pad frame to 64 bytes
+#define XMT_CTL_PAD_MODE_64 0x08 // pad frame to 64 bytes
#define XMT_CTL_PAD_MODE_VLAN_68 0x10 // pad frame to 64 bytes, and VLAN frames to 68 bytes
-#define XMT_CTL_PAD_MODE_68 0x18 // pad frame to 68 bytes
-#define XMT_CTL_GEN_FCS 0x04 // generate FCS (CRC) for this frame
-#define XMT_CTL_DELAY_FCS_0 0x00 // do not delay FCS calcution
-#define XMT_CTL_DELAY_FCS_1 0x01 // delay FCS calculation by 1 (4-byte) word
-#define XMT_CTL_DELAY_FCS_2 0x02 // delay FCS calculation by 2 (4-byte) words
-#define XMT_CTL_DELAY_FCS_3 0x03 // delay FCS calculation by 3 (4-byte) words
+#define XMT_CTL_PAD_MODE_68 0x18 // pad frame to 68 bytes
+#define XMT_CTL_GEN_FCS 0x04 // generate FCS (CRC) for this frame
+#define XMT_CTL_DELAY_FCS_0		0x00	// do not delay FCS calculation
+#define XMT_CTL_DELAY_FCS_1 0x01 // delay FCS calculation by 1 (4-byte) word
+#define XMT_CTL_DELAY_FCS_2 0x02 // delay FCS calculation by 2 (4-byte) words
+#define XMT_CTL_DELAY_FCS_3 0x03 // delay FCS calculation by 3 (4-byte) words
// XMT_DESC XmtBufId definition
-#define XMT_BUF_ID_SHFT 8 // The Xmt buffer ID is formed by dividing
- // the buffer (DRAM) address by 256 (or << 8)
+#define XMT_BUF_ID_SHFT 8 // The Xmt buffer ID is formed by dividing
+ // the buffer (DRAM) address by 256 (or << 8)
/*****************************************************************************
* Receiver Sequencer Definitions
@@ -594,8 +594,8 @@ typedef struct _XMT_DESC {
#define RCV_EVTQ_RBFID_MASK 0x0000FFFF // bit mask for the Receive Buffer ID
// Receive Buffer ID definition
-#define RCV_BUF_ID_SHFT 5 // The Rcv buffer ID is formed by dividing
- // the buffer (DRAM) address by 32 (or << 5)
+#define RCV_BUF_ID_SHFT 5 // The Rcv buffer ID is formed by dividing
+ // the buffer (DRAM) address by 32 (or << 5)
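Put differently, a buffer ID is the DRAM address divided by the buffer granularity (shifting the ID back left recovers the address), and the event queue hands the receive ID back in its low 16 bits. Illustrative helpers with hypothetical names:

static inline u32 example_xmt_buf_id(u32 dram_addr)
{
	return dram_addr >> XMT_BUF_ID_SHFT;		/* address / 256 */
}

static inline u32 example_rcv_buf_id(u32 dram_addr)
{
	return dram_addr >> RCV_BUF_ID_SHFT;		/* address / 32 */
}

static inline u32 example_rcv_buf_id_from_event(u32 evtq_entry)
{
	return evtq_entry & RCV_EVTQ_RBFID_MASK;	/* low 16 bits */
}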
// Format of the 18 byte Receive Buffer returned by the
// Receive Sequencer for received packets
@@ -623,48 +623,48 @@ typedef struct _RCV_BUF_HDR {
* Queue definitions
*****************************************************************************/
-// Ingress (read only) queue numbers
-#define PXY_BUF_Q 0 // Proxy Buffer Queue
-#define HST_EVT_Q 1 // Host Event Queue
-#define XMT_BUF_Q 2 // Transmit Buffer Queue
-#define SKT_EVL_Q 3 // RcvSqr Socket Event Low Priority Queue
-#define RCV_EVL_Q 4 // RcvSqr Rcv Event Low Priority Queue
-#define SKT_EVH_Q 5 // RcvSqr Socket Event High Priority Queue
-#define RCV_EVH_Q 6 // RcvSqr Rcv Event High Priority Queue
-#define DMA_RSP_Q 7 // Dma Response Queue - one per CPU context
-// Local (read/write) queue numbers
-#define LOCAL_A_Q 8 // Spare local Queue
-#define LOCAL_B_Q 9 // Spare local Queue
-#define LOCAL_C_Q 10 // Spare local Queue
-#define FSM_EVT_Q 11 // Finite-State-Machine Event Queue
-#define SBF_PAL_Q 12 // System Buffer Physical Address (low) Queue
-#define SBF_PAH_Q 13 // System Buffer Physical Address (high) Queue
-#define SBF_VAL_Q 14 // System Buffer Virtual Address (low) Queue
-#define SBF_VAH_Q 15 // System Buffer Virtual Address (high) Queue
-// Egress (write only) queue numbers
-#define H2G_CMD_Q 16 // Host to GlbRam DMA Command Queue
-#define H2D_CMD_Q 17 // Host to DRAM DMA Command Queue
-#define G2H_CMD_Q 18 // GlbRam to Host DMA Command Queue
-#define G2D_CMD_Q 19 // GlbRam to DRAM DMA Command Queue
-#define D2H_CMD_Q 20 // DRAM to Host DMA Command Queue
-#define D2G_CMD_Q 21 // DRAM to GlbRam DMA Command Queue
-#define D2D_CMD_Q 22 // DRAM to DRAM DMA Command Queue
-#define PXL_CMD_Q 23 // Low Priority Proxy Command Queue
-#define PXH_CMD_Q 24 // High Priority Proxy Command Queue
-#define RSQ_CMD_Q 25 // Receive Sequencer Command Queue
-#define RCV_BUF_Q 26 // Receive Buffer Queue
-
-// Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q)
-#define PXY_COPY_EN 0x00200000 // enable copy of xmt descriptor to xmt command queue
-#define PXY_SIZE_16 0x00000000 // copy 16 bytes
-#define PXY_SIZE_32 0x00100000 // copy 32 bytes
+/* Ingress (read only) queue numbers */
+#define PXY_BUF_Q 0 /* Proxy Buffer Queue */
+#define HST_EVT_Q 1 /* Host Event Queue */
+#define XMT_BUF_Q 2 /* Transmit Buffer Queue */
+#define SKT_EVL_Q 3 /* RcvSqr Socket Event Low Priority Queue */
+#define RCV_EVL_Q 4 /* RcvSqr Rcv Event Low Priority Queue */
+#define SKT_EVH_Q 5 /* RcvSqr Socket Event High Priority Queue */
+#define RCV_EVH_Q 6 /* RcvSqr Rcv Event High Priority Queue */
+#define DMA_RSP_Q 7 /* Dma Response Queue - one per CPU context */
+/* Local (read/write) queue numbers */
+#define LOCAL_A_Q 8 /* Spare local Queue */
+#define LOCAL_B_Q 9 /* Spare local Queue */
+#define LOCAL_C_Q 10 /* Spare local Queue */
+#define FSM_EVT_Q 11 /* Finite-State-Machine Event Queue */
+#define SBF_PAL_Q 12 /* System Buffer Physical Address (low) Queue */
+#define SBF_PAH_Q 13 /* System Buffer Physical Address (high) Queue */
+#define SBF_VAL_Q 14 /* System Buffer Virtual Address (low) Queue */
+#define SBF_VAH_Q 15 /* System Buffer Virtual Address (high) Queue */
+/* Egress (write only) queue numbers */
+#define H2G_CMD_Q 16 /* Host to GlbRam DMA Command Queue */
+#define H2D_CMD_Q 17 /* Host to DRAM DMA Command Queue */
+#define G2H_CMD_Q 18 /* GlbRam to Host DMA Command Queue */
+#define G2D_CMD_Q 19 /* GlbRam to DRAM DMA Command Queue */
+#define D2H_CMD_Q 20 /* DRAM to Host DMA Command Queue */
+#define D2G_CMD_Q 21 /* DRAM to GlbRam DMA Command Queue */
+#define D2D_CMD_Q 22 /* DRAM to DRAM DMA Command Queue */
+#define PXL_CMD_Q 23 /* Low Priority Proxy Command Queue */
+#define PXH_CMD_Q 24 /* High Priority Proxy Command Queue */
+#define RSQ_CMD_Q 25 /* Receive Sequencer Command Queue */
+#define RCV_BUF_Q 26 /* Receive Buffer Queue */
+
+/* Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q) */
+#define PXY_COPY_EN 0x00200000 /* enable copy of xmt descriptor to xmt command queue */
+#define PXY_SIZE_16 0x00000000 /* copy 16 bytes */
+#define PXY_SIZE_32 0x00100000 /* copy 32 bytes */
/*****************************************************************************
* SXG EEPROM/Flash Configuration Definitions
*****************************************************************************/
#pragma pack(push, 1)
-//
+/* */
typedef struct _HW_CFG_DATA {
ushort Addr;
union {
@@ -673,22 +673,22 @@ typedef struct _HW_CFG_DATA {
};
} HW_CFG_DATA, *PHW_CFG_DATA;
-//
+/* */
#define NUM_HW_CFG_ENTRIES ((128/sizeof(HW_CFG_DATA)) - 4)
-// MAC address
+/* MAC address */
typedef struct _SXG_CONFIG_MAC {
- unsigned char MacAddr[6]; // MAC Address
+ unsigned char MacAddr[6]; /* MAC Address */
} SXG_CONFIG_MAC, *PSXG_CONFIG_MAC;
-//
+/* */
typedef struct _ATK_FRU {
unsigned char PartNum[6];
unsigned char Revision[2];
unsigned char Serial[14];
} ATK_FRU, *PATK_FRU;
-// OEM FRU Format types
+/* OEM FRU Format types */
#define ATK_FRU_FORMAT 0x0000
#define CPQ_FRU_FORMAT 0x0001
#define DELL_FRU_FORMAT 0x0002
@@ -697,24 +697,24 @@ typedef struct _ATK_FRU {
#define EMC_FRU_FORMAT 0x0005
#define NO_FRU_FORMAT 0xFFFF
-// EEPROM/Flash Format
+/* EEPROM/Flash Format */
typedef struct _SXG_CONFIG {
- //
- // Section 1 (128 bytes)
- //
- ushort MagicWord; // EEPROM/FLASH Magic code 'A5A5'
- ushort SpiClks; // SPI bus clock dividers
+ /* */
+ /* Section 1 (128 bytes) */
+ /* */
+ ushort MagicWord; /* EEPROM/FLASH Magic code 'A5A5' */
+ ushort SpiClks; /* SPI bus clock dividers */
HW_CFG_DATA HwCfg[NUM_HW_CFG_ENTRIES];
- //
- //
- //
- ushort Version; // EEPROM format version
- SXG_CONFIG_MAC MacAddr[4]; // space for 4 MAC addresses
- ATK_FRU AtkFru; // FRU information
- ushort OemFruFormat; // OEM FRU format type
- unsigned char OemFru[76]; // OEM FRU information (optional)
- ushort Checksum; // Checksum of section 2
- // CS info XXXTODO
+ /* */
+ /* */
+ /* */
+ ushort Version; /* EEPROM format version */
+ SXG_CONFIG_MAC MacAddr[4]; /* space for 4 MAC addresses */
+ ATK_FRU AtkFru; /* FRU information */
+ ushort OemFruFormat; /* OEM FRU format type */
+ unsigned char OemFru[76]; /* OEM FRU information (optional) */
+ ushort Checksum; /* Checksum of section 2 */
+ /* CS info XXXTODO */
} SXG_CONFIG, *PSXG_CONFIG;
#pragma pack(pop)
@@ -723,12 +723,12 @@ typedef struct _SXG_CONFIG {
*****************************************************************************/
// Sahara (ASIC level) defines
-#define SAHARA_GRAM_SIZE 0x020000 // GRAM size - 128 KB
-#define SAHARA_DRAM_SIZE 0x200000 // DRAM size - 2 MB
-#define SAHARA_QRAM_SIZE 0x004000 // QRAM size - 16K entries (64 KB)
-#define SAHARA_WCS_SIZE 0x002000 // WCS - 8K instructions (x 108 bits)
+#define SAHARA_GRAM_SIZE 0x020000 // GRAM size - 128 KB
+#define SAHARA_DRAM_SIZE 0x200000 // DRAM size - 2 MB
+#define SAHARA_QRAM_SIZE 0x004000 // QRAM size - 16K entries (64 KB)
+#define SAHARA_WCS_SIZE 0x002000 // WCS - 8K instructions (x 108 bits)
// Arabia (board level) defines
-#define FLASH_SIZE 0x080000 // 512 KB (4 Mb)
-#define EEPROM_SIZE_XFMR 512 // true EEPROM size (bytes), including xfmr area
-#define EEPROM_SIZE_NO_XFMR 256 // EEPROM size excluding xfmr area
+#define FLASH_SIZE 0x080000 // 512 KB (4 Mb)
+#define EEPROM_SIZE_XFMR 512 // true EEPROM size (bytes), including xfmr area
+#define EEPROM_SIZE_NO_XFMR 256 // EEPROM size excluding xfmr area
diff --git a/drivers/staging/sxg/sxgphycode.h b/drivers/staging/sxg/sxgphycode.h
index 26b36c81eb1..8dbaeda7eca 100644
--- a/drivers/staging/sxg/sxgphycode.h
+++ b/drivers/staging/sxg/sxgphycode.h
@@ -34,7 +34,7 @@ static PHY_UCODE PhyUcode[] = {
*/
/* Addr, Data */
{0xc017, 0xfeb0}, /* flip RX_LOS polarity (mandatory */
- /* patch for SFP+ applications) */
+ /* patch for SFP+ applications) */
{0xC001, 0x0428}, /* flip RX serial polarity */
{0xc013, 0xf341}, /* invert lxmit clock (mandatory patch) */
@@ -43,7 +43,7 @@ static PHY_UCODE PhyUcode[] = {
{0xc210, 0x8000}, /* reset datapath (mandatory patch) */
{0xc210, 0x0000}, /* reset datapath (mandatory patch) */
{0x0000, 0x0032}, /* wait for 50ms for datapath reset to */
- /* complete. (mandatory patch) */
+ /* complete. (mandatory patch) */
/* Configure the LED's */
{0xc214, 0x0099}, /* configure the LED drivers */
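The table entries follow a simple convention suggested by the comments above: an address of 0x0000 is a delay of Data milliseconds (0x0032 is the 50 ms wait), anything else is an MDIO register write. A sketch of a download loop under that assumption (the PHY_UCODE field names and write_phy_reg() are assumed here, not taken from the driver):

static void example_download_phy_ucode(const PHY_UCODE *tbl, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (tbl[i].Addr == 0x0000)
			mdelay(tbl[i].Data);	/* e.g. 0x0032 -> 50 ms */
		else
			write_phy_reg(tbl[i].Addr, tbl[i].Data);	/* hypothetical helper */
	}
}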
@@ -52,15 +52,15 @@ static PHY_UCODE PhyUcode[] = {
/* Transceiver-specific MDIO Patches: */
{0xc010, 0x448a}, /* (bit 14) mask out high BER input from the */
- /* LOS signal in 1.000A */
- /* (mandatory patch for SR code)*/
+ /* LOS signal in 1.000A */
+ /* (mandatory patch for SR code) */
{0xc003, 0x0181}, /* (bit 7) enable the CDR inc setting in */
- /* 1.C005 (mandatory patch for SR code) */
+ /* 1.C005 (mandatory patch for SR code) */
/* Transceiver-specific Microcontroller Initialization: */
{0xc04a, 0x5200}, /* activate microcontroller and pause */
{0x0000, 0x0032}, /* wait 50ms for microcontroller before */
- /* writing in code. */
+ /* writing in code. */
/* code block starts here: */
{0xcc00, 0x2009},