author     Johannes Berg <johannes@sipsolutions.net>       2009-07-24 11:13:05 -0700
committer  John W. Linville <linville@tuxdriver.com>       2009-07-27 15:24:21 -0400
commit     c2acea8e9b86ba5a5469ff477445676a223af4e2
tree       3c4705b13dd5c85817a1132a17743757135b7047 /drivers/net/wireless/iwlwifi/iwl-tx.c
parent     fbf3a2af3834e8e93e9c2876de62c5b49988e352
iwlwifi: fix up command sending
The current command sending in iwlwifi is a bit of a mess:

 1) there is a struct, iwl_cmd, that contains both driver and device
    data in a single packed structure -- this is very confusing
 2) the on-stack data and the command metadata share a structure by
    embedding the latter in the former, which is also rather confusing
    because it leads to weird unions and similarly odd constructs
 3) each txq always has enough space for 256 commands, even if only
    32 end up being used

This patch fixes these things:

 1) rename iwl_cmd to iwl_device_cmd and keep track of command metadata
    and device command separately, in two arrays in each tx queue
 2) remove the 'meta' member from iwl_host_cmd and only put in the
    required members
 3) allocate the cmd/meta arrays separately instead of embedding them
    into the txq structure

Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
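To make the restructuring easier to follow, here is a minimal userspace sketch of the new layout and of the allocation shape introduced in iwl_tx_queue_init() in the diff below. The names used here (cmd_meta, device_cmd, tx_queue, txq_init, the payload size, and the 1024-byte stand-in for IWL_MAX_SCAN_SIZE) are simplified illustrations and not the actual iwlwifi definitions; only the overall shape -- two parallel per-queue arrays, with one extra oversized slot reserved for the command queue -- follows the patch.

#include <stdint.h>
#include <stdlib.h>

struct cmd_meta {                 /* driver-only bookkeeping, kept out of the DMA'd buffer */
    uint32_t flags;
    /* the real iwl_cmd_meta also carries unmap addr/len, source and callback */
};

struct device_cmd {               /* the part that is handed to the device */
    struct {
        uint8_t  cmd;
        uint8_t  flags;
        uint16_t sequence;
    } hdr;
    uint8_t payload[320];         /* illustrative size only */
};

struct tx_queue {
    struct device_cmd **cmd;      /* one command buffer per used slot */
    struct cmd_meta    *meta;     /* parallel metadata array */
    int slots;                    /* n_window in the driver */
};

/* Mirrors the shape of the reworked iwl_tx_queue_init() allocation path. */
int txq_init(struct tx_queue *txq, int slots, int is_cmd_queue)
{
    /* the command queue gets one extra, oversized slot for "huge" commands */
    int actual_slots = slots + (is_cmd_queue ? 1 : 0);
    size_t len = sizeof(struct device_cmd);
    int i;

    txq->meta = calloc(actual_slots, sizeof(*txq->meta));
    txq->cmd  = calloc(actual_slots, sizeof(*txq->cmd));
    if (!txq->meta || !txq->cmd)
        goto out_free_arrays;

    for (i = 0; i < actual_slots; i++) {
        if (i == slots)           /* only ever true for the command queue */
            len += 1024;          /* stand-in for IWL_MAX_SCAN_SIZE */
        txq->cmd[i] = malloc(len);
        if (!txq->cmd[i])
            goto err;
    }
    txq->slots = slots;
    return 0;

err:
    for (i = 0; i < actual_slots; i++)
        free(txq->cmd[i]);        /* unallocated entries are NULL after calloc */
out_free_arrays:
    free(txq->meta);
    free(txq->cmd);
    return -1;
}

Keeping the metadata out of the buffer that is DMA-mapped to the device is what removes the old packed driver/device union in iwl_cmd, and sizing the arrays to the number of slots actually used (plus the single huge slot) is what drops the fixed 256-entry reservation mentioned in point 3.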
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
-rw-r--r--    drivers/net/wireless/iwlwifi/iwl-tx.c    96
1 file changed, 59 insertions, 37 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 0912987af60..51ddbab63c2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -141,7 +141,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- len = sizeof(struct iwl_cmd) * q->n_window;
+ len = sizeof(struct iwl_device_cmd) * q->n_window;
/* De-alloc array of command/tx buffers */
for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
@@ -156,6 +156,12 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
kfree(txq->txb);
txq->txb = NULL;
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
/* 0-fill queue descriptor structure */
memset(txq, 0, sizeof(*txq));
}
@@ -179,7 +185,7 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
if (q->n_bd == 0)
return;
- len = sizeof(struct iwl_cmd) * q->n_window;
+ len = sizeof(struct iwl_device_cmd) * q->n_window;
len += IWL_MAX_SCAN_SIZE;
/* De-alloc array of command/tx buffers */
@@ -318,6 +324,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
{
int i, len;
int ret;
+ int actual_slots = slots_num;
/*
* Alloc buffer array for commands (Tx or other types of commands).
@@ -327,14 +334,22 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
* For normal Tx queues (all other queues), no super-size command
* space is needed.
*/
- len = sizeof(struct iwl_cmd);
- for (i = 0; i <= slots_num; i++) {
- if (i == slots_num) {
- if (txq_id == IWL_CMD_QUEUE_NUM)
- len += IWL_MAX_SCAN_SIZE;
- else
- continue;
- }
+ if (txq_id == IWL_CMD_QUEUE_NUM)
+ actual_slots++;
+
+ txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
+ GFP_KERNEL);
+ txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
+ GFP_KERNEL);
+
+ if (!txq->meta || !txq->cmd)
+ goto out_free_arrays;
+
+ len = sizeof(struct iwl_device_cmd);
+ for (i = 0; i < actual_slots; i++) {
+ /* only happens for cmd queue */
+ if (i == slots_num)
+ len += IWL_MAX_SCAN_SIZE;
txq->cmd[i] = kmalloc(len, GFP_KERNEL);
if (!txq->cmd[i])
@@ -364,15 +379,12 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
return 0;
err:
- for (i = 0; i < slots_num; i++) {
+ for (i = 0; i < actual_slots; i++)
kfree(txq->cmd[i]);
- txq->cmd[i] = NULL;
- }
+out_free_arrays:
+ kfree(txq->meta);
+ kfree(txq->cmd);
- if (txq_id == IWL_CMD_QUEUE_NUM) {
- kfree(txq->cmd[slots_num]);
- txq->cmd[slots_num] = NULL;
- }
return -ENOMEM;
}
EXPORT_SYMBOL(iwl_tx_queue_init);
@@ -673,7 +685,8 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_tx_queue *txq;
struct iwl_queue *q;
- struct iwl_cmd *out_cmd;
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
struct iwl_tx_cmd *tx_cmd;
int swq_id, txq_id;
dma_addr_t phys_addr;
@@ -766,6 +779,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_cmd = txq->cmd[q->write_ptr];
+ out_meta = &txq->meta[q->write_ptr];
tx_cmd = &out_cmd->cmd.tx;
memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
@@ -828,8 +842,8 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txcmd_phys = pci_map_single(priv->pci_dev,
&out_cmd->hdr, len,
PCI_DMA_BIDIRECTIONAL);
- pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
- pci_unmap_len_set(&out_cmd->meta, len, len);
+ pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ pci_unmap_len_set(out_meta, len, len);
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
@@ -923,7 +937,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
struct iwl_queue *q = &txq->q;
- struct iwl_cmd *out_cmd;
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
dma_addr_t phys_addr;
unsigned long flags;
int len, ret;
@@ -937,25 +952,31 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
* the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
* we will need to increase the size of the TFD entries */
BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
- !(cmd->meta.flags & CMD_SIZE_HUGE));
+ !(cmd->flags & CMD_SIZE_HUGE));
if (iwl_is_rfkill(priv)) {
IWL_DEBUG_INFO(priv, "Not sending command - RF KILL\n");
return -EIO;
}
- if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
IWL_ERR(priv, "No space for Tx\n");
return -ENOSPC;
}
spin_lock_irqsave(&priv->hcmd_lock, flags);
- idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
+ idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
out_cmd = txq->cmd[idx];
+ out_meta = &txq->meta[idx];
+
+ out_meta->flags = cmd->flags;
+ if (cmd->flags & CMD_WANT_SKB)
+ out_meta->source = cmd;
+ if (cmd->flags & CMD_ASYNC)
+ out_meta->callback = cmd->callback;
out_cmd->hdr.cmd = cmd->id;
- memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
/* At this point, the out_cmd now has all of the incoming cmd
@@ -964,9 +985,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
out_cmd->hdr.flags = 0;
out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
INDEX_TO_SEQ(q->write_ptr));
- if (out_cmd->meta.flags & CMD_SIZE_HUGE)
+ if (cmd->flags & CMD_SIZE_HUGE)
out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
- len = sizeof(struct iwl_cmd) - sizeof(struct iwl_cmd_meta);
+ len = sizeof(struct iwl_device_cmd);
len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0;
@@ -998,8 +1019,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
fix_size, PCI_DMA_BIDIRECTIONAL);
- pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
- pci_unmap_len_set(&out_cmd->meta, len, fix_size);
+ pci_unmap_addr_set(out_meta, mapping, phys_addr);
+ pci_unmap_len_set(out_meta, len, fix_size);
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
phys_addr, fix_size, 1,
@@ -1068,8 +1089,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
}
pci_unmap_single(priv->pci_dev,
- pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
- pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
+ pci_unmap_addr(&txq->meta[cmd_idx], mapping),
+ pci_unmap_len(&txq->meta[cmd_idx], len),
PCI_DMA_BIDIRECTIONAL);
for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
@@ -1100,7 +1121,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
int index = SEQ_TO_INDEX(sequence);
int cmd_index;
bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
- struct iwl_cmd *cmd;
+ struct iwl_device_cmd *cmd;
+ struct iwl_cmd_meta *meta;
/* If a Tx command is being handled and it isn't in the actual
* command queue then there a command routing bug has been introduced
@@ -1116,18 +1138,18 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
+ meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
/* Input error checking is done when commands are added to queue. */
- if (cmd->meta.flags & CMD_WANT_SKB) {
- cmd->meta.source->u.skb = rxb->skb;
+ if (meta->flags & CMD_WANT_SKB) {
+ meta->source->reply_skb = rxb->skb;
rxb->skb = NULL;
- } else if (cmd->meta.u.callback &&
- !cmd->meta.u.callback(priv, cmd, rxb->skb))
+ } else if (meta->callback && !meta->callback(priv, cmd, rxb->skb))
rxb->skb = NULL;
iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
- if (!(cmd->meta.flags & CMD_ASYNC)) {
+ if (!(meta->flags & CMD_ASYNC)) {
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
wake_up_interruptible(&priv->wait_command_queue);
}