/*
 * HT handling
 *
 * Copyright 2003, Jouni Malinen
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc
 * Copyright 2007, Michael Wu
 * Copyright 2007-2008, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ieee80211.h>
#include <net/wireless.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "sta_info.h"
#include "wme.h"

void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
				       struct ieee80211_ht_cap *ht_cap_ie,
				       struct ieee80211_sta_ht_cap *ht_cap)
{
	u8 ampdu_info, tx_mcs_set_cap;
	int i, max_tx_streams;

	BUG_ON(!ht_cap);

	memset(ht_cap, 0, sizeof(*ht_cap));

	if (!ht_cap_ie)
		return;

	ht_cap->ht_supported = true;

	ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) & sband->ht_cap.cap;
	ht_cap->cap &= ~IEEE80211_HT_CAP_SM_PS;
	ht_cap->cap |= sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS;

	ampdu_info = ht_cap_ie->ampdu_params_info;
	ht_cap->ampdu_factor =
		ampdu_info & IEEE80211_HT_AMPDU_PARM_FACTOR;
	ht_cap->ampdu_density =
		(ampdu_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2;

	/* own MCS TX capabilities */
	tx_mcs_set_cap = sband->ht_cap.mcs.tx_params;

	/* can we TX with MCS rates? */
	if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED))
		return;

	/* Counting from 0, therefore +1 */
	if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_RX_DIFF)
		max_tx_streams =
			((tx_mcs_set_cap & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
				>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;
	else
		max_tx_streams = IEEE80211_HT_MCS_TX_MAX_STREAMS;

	/*
	 * 802.11n D5.0 20.3.5 / 20.6 says:
	 * - indices 0 to 7 and 32 are single spatial stream
	 * - 8 to 31 are multiple spatial streams using equal modulation
	 *   [8..15 for two streams, 16..23 for three and 24..31 for four]
	 * - remainder are multiple spatial streams using unequal modulation
	 */
	for (i = 0; i < max_tx_streams; i++)
		ht_cap->mcs.rx_mask[i] =
			sband->ht_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i];

	if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION)
		for (i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE;
		     i < IEEE80211_HT_MCS_MASK_LEN; i++)
			ht_cap->mcs.rx_mask[i] =
				sband->ht_cap.mcs.rx_mask[i] &
					ht_cap_ie->mcs.rx_mask[i];

	/* handle MCS rate 32 too */
	if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1)
		ht_cap->mcs.rx_mask[32/8] |= 1;
}
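
/*
 * Illustration (not part of the original code, a sketch of the mask logic
 * above): for a local device whose TX MCS set advertises two spatial
 * streams (max_tx_streams == 2), only rx_mask[0] and rx_mask[1] are
 * intersected, so the resulting ht_cap offers at most MCS 0-15 towards
 * this peer even if the peer's IE also sets bits for MCS 16-31; the
 * MCS 32 bit in rx_mask[32/8] is handled separately at the end.
 */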

/*
 * ieee80211_enable_ht should be called only after the operating band
 * has been determined as ht configuration depends on the hw's
 * HT abilities for a specific band.
 */
u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
			struct ieee80211_ht_info *hti,
			u16 ap_ht_cap_flags)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_supported_band *sband;
	struct ieee80211_bss_ht_conf ht;
	u32 changed = 0;
	bool enable_ht = true, ht_changed;
	enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;

	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];

	memset(&ht, 0, sizeof(ht));

	/* HT is not supported */
	if (!sband->ht_cap.ht_supported)
		enable_ht = false;

	/* check that channel matches the right operating channel */
	if (local->hw.conf.channel->center_freq !=
	    ieee80211_channel_to_frequency(hti->control_chan))
		enable_ht = false;

	if (enable_ht) {
		channel_type = NL80211_CHAN_HT20;

		if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
		    (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
		    (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
			switch (hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				channel_type = NL80211_CHAN_HT40PLUS;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				channel_type = NL80211_CHAN_HT40MINUS;
				break;
			}
		}
	}

	ht_changed = local->hw.conf.ht.enabled != enable_ht ||
		     channel_type != local->hw.conf.ht.channel_type;

	local->oper_channel_type = channel_type;
	local->hw.conf.ht.enabled = enable_ht;

	if (ht_changed)
		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_HT);

	/* disable HT */
	if (!enable_ht)
		return 0;

	ht.operation_mode = le16_to_cpu(hti->operation_mode);

	/* if bss configuration changed store the new one */
	if (memcmp(&sdata->vif.bss_conf.ht, &ht, sizeof(ht))) {
		changed |= BSS_CHANGED_HT;
		sdata->vif.bss_conf.ht = ht;
	}

	return changed;
}

static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
					 const u8 *da, u16 tid,
					 u8 dialog_token, u16 start_seq_num,
					 u16 agg_size, u16 timeout)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u16 capab;

	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);

	if (!skb) {
		printk(KERN_ERR "%s: failed to allocate buffer "
				"for addba request frame\n", sdata->dev->name);
		return;
	}
	skb_reserve(skb, local->hw.extra_tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
	memset(mgmt, 0, 24);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
	if (sdata->vif.type == NL80211_IFTYPE_AP)
		memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
	else
		memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);

	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));

	mgmt->u.action.category = WLAN_CATEGORY_BACK;
	mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;

	mgmt->u.action.u.addba_req.dialog_token = dialog_token;
	capab = (u16)(1 << 1);		/* bit 1 aggregation policy */
	capab |= (u16)(tid << 2);	/* bit 5:2 TID number */
	capab |= (u16)(agg_size << 6);	/* bit 15:6 max size of aggregation */

	mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);

	mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);

	mgmt->u.action.u.addba_req.start_seq_num =
					cpu_to_le16(start_seq_num << 4);

	ieee80211_tx_skb(sdata, skb, 0);
}
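
/*
 * Worked example (illustrative only, not part of the original file): the
 * capability field built above for tid 2 and agg_size 0x40 packs as
 *
 *	capab = (1 << 1) | (2 << 2) | (0x40 << 6)
 *	      = 0x0002   | 0x0008   | 0x1000     = 0x100a
 *
 * i.e. bit 1 is the immediate Block Ack policy, bits 5:2 the TID and
 * bits 15:6 the buffer size, which is exactly the layout that
 * ieee80211_process_addba_request() parses back using the
 * IEEE80211_ADDBA_PARAM_* masks.
 */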

static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
				      u8 dialog_token, u16 status, u16 policy,
				      u16 buf_size, u16 timeout)
{
	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u16 capab;

	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);

	if (!skb) {
		printk(KERN_DEBUG "%s: failed to allocate buffer "
		       "for addba resp frame\n", sdata->dev->name);
		return;
	}

	skb_reserve(skb, local->hw.extra_tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
	memset(mgmt, 0, 24);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
	if (sdata->vif.type == NL80211_IFTYPE_AP)
		memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
	else
		memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp));
	mgmt->u.action.category = WLAN_CATEGORY_BACK;
	mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP;
	mgmt->u.action.u.addba_resp.dialog_token = dialog_token;

	capab = (u16)(policy << 1);	/* bit 1 aggregation policy */
	capab |= (u16)(tid << 2);	/* bit 5:2 TID number */
	capab |= (u16)(buf_size << 6);	/* bit 15:6 max size of aggregation */

	mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab);
	mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout);
	mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);

	ieee80211_tx_skb(sdata, skb, 0);
}

static void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
				 const u8 *da, u16 tid,
				 u16 initiator, u16 reason_code)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u16 params;

	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);

	if (!skb) {
		printk(KERN_ERR "%s: failed to allocate buffer "
					"for delba frame\n", sdata->dev->name);
		return;
	}

	skb_reserve(skb, local->hw.extra_tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
	memset(mgmt, 0, 24);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
	if (sdata->vif.type == NL80211_IFTYPE_AP)
		memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
	else
		memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba));

	mgmt->u.action.category = WLAN_CATEGORY_BACK;
	mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA;
	params = (u16)(initiator << 11);	/* bit 11 initiator */
	params |= (u16)(tid << 12);		/* bit 15:12 TID number */

	mgmt->u.action.u.delba.params = cpu_to_le16(params);
	mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);

	ieee80211_tx_skb(sdata, skb, 0);
}

void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_bar *bar;
	u16 bar_control = 0;

	skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
	if (!skb) {
		printk(KERN_ERR "%s: failed to allocate buffer for "
			"bar frame\n", sdata->dev->name);
		return;
	}
	skb_reserve(skb, local->hw.extra_tx_headroom);
	bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
	memset(bar, 0, sizeof(*bar));
	bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
					 IEEE80211_STYPE_BACK_REQ);
	memcpy(bar->ra, ra, ETH_ALEN);
	memcpy(bar->ta, sdata->dev->dev_addr, ETH_ALEN);
	bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
	bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
	bar_control |= (u16)(tid << 12);
	bar->control = cpu_to_le16(bar_control);
	bar->start_seq_num = cpu_to_le16(ssn);

	ieee80211_tx_skb(sdata, skb, 0);
}
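
/*
 * Note (illustrative only): the DELBA parameter field and the BAR control
 * field both carry the TID in the top nibble, e.g. for tid 5 the code
 * above produces
 *
 *	delba params = (initiator << 11) | (5 << 12)
 *	bar control  = ACK_POLICY_NORMAL | CBMTID_COMPRESSED_BA | (5 << 12)
 *
 * and the BAR frame is filled with the ssn argument as-is, whereas the
 * ADDBA request shifts start_seq_num left by 4 to place it in the
 * Starting Sequence Number subfield of the sequence control word.
 */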

void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
					u16 initiator, u16 reason)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_hw *hw = &local->hw;
	struct sta_info *sta;
	int ret, i;

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
		rcu_read_unlock();
		return;
	}

	/* check if TID is in operational state */
	spin_lock_bh(&sta->lock);
	if (sta->ampdu_mlme.tid_state_rx[tid]
				!= HT_AGG_STATE_OPERATIONAL) {
		spin_unlock_bh(&sta->lock);
		rcu_read_unlock();
		return;
	}
	sta->ampdu_mlme.tid_state_rx[tid] =
		HT_AGG_STATE_REQ_STOP_BA_MSK |
		(initiator << HT_AGG_STATE_INITIATOR_SHIFT);
	spin_unlock_bh(&sta->lock);

	/* stop HW Rx aggregation. ampdu_action existence
	 * already verified in session init so we add the BUG_ON */
	BUG_ON(!local->ops->ampdu_action);

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
	       ra, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP,
				       &sta->sta, tid, NULL);
	if (ret)
		printk(KERN_DEBUG "HW problem - can not stop rx "
				"aggregation for tid %d\n", tid);

	/* shutdown timer has not expired */
	if (initiator != WLAN_BACK_TIMER)
		del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer);

	/* check if this is a self generated aggregation halt */
	if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER)
		ieee80211_send_delba(sdata, ra, tid, 0, reason);

	/* free the reordering buffer */
	for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) {
		if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) {
			/* release the reordered frames */
			dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]);
			sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--;
			sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL;
		}
	}
	/* free resources */
	kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf);
	kfree(sta->ampdu_mlme.tid_rx[tid]);
	sta->ampdu_mlme.tid_rx[tid] = NULL;
	sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE;

	rcu_read_unlock();
}

/*
 * After sending an add Block Ack request we activate a timer that runs
 * until the add Block Ack response arrives from the recipient.
 * If this timer expires sta_addba_resp_timer_expired will be executed.
 */
static void sta_addba_resp_timer_expired(unsigned long data)
{
	/* not an elegant detour, but there is no choice as the timer passes
	 * only one argument, and both sta_info and TID are needed, so init
	 * flow in sta_info_create gives the TID as data, while the timer_to_id
	 * array gives the sta through container_of */
	u16 tid = *(u8 *)data;
	struct sta_info *temp_sta = container_of((void *)data,
		struct sta_info, timer_to_tid[tid]);
	struct ieee80211_local *local = temp_sta->local;
	struct ieee80211_hw *hw = &local->hw;
	struct sta_info *sta;
	u8 *state;

	rcu_read_lock();

	sta = sta_info_get(local, temp_sta->sta.addr);
	if (!sta) {
		rcu_read_unlock();
		return;
	}

	state = &sta->ampdu_mlme.tid_state_tx[tid];
	/* check if the TID waits for addBA response */
	spin_lock_bh(&sta->lock);
	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
		spin_unlock_bh(&sta->lock);
		*state = HT_AGG_STATE_IDLE;
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "timer expired on tid %d but we are not "
				"expecting addBA response there", tid);
#endif
		goto timer_expired_exit;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
#endif

	/* go through the state check in stop_BA_session */
	*state = HT_AGG_STATE_OPERATIONAL;
	spin_unlock_bh(&sta->lock);
	ieee80211_stop_tx_ba_session(hw, temp_sta->sta.addr, tid,
				     WLAN_BACK_INITIATOR);

timer_expired_exit:
	rcu_read_unlock();
}

void ieee80211_sta_tear_down_BA_sessions(struct ieee80211_sub_if_data *sdata, u8 *addr)
{
	struct ieee80211_local *local = sdata->local;
	int i;

	for (i = 0; i < STA_TID_NUM; i++) {
		ieee80211_stop_tx_ba_session(&local->hw, addr, i,
					     WLAN_BACK_INITIATOR);
		ieee80211_sta_stop_rx_ba_session(sdata, addr, i,
						 WLAN_BACK_RECIPIENT,
						 WLAN_REASON_QSTA_LEAVE_QBSS);
	}
}
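
/*
 * Note on the timer "detour" used by sta_addba_resp_timer_expired() above
 * and sta_rx_agg_session_timer_expired() below (an illustration, assuming
 * the timer_to_tid[] array in struct sta_info is initialised so that slot i
 * holds the value i): a timer whose ->data points at timer_to_tid[tid] lets
 * the callback recover both the TID and the station:
 *
 *	u8 *ptid = (u8 *)data;		// points at sta->timer_to_tid[tid]
 *	u16 tid  = *ptid;		// the slot stores its own index
 *	struct sta_info *sta =
 *		container_of(ptid - tid, struct sta_info, timer_to_tid[0]);
 */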

int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	struct ieee80211_sub_if_data *sdata;
	u16 start_seq_num;
	u8 *state;
	int ret = 0;

	if ((tid >= STA_TID_NUM) || !(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
		return -EINVAL;

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
	       ra, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	rcu_read_lock();

	sta = sta_info_get(local, ra);
	if (!sta) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Could not find the station\n");
#endif
		ret = -ENOENT;
		goto exit;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	state = &sta->ampdu_mlme.tid_state_tx[tid];
	/* check if the TID is not in aggregation flow already */
	if (*state != HT_AGG_STATE_IDLE) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - session is not "
				 "idle on tid %u\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	sta->ampdu_mlme.tid_tx[tid] =
			kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!sta->ampdu_mlme.tid_tx[tid]) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
					tid);
#endif
		ret = -ENOMEM;
		goto err_unlock_sta;
	}
	/* Tx timer */
	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
			sta_addba_resp_timer_expired;
	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
			(unsigned long)&sta->timer_to_tid[tid];
	init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);

	if (hw->ampdu_queues) {
		/* create a new queue for this aggregation */
		ret = ieee80211_ht_agg_queue_add(local, sta, tid);

		/* in case no queue is available for aggregation
		 * don't switch to aggregation */
		if (ret) {
#ifdef CONFIG_MAC80211_HT_DEBUG
			printk(KERN_DEBUG "BA request denied - "
			       "queue unavailable for tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
			goto err_unlock_queue;
		}
	}
	sdata = sta->sdata;

	/* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
	 * callback right away, it must see that the flow has begun */
	*state |= HT_ADDBA_REQUESTED_MSK;

	/* This is slightly racy because the queue isn't stopped */
	start_seq_num = sta->tid_seq[tid];

	if (local->ops->ampdu_action)
		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
					       &sta->sta, tid, &start_seq_num);

	if (ret) {
		/* No need to requeue the packets in the agg queue, since we
		 * held the tx lock: no packet could be enqueued to the newly
		 * allocated queue */
		if (hw->ampdu_queues)
			ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - HW unavailable for"
					" tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		*state = HT_AGG_STATE_IDLE;
		goto err_unlock_queue;
	}

	/* Will put all the packets in the new SW queue */
	if (hw->ampdu_queues)
		ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
	spin_unlock_bh(&sta->lock);

	/* send an addBA request */
	sta->ampdu_mlme.dialog_token_allocator++;
	sta->ampdu_mlme.tid_tx[tid]->dialog_token =
			sta->ampdu_mlme.dialog_token_allocator;
	sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;

	ieee80211_send_addba_request(sta->sdata, ra, tid,
			 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
			 sta->ampdu_mlme.tid_tx[tid]->ssn,
			 0x40, 5000);
	/* activate the timer for the recipient's addBA response */
	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
				jiffies + ADDBA_RESP_INTERVAL;
	add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
#endif
	goto exit;

err_unlock_queue:
	kfree(sta->ampdu_mlme.tid_tx[tid]);
	sta->ampdu_mlme.tid_tx[tid] = NULL;
	ret = -EBUSY;
err_unlock_sta:
	spin_unlock_bh(&sta->lock);
exit:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);

int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
				 u8 *ra, u16 tid,
				 enum ieee80211_back_parties initiator)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;
	int ret = 0;

	if (tid >= STA_TID_NUM)
		return -EINVAL;

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* check if the TID is in aggregation */
	state = &sta->ampdu_mlme.tid_state_tx[tid];
	spin_lock_bh(&sta->lock);

	if (*state != HT_AGG_STATE_OPERATIONAL) {
		ret = -ENOENT;
		goto stop_BA_exit;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
	       ra, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	if (hw->ampdu_queues)
		ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);

	*state = HT_AGG_STATE_REQ_STOP_BA_MSK |
		(initiator << HT_AGG_STATE_INITIATOR_SHIFT);

	if (local->ops->ampdu_action)
		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
					       &sta->sta, tid, NULL);

	/* case HW denied going back to legacy */
	if (ret) {
		WARN_ON(ret != -EBUSY);
		*state = HT_AGG_STATE_OPERATIONAL;
		if (hw->ampdu_queues)
			ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
		goto stop_BA_exit;
	}

stop_BA_exit:
	spin_unlock_bh(&sta->lock);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
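
/*
 * Reminder on the TX state bits used below (illustrative, assuming the
 * usual mac80211 definition where HT_AGG_STATE_OPERATIONAL is the OR of
 * HT_ADDBA_REQUESTED_MSK, HT_ADDBA_DRV_READY_MSK and HT_ADDBA_RECEIVED_MSK):
 * "*state == HT_AGG_STATE_OPERATIONAL" only becomes true once the ADDBA
 * request was sent, the driver reported readiness via
 * ieee80211_start_tx_ba_cb(), and the peer's ADDBA response was accepted
 * in ieee80211_process_addba_resp(), in whichever order the last two
 * events happen.
 */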

void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;

	if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
				tid, STA_TID_NUM);
#endif
		return;
	}

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
		rcu_read_unlock();
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Could not find station: %pM\n", ra);
#endif
		return;
	}

	state = &sta->ampdu_mlme.tid_state_tx[tid];
	spin_lock_bh(&sta->lock);

	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
				*state);
#endif
		spin_unlock_bh(&sta->lock);
		rcu_read_unlock();
		return;
	}

	WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);

	*state |= HT_ADDBA_DRV_READY_MSK;

	if (*state == HT_AGG_STATE_OPERATIONAL) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
#endif
		if (hw->ampdu_queues)
			ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
	}
	spin_unlock_bh(&sta->lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);

void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;
	int agg_queue;

	if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
				tid, STA_TID_NUM);
#endif
		return;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Stopping Tx BA session for %pM tid %d\n",
	       ra, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Could not find station: %pM\n", ra);
#endif
		rcu_read_unlock();
		return;
	}
	state = &sta->ampdu_mlme.tid_state_tx[tid];

	/* NOTE: no need to use sta->lock in this state check, as
	 * ieee80211_stop_tx_ba_session will let only one stop call to
	 * pass through per sta/tid
	 */
	if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
#endif
		rcu_read_unlock();
		return;
	}

	if (*state & HT_AGG_STATE_INITIATOR_MSK)
		ieee80211_send_delba(sta->sdata, ra, tid,
			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);

	if (hw->ampdu_queues) {
		agg_queue = sta->tid_to_tx_q[tid];
		ieee80211_ht_agg_queue_remove(local, sta, tid, 1);

		/* We just requeued all the frames that were in the
		 * removed queue, and since we might miss a softirq we do
		 * netif_schedule_queue.  ieee80211_wake_queue is not used
		 * here as this queue is not necessarily stopped
		 */
		netif_schedule_queue(netdev_get_tx_queue(local->mdev,
							 agg_queue));
	}
	spin_lock_bh(&sta->lock);
	*state = HT_AGG_STATE_IDLE;
	sta->ampdu_mlme.addba_req_num[tid] = 0;
	kfree(sta->ampdu_mlme.tid_tx[tid]);
	sta->ampdu_mlme.tid_tx[tid] = NULL;
	spin_unlock_bh(&sta->lock);

	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);

void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
				      const u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_WARNING "Not enough memory, "
			       "dropping start BA session");
#endif
		return;
	}
	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_ADDBA_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);

void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
				     const u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_WARNING "Not enough memory, "
			       "dropping stop BA session");
#endif
		return;
	}
	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_DELBA_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
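
/*
 * Note (illustrative): the two _irqsafe variants above do not touch the
 * aggregation state themselves.  They stash the RA/TID pair in skb->cb,
 * tag the zero-length skb with IEEE80211_ADDBA_MSG or IEEE80211_DELBA_MSG
 * and queue it to local->skb_queue; the mac80211 tasklet later dequeues
 * it and calls ieee80211_start_tx_ba_cb() or ieee80211_stop_tx_ba_cb() in
 * softirq context, which is why drivers may call the _irqsafe entry
 * points from their interrupt handlers.
 */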

/*
 * After accepting the AddBA Request we activated a timer,
 * resetting it after each frame that arrives from the originator.
 * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed.
 */
static void sta_rx_agg_session_timer_expired(unsigned long data)
{
	/* not an elegant detour, but there is no choice as the timer passes
	 * only one argument, and various sta_info are needed here, so init
	 * flow in sta_info_create gives the TID as data, while the timer_to_id
	 * array gives the sta through container_of */
	u8 *ptid = (u8 *)data;
	u8 *timer_to_id = ptid - *ptid;
	struct sta_info *sta = container_of(timer_to_id, struct sta_info,
					 timer_to_tid[0]);

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
#endif
	ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
					 (u16)*ptid, WLAN_BACK_TIMER,
					 WLAN_REASON_QSTA_TIMEOUT);
}

void ieee80211_process_addba_request(struct ieee80211_local *local,
				     struct sta_info *sta,
				     struct ieee80211_mgmt *mgmt,
				     size_t len)
{
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_conf *conf = &hw->conf;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
	u8 dialog_token;
	int ret = -EOPNOTSUPP;

	/* extract session parameters from addba request frame */
	dialog_token = mgmt->u.action.u.addba_req.dialog_token;
	timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
	start_seq_num =
		le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4;

	capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
	ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;

	status = WLAN_STATUS_REQUEST_DECLINED;

	/* sanity check for incoming parameters:
	 * check if configuration can support the BA policy
	 * and if buffer size does not exceed max value */
	/* XXX: check own ht delayed BA capability?? */
	if (((ba_policy != 1)
		&& (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA)))
		|| (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
		status = WLAN_STATUS_INVALID_QOS_PARAM;
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_DEBUG "AddBA Req with bad params from "
				"%pM on tid %u. policy %d, buffer size %d\n",
				mgmt->sa, tid, ba_policy,
				buf_size);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		goto end_no_lock;
	}
	/* determine default buffer size */
	if (buf_size == 0) {
		struct ieee80211_supported_band *sband;

		sband = local->hw.wiphy->bands[conf->channel->band];
		buf_size = IEEE80211_MIN_AMPDU_BUF;
		buf_size = buf_size << sband->ht_cap.ampdu_factor;
	}


	/* examine state machine */
	spin_lock_bh(&sta->lock);

	if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_DEBUG "unexpected AddBA Req from "
				"%pM on tid %u\n",
				mgmt->sa, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		goto end;
	}

	/* prepare A-MPDU MLME for Rx aggregation */
	sta->ampdu_mlme.tid_rx[tid] =
			kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
	if (!sta->ampdu_mlme.tid_rx[tid]) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
					tid);
#endif
		goto end;
	}
	/* rx timer */
	sta->ampdu_mlme.tid_rx[tid]->session_timer.function =
				sta_rx_agg_session_timer_expired;
	sta->ampdu_mlme.tid_rx[tid]->session_timer.data =
				(unsigned long)&sta->timer_to_tid[tid];
	init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer);

	tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];

	/* prepare reordering buffer */
	tid_agg_rx->reorder_buf =
		kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC);
	if (!tid_agg_rx->reorder_buf) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_ERR "can not allocate reordering buffer "
			       "to tid %d\n", tid);
#endif
		kfree(sta->ampdu_mlme.tid_rx[tid]);
		goto end;
	}
	memset(tid_agg_rx->reorder_buf, 0,
		buf_size * sizeof(struct sk_buff *));

	if (local->ops->ampdu_action)
		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START,
					       &sta->sta, tid, &start_seq_num);
#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	if (ret) {
		kfree(tid_agg_rx->reorder_buf);
		kfree(tid_agg_rx);
		sta->ampdu_mlme.tid_rx[tid] = NULL;
		goto end;
	}

	/* change state and send addba resp */
	sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL;
	tid_agg_rx->dialog_token = dialog_token;
	tid_agg_rx->ssn = start_seq_num;
	tid_agg_rx->head_seq_num = start_seq_num;
	tid_agg_rx->buf_size = buf_size;
	tid_agg_rx->timeout = timeout;
	tid_agg_rx->stored_mpdu_num = 0;
	status = WLAN_STATUS_SUCCESS;
end:
	spin_unlock_bh(&sta->lock);

end_no_lock:
	ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
				  dialog_token, status, 1, buf_size, timeout);
}
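
/*
 * Worked example (illustrative only): a peer that leaves the buffer size
 * field of its ADDBA request at 0 gets a default computed above from our
 * own A-MPDU length exponent, e.g. with ampdu_factor 3 the recipient
 * offers
 *
 *	buf_size = IEEE80211_MIN_AMPDU_BUF << 3 = 8 << 3 = 64
 *
 * reorder slots, which also matches IEEE80211_MAX_AMPDU_BUF, the upper
 * bound enforced by the sanity check at the top of the function.
 */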

void ieee80211_process_addba_resp(struct ieee80211_local *local,
				  struct sta_info *sta,
				  struct ieee80211_mgmt *mgmt,
				  size_t len)
{
	struct ieee80211_hw *hw = &local->hw;
	u16 capab;
	u16 tid, start_seq_num;
	u8 *state;

	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;

	state = &sta->ampdu_mlme.tid_state_tx[tid];

	spin_lock_bh(&sta->lock);

	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
		spin_unlock_bh(&sta->lock);
		return;
	}

	if (mgmt->u.action.u.addba_resp.dialog_token !=
		sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
		spin_unlock_bh(&sta->lock);
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		return;
	}

	del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
			== WLAN_STATUS_SUCCESS) {
		*state |= HT_ADDBA_RECEIVED_MSK;
		sta->ampdu_mlme.addba_req_num[tid] = 0;

		if (*state == HT_AGG_STATE_OPERATIONAL &&
		    local->hw.ampdu_queues)
			ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);

		if (local->ops->ampdu_action) {
			(void)local->ops->ampdu_action(hw,
					       IEEE80211_AMPDU_TX_RESUME,
					       &sta->sta, tid, &start_seq_num);
		}
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Resuming TX aggregation for tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		spin_unlock_bh(&sta->lock);
	} else {
		sta->ampdu_mlme.addba_req_num[tid]++;
		/* this will allow the state check in stop_BA_session */
		*state = HT_AGG_STATE_OPERATIONAL;
		spin_unlock_bh(&sta->lock);
		ieee80211_stop_tx_ba_session(hw, sta->sta.addr, tid,
					     WLAN_BACK_INITIATOR);
	}
}

void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
			     struct sta_info *sta,
			     struct ieee80211_mgmt *mgmt, size_t len)
{
	struct ieee80211_local *local = sdata->local;
	u16 tid, params;
	u16 initiator;

	params = le16_to_cpu(mgmt->u.action.u.delba.params);
	tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
	initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;

#ifdef CONFIG_MAC80211_HT_DEBUG
	if (net_ratelimit())
		printk(KERN_DEBUG "delba from %pM (%s) tid %d reason code %d\n",
			mgmt->sa, initiator ? "initiator" : "recipient", tid,
			mgmt->u.action.u.delba.reason_code);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	if (initiator == WLAN_BACK_INITIATOR)
		ieee80211_sta_stop_rx_ba_session(sdata, sta->sta.addr, tid,
						 WLAN_BACK_INITIATOR, 0);
	else { /* WLAN_BACK_RECIPIENT */
		spin_lock_bh(&sta->lock);
		sta->ampdu_mlme.tid_state_tx[tid] =
				HT_AGG_STATE_OPERATIONAL;
		spin_unlock_bh(&sta->lock);
		ieee80211_stop_tx_ba_session(&local->hw, sta->sta.addr, tid,
					     WLAN_BACK_RECIPIENT);
	}
}
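
/*
 * Illustrative call flow for the exported TX aggregation API in this file
 * (a sketch of the typical sequence, not documentation of any specific
 * driver):
 *
 *	rate control / driver:	ieee80211_start_tx_ba_session(hw, ra, tid)
 *		-> mac80211 sends the ADDBA request and arms addba_resp_timer
 *	driver (when its queues are ready, possibly from IRQ context):
 *		ieee80211_start_tx_ba_cb_irqsafe(hw, ra, tid)
 *	peer's ADDBA response:	ieee80211_process_addba_resp()
 *		-> state reaches HT_AGG_STATE_OPERATIONAL, aggregation runs
 *	teardown from either side: ieee80211_stop_tx_ba_session(), followed
 *		by ieee80211_stop_tx_ba_cb_irqsafe() from the driver
 */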