From: Felix Fietkau <nbd@nbd.name>
Date: Fri, 22 Jan 2021 19:24:59 +0100
Subject: [PATCH] mac80211: minstrel_ht: reduce the need to sample slower
 rates

In order to fall back to lower rates more gracefully, without large
throughput fluctuations, initialize all untested rates below tested ones
to the maximum probability of the higher rates.
Usually this leads to untested lower rates being initialized with a
probability value of 100%, making them better candidates for fallback
without having to rely on random probing.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
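Note: below is a minimal, standalone sketch (not kernel code) of the seeding
idea described above: walk one group's rates from the highest index down and
carry the best success probability seen so far, so rates that were never
attempted inherit that value instead of starting at zero. The struct layout,
the helper names seed_untested_rates() and max_u(), and the example numbers
are illustrative assumptions only; the real loop in minstrel_ht_update_stats()
additionally skips unsupported rates and feeds prob_avg into the
throughput/probability rate selection.

#include <stdio.h>

#define NUM_RATES 8

struct rate_stats {
	unsigned int att_hist;	/* historical attempts; 0 means "never tested" */
	unsigned int prob_avg;	/* averaged success probability (illustrative fixed point) */
};

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

static void seed_untested_rates(struct rate_stats *rates, int n)
{
	unsigned int last_prob = 0;
	int i;

	/* highest (fastest) rate first, as in the patched update loop */
	for (i = n - 1; i >= 0; i--) {
		if (rates[i].att_hist)
			/* tested rate: remember the best probability so far */
			last_prob = max_u(last_prob, rates[i].prob_avg);
		else
			/* untested rate: inherit the best higher-rate probability */
			rates[i].prob_avg = max_u(last_prob, rates[i].prob_avg);
	}
}

int main(void)
{
	/* only the two fastest rates have been tested, both close to 100% */
	struct rate_stats rates[NUM_RATES] = {
		[6] = { .att_hist = 40, .prob_avg = 950 },
		[7] = { .att_hist = 25, .prob_avg = 980 },
	};
	int i;

	seed_untested_rates(rates, NUM_RATES);

	for (i = 0; i < NUM_RATES; i++)
		printf("rate %d: prob_avg = %u\n", i, rates[i].prob_avg);

	return 0;
}

Running this prints prob_avg = 980 for the untested rates 0-5, i.e. they are
seeded with the best probability observed on the higher rates, which is the
behaviour the change below adds to minstrel_ht_update_stats().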

--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -791,14 +791,11 @@ minstrel_ht_calc_rate_stats(struct minst
 	unsigned int cur_prob;
 
 	if (unlikely(mrs->attempts > 0)) {
-		mrs->sample_skipped = 0;
 		cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
 		minstrel_filter_avg_add(&mrs->prob_avg,
					&mrs->prob_avg_1, cur_prob);
 		mrs->att_hist += mrs->attempts;
 		mrs->succ_hist += mrs->success;
-	} else {
-		mrs->sample_skipped++;
 	}
 
 	mrs->last_success = mrs->success;
@@ -851,7 +848,6 @@ minstrel_ht_update_stats(struct minstrel
 		mi->ampdu_packets = 0;
 	}
 
-	mi->sample_slow = 0;
 	mi->sample_count = 0;
 
 	if (mi->supported[MINSTREL_CCK_GROUP])
@@ -882,6 +878,7 @@ minstrel_ht_update_stats(struct minstrel
 	/* Find best rate sets within all MCS groups*/
 	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
 		u16 *tp_rate = tmp_mcs_tp_rate;
+		u16 last_prob = 0;
 
 		mg = &mi->groups[group];
 		if (!mi->supported[group])
@@ -896,7 +893,7 @@ minstrel_ht_update_stats(struct minstrel
 		if (group == MINSTREL_CCK_GROUP && ht_supported)
 			tp_rate = tmp_legacy_tp_rate;
 
-		for (i = 0; i < MCS_GROUP_RATES; i++) {
+		for (i = MCS_GROUP_RATES - 1; i >= 0; i--) {
 			if (!(mi->supported[group] & BIT(i)))
 				continue;
 
@@ -905,6 +902,11 @@ minstrel_ht_update_stats(struct minstrel
 			mrs = &mg->rates[i];
 			mrs->retry_updated = false;
 			minstrel_ht_calc_rate_stats(mp, mrs);
+
+			if (mrs->att_hist)
+				last_prob = max(last_prob, mrs->prob_avg);
+			else
+				mrs->prob_avg = max(last_prob, mrs->prob_avg);
 			cur_prob = mrs->prob_avg;
 
 			if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
@@ -1469,13 +1471,9 @@ minstrel_get_sample_rate(struct minstrel
 	if (sample_dur >= minstrel_get_duration(tp_rate2) &&
 	    (cur_max_tp_streams - 1 <
 	     minstrel_mcs_groups[sample_group].streams ||
-	     sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
-		if (mrs->sample_skipped < 20)
+	     sample_dur >= minstrel_get_duration(mi->max_prob_rate)))
 			return -1;
 
-		if (mi->sample_slow++ > 2)
-			return -1;
-	}
 	mi->sample_tries--;
 
 	return sample_idx;
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -123,7 +123,6 @@ struct minstrel_rate_stats {
 	u8 retry_count;
 	u8 retry_count_rtscts;
 
-	u8 sample_skipped;
 	bool retry_updated;
 };
 
@@ -179,7 +178,6 @@ struct minstrel_ht_sta {
 	u8 sample_wait;
 	u8 sample_tries;
 	u8 sample_count;
-	u8 sample_slow;
 
 	enum minstrel_sample_mode sample_mode;
 	u16 sample_rate;