// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions manipulate sctp stream queue/scheduling.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/list.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>

/* First Come First Serve (a.k.a. FIFO)
 * RFC DRAFT ndata Section 3.1
 */
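/* FCFS keeps no per-stream state: set/get/init/init_sid/free_sid/free,
 * enqueue and the (un)sched hooks below are all no-ops; dequeue hands
 * out the head of the outqueue, or keeps draining out_curr while a
 * message is still in flight.
 */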
static int sctp_sched_fcfs_set(struct sctp_stream *stream, __u16 sid,
			       __u16 value, gfp_t gfp)
{
	return 0;
}

static int sctp_sched_fcfs_get(struct sctp_stream *stream, __u16 sid,
			       __u16 *value)
{
	*value = 0;
	return 0;
}

static int sctp_sched_fcfs_init(struct sctp_stream *stream)
{
	return 0;
}

static int sctp_sched_fcfs_init_sid(struct sctp_stream *stream, __u16 sid,
				    gfp_t gfp)
{
	return 0;
}

static void sctp_sched_fcfs_free_sid(struct sctp_stream *stream, __u16 sid)
{
}

static void sctp_sched_fcfs_free(struct sctp_stream *stream)
{
}

static void sctp_sched_fcfs_enqueue(struct sctp_outq *q,
				    struct sctp_datamsg *msg)
{
}

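/* If a datamsg is still in flight on out_curr, keep draining that
 * stream's queue so the message is not interleaved with others;
 * otherwise hand out the chunk at the head of the outqueue.
 */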
static struct sctp_chunk *sctp_sched_fcfs_dequeue(struct sctp_outq *q)
{
	struct sctp_stream *stream = &q->asoc->stream;
	struct sctp_chunk *ch = NULL;
	struct list_head *entry;

	if (list_empty(&q->out_chunk_list))
		goto out;

	if (stream->out_curr) {
		ch = list_entry(stream->out_curr->ext->outq.next,
				struct sctp_chunk, stream_list);
	} else {
		entry = q->out_chunk_list.next;
		ch = list_entry(entry, struct sctp_chunk, list);
	}

	sctp_sched_dequeue_common(q, ch);

out:
	return ch;
}

static void sctp_sched_fcfs_dequeue_done(struct sctp_outq *q,
					 struct sctp_chunk *chunk)
{
}

static void sctp_sched_fcfs_sched_all(struct sctp_stream *stream)
{
}

static void sctp_sched_fcfs_unsched_all(struct sctp_stream *stream)
{
}

static struct sctp_sched_ops sctp_sched_fcfs = {
	.set = sctp_sched_fcfs_set,
	.get = sctp_sched_fcfs_get,
	.init = sctp_sched_fcfs_init,
	.init_sid = sctp_sched_fcfs_init_sid,
	.free_sid = sctp_sched_fcfs_free_sid,
	.free = sctp_sched_fcfs_free,
	.enqueue = sctp_sched_fcfs_enqueue,
	.dequeue = sctp_sched_fcfs_dequeue,
	.dequeue_done = sctp_sched_fcfs_dequeue_done,
	.sched_all = sctp_sched_fcfs_sched_all,
	.unsched_all = sctp_sched_fcfs_unsched_all,
};

static void sctp_sched_ops_fcfs_init(void)
{
	sctp_sched_ops_register(SCTP_SS_FCFS, &sctp_sched_fcfs);
}

/* API to other parts of the stack */

static struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1];

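/* Slot a scheduler implementation into the lookup table; called by the
 * per-scheduler init routines via sctp_sched_ops_init().
 */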
void sctp_sched_ops_register(enum sctp_sched_type sched,
			     struct sctp_sched_ops *sched_ops)
{
	sctp_sched_ops[sched] = sched_ops;
}

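/* Register all built-in schedulers; the prio and rr implementations live
 * in stream_sched_prio.c and stream_sched_rr.c.
 */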
void sctp_sched_ops_init(void)
{
	sctp_sched_ops_fcfs_init();
	sctp_sched_ops_prio_init();
	sctp_sched_ops_rr_init();
}

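/* Switch an association to a new scheduler: tear down the old scheduler's
 * per-stream state, wipe everything past the outq list head in each
 * stream's ext area, init the new scheduler and re-enqueue every datamsg
 * still sitting on the outqueue. On init_sid() failure we fall back to
 * FCFS, which needs no per-stream state. Typically reached via the
 * SCTP_STREAM_SCHEDULER socket option.
 */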
int sctp_sched_set_sched(struct sctp_association *asoc,
			 enum sctp_sched_type sched)
{
	struct sctp_sched_ops *old = asoc->outqueue.sched;
	struct sctp_datamsg *msg = NULL;
	struct sctp_sched_ops *n;
	struct sctp_chunk *ch;
	int i, ret = 0;

	/* Validate sched before using it to index sctp_sched_ops[] */
	if (sched > SCTP_SS_MAX)
		return -EINVAL;

	n = sctp_sched_ops[sched];
	if (old == n)
		return ret;

	if (old) {
		old->free(&asoc->stream);

		/* Give the next scheduler a clean slate. */
		for (i = 0; i < asoc->stream.outcnt; i++) {
			void *p = SCTP_SO(&asoc->stream, i)->ext;

			if (!p)
				continue;

			p += offsetofend(struct sctp_stream_out_ext, outq);
			memset(p, 0, sizeof(struct sctp_stream_out_ext) -
				     offsetofend(struct sctp_stream_out_ext, outq));
		}
	}

	asoc->outqueue.sched = n;
	n->init(&asoc->stream);
	for (i = 0; i < asoc->stream.outcnt; i++) {
		if (!SCTP_SO(&asoc->stream, i)->ext)
			continue;

		ret = n->init_sid(&asoc->stream, i, GFP_ATOMIC);
		if (ret)
			goto err;
	}

	/* We have to requeue all chunks already queued. */
	list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) {
		if (ch->msg == msg)
			continue;
		msg = ch->msg;
		n->enqueue(&asoc->outqueue, msg);
	}

	return ret;

err:
	n->free(&asoc->stream);
	asoc->outqueue.sched = &sctp_sched_fcfs; /* Always safe */

	return ret;
}

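/* Report the scheduler currently in use as its enum sctp_sched_type value. */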
int sctp_sched_get_sched(struct sctp_association *asoc)
{
	int i;

	for (i = 0; i <= SCTP_SS_MAX; i++)
		if (asoc->outqueue.sched == sctp_sched_ops[i])
			return i;

	return 0;
}

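/* Set a stream's scheduling value (e.g. its priority under SCTP_SS_PRIO),
 * allocating the stream's ext state on first use.
 */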
int sctp_sched_set_value(struct sctp_association *asoc, __u16 sid,
			 __u16 value, gfp_t gfp)
{
	if (sid >= asoc->stream.outcnt)
		return -EINVAL;

	if (!SCTP_SO(&asoc->stream, sid)->ext) {
		int ret;

		ret = sctp_stream_init_ext(&asoc->stream, sid);
		if (ret)
			return ret;
	}

	return asoc->outqueue.sched->set(&asoc->stream, sid, value, gfp);
}

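/* Read back a stream's scheduling value. Streams that never got ext
 * state report success without touching *value.
 */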
int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
			 __u16 *value)
{
	if (sid >= asoc->stream.outcnt)
		return -EINVAL;

	if (!SCTP_SO(&asoc->stream, sid)->ext)
		return 0;

	return asoc->outqueue.sched->get(&asoc->stream, sid, value);
}

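/* Post-dequeue hook: while a datamsg still has fragments queued (and the
 * peer cannot interleave messages), pin its stream as out_curr so the
 * message is not preempted; once the last fragment is out, clear
 * out_curr and let the scheduler update its own state.
 */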
void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
{
	if (!list_is_last(&ch->frag_list, &ch->msg->chunks) &&
	    !q->asoc->peer.intl_capable) {
		struct sctp_stream_out *sout;
		__u16 sid;

		/* The datamsg is not finished yet, so save its stream as
		 * the current one, in case the application switches
		 * schedulers or a higher priority stream comes in.
		 */
		sid = sctp_chunk_stream_no(ch);
		sout = SCTP_SO(&q->asoc->stream, sid);
		q->asoc->stream.out_curr = sout;
		return;
	}

	q->asoc->stream.out_curr = NULL;
	q->sched->dequeue_done(q, ch);
}

/* Auxiliary functions for the schedulers */
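/* Unlink a chunk from both the outqueue and its stream's list and adjust
 * the queued byte count; shared by the schedulers' dequeue paths.
 */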
void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch)
{
	list_del_init(&ch->list);
	list_del_init(&ch->stream_list);
	q->out_qlen -= ch->skb->len;
}

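/* Set up the scheduler-specific state of a single stream, starting from
 * an empty per-stream outq.
 */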
int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp)
{
	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
	struct sctp_stream_out_ext *ext = SCTP_SO(stream, sid)->ext;

	INIT_LIST_HEAD(&ext->outq);
	return sched->init_sid(stream, sid, gfp);
}

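/* Map a stream back to the scheduler of its owning association. */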
struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream)
{
	struct sctp_association *asoc;

	asoc = container_of(stream, struct sctp_association, stream);

	return asoc->outqueue.sched;
}