/*
2 * Copyright (c) 2008-2015 Travis Geiselbrecht
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23#include <stdlib.h>
24#include <debug.h>
25#include <trace.h>
26#include <pow2.h>
27#include <string.h>
28#include <assert.h>
29#include <lib/cbuf.h>
30#include <kernel/event.h>
31#include <kernel/spinlock.h>
32
33#define LOCAL_TRACE 0
34
35#define INC_POINTER(cbuf, ptr, inc) \
36 modpow2(((ptr) + (inc)), (cbuf)->len_pow2)
37
38void cbuf_initialize(cbuf_t *cbuf, size_t len)
39{
40 cbuf_initialize_etc(cbuf, len, malloc(len));
41}
42
43void cbuf_initialize_etc(cbuf_t *cbuf, size_t len, void *buf)
44{
45 DEBUG_ASSERT(cbuf);
46 DEBUG_ASSERT(len > 0);
47 DEBUG_ASSERT(ispow2(len));
48
49 cbuf->head = 0;
50 cbuf->tail = 0;
51 cbuf->len_pow2 = log2_uint(len);
52 cbuf->buf = buf;
53 event_init(&cbuf->event, false, 0);
54 spin_lock_init(&cbuf->lock);
55
56 LTRACEF("len %zd, len_pow2 %u\n", len, cbuf->len_pow2);
57}
58
59size_t cbuf_space_avail(cbuf_t *cbuf)
60{
61 uint consumed = modpow2((uint)(cbuf->head - cbuf->tail), cbuf->len_pow2);
62 return valpow2(cbuf->len_pow2) - consumed - 1;
63}
64
65size_t cbuf_space_used(cbuf_t *cbuf)
66{
67 return modpow2((uint)(cbuf->head - cbuf->tail), cbuf->len_pow2);
68}
69
/**
 * @brief Write up to len bytes from _buf into the circular buffer.
 *
 * Copies as much as currently fits (one slot is always kept free),
 * advancing the head pointer. Runs under the cbuf spinlock with
 * interrupts saved/disabled.
 *
 * @param cbuf          target circular buffer
 * @param _buf          source data; if NULL the written region is
 *                      zero-filled instead of copied
 * @param len           bytes requested; must be < buffer size (asserted)
 * @param canreschedule if true, take a preemption point after unlocking
 * @return number of bytes actually written (< len if the buffer filled)
 */
size_t cbuf_write(cbuf_t *cbuf, const void *_buf, size_t len, bool canreschedule)
{
    const char *buf = (const char *)_buf;

    LTRACEF("len %zd\n", len);

    DEBUG_ASSERT(cbuf);
    DEBUG_ASSERT(len < valpow2(cbuf->len_pow2));

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&cbuf->lock, state);

    size_t write_len;
    size_t pos = 0;

    // at most two passes: one up to the physical end of the backing
    // store, one after head wraps back to the start
    while (pos < len && cbuf_space_avail(cbuf) > 0) {
        if (cbuf->head >= cbuf->tail) {
            // contiguous free space runs to the end of the buffer
            write_len = MIN(valpow2(cbuf->len_pow2) - cbuf->head, len - pos);
        } else {
            // head has wrapped: free space ends one byte before tail
            write_len = MIN(cbuf->tail - cbuf->head - 1, len - pos);
        }

        // if it's full, abort and return how much we've written
        if (write_len == 0) {
            break;
        }

        // NULL source means the caller wants zero-fill
        if (NULL == buf) {
            memset(cbuf->buf + cbuf->head, 0, write_len);
        } else {
            memcpy(cbuf->buf + cbuf->head, buf + pos, write_len);
        }

        cbuf->head = INC_POINTER(cbuf, cbuf->head, write_len);
        pos += write_len;
    }

    // wake any reader blocked in cbuf_read()/cbuf_read_char();
    // signaled without rescheduling since we still hold the lock
    if (cbuf->head != cbuf->tail)
        event_signal(&cbuf->event, false);

    spin_unlock_irqrestore(&cbuf->lock, state);

    // XXX convert to only rescheduling if
    if (canreschedule)
        thread_preempt();

    return pos;
}
118
/**
 * @brief Read up to buflen bytes out of the circular buffer.
 *
 * Advances the tail pointer past whatever is consumed. If @a block is
 * true, waits on the cbuf event until data is available; the wait
 * happens outside the spinlock, so a wakeup may race with another
 * reader and find the buffer empty again — hence the retry loop.
 *
 * @param cbuf   source circular buffer
 * @param _buf   destination; if NULL the data is consumed but not copied
 * @param buflen maximum number of bytes to read
 * @param block  if true, block until at least one byte is read
 * @return number of bytes read (0 only in the non-blocking empty case)
 */
size_t cbuf_read(cbuf_t *cbuf, void *_buf, size_t buflen, bool block)
{
    char *buf = (char *)_buf;

    DEBUG_ASSERT(cbuf);

retry:
    // block on the cbuf outside of the lock, which may
    // unblock us early and we'll have to double check below
    if (block)
        event_wait(&cbuf->event);

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&cbuf->lock, state);

    // see if there's data available
    size_t ret = 0;
    if (cbuf->tail != cbuf->head) {
        size_t pos = 0;

        // loop until we've read everything we need
        // at most this will make two passes to deal with wraparound
        while (pos < buflen && cbuf->tail != cbuf->head) {
            size_t read_len;
            if (cbuf->head > cbuf->tail) {
                // simple case where there is no wraparound
                read_len = MIN(cbuf->head - cbuf->tail, buflen - pos);
            } else {
                // read to the end of buffer in this pass
                read_len = MIN(valpow2(cbuf->len_pow2) - cbuf->tail, buflen - pos);
            }

            // Only perform the copy if a buf was supplied
            if (NULL != buf) {
                memcpy(buf + pos, cbuf->buf + cbuf->tail, read_len);
            }

            cbuf->tail = INC_POINTER(cbuf, cbuf->tail, read_len);
            pos += read_len;
        }

        if (cbuf->tail == cbuf->head) {
            DEBUG_ASSERT(pos > 0);
            // we've emptied the buffer, unsignal the event
            event_unsignal(&cbuf->event);
        }

        ret = pos;
    }

    spin_unlock_irqrestore(&cbuf->lock, state);

    // we apparently blocked but raced with another thread and found no data, retry
    if (block && ret == 0)
        goto retry;

    return ret;
}
177
178size_t cbuf_peek(cbuf_t *cbuf, iovec_t* regions)
179{
180 DEBUG_ASSERT(cbuf && regions);
181
182 spin_lock_saved_state_t state;
183 spin_lock_irqsave(&cbuf->lock, state);
184
185 size_t ret = cbuf_space_used(cbuf);
186 size_t sz = cbuf_size(cbuf);
187
188 DEBUG_ASSERT(cbuf->tail < sz);
189 DEBUG_ASSERT(ret <= sz);
190
191 regions[0].iov_base = ret ? (cbuf->buf + cbuf->tail) : NULL;
192 if (ret + cbuf->tail > sz) {
193 regions[0].iov_len = sz - cbuf->tail;
194 regions[1].iov_base = cbuf->buf;
195 regions[1].iov_len = ret - regions[0].iov_len;
196 } else {
197 regions[0].iov_len = ret;
198 regions[1].iov_base = NULL;
199 regions[1].iov_len = 0;
200 }
201
202 spin_unlock_irqrestore(&cbuf->lock, state);
203 return ret;
204}
205
206size_t cbuf_write_char(cbuf_t *cbuf, char c, bool canreschedule)
207{
208 DEBUG_ASSERT(cbuf);
209
210 spin_lock_saved_state_t state;
211 spin_lock_irqsave(&cbuf->lock, state);
212
213 size_t ret = 0;
214 if (cbuf_space_avail(cbuf) > 0) {
215 cbuf->buf[cbuf->head] = c;
216
217 cbuf->head = INC_POINTER(cbuf, cbuf->head, 1);
218 ret = 1;
219
220 if (cbuf->head != cbuf->tail)
221 event_signal(&cbuf->event, canreschedule);
222 }
223
224 spin_unlock_irqrestore(&cbuf->lock, state);
225
226 return ret;
227}
228
229size_t cbuf_read_char(cbuf_t *cbuf, char *c, bool block)
230{
231 DEBUG_ASSERT(cbuf);
232 DEBUG_ASSERT(c);
233
234retry:
235 if (block)
236 event_wait(&cbuf->event);
237
238 spin_lock_saved_state_t state;
239 spin_lock_irqsave(&cbuf->lock, state);
240
241 // see if there's data available
242 size_t ret = 0;
243 if (cbuf->tail != cbuf->head) {
244
245 *c = cbuf->buf[cbuf->tail];
246 cbuf->tail = INC_POINTER(cbuf, cbuf->tail, 1);
247
248 if (cbuf->tail == cbuf->head) {
249 // we've emptied the buffer, unsignal the event
250 event_unsignal(&cbuf->event);
251 }
252
253 ret = 1;
254 }
255
256 spin_unlock_irqrestore(&cbuf->lock, state);
257
258 if (block && ret == 0)
259 goto retry;
260
261 return ret;
262}
263