Per-user upload and download throttle rates - Yuri
[l2tpns.git] / tbf.c
1 // L2TPNS: token bucket filters
2
3 char const *cvs_id_tbf = "$Id: tbf.c,v 1.5 2004-08-26 06:22:37 fred_nerk Exp $";
4
5 #define _GNU_SOURCE
6
7 #include <string.h>
8 #include <unistd.h>
9 #include <sys/mman.h>
10
11 #include "l2tpns.h"
12 #include "util.h"
13 #include "tbf.h"
14
15 tbft *filter_list = NULL;
16 static int filter_list_size = 0;
17
18 static int timer_chain = -1; // Head of timer chain.
19
20 static void tbf_run_queue(int tbf_id);
21
22 void init_tbf(void)
23 {
24 filter_list = mmap(NULL, sizeof(*filter_list) * MAXTBFS, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
25 if (!filter_list)
26 return;
27
28 filter_list_size = MAXTBFS;
29 filter_list[0].sid = -1; // Reserved.
30 }
31 //
32 // Put a TBF on the timer list.
33 // This is a doubly linked list..
34 // We put ourselves on the tail of the list.
35 //
36 static void add_to_timer(int id)
37 {
38 if (!filter_list)
39 return;
40
41 if (timer_chain == -1) {
42 filter_list[id].next = filter_list[id].prev = id;
43 timer_chain = id;
44 return;
45 }
46
47 filter_list[id].next = timer_chain;
48 filter_list[id].prev = filter_list[timer_chain].prev;
49 filter_list[filter_list[timer_chain].prev].next = id;
50 filter_list[timer_chain].prev = id;
51 }
52
//
// Remove a TBF from the timer list.
// This is a doubly linked list.
//
// Entries not on the chain have next == prev == 0; callers use a
// non-zero next as the "on the chain" flag, so index 0 can never be a
// chain member.
static void del_from_timer(int id)
{
	if (!filter_list)
		return;

	if (filter_list[id].next == id) { // Last element in chain?
		// A self-linked node must be the head; anything else means the
		// chain is corrupt, so log it rather than clobber timer_chain.
		if (timer_chain != id) { // WTF?
			log(0,0,0,0, "Removed a singleton element from TBF, but tc didn't point to it!\n");
		} else
			timer_chain = -1; // Chain is now empty.
		filter_list[id].next = filter_list[id].prev = 0;
		return;
	}

	// General case: splice our neighbours together.
	filter_list[filter_list[id].next].prev = filter_list[id].prev;
	filter_list[filter_list[id].prev].next = filter_list[id].next;
	if (timer_chain == id) // Removing the head? Advance it.
		timer_chain = filter_list[id].next;

	filter_list[id].next = filter_list[id].prev = 0; // Mark as off the timer chain.
}
77
78 //
79 // Free a token bucket filter structure for re-use.
80 //
81
82 int free_tbf(int tid)
83 {
84 if (tid < 1) // Make sure we don't free id # 0
85 return -1;
86
87 if (!filter_list) // WTF?
88 return -1;
89
90 if (filter_list[tid].next)
91 del_from_timer(tid);
92 filter_list[tid].sid = 0;
93
94 return 0; // Done!
95 }
96
97 //
98 // Allocate a new token bucket filter.
99 //
100 int new_tbf(int sid, int max_credit, int rate, void (*f)(sessionidt, u8 *, int))
101 {
102 int i;
103 static int p = 0;
104
105 log(3,0,0,0, "Allocating new TBF (sess %d, rate %d, helper %p)\n", sid, rate, f);
106
107 if (!filter_list)
108 return 0; // Couldn't alloc memory!
109
110 // again:
111
112 for (i = 0 ; i < filter_list_size ; ++i, p = (p+1)%filter_list_size ) {
113 if (filter_list[p].sid)
114 continue;
115
116 memset((void*) &filter_list[p], 0, sizeof(filter_list[p]) ); // Clear counters and data.
117 filter_list[p].sid = sid;
118 filter_list[p].credit = max_credit;
119 filter_list[p].queued = 0;
120 filter_list[p].max_credit = max_credit;
121 filter_list[p].rate = rate;
122 filter_list[p].oldest = 0;
123 filter_list[p].send = f;
124 return p;
125 }
126
127 #if 0
128 // All allocated filters are used! Increase the size of the allocated
129 // filters.
130
131 {
132 int new_size = filter_list_size * 2;
133 tbft *new = mremap(filter_list, filter_list_size * sizeof(*new), new_size * sizeof(*new), MREMAP_MAYMOVE);
134
135 if (new == MAP_FAILED)
136 {
137 log(0,0,0,0, "Ran out of token bucket filters and mremap failed! Sess %d will be un-throttled\n", sid);
138 return 0;
139 }
140
141 i = filter_list_size;
142 filter_list_size = new_size;
143 filter_list = new;
144 }
145
146 for (; i < filter_list_size; ++i)
147 filter_list[i].sid = 0;
148
149 goto again;
150 #else
151 log(0,0,0,0, "Ran out of token bucket filters! Sess %d will be un-throttled\n", sid);
152 return 0;
153 #endif
154 }
155
//
// Sanity check all the TBF records. This is
// typically done when we become a master..
//
// Pass 1 frees any filter whose session does not reference it; pass 2
// forces each session's tbf_in/tbf_out filters to point back at that
// session.
void fsck_tbfs(void)
{
	int i , sid;

	if (!filter_list)
		return;

	// Pass 1: drop filters orphaned from their sessions.
	for (i = 1; i < filter_list_size; ++i) {
		if (!filter_list[i].sid) // Is it used??
			continue;

		// NOTE(review): sid comes straight from the (possibly stale)
		// filter record and is used to index session[] without a range
		// check — assumes sid is always a valid session id; confirm.
		sid = filter_list[i].sid;
		if (i != session[sid].tbf_in &&
			i != session[sid].tbf_out) { // Ooops.

			free_tbf(i); // Mark it as free...
		}
	}

	// Pass 2: make each referenced filter point back at its session.
	for (i = 0; i < config->cluster_highest_sessionid ; ++i) {
		if (session[i].tbf_in && filter_list[session[i].tbf_in].sid != i) {
			filter_list[session[i].tbf_in].sid = i; // Ouch!? FIXME. What to do here?
		}
		if (session[i].tbf_out && filter_list[session[i].tbf_out].sid != i) {
			filter_list[session[i].tbf_out].sid = i; // Ouch!? FIXME. What to do here?
		}
	}
}
188
189
190 //
191 // Run a packet through a token bucket filter.
192 // If we can send it right away, we do. Else we
193 // try and queue it to send later. Else we drop it.
194 //
195 int tbf_queue_packet(int tbf_id, char * data, int size)
196 {
197 int i;
198 tbft * f;
199
200 if (!filter_list)
201 return -1;
202
203 if (tbf_id > filter_list_size || tbf_id < 1) { // Out of range ID??
204 // Very bad. Just drop it.
205 return -1;
206 }
207
208 f = &filter_list[tbf_id];
209
210 if (!f->sid) // Is this a real structure??
211 return -1;
212
213 tbf_run_queue(tbf_id); // Caculate credit and send any queued packets if possible..
214
215 f->b_queued += size;
216 f->p_queued ++;
217
218 if (!f->queued && f->credit > size) { // If the queue is empty, and we have
219 // enough credit, just send it now.
220 f->credit -= size;
221 if (f->send) {
222 f->send(f->sid, data, size);
223 f->b_sent += size;
224 f->p_sent ++;
225 } else {
226 f->b_dropped += size;
227 f->p_dropped ++;
228 }
229 return size;
230 }
231
232 // Not enough credit. Can we have room in the queue?
233 if (f->queued >= TBF_MAX_QUEUE) {
234 f->p_dropped ++;
235 f->b_dropped += size;
236 return -1; // No, just drop it.
237 }
238
239 // Is it too big to fit into a queue slot?
240 if (size >= TBF_MAX_SIZE) {
241 f->p_dropped ++;
242 f->b_dropped += size;
243 return -1; // Yes, just drop it.
244 }
245
246 // Ok. We have a slot, and it's big enough to
247 // contain the packet, so queue the packet!
248 i = ( f->oldest + f->queued ) % TBF_MAX_QUEUE;
249 memcpy(f->packets[i], data, size);
250
251 f->sizes[i] = size;
252 f->queued ++;
253 f->p_delayed ++;
254
255 if (!f->next) // Are we off the timer chain?
256 add_to_timer(tbf_id); // Put ourselves on the timer chain.
257
258 return 0; // All done.
259 }
260
//
// Send queued packets from the filter if possible.
// (We're normally only called if this is possible.. )
//
// Refills the bucket from the elapsed time, then drains the queue in
// FIFO order while credit lasts.  Takes the filter off the timer chain
// once the queue is empty.
static void tbf_run_queue(int tbf_id)
{
	tbft * f;

	if (!filter_list)
		return;

	f = &filter_list[tbf_id];

	// Calculate available credit: rate is per second, TIME ticks are
	// 1/10th of a second, hence the /10.  Clamp at the bucket depth.
	f->credit += (TIME - f->lasttime) * f->rate / 10; // current time is 1/10th of a second.
	if (f->credit > f->max_credit)
		f->credit = f->max_credit;
	f->lasttime = TIME;

	// Drain the FIFO: oldest is the head slot, (oldest + queued) wraps
	// to the tail within the TBF_MAX_QUEUE ring.
	while (f->queued > 0 && f->credit >= f->sizes[f->oldest]) { // While we have enough credit..

		if (f->send) {
			f->send(f->sid, f->packets[f->oldest], f->sizes[f->oldest]);
			f->b_sent += f->sizes[f->oldest];
			f->p_sent ++;
		} else {
			// No transmit helper: account the packet as dropped.
			f->b_dropped += f->sizes[f->oldest];
			f->p_dropped ++;
		}

		f->credit -= f->sizes[f->oldest];

		f->oldest = (f->oldest + 1 ) % TBF_MAX_QUEUE;
		f->queued--; // One less queued packet..
	}

	if (f->queued) // Still more to do. Hang around on the timer list.
		return;

	if (f->next) // Are we on the timer list??
		del_from_timer(tbf_id); // Nothing more to do. Get off the timer list.
}
302
//
// Periodically walk the timer list..
//
// Runs every filter currently on the timer chain once.  The chain may
// be mutated underneath us (tbf_run_queue removes drained entries), so
// the next pointer is saved before each call and the walk is bounded
// by "last" and a safety counter.
//
// Returns 1 if any filters were processed, 0 if the chain was empty.
int tbf_run_timer(void)
{
	int i = timer_chain;
	int count = filter_list_size + 1; // Safety check.
	int last = -1;
	int tbf_id; // structure being processed.

	if (timer_chain < 0)
		return 0; // Nothing to do...

	if (!filter_list) // No structures built yet.
		return 0;

	last = filter_list[i].prev; // last element to process.

	do {
		tbf_id = i;
		i = filter_list[i].next; // Get the next in the queue.

		tbf_run_queue(tbf_id); // Run the timer queue..
		// Stop when the chain empties (timer_chain reset to -1), when we
		// have just processed the saved tail, or the safety counter trips.
	} while ( timer_chain > 0 && i && tbf_id != last && --count > 0);


#if 0 // Debugging.
	for (i = 0; i < filter_list_size; ++i) {
		if (!filter_list[i].next)
			continue;
		if (filter_list[i].lasttime == TIME) // Did we just run it?
			continue;

		log(1,0,0,0, "Missed tbf %d! Not on the timer chain?(n %d, p %d, tc %d)\n", i,
			filter_list[i].next, filter_list[i].prev, timer_chain);
		tbf_run_queue(i);
	}
#endif

	return 1;
}
344
345 int cmd_show_tbf(struct cli_def *cli, char *command, char **argv, int argc)
346 {
347 int i;
348 int count = 0;
349
350 if (CLI_HELP_REQUESTED)
351 return CLI_HELP_NO_ARGS;
352
353 if (!config->cluster_iam_master) {
354 cli_print(cli, "Can't do this on a slave. Do it on %s", inet_toa(config->cluster_master_address));
355 return CLI_OK;
356 }
357
358 if (!filter_list)
359 return CLI_OK;
360
361 cli_print(cli,"%6s %5s %5s %6s %6s | %7s %7s %8s %8s %8s %8s", "TBF#", "Sid", "Rate", "Credit", "Queued",
362 "ByteIn","PackIn","ByteSent","PackSent", "PackDrop", "PackDelay");
363
364 for (i = 1; i < filter_list_size; ++i) {
365 if (!filter_list[i].sid) // Is it used?
366 continue; // No.
367
368 cli_print(cli, "%5d%1s %5d %5d %6d %6d | %7d %7d %8d %8d %8d %8d",
369 i, (filter_list[i].next ? "*" : " "),
370 filter_list[i].sid,
371 filter_list[i].rate * 8,
372 filter_list[i].credit,
373 filter_list[i].queued,
374
375 filter_list[i].b_queued,
376 filter_list[i].p_queued,
377 filter_list[i].b_sent,
378 filter_list[i].p_sent,
379 filter_list[i].p_dropped,
380 filter_list[i].p_delayed);
381 ++count;
382 }
383 cli_print(cli, "%d tbf entries used, %d total", count, filter_list_size);
384 return CLI_OK;
385 }