IPv6 load-balancing
[l2tpns.git] / tbf.c
1 // L2TPNS: token bucket filters
2
3 #include <string.h>
4 #include <linux/rtnetlink.h>
5 #include <netinet/ip6.h>
6
7 #include "dhcp6.h"
8 #include "l2tpns.h"
9 #include "util.h"
10 #include "tbf.h"
11
12 tbft *filter_list = NULL;
13 static int filter_list_size = 0;
14
15 static int timer_chain = -1; // Head of timer chain.
16
17 static void tbf_run_queue(int tbf_id);
18
19 void init_tbf(int num_tbfs)
20 {
21 if (!(filter_list = shared_malloc(sizeof(*filter_list) * num_tbfs)))
22 return;
23
24 filter_list_size = num_tbfs;
25 filter_list[0].sid = -1; // Reserved.
26 }
27 //
28 // Put a TBF on the timer list.
29 // This is a doubly linked list..
30 // We put ourselves on the tail of the list.
31 //
32 static void add_to_timer(int id)
33 {
34 if (!filter_list)
35 return;
36
37 if (timer_chain == -1) {
38 filter_list[id].next = filter_list[id].prev = id;
39 timer_chain = id;
40 return;
41 }
42
43 filter_list[id].next = timer_chain;
44 filter_list[id].prev = filter_list[timer_chain].prev;
45 filter_list[filter_list[timer_chain].prev].next = id;
46 filter_list[timer_chain].prev = id;
47 }
48
49 //
50 // Remove a TBF from the timer list.
51 // This is a doubly linked list.
52 static void del_from_timer(int id)
53 {
54 if (!filter_list)
55 return;
56
57 if (filter_list[id].next == id) { // Last element in chain?
58 if (timer_chain != id) { // WTF?
59 LOG(0, 0, 0, "Removed a singleton element from TBF, but tc didn't point to it!\n");
60 } else
61 timer_chain = -1;
62 filter_list[id].next = filter_list[id].prev = 0;
63 return;
64 }
65
66 filter_list[filter_list[id].next].prev = filter_list[id].prev;
67 filter_list[filter_list[id].prev].next = filter_list[id].next;
68 if (timer_chain == id)
69 timer_chain = filter_list[id].next;
70
71 filter_list[id].next = filter_list[id].prev = 0; // Mark as off the timer chain.
72 }
73
74 //
75 // Free a token bucket filter structure for re-use.
76 //
77
78 int free_tbf(int tid)
79 {
80 if (tid < 1) // Make sure we don't free id # 0
81 return -1;
82
83 if (!filter_list) // WTF?
84 return -1;
85
86 if (filter_list[tid].next)
87 del_from_timer(tid);
88 filter_list[tid].sid = 0;
89
90 return 0; // Done!
91 }
92
93 //
94 // Allocate a new token bucket filter.
95 //
96 int new_tbf(int sid, int max_credit, int rate, void (*f)(sessionidt, uint8_t *, int))
97 {
98 int i;
99 static int p = 0;
100
101 LOG(4, 0, 0, "Allocating new TBF (sess %d, rate %d, helper %p)\n", sid, rate, f);
102
103 if (!filter_list)
104 return 0; // Couldn't alloc memory!
105
106 for (i = 0 ; i < filter_list_size ; ++i, p = (p+1)%filter_list_size ) {
107 if (filter_list[p].sid)
108 continue;
109
110 memset((void*) &filter_list[p], 0, sizeof(filter_list[p]) ); // Clear counters and data.
111 filter_list[p].sid = sid;
112 filter_list[p].credit = max_credit;
113 filter_list[p].queued = 0;
114 filter_list[p].max_credit = max_credit;
115 filter_list[p].rate = rate;
116 filter_list[p].oldest = 0;
117 filter_list[p].send = f;
118 return p;
119 }
120
121 LOG(0, 0, 0, "Ran out of token bucket filters! Sess %d will be un-throttled\n", sid);
122 return 0;
123 }
124
125 //
126 // Sanity check all the TBF records. This is
127 // typically done when we become a master..
128 //
129 void fsck_tbfs(void)
130 {
131 int i , sid;
132
133 if (!filter_list)
134 return;
135
136 for (i = 1; i < filter_list_size; ++i) {
137 if (!filter_list[i].sid) // Is it used??
138 continue;
139
140 sid = filter_list[i].sid;
141 if (i != session[sid].tbf_in &&
142 i != session[sid].tbf_out) { // Ooops.
143
144 free_tbf(i); // Mark it as free...
145 }
146 }
147
148 for (i = 0; i < config->cluster_highest_sessionid ; ++i) {
149 if (session[i].tbf_in && filter_list[session[i].tbf_in].sid != i) {
150 filter_list[session[i].tbf_in].sid = i; // Ouch!? FIXME. What to do here?
151 }
152 if (session[i].tbf_out && filter_list[session[i].tbf_out].sid != i) {
153 filter_list[session[i].tbf_out].sid = i; // Ouch!? FIXME. What to do here?
154 }
155 }
156 }
157
158
159 //
160 // Run a packet through a token bucket filter.
161 // If we can send it right away, we do. Else we
162 // try and queue it to send later. Else we drop it.
163 //
164 int tbf_queue_packet(int tbf_id, uint8_t *data, int size)
165 {
166 int i;
167 tbft *f;
168
169 if (!filter_list)
170 return -1;
171
172 if (tbf_id > filter_list_size || tbf_id < 1) { // Out of range ID??
173 // Very bad. Just drop it.
174 return -1;
175 }
176
177 f = &filter_list[tbf_id];
178
179 if (!f->sid) // Is this a real structure??
180 return -1;
181
182 tbf_run_queue(tbf_id); // Caculate credit and send any queued packets if possible..
183
184 f->b_queued += size;
185 f->p_queued ++;
186
187 if (!f->queued && f->credit > size) { // If the queue is empty, and we have
188 // enough credit, just send it now.
189 f->credit -= size;
190 if (f->send) {
191 f->send(f->sid, data, size);
192 f->b_sent += size;
193 f->p_sent ++;
194 } else {
195 f->b_dropped += size;
196 f->p_dropped ++;
197 }
198 return size;
199 }
200
201 // Not enough credit. Can we have room in the queue?
202 if (f->queued >= TBF_MAX_QUEUE) {
203 f->p_dropped ++;
204 f->b_dropped += size;
205 return -1; // No, just drop it.
206 }
207
208 // Is it too big to fit into a queue slot?
209 if (size >= TBF_MAX_SIZE) {
210 f->p_dropped ++;
211 f->b_dropped += size;
212 return -1; // Yes, just drop it.
213 }
214
215 // Ok. We have a slot, and it's big enough to
216 // contain the packet, so queue the packet!
217 i = ( f->oldest + f->queued ) % TBF_MAX_QUEUE;
218 memcpy(f->packets[i], data, size);
219
220 f->sizes[i] = size;
221 f->queued ++;
222 f->p_delayed ++;
223
224 if (!f->next) // Are we off the timer chain?
225 add_to_timer(tbf_id); // Put ourselves on the timer chain.
226
227 return 0; // All done.
228 }
229
230 //
231 // Send queued packets from the filter if possible.
232 // (We're normally only called if this is possible.. )
233 static void tbf_run_queue(int tbf_id)
234 {
235 tbft * f;
236
237 if (!filter_list)
238 return;
239
240 f = &filter_list[tbf_id];
241
242 // Calculate available credit...
243 f->credit += (TIME - f->lasttime) * f->rate / 10; // current time is 1/10th of a second.
244 if (f->credit > f->max_credit)
245 f->credit = f->max_credit;
246 f->lasttime = TIME;
247
248 while (f->queued > 0 && f->credit >= f->sizes[f->oldest]) { // While we have enough credit..
249
250 if (f->send) {
251 f->send(f->sid, f->packets[f->oldest], f->sizes[f->oldest]);
252 f->b_sent += f->sizes[f->oldest];
253 f->p_sent ++;
254 } else {
255 f->b_dropped += f->sizes[f->oldest];
256 f->p_dropped ++;
257 }
258
259 f->credit -= f->sizes[f->oldest];
260
261 f->oldest = (f->oldest + 1 ) % TBF_MAX_QUEUE;
262 f->queued--; // One less queued packet..
263 }
264
265 if (f->queued) // Still more to do. Hang around on the timer list.
266 return;
267
268 if (f->next) // Are we on the timer list??
269 del_from_timer(tbf_id); // Nothing more to do. Get off the timer list.
270 }
271
//
// Periodically walk the timer list..
//
// Runs the queue of every TBF on the timer chain.  Returns 1 if the
// chain was walked, 0 if there was nothing to do.
//
int tbf_run_timer(void)
{
	int i = timer_chain;
	int count = filter_list_size + 1;	// Safety check.
	int last = -1;
	int tbf_id;	// structure being processed.

	if (timer_chain < 0)
		return 0;	// Nothing to do...

	if (!filter_list)	// No structures built yet.
		return 0;

	last = filter_list[i].prev;	// last element to process.

	do {
		tbf_id = i;
		i = filter_list[i].next;	// Get the next in the queue.

		// NOTE: tbf_run_queue() may unlink tbf_id from the chain
		// (and may empty the chain entirely); the loop condition
		// below re-checks timer_chain and caps iterations with
		// 'count' to stay safe against a mutated ring.
		tbf_run_queue(tbf_id);	// Run the timer queue..
	} while ( timer_chain > 0 && i && tbf_id != last && --count > 0);


#if 0 // Debugging.
	for (i = 0; i < filter_list_size; ++i) {
		if (!filter_list[i].next)
			continue;
		if (filter_list[i].lasttime == TIME)	// Did we just run it?
			continue;

		LOG(1, 0, 0, "Missed tbf %d! Not on the timer chain?(n %d, p %d, tc %d)\n", i,
			filter_list[i].next, filter_list[i].prev, timer_chain);
		tbf_run_queue(i);
	}
#endif

	return 1;
}
313
314 int cmd_show_tbf(struct cli_def *cli, const char *command, char **argv, int argc)
315 {
316 int i;
317 int count = 0;
318
319 if (CLI_HELP_REQUESTED)
320 return CLI_HELP_NO_ARGS;
321
322 if (!config->cluster_iam_master) {
323 cli_error(cli, "Can't do this on a slave. Do it on %s",
324 fmtaddr(config->cluster_master_address, 0));
325
326 return CLI_OK;
327 }
328
329 if (!filter_list)
330 return CLI_OK;
331
332 cli_print(cli,"%6s %5s %5s %6s %6s | %7s %7s %8s %8s %8s %8s", "TBF#", "Sid", "Rate", "Credit", "Queued",
333 "ByteIn","PackIn","ByteSent","PackSent", "PackDrop", "PackDelay");
334
335 for (i = 1; i < filter_list_size; ++i) {
336 if (!filter_list[i].sid) // Is it used?
337 continue; // No.
338
339 cli_print(cli, "%5d%1s %5d %5d %6d %6d | %7d %7d %8d %8d %8d %8d",
340 i, (filter_list[i].next ? "*" : " "),
341 filter_list[i].sid,
342 filter_list[i].rate * 8,
343 filter_list[i].credit,
344 filter_list[i].queued,
345
346 filter_list[i].b_queued,
347 filter_list[i].p_queued,
348 filter_list[i].b_sent,
349 filter_list[i].p_sent,
350 filter_list[i].p_dropped,
351 filter_list[i].p_delayed);
352 ++count;
353 }
354 cli_print(cli, "%d tbf entries used, %d total", count, filter_list_size);
355 return CLI_OK;
356 }