Fix service_name management and add pppoe_only_equal_svc_name parameter
[l2tpns.git] / tbf.c
1 // L2TPNS: token bucket filters
2
3 #include <string.h>
4 #include "l2tpns.h"
5 #include "util.h"
6 #include "tbf.h"
7
8 tbft *filter_list = NULL;
9 static int filter_list_size = 0;
10
11 static int timer_chain = -1; // Head of timer chain.
12
13 static void tbf_run_queue(int tbf_id);
14
15 void init_tbf(int num_tbfs)
16 {
17 if (!(filter_list = shared_malloc(sizeof(*filter_list) * num_tbfs)))
18 return;
19
20 filter_list_size = num_tbfs;
21 filter_list[0].sid = -1; // Reserved.
22 }
23 //
24 // Put a TBF on the timer list.
25 // This is a doubly linked list..
26 // We put ourselves on the tail of the list.
27 //
28 static void add_to_timer(int id)
29 {
30 if (!filter_list)
31 return;
32
33 if (timer_chain == -1) {
34 filter_list[id].next = filter_list[id].prev = id;
35 timer_chain = id;
36 return;
37 }
38
39 filter_list[id].next = timer_chain;
40 filter_list[id].prev = filter_list[timer_chain].prev;
41 filter_list[filter_list[timer_chain].prev].next = id;
42 filter_list[timer_chain].prev = id;
43 }
44
45 //
46 // Remove a TBF from the timer list.
47 // This is a doubly linked list.
48 static void del_from_timer(int id)
49 {
50 if (!filter_list)
51 return;
52
53 if (filter_list[id].next == id) { // Last element in chain?
54 if (timer_chain != id) { // WTF?
55 LOG(0, 0, 0, "Removed a singleton element from TBF, but tc didn't point to it!\n");
56 } else
57 timer_chain = -1;
58 filter_list[id].next = filter_list[id].prev = 0;
59 return;
60 }
61
62 filter_list[filter_list[id].next].prev = filter_list[id].prev;
63 filter_list[filter_list[id].prev].next = filter_list[id].next;
64 if (timer_chain == id)
65 timer_chain = filter_list[id].next;
66
67 filter_list[id].next = filter_list[id].prev = 0; // Mark as off the timer chain.
68 }
69
70 //
71 // Free a token bucket filter structure for re-use.
72 //
73
74 int free_tbf(int tid)
75 {
76 if (tid < 1) // Make sure we don't free id # 0
77 return -1;
78
79 if (!filter_list) // WTF?
80 return -1;
81
82 if (filter_list[tid].next)
83 del_from_timer(tid);
84 filter_list[tid].sid = 0;
85
86 return 0; // Done!
87 }
88
89 //
90 // Allocate a new token bucket filter.
91 //
92 int new_tbf(int sid, int max_credit, int rate, void (*f)(sessionidt, uint8_t *, int))
93 {
94 int i;
95 static int p = 0;
96
97 LOG(4, 0, 0, "Allocating new TBF (sess %d, rate %d, helper %p)\n", sid, rate, f);
98
99 if (!filter_list)
100 return 0; // Couldn't alloc memory!
101
102 for (i = 0 ; i < filter_list_size ; ++i, p = (p+1)%filter_list_size ) {
103 if (filter_list[p].sid)
104 continue;
105
106 memset((void*) &filter_list[p], 0, sizeof(filter_list[p]) ); // Clear counters and data.
107 filter_list[p].sid = sid;
108 filter_list[p].credit = max_credit;
109 filter_list[p].queued = 0;
110 filter_list[p].max_credit = max_credit;
111 filter_list[p].rate = rate;
112 filter_list[p].oldest = 0;
113 filter_list[p].send = f;
114 return p;
115 }
116
117 LOG(0, 0, 0, "Ran out of token bucket filters! Sess %d will be un-throttled\n", sid);
118 return 0;
119 }
120
//
// Sanity check all the TBF records. This is
// typically done when we become a master..
//
// Pass 1 releases any in-use filter that its own session no longer
// references; pass 2 forces each referenced filter's sid to agree
// with the session that points at it.
//
void fsck_tbfs(void)
{
	int i , sid;

	if (!filter_list)
		return;

	// Pass 1: walk the pool (slot 0 is reserved) and free orphans.
	for (i = 1; i < filter_list_size; ++i) {
		if (!filter_list[i].sid) // Is it used??
			continue;

		sid = filter_list[i].sid;
		// NOTE(review): assumes sid is a valid index into session[]
		// whenever it is non-zero — confirm against the session table
		// invariants maintained elsewhere.
		if (i != session[sid].tbf_in &&
			i != session[sid].tbf_out) { // Ooops.

			free_tbf(i); // Mark it as free...
		}
	}

	// Pass 2: walk the sessions and repair mismatched back-pointers
	// by overwriting the filter's sid with the owning session id.
	for (i = 0; i < config->cluster_highest_sessionid ; ++i) {
		if (session[i].tbf_in && filter_list[session[i].tbf_in].sid != i) {
			filter_list[session[i].tbf_in].sid = i; // Ouch!? FIXME. What to do here?
		}
		if (session[i].tbf_out && filter_list[session[i].tbf_out].sid != i) {
			filter_list[session[i].tbf_out].sid = i; // Ouch!? FIXME. What to do here?
		}
	}
}
153
154
155 //
156 // Run a packet through a token bucket filter.
157 // If we can send it right away, we do. Else we
158 // try and queue it to send later. Else we drop it.
159 //
160 int tbf_queue_packet(int tbf_id, uint8_t *data, int size)
161 {
162 int i;
163 tbft *f;
164
165 if (!filter_list)
166 return -1;
167
168 if (tbf_id > filter_list_size || tbf_id < 1) { // Out of range ID??
169 // Very bad. Just drop it.
170 return -1;
171 }
172
173 f = &filter_list[tbf_id];
174
175 if (!f->sid) // Is this a real structure??
176 return -1;
177
178 tbf_run_queue(tbf_id); // Caculate credit and send any queued packets if possible..
179
180 f->b_queued += size;
181 f->p_queued ++;
182
183 if (!f->queued && f->credit > size) { // If the queue is empty, and we have
184 // enough credit, just send it now.
185 f->credit -= size;
186 if (f->send) {
187 f->send(f->sid, data, size);
188 f->b_sent += size;
189 f->p_sent ++;
190 } else {
191 f->b_dropped += size;
192 f->p_dropped ++;
193 }
194 return size;
195 }
196
197 // Not enough credit. Can we have room in the queue?
198 if (f->queued >= TBF_MAX_QUEUE) {
199 f->p_dropped ++;
200 f->b_dropped += size;
201 return -1; // No, just drop it.
202 }
203
204 // Is it too big to fit into a queue slot?
205 if (size >= TBF_MAX_SIZE) {
206 f->p_dropped ++;
207 f->b_dropped += size;
208 return -1; // Yes, just drop it.
209 }
210
211 // Ok. We have a slot, and it's big enough to
212 // contain the packet, so queue the packet!
213 i = ( f->oldest + f->queued ) % TBF_MAX_QUEUE;
214 memcpy(f->packets[i], data, size);
215
216 f->sizes[i] = size;
217 f->queued ++;
218 f->p_delayed ++;
219
220 if (!f->next) // Are we off the timer chain?
221 add_to_timer(tbf_id); // Put ourselves on the timer chain.
222
223 return 0; // All done.
224 }
225
226 //
227 // Send queued packets from the filter if possible.
228 // (We're normally only called if this is possible.. )
229 static void tbf_run_queue(int tbf_id)
230 {
231 tbft * f;
232
233 if (!filter_list)
234 return;
235
236 f = &filter_list[tbf_id];
237
238 // Calculate available credit...
239 f->credit += (TIME - f->lasttime) * f->rate / 10; // current time is 1/10th of a second.
240 if (f->credit > f->max_credit)
241 f->credit = f->max_credit;
242 f->lasttime = TIME;
243
244 while (f->queued > 0 && f->credit >= f->sizes[f->oldest]) { // While we have enough credit..
245
246 if (f->send) {
247 f->send(f->sid, f->packets[f->oldest], f->sizes[f->oldest]);
248 f->b_sent += f->sizes[f->oldest];
249 f->p_sent ++;
250 } else {
251 f->b_dropped += f->sizes[f->oldest];
252 f->p_dropped ++;
253 }
254
255 f->credit -= f->sizes[f->oldest];
256
257 f->oldest = (f->oldest + 1 ) % TBF_MAX_QUEUE;
258 f->queued--; // One less queued packet..
259 }
260
261 if (f->queued) // Still more to do. Hang around on the timer list.
262 return;
263
264 if (f->next) // Are we on the timer list??
265 del_from_timer(tbf_id); // Nothing more to do. Get off the timer list.
266 }
267
//
// Periodically walk the timer list..
//
// Runs every filter still holding queued packets.  Returns 1 if the
// chain had entries to process, 0 if there was nothing to do.
//
int tbf_run_timer(void)
{
	int i = timer_chain;
	int count = filter_list_size + 1; // Safety check.
	int last = -1;
	int tbf_id; // structure being processed.

	if (timer_chain < 0)
		return 0; // Nothing to do...

	if (!filter_list) // No structures built yet.
		return 0;

	last = filter_list[i].prev; // last element to process.

	do {
		tbf_id = i;
		i = filter_list[i].next; // Get the next in the queue.

		// NOTE: tbf_run_queue() may unlink tbf_id (or reset
		// timer_chain entirely); the guards below tolerate the
		// chain mutating under us, and "count" bounds the walk.
		tbf_run_queue(tbf_id); // Run the timer queue..
	} while ( timer_chain > 0 && i && tbf_id != last && --count > 0);


#if 0 // Debugging.
	for (i = 0; i < filter_list_size; ++i) {
		if (!filter_list[i].next)
			continue;
		if (filter_list[i].lasttime == TIME) // Did we just run it?
			continue;

		LOG(1, 0, 0, "Missed tbf %d! Not on the timer chain?(n %d, p %d, tc %d)\n", i,
			filter_list[i].next, filter_list[i].prev, timer_chain);
		tbf_run_queue(i);
	}
#endif

	return 1;
}
309
310 int cmd_show_tbf(struct cli_def *cli, char *command, char **argv, int argc)
311 {
312 int i;
313 int count = 0;
314
315 if (CLI_HELP_REQUESTED)
316 return CLI_HELP_NO_ARGS;
317
318 if (!config->cluster_iam_master) {
319 cli_error(cli, "Can't do this on a slave. Do it on %s",
320 fmtaddr(config->cluster_master_address, 0));
321
322 return CLI_OK;
323 }
324
325 if (!filter_list)
326 return CLI_OK;
327
328 cli_print(cli,"%6s %5s %5s %6s %6s | %7s %7s %8s %8s %8s %8s", "TBF#", "Sid", "Rate", "Credit", "Queued",
329 "ByteIn","PackIn","ByteSent","PackSent", "PackDrop", "PackDelay");
330
331 for (i = 1; i < filter_list_size; ++i) {
332 if (!filter_list[i].sid) // Is it used?
333 continue; // No.
334
335 cli_print(cli, "%5d%1s %5d %5d %6d %6d | %7d %7d %8d %8d %8d %8d",
336 i, (filter_list[i].next ? "*" : " "),
337 filter_list[i].sid,
338 filter_list[i].rate * 8,
339 filter_list[i].credit,
340 filter_list[i].queued,
341
342 filter_list[i].b_queued,
343 filter_list[i].p_queued,
344 filter_list[i].b_sent,
345 filter_list[i].p_sent,
346 filter_list[i].p_dropped,
347 filter_list[i].p_delayed);
348 ++count;
349 }
350 cli_print(cli, "%d tbf entries used, %d total", count, filter_list_size);
351 return CLI_OK;
352 }