Allow changing the source IP of the L2TP tunnels
[l2tpns.git] / cluster.c
1 // L2TPNS Clustering Stuff
2
3 #include <stdio.h>
4 #include <stdlib.h>
5 #include <stdarg.h>
6 #include <unistd.h>
7 #include <inttypes.h>
8 #include <sys/file.h>
9 #include <sys/stat.h>
10 #include <sys/socket.h>
11 #include <netinet/in.h>
12 #include <arpa/inet.h>
13 #include <sys/ioctl.h>
14 #include <net/if.h>
15 #include <string.h>
16 #include <malloc.h>
17 #include <errno.h>
18 #include <libcli.h>
19
20 #include "l2tpns.h"
21 #include "cluster.h"
22 #include "util.h"
23 #include "tbf.h"
24
25 #ifdef BGP
26 #include "bgp.h"
27 #endif
28 /*
29 * All cluster packets have the same format.
30 *
31 * One or more instances of
32 * a 32 bit 'type' id.
33 * a 32 bit 'extra' value, dependent on the 'type'.
34 * zero or more bytes of structure data, dependent on the type.
35 *
36 */
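// For illustration, a heartbeat built by cluster_heartbeat() below comes out
// as a sequence of (type, extra, payload) triples:
//
//	[C_HEARTBEAT][HB_VERSION ][heartt header]
//	[C_CSESSION ][session id ][RLE-compressed sessiont]
//	[C_CTUNNEL  ][tunnel id  ][RLE-compressed tunnelt]
//	...
//
// add_type() writes each triple; the 'extra' word carries an id, a version
// number or a port, depending on the type.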
37
38 // Module variables.
39 extern int cluster_sockfd; // The filedescriptor for the cluster communications port.
40
41 in_addr_t my_address = 0; // The IP address of our cluster interface.
42 static int walk_session_number = 0; // The next session to send when doing the slow table walk.
43 static int walk_bundle_number = 0; // The next bundle to send when doing the slow table walk.
44 static int walk_tunnel_number = 0; // The next tunnel to send when doing the slow table walk.
45 int forked = 0; // Sanity check: CLI must not diddle with heartbeat table
46
47 #define MAX_HEART_SIZE (8192) // Maximum size of heartbeat packet. Must be less than max IP packet size :)
48 #define MAX_CHANGES (MAX_HEART_SIZE/(sizeof(sessiont) + sizeof(int) ) - 2) // Assumes a session is the biggest type!
49
50 static struct {
51 int type;
52 int id;
53 } cluster_changes[MAX_CHANGES]; // Queue of changed structures that need to go out when next heartbeat.
54
55 static struct {
56 int seq;
57 int size;
58 uint8_t data[MAX_HEART_SIZE];
59 } past_hearts[HB_HISTORY_SIZE]; // Ring buffer of heartbeats that we've recently sent out. Kept so
60 // we can re-transmit them if a slave asks.
61
62 static struct {
63 in_addr_t peer;
64 uint32_t basetime;
65 clockt timestamp;
66 int uptodate;
67 } peers[CLUSTER_MAX_SIZE]; // List of all the peers we've heard from.
68 static int num_peers; // Number of peers in list.
69
70 static int rle_decompress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize);
71 static int rle_compress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize);
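// rle_compress()/rle_decompress() (defined later in this file) implement the
// simple run-length encoding used to shrink session/bundle/tunnel structures
// before they go into a heartbeat; hb_add_type() falls back to sending the
// raw structure whenever compression doesn't actually save space.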
72
73 //
74 // Create a listening socket
75 //
76 // This joins the cluster multi-cast group.
77 //
78 int cluster_init()
79 {
80 struct sockaddr_in addr;
81 struct sockaddr_in interface_addr;
82 struct ip_mreq mreq;
83 struct ifreq ifr;
84 int opt;
85
86 config->cluster_undefined_sessions = MAXSESSION-1;
87 config->cluster_undefined_bundles = MAXBUNDLE-1;
88 config->cluster_undefined_tunnels = MAXTUNNEL-1;
89
90 if (!config->cluster_address)
91 return 0;
92 if (!*config->cluster_interface)
93 return 0;
94
95 cluster_sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
96
97 memset(&addr, 0, sizeof(addr));
98 addr.sin_family = AF_INET;
99 addr.sin_port = htons(CLUSTERPORT);
100 addr.sin_addr.s_addr = INADDR_ANY;
101 opt = 1; setsockopt(cluster_sockfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)); // SO_REUSEADDR takes an int flag, not the sockaddr.
102
103 opt = fcntl(cluster_sockfd, F_GETFL, 0);
104 fcntl(cluster_sockfd, F_SETFL, opt | O_NONBLOCK);
105
106 if (bind(cluster_sockfd, (void *) &addr, sizeof(addr)) < 0)
107 {
108 LOG(0, 0, 0, "Failed to bind cluster socket: %s\n", strerror(errno));
109 return -1;
110 }
111
112 strcpy(ifr.ifr_name, config->cluster_interface);
113 if (ioctl(cluster_sockfd, SIOCGIFADDR, &ifr) < 0)
114 {
115 LOG(0, 0, 0, "Failed to get interface address for (%s): %s\n", config->cluster_interface, strerror(errno));
116 return -1;
117 }
118
119 memcpy(&interface_addr, &ifr.ifr_addr, sizeof(interface_addr));
120 my_address = interface_addr.sin_addr.s_addr;
121
122 // Join multicast group.
123 mreq.imr_multiaddr.s_addr = config->cluster_address;
124 mreq.imr_interface = interface_addr.sin_addr;
125
126
127 opt = 0; // Turn off multicast loopback.
128 setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_LOOP, &opt, sizeof(opt));
129
130 if (config->cluster_mcast_ttl != 1)
131 {
132 uint8_t ttl = 0;
133 if (config->cluster_mcast_ttl > 0)
134 ttl = config->cluster_mcast_ttl < 256 ? config->cluster_mcast_ttl : 255;
135
136 setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl));
137 }
138
139 if (setsockopt(cluster_sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
140 {
141 LOG(0, 0, 0, "Failed to setsockopt (join mcast group): %s\n", strerror(errno));
142 return -1;
143 }
144
145 if (setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_IF, &interface_addr, sizeof(interface_addr)) < 0)
146 {
147 LOG(0, 0, 0, "Failed to setsockopt (set mcast interface): %s\n", strerror(errno));
148 return -1;
149 }
150
151 config->cluster_last_hb = TIME;
152 config->cluster_seq_number = -1;
153
154 return cluster_sockfd;
155 }
156
157
158 //
159 // Send a chunk of data to the entire cluster (usually via the multicast
160 // address).
161 //
162
163 static int cluster_send_data(void *data, int datalen)
164 {
165 struct sockaddr_in addr = {0};
166
167 if (!cluster_sockfd) return -1;
168 if (!config->cluster_address) return 0;
169
170 addr.sin_addr.s_addr = config->cluster_address;
171 addr.sin_port = htons(CLUSTERPORT);
172 addr.sin_family = AF_INET;
173
174 LOG(5, 0, 0, "Cluster send data: %d bytes\n", datalen);
175
176 if (sendto(cluster_sockfd, data, datalen, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
177 {
178 LOG(0, 0, 0, "sendto: %s\n", strerror(errno));
179 return -1;
180 }
181
182 return 0;
183 }
184
185 //
186 // Add a chunk of data to a heartbeat packet.
187 // Maintains the format. Assumes that the caller
188 // has passed in a big enough buffer!
189 //
190 static void add_type(uint8_t **p, int type, int more, uint8_t *data, int size)
191 {
192 *((uint32_t *) (*p)) = type;
193 *p += sizeof(uint32_t);
194
195 *((uint32_t *)(*p)) = more;
196 *p += sizeof(uint32_t);
197
198 if (data && size > 0) {
199 memcpy(*p, data, size);
200 *p += size;
201 }
202 }
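// For example, cluster_send_ping() builds its whole packet with a single call:
//
//	add_type(&p, C_PING, basetime, (uint8_t *) &x, sizeof(x));
//
// leaving p just past the bytes written, so further elements can be appended
// to the same buffer.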
203
204 // advertise our presence via BGP or gratuitous ARP
205 static void advertise_routes(void)
206 {
207 #ifdef BGP
208 if (bgp_configured)
209 bgp_enable_routing(1);
210 else
211 #endif /* BGP */
212 if (config->send_garp)
213 send_garp(config->bind_address); // Start taking traffic.
214 }
215
216 // withdraw our routes (BGP only)
217 static void withdraw_routes(void)
218 {
219 #ifdef BGP
220 if (bgp_configured)
221 bgp_enable_routing(0);
222 #endif /* BGP */
223 }
224
225 static void cluster_uptodate(void)
226 {
227 if (config->cluster_iam_uptodate)
228 return;
229
230 if (config->cluster_undefined_sessions || config->cluster_undefined_tunnels || config->cluster_undefined_bundles)
231 return;
232
233 config->cluster_iam_uptodate = 1;
234
235 LOG(0, 0, 0, "Now uptodate with master.\n");
236 advertise_routes();
237 }
238
239 //
240 // Send a unicast UDP packet to a peer with 'data' as the
241 // contents.
242 //
243 static int peer_send_data(in_addr_t peer, uint8_t *data, int size)
244 {
245 struct sockaddr_in addr = {0};
246
247 if (!cluster_sockfd) return -1;
248 if (!config->cluster_address) return 0;
249
250 if (!peer) // Odd??
251 return -1;
252
253 addr.sin_addr.s_addr = peer;
254 addr.sin_port = htons(CLUSTERPORT);
255 addr.sin_family = AF_INET;
256
257 LOG_HEX(5, "Peer send", data, size);
258
259 if (sendto(cluster_sockfd, data, size, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
260 {
261 LOG(0, 0, 0, "sendto: %s\n", strerror(errno));
262 return -1;
263 }
264
265 return 0;
266 }
267
268 //
269 // Send a structured message to a peer with a single element of type 'type'.
270 //
271 static int peer_send_message(in_addr_t peer, int type, int more, uint8_t *data, int size)
272 {
273 uint8_t buf[65536]; // Vast overkill.
274 uint8_t *p = buf;
275
276 LOG(4, 0, 0, "Sending message to peer (type %d, more %d, size %d)\n", type, more, size);
277 add_type(&p, type, more, data, size);
278
279 return peer_send_data(peer, buf, (p-buf) );
280 }
281
282 // send a packet to the master
283 static int _forward_packet(uint8_t *data, int size, in_addr_t addr, int port, int type)
284 {
285 uint8_t buf[65536]; // Vast overkill.
286 uint8_t *p = buf;
287
288 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
289 return -1;
290
291 LOG(4, 0, 0, "Forwarding packet from %s to master (size %d)\n", fmtaddr(addr, 0), size);
292
293 STAT(c_forwarded);
294 add_type(&p, type, addr, (uint8_t *) &port, sizeof(port)); // ick. should be uint16_t
295 memcpy(p, data, size);
296 p += size;
297
298 return peer_send_data(config->cluster_master_address, buf, (p - buf));
299 }
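// On the wire a forwarded packet is [type][source address][source port as an
// int][original payload]; processcluster() on the master unpacks the port and
// feeds the payload to processudp()/processdae() as if it had arrived locally
// (see the C_FORWARD/C_FORWARD_DAE cases below).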
300
301 //
302 // Forward a state changing packet to the master.
303 //
304 // The master just processes the payload as if it had
305 // received it off the tun device.
306 //
307 int master_forward_packet(uint8_t *data, int size, in_addr_t addr, int port)
308 {
309 return _forward_packet(data, size, addr, port, C_FORWARD);
310 }
311
312 // Forward a DAE RADIUS packet to the master.
313 int master_forward_dae_packet(uint8_t *data, int size, in_addr_t addr, int port)
314 {
315 return _forward_packet(data, size, addr, port, C_FORWARD_DAE);
316 }
317
318 //
319 // Forward a throttled packet to the master for handling.
320 //
321 // The master just drops the packet into the appropriate
322 // token bucket queue, and lets normal processing take care
323 // of it.
324 //
325 int master_throttle_packet(int tbfid, uint8_t *data, int size)
326 {
327 uint8_t buf[65536]; // Vast overkill.
328 uint8_t *p = buf;
329
330 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
331 return -1;
332
333 LOG(4, 0, 0, "Throttling packet master (size %d, tbfid %d)\n", size, tbfid);
334
335 add_type(&p, C_THROTTLE, tbfid, data, size);
336
337 return peer_send_data(config->cluster_master_address, buf, (p-buf) );
338
339 }
340
341 //
342 // Forward a walled garden packet to the master for handling.
343 //
344 // The master just writes the packet straight to the tun
345 // device (where it will normally loop through the
346 // firewall rules, and come back in on the tun device)
347 //
348 // (Note that this must be called with the tun header
349 // as the start of the data).
350 int master_garden_packet(sessionidt s, uint8_t *data, int size)
351 {
352 uint8_t buf[65536]; // Vast overkill.
353 uint8_t *p = buf;
354
355 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
356 return -1;
357
358 LOG(4, 0, 0, "Walled garden packet to master (size %d)\n", size);
359
360 add_type(&p, C_GARDEN, s, data, size);
361
362 return peer_send_data(config->cluster_master_address, buf, (p-buf));
363
364 }
365
366 //
367 // Forward a MPPP packet to the master for handling.
368 //
369 // (Note that this must be called with the tun header
370 // as the start of the data).
371 // (i.e. this routine writes to data[-8]).
372 int master_forward_mppp_packet(sessionidt s, uint8_t *data, int size)
373 {
374 uint8_t *p = data - (2 * sizeof(uint32_t));
375 uint8_t *psave = p;
376
377 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
378 return -1;
379
380 LOG(4, 0, 0, "Forward MPPP packet to master (size %d)\n", size);
381
382 add_type(&p, C_MPPP_FORWARD, s, NULL, 0);
383
384 return peer_send_data(config->cluster_master_address, psave, size + (2 * sizeof(uint32_t)));
385
386 }
387
388 //
389 // Send a chunk of data as a heartbeat..
390 // We save it in the history buffer as we do so.
391 //
392 static void send_heartbeat(int seq, uint8_t *data, int size)
393 {
394 int i;
395
396 if (size > sizeof(past_hearts[0].data))
397 {
398 LOG(0, 0, 0, "Tried to heartbeat something larger than the maximum packet!\n");
399 kill(0, SIGTERM);
400 exit(1);
401 }
402 i = seq % HB_HISTORY_SIZE;
403 past_hearts[i].seq = seq;
404 past_hearts[i].size = size;
405 memcpy(&past_hearts[i].data, data, size); // Save it.
406 cluster_send_data(data, size);
407 }
408
409 //
410 // Send an 'i am alive' message to every machine in the cluster.
411 //
412 void cluster_send_ping(time_t basetime)
413 {
414 uint8_t buff[100 + sizeof(pingt)];
415 uint8_t *p = buff;
416 pingt x;
417
418 if (config->cluster_iam_master && basetime) // We're heartbeating so no need to ping.
419 return;
420
421 LOG(5, 0, 0, "Sending cluster ping...\n");
422
423 x.ver = 1;
424 x.addr = config->bind_address;
425 x.undef = config->cluster_undefined_sessions + config->cluster_undefined_tunnels + config->cluster_undefined_bundles;
426 x.basetime = basetime;
427
428 add_type(&p, C_PING, basetime, (uint8_t *) &x, sizeof(x));
429 cluster_send_data(buff, (p-buff) );
430 }
431
432 //
433 // Walk the session counters looking for non-zero ones to send
434 // to the master. We send up to 600 of them at one time.
435 // We examine at least 3000 sessions per call (more if the session table is larger).
436 // (A 50k session maximum should mean that we normally
437 // examine the entire session table every 25 seconds).
438
439 #define MAX_B_RECS (600)
440 void master_update_counts(void)
441 {
442 int i, c;
443 bytest b[MAX_B_RECS+1];
444
445 if (config->cluster_iam_master) // Only happens on the slaves.
446 return;
447
448 if (!config->cluster_master_address) // If we don't have a master, skip it for a while.
449 return;
450
451 i = MAX_B_RECS * 5; // Examine at least 3000 sessions.
452 if (config->cluster_highest_sessionid > i)
453 i = config->cluster_highest_sessionid;
454
455 for ( c = 0; i > 0 ; --i) {
456 // Next session to look at.
457 walk_session_number++;
458 if ( walk_session_number > config->cluster_highest_sessionid)
459 walk_session_number = 1;
460
461 if (!sess_local[walk_session_number].cin && !sess_local[walk_session_number].cout)
462 continue; // Unchanged. Skip it.
463
464 b[c].sid = walk_session_number;
465 b[c].pin = sess_local[walk_session_number].pin;
466 b[c].pout = sess_local[walk_session_number].pout;
467 b[c].cin = sess_local[walk_session_number].cin;
468 b[c].cout = sess_local[walk_session_number].cout;
469
470 // Reset counters.
471 sess_local[walk_session_number].pin = sess_local[walk_session_number].pout = 0;
472 sess_local[walk_session_number].cin = sess_local[walk_session_number].cout = 0;
473
474 if (++c > MAX_B_RECS) // Send a max of 600 elements in a packet.
475 break;
476 }
477
478 if (!c) // Didn't find any that changed. Get out of here!
479 return;
480
481
482 // Forward the data to the master.
483 LOG(4, 0, 0, "Sending byte counters to master (%d elements)\n", c);
484 peer_send_message(config->cluster_master_address, C_BYTES, c, (uint8_t *) &b, sizeof(b[0]) * c);
485 return;
486 }
487
488 //
489 // On the master, check how our slaves are going. If
490 // one of them's not up-to-date we'll heartbeat faster.
491 // If we don't have any of them, then we need to turn
492 // on our own packet handling!
493 //
494 void cluster_check_slaves(void)
495 {
496 int i;
497 static int have_peers = 0;
498 int had_peers = have_peers;
499 clockt t = TIME;
500
501 if (!config->cluster_iam_master)
502 return; // Only runs on the master...
503
504 config->cluster_iam_uptodate = 1; // cleared in loop below
505
506 for (i = have_peers = 0; i < num_peers; i++)
507 {
508 if ((peers[i].timestamp + config->cluster_hb_timeout) < t)
509 continue; // Stale peer! Skip them.
510
511 if (!peers[i].basetime)
512 continue; // Shutdown peer! Skip them.
513
514 if (peers[i].uptodate)
515 have_peers++;
516 else
517 config->cluster_iam_uptodate = 0; // Start fast heartbeats
518 }
519
520 // in a cluster, withdraw/add routes when we get a peer/lose peers
521 if (have_peers != had_peers)
522 {
523 if (had_peers < config->cluster_master_min_adv &&
524 have_peers >= config->cluster_master_min_adv)
525 withdraw_routes();
526
527 else if (had_peers >= config->cluster_master_min_adv &&
528 have_peers < config->cluster_master_min_adv)
529 advertise_routes();
530 }
531 }
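// In short: the master only attracts traffic (advertises routes, or sends
// gratuitous ARP) while it has fewer than cluster_master_min_adv up-to-date
// slaves; once enough slaves are ready it withdraws and lets them carry the
// load.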
532
533 //
534 // Check that we have a master. If it's been too
535 // long since we heard from a master then hold an election.
536 //
537 void cluster_check_master(void)
538 {
539 int i, count, high_unique_id = 0;
540 int last_free = 0;
541 clockt t = TIME;
542 static int probed = 0;
543 int have_peers;
544
545 if (config->cluster_iam_master)
546 return; // Only runs on the slaves...
547
548 // If the master is late (missed 2 heartbeats by a second and a
549 // hair), it may be that the switch has dropped us from the
550 // multicast group; try unicasting probes to the master,
551 // which will hopefully respond with a unicast heartbeat that
552 // will allow us to limp along until the querier next runs.
553 if (config->cluster_master_address
554 && TIME > (config->cluster_last_hb + 2 * config->cluster_hb_interval + 11))
555 {
556 if (!probed || (TIME > (probed + 2 * config->cluster_hb_interval)))
557 {
558 probed = TIME;
559 LOG(1, 0, 0, "Heartbeat from master %.1fs late, probing...\n",
560 0.1 * (TIME - (config->cluster_last_hb + config->cluster_hb_interval)));
561
562 peer_send_message(config->cluster_master_address,
563 C_LASTSEEN, config->cluster_seq_number, NULL, 0);
564 }
565 } else { // We got a recent heartbeat; reset the probe flag.
566 probed = 0;
567 }
568
569 if (TIME < (config->cluster_last_hb + config->cluster_hb_timeout))
570 return; // Everything's ok!
571
572 config->cluster_last_hb = TIME + 1; // Just the one election thanks.
573 config->cluster_master_address = 0;
574
575 LOG(0, 0, 0, "Master timed out! Holding election...\n");
576
577 // In the process of shutting down, can't be master
578 if (main_quit)
579 return;
580
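// Election rule (this must stay in step with the master-conflict checks in
// cluster_process_heartbeat): the live peer with the oldest basetime wins,
// and a tie on basetime goes to the numerically larger address. We only
// declare ourselves master if no live peer beats us on either count.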
581 for (i = have_peers = 0; i < num_peers; i++)
582 {
583 if ((peers[i].timestamp + config->cluster_hb_timeout) < t)
584 continue; // Stale peer! Skip them.
585
586 if (!peers[i].basetime)
587 continue; // Shutdown peer! Skip them.
588
589 if (peers[i].basetime < basetime) {
590 LOG(1, 0, 0, "Expecting %s to become master\n", fmtaddr(peers[i].peer, 0));
591 return; // They'll win the election. Get out of here.
592 }
593
594 if (peers[i].basetime == basetime &&
595 peers[i].peer > my_address) {
596 LOG(1, 0, 0, "Expecting %s to become master\n", fmtaddr(peers[i].peer, 0));
597 return; // They'll win the election. Wait for them to come up.
598 }
599
600 if (peers[i].uptodate)
601 have_peers++;
602 }
603
604 // Wow. It's been ages since I last heard a heartbeat,
605 // and I'm better than any of my peers, so it's time
606 // to become the master!!!
607
608 config->cluster_iam_master = 1;
609
610 LOG(0, 0, 0, "I am declaring myself the master!\n");
611
612 if (have_peers < config->cluster_master_min_adv)
613 advertise_routes();
614 else
615 withdraw_routes();
616
617 if (config->cluster_seq_number == -1)
618 config->cluster_seq_number = 0;
619
620 //
621 // Go through and mark all the tunnels as defined.
622 // Count the highest used tunnel number as well.
623 //
624 config->cluster_highest_tunnelid = 0;
625 for (i = 0; i < MAXTUNNEL; ++i) {
626 if (tunnel[i].state == TUNNELUNDEF)
627 tunnel[i].state = TUNNELFREE;
628
629 if (tunnel[i].state != TUNNELFREE && i > config->cluster_highest_tunnelid)
630 config->cluster_highest_tunnelid = i;
631 }
632
633 //
634 // Go through and mark all the bundles as defined.
635 // Count the highest used bundle number as well.
636 //
637 config->cluster_highest_bundleid = 0;
638 for (i = 0; i < MAXBUNDLE; ++i) {
639 if (bundle[i].state == BUNDLEUNDEF)
640 bundle[i].state = BUNDLEFREE;
641
642 if (bundle[i].state != BUNDLEFREE && i > config->cluster_highest_bundleid)
643 config->cluster_highest_bundleid = i;
644 }
645
646 //
647 // Go through and mark all the sessions as being defined.
648 // Reset the idle timeouts.
649 // Add temporary byte counters to permanent ones.
650 // Re-string the free list.
651 // Find the ID of the highest session.
652 last_free = 0;
653 high_unique_id = 0;
654 config->cluster_highest_sessionid = 0;
655 for (i = 0, count = 0; i < MAXSESSION; ++i) {
656 if (session[i].tunnel == T_UNDEF) {
657 session[i].tunnel = T_FREE;
658 ++count;
659 }
660
661 if (!session[i].opened) { // Unused session. Add to free list.
662 memset(&session[i], 0, sizeof(session[i]));
663 session[i].tunnel = T_FREE;
664 session[last_free].next = i;
665 session[i].next = 0;
666 last_free = i;
667 continue;
668 }
669
670 // Reset idle timeouts..
671 session[i].last_packet = session[i].last_data = time_now;
672
673 // Reset die relative to our uptime rather than the old master's
674 if (session[i].die) session[i].die = TIME;
675
676 // Accumulate un-sent byte/packet counters.
677 increment_counter(&session[i].cin, &session[i].cin_wrap, sess_local[i].cin);
678 increment_counter(&session[i].cout, &session[i].cout_wrap, sess_local[i].cout);
679 session[i].cin_delta += sess_local[i].cin;
680 session[i].cout_delta += sess_local[i].cout;
681
682 session[i].pin += sess_local[i].pin;
683 session[i].pout += sess_local[i].pout;
684
685 sess_local[i].cin = sess_local[i].cout = 0;
686 sess_local[i].pin = sess_local[i].pout = 0;
687
688 sess_local[i].radius = 0; // Reset authentication as the radius blocks aren't up to date.
689
690 if (session[i].unique_id >= high_unique_id) // This is different to the index into the session table!!!
691 high_unique_id = session[i].unique_id+1;
692
693 session[i].tbf_in = session[i].tbf_out = 0; // Remove stale pointers from old master.
694 throttle_session(i, session[i].throttle_in, session[i].throttle_out);
695
696 config->cluster_highest_sessionid = i;
697 }
698
699 session[last_free].next = 0; // End of chain.
700 last_id = high_unique_id; // Keep track of the highest used session ID.
701
702 become_master();
703
704 rebuild_address_pool();
705
706 // If we're not the very first master, this is a big issue!
707 if (count > 0)
708 LOG(0, 0, 0, "Warning: Fixed %d uninitialized sessions in becoming master!\n", count);
709
710 config->cluster_undefined_sessions = 0;
711 config->cluster_undefined_bundles = 0;
712 config->cluster_undefined_tunnels = 0;
713 config->cluster_iam_uptodate = 1; // assume all peers are up-to-date
714
715 // FIXME. We need to fix up the tunnel control message
716 // queue here! There's a number of other variables we
717 // should also update.
718 }
719
720
721 //
722 // Check that our session table matches what the
723 // master has in mind.
724 //
725 // In particular, if we have too many sessions marked 'undefined'
726 // we fix it up here, and we ensure that the 'first free session'
727 // pointer is valid.
728 //
729 static void cluster_check_sessions(int highsession, int freesession_ptr, int highbundle, int hightunnel)
730 {
731 int i;
732
733 sessionfree = freesession_ptr; // Keep the freesession ptr valid.
734
735 if (config->cluster_iam_uptodate)
736 return;
737
738 if (highsession > config->cluster_undefined_sessions && highbundle > config->cluster_undefined_bundles && hightunnel > config->cluster_undefined_tunnels)
739 return;
740
741 // Clear out defined sessions, counting the number of
742 // undefs remaining.
743 config->cluster_undefined_sessions = 0;
744 for (i = 1 ; i < MAXSESSION; ++i) {
745 if (i > highsession) {
746 if (session[i].tunnel == T_UNDEF) session[i].tunnel = T_FREE; // Defined.
747 continue;
748 }
749
750 if (session[i].tunnel == T_UNDEF)
751 ++config->cluster_undefined_sessions;
752 }
753
754 // Clear out defined bundles, counting the number of
755 // undefs remaining.
756 config->cluster_undefined_bundles = 0;
757 for (i = 1 ; i < MAXBUNDLE; ++i) {
758 if (i > highbundle) {
759 if (bundle[i].state == BUNDLEUNDEF) bundle[i].state = BUNDLEFREE; // Defined.
760 continue;
761 }
762
763 if (bundle[i].state == BUNDLEUNDEF)
764 ++config->cluster_undefined_bundles;
765 }
766
767 // Clear out defined tunnels, counting the number of
768 // undefs remaining.
769 config->cluster_undefined_tunnels = 0;
770 for (i = 1 ; i < MAXTUNNEL; ++i) {
771 if (i > hightunnel) {
772 if (tunnel[i].state == TUNNELUNDEF) tunnel[i].state = TUNNELFREE; // Defined.
773 continue;
774 }
775
776 if (tunnel[i].state == TUNNELUNDEF)
777 ++config->cluster_undefined_tunnels;
778 }
779
780
781 if (config->cluster_undefined_sessions || config->cluster_undefined_tunnels || config->cluster_undefined_bundles) {
782 LOG(2, 0, 0, "Cleared undefined sessions/bundles/tunnels. %d sess (high %d), %d bund (high %d), %d tunn (high %d)\n",
783 config->cluster_undefined_sessions, highsession, config->cluster_undefined_bundles, highbundle, config->cluster_undefined_tunnels, hightunnel);
784 return;
785 }
786
787 // Are we up to date?
788
789 if (!config->cluster_iam_uptodate)
790 cluster_uptodate();
791 }
792
793 static int hb_add_type(uint8_t **p, int type, int id)
794 {
795 switch (type) {
796 case C_CSESSION: { // Compressed C_SESSION.
797 uint8_t c[sizeof(sessiont) * 2]; // Bigger than worst case.
798 uint8_t *d = (uint8_t *) &session[id];
799 uint8_t *orig = d;
800 int size;
801
802 size = rle_compress( &d, sizeof(sessiont), c, sizeof(c) );
803
804 // Did we compress the full structure, and is the size actually
805 // reduced??
806 if ( (d - orig) == sizeof(sessiont) && size < sizeof(sessiont) ) {
807 add_type(p, C_CSESSION, id, c, size);
808 break;
809 }
810 // Failed to compress : Fall through.
811 }
812 case C_SESSION:
813 add_type(p, C_SESSION, id, (uint8_t *) &session[id], sizeof(sessiont));
814 break;
815
816 case C_CBUNDLE: { // Compressed C_BUNDLE
817 uint8_t c[sizeof(bundlet) * 2]; // Bigger than worst case.
818 uint8_t *d = (uint8_t *) &bundle[id];
819 uint8_t *orig = d;
820 int size;
821
822 size = rle_compress( &d, sizeof(bundlet), c, sizeof(c) );
823
824 // Did we compress the full structure, and is the size actually
825 // reduced??
826 if ( (d - orig) == sizeof(bundlet) && size < sizeof(bundlet) ) {
827 add_type(p, C_CBUNDLE, id, c, size);
828 break;
829 }
830 // Failed to compress : Fall through.
831 }
832
833 case C_BUNDLE:
834 add_type(p, C_BUNDLE, id, (uint8_t *) &bundle[id], sizeof(bundlet));
835 break;
836
837 case C_CTUNNEL: { // Compressed C_TUNNEL
838 uint8_t c[sizeof(tunnelt) * 2]; // Bigger than worst case.
839 uint8_t *d = (uint8_t *) &tunnel[id];
840 uint8_t *orig = d;
841 int size;
842
843 size = rle_compress( &d, sizeof(tunnelt), c, sizeof(c) );
844
845 // Did we compress the full structure, and is the size actually
846 // reduced??
847 if ( (d - orig) == sizeof(tunnelt) && size < sizeof(tunnelt) ) {
848 add_type(p, C_CTUNNEL, id, c, size);
849 break;
850 }
851 // Failed to compress : Fall through.
852 }
853 case C_TUNNEL:
854 add_type(p, C_TUNNEL, id, (uint8_t *) &tunnel[id], sizeof(tunnelt));
855 break;
856 default:
857 LOG(0, 0, 0, "Found an invalid type in heart queue! (%d)\n", type);
858 kill(0, SIGTERM);
859 exit(1);
860 }
861 return 0;
862 }
863
864 //
865 // Send a heartbeat, incidentally sending out any queued changes.
866 //
867 void cluster_heartbeat()
868 {
869 int i, count = 0, tcount = 0, bcount = 0;
870 uint8_t buff[MAX_HEART_SIZE + sizeof(heartt) + sizeof(int) ];
871 heartt h;
872 uint8_t *p = buff;
873
874 if (!config->cluster_iam_master) // Only the master does this.
875 return;
876
877 config->cluster_table_version += config->cluster_num_changes;
878
879 // Fill out the heartbeat header.
880 memset(&h, 0, sizeof(h));
881
882 h.version = HB_VERSION;
883 h.seq = config->cluster_seq_number;
884 h.basetime = basetime;
885 h.clusterid = config->bind_address; // Will this do??
886 h.basetime = basetime;
887 h.highsession = config->cluster_highest_sessionid;
888 h.freesession = sessionfree;
889 h.hightunnel = config->cluster_highest_tunnelid;
890 h.highbundle = config->cluster_highest_bundleid;
891 h.size_sess = sizeof(sessiont); // Just in case.
892 h.size_bund = sizeof(bundlet);
893 h.size_tunn = sizeof(tunnelt);
894 h.interval = config->cluster_hb_interval;
895 h.timeout = config->cluster_hb_timeout;
896 h.table_version = config->cluster_table_version;
897
898 add_type(&p, C_HEARTBEAT, HB_VERSION, (uint8_t *) &h, sizeof(h));
899
900 for (i = 0; i < config->cluster_num_changes; ++i) {
901 hb_add_type(&p, cluster_changes[i].type, cluster_changes[i].id);
902 }
903
904 if (p > (buff + sizeof(buff))) { // Did we somehow manage to overrun the buffer?
905 LOG(0, 0, 0, "FATAL: Overran the heartbeat buffer! This is fatal. Exiting. (size %d)\n", (int) (p - buff));
906 kill(0, SIGTERM);
907 exit(1);
908 }
909
910 //
911 // Fill out the packet with sessions from the session table...
912 // (not forgetting to leave space so we can get some tunnels in too)
913 while ( (p + sizeof(uint32_t) * 2 + sizeof(sessiont) * 2 ) < (buff + MAX_HEART_SIZE) ) {
914
915 if (!walk_session_number) // session #0 isn't valid.
916 ++walk_session_number;
917
918 if (count >= config->cluster_highest_sessionid) // If we're a small cluster, don't go wild.
919 break;
920
921 hb_add_type(&p, C_CSESSION, walk_session_number);
922 walk_session_number = (1+walk_session_number)%(config->cluster_highest_sessionid+1); // +1 avoids divide by zero.
923
924 ++count; // Count the number of extra sessions we're sending.
925 }
926
927 //
928 // Fill out the packet with tunnels from the tunnel table...
929 // This effectively means we walk the tunnel table more quickly
930 // than the session table. This is good because stuffing up a
931 // tunnel is a much bigger deal than stuffing up a session.
932 //
933 while ( (p + sizeof(uint32_t) * 2 + sizeof(tunnelt) ) < (buff + MAX_HEART_SIZE) ) {
934
935 if (!walk_tunnel_number) // tunnel #0 isn't valid.
936 ++walk_tunnel_number;
937
938 if (tcount >= config->cluster_highest_tunnelid)
939 break;
940
941 hb_add_type(&p, C_CTUNNEL, walk_tunnel_number);
942 walk_tunnel_number = (1+walk_tunnel_number)%(config->cluster_highest_tunnelid+1); // +1 avoids divide by zero.
943
944 ++tcount;
945 }
946
947 //
948 // Fill out the packet with bundles from the bundle table...
949 while ( (p + sizeof(uint32_t) * 2 + sizeof(bundlet) ) < (buff + MAX_HEART_SIZE) ) {
950
951 if (!walk_bundle_number) // bundle #0 isn't valid.
952 ++walk_bundle_number;
953
954 if (bcount >= config->cluster_highest_bundleid)
955 break;
956
957 hb_add_type(&p, C_CBUNDLE, walk_bundle_number);
958 walk_bundle_number = (1+walk_bundle_number)%(config->cluster_highest_bundleid+1); // +1 avoids divide by zero.
959 ++bcount;
960 }
961
962 //
963 // Did we do something wrong?
964 if (p > (buff + sizeof(buff))) { // Did we somehow manage to overrun the buffer?
965 LOG(0, 0, 0, "Overran the heartbeat buffer now! This is fatal. Exiting. (size %d)\n", (int) (p - buff));
966 kill(0, SIGTERM);
967 exit(1);
968 }
969
970 LOG(4, 0, 0, "Sending v%d heartbeat #%d, change #%" PRIu64 " with %d changes "
971 "(%d x-sess, %d x-bundles, %d x-tunnels, %d highsess, %d highbund, %d hightun, size %d)\n",
972 HB_VERSION, h.seq, h.table_version, config->cluster_num_changes,
973 count, bcount, tcount, config->cluster_highest_sessionid, config->cluster_highest_bundleid,
974 config->cluster_highest_tunnelid, (int) (p - buff));
975
976 config->cluster_num_changes = 0;
977
978 send_heartbeat(h.seq, buff, (p-buff) ); // Send out the heartbeat to the cluster, keeping a copy of it.
979
980 config->cluster_seq_number = (config->cluster_seq_number+1)%HB_MAX_SEQ; // Next seq number to use.
981 }
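// Sizing note: each element added above costs two 32-bit header words plus
// its (possibly RLE-compressed) payload. The fill loops stop while there is
// still room for a worst-case entry below MAX_HEART_SIZE, buff carries a
// little slack beyond that, and the overrun checks above are the final
// backstop before anything is sent.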
982
983 //
984 // A structure of type 'type' has changed; Add it to the queue to send.
985 //
986 static int type_changed(int type, int id)
987 {
988 int i;
989
990 for (i = 0 ; i < config->cluster_num_changes ; ++i)
991 if ( cluster_changes[i].id == id &&
992 cluster_changes[i].type == type)
993 return 0; // Already marked for change.
994
995 cluster_changes[i].type = type;
996 cluster_changes[i].id = id;
997 ++config->cluster_num_changes;
998
999 if (config->cluster_num_changes > MAX_CHANGES)
1000 cluster_heartbeat(); // flush now
1001
1002 return 1;
1003 }
1004
1005 // The deleted session must come before the new session in the change queue.
1006 int cluster_listinvert_session(int sidnew, int sidtodel)
1007 {
1008 int i, inew = 0;
1009
1010 for (i = 0 ; i < config->cluster_num_changes ; ++i)
1011 {
1012 if ( cluster_changes[i].id == sidtodel && cluster_changes[i].type == C_CSESSION)
1013 return 0; // Deleted session already before the new session.
1014
1015 if ( cluster_changes[i].id == sidnew && cluster_changes[i].type == C_CSESSION)
1016 {
1017 if (session[sidnew].tunnel != T_FREE)
1018 inew = i;
1019 else
1020 return 0; // This is a free session; nothing to invert.
1021
1022 break;
1023 }
1024 }
1025
1026 for ( ; i < config->cluster_num_changes ; ++i)
1027 {
1028 if ( cluster_changes[i].id == sidtodel && cluster_changes[i].type == C_CSESSION)
1029 {
1030 // Reverse position
1031 cluster_changes[i].id = sidnew;
1032 cluster_changes[inew].id = sidtodel;
1033 return 1;
1034 }
1035 }
1036
1037 return 0;
1038 }
1039
1040 // A particular session has been changed!
1041 int cluster_send_session(int sid)
1042 {
1043 if (!config->cluster_iam_master) {
1044 LOG(0, sid, 0, "I'm not a master, but I just tried to change a session!\n");
1045 return -1;
1046 }
1047
1048 if (forked) {
1049 LOG(0, sid, 0, "cluster_send_session called from child process!\n");
1050 return -1;
1051 }
1052
1053 return type_changed(C_CSESSION, sid);
1054 }
1055
1056 // A particular bundle has been changed!
1057 int cluster_send_bundle(int bid)
1058 {
1059 if (!config->cluster_iam_master) {
1060 LOG(0, 0, bid, "I'm not a master, but I just tried to change a bundle!\n");
1061 return -1;
1062 }
1063
1064 return type_changed(C_CBUNDLE, bid);
1065 }
1066
1067 // A particular tunnel has been changed!
1068 int cluster_send_tunnel(int tid)
1069 {
1070 if (!config->cluster_iam_master) {
1071 LOG(0, 0, tid, "I'm not a master, but I just tried to change a tunnel!\n");
1072 return -1;
1073 }
1074
1075 return type_changed(C_CTUNNEL, tid);
1076 }
1077
1078
1079 //
1080 // We're a master, and a slave has just told us that it's
1081 // missed a packet. We'll resend every packet since
1082 // the last one it has seen.
1083 //
1084 static int cluster_catchup_slave(int seq, in_addr_t slave)
1085 {
1086 int s;
1087 int diff;
1088
1089 LOG(1, 0, 0, "Slave %s sent LASTSEEN with seq %d\n", fmtaddr(slave, 0), seq);
1090 if (!config->cluster_iam_master) {
1091 LOG(1, 0, 0, "Got LASTSEEN but I'm not a master! Redirecting it to %s.\n",
1092 fmtaddr(config->cluster_master_address, 0));
1093
1094 peer_send_message(slave, C_MASTER, config->cluster_master_address, NULL, 0);
1095 return 0;
1096 }
1097
1098 diff = config->cluster_seq_number - seq; // How many packets do we need to send?
1099 if (diff < 0)
1100 diff += HB_MAX_SEQ;
1101
1102 if (diff >= HB_HISTORY_SIZE) { // Ouch. We don't have the packet to send it!
1103 LOG(0, 0, 0, "A slave asked for message %d when our seq number is %d. Killing it.\n",
1104 seq, config->cluster_seq_number);
1105 return peer_send_message(slave, C_KILL, seq, NULL, 0);// Kill the slave. Nothing else to do.
1106 }
1107
1108 LOG(1, 0, 0, "Sending %d catchup packets to slave %s\n", diff, fmtaddr(slave, 0) );
1109
1110 // Now resend every packet that it missed, in order.
1111 while (seq != config->cluster_seq_number) {
1112 s = seq % HB_HISTORY_SIZE;
1113 if (seq != past_hearts[s].seq) {
1114 LOG(0, 0, 0, "Tried to re-send heartbeat for %s but %d doesn't match %d! (%d,%d)\n",
1115 fmtaddr(slave, 0), seq, past_hearts[s].seq, s, config->cluster_seq_number);
1116 return -1; // What to do here!?
1117 }
1118 peer_send_data(slave, past_hearts[s].data, past_hearts[s].size);
1119 seq = (seq+1)%HB_MAX_SEQ; // Increment to next seq number.
1120 }
1121 return 0; // All good!
1122 }
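// The catch-up window is HB_HISTORY_SIZE heartbeats: a slave that has fallen
// further behind than that can no longer be replayed from past_hearts[], so
// it is sent C_KILL; it exits and has to be restarted to resynchronise from
// scratch.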
1123
1124 //
1125 // We've heard from another peer! Add it to the list
1126 // that we select from at election time.
1127 //
1128 static int cluster_add_peer(in_addr_t peer, time_t basetime, pingt *pp, int size)
1129 {
1130 int i;
1131 in_addr_t clusterid;
1132 pingt p;
1133
1134 // Allow for backward compatibility.
1135 // Copy the ping packet into a new structure to allow
1136 // for the possibility that we might have received
1137 // more or fewer elements than we were expecting.
1138 if (size > sizeof(p))
1139 size = sizeof(p);
1140
1141 memset( (void *) &p, 0, sizeof(p) );
1142 memcpy( (void *) &p, (void *) pp, size);
1143
1144 clusterid = p.addr;
1145 if (clusterid != config->bind_address)
1146 {
1147 // Is this for us?
1148 LOG(4, 0, 0, "Skipping ping from %s (different cluster)\n", fmtaddr(peer, 0));
1149 return 0;
1150 }
1151
1152 for (i = 0; i < num_peers ; ++i)
1153 {
1154 if (peers[i].peer != peer)
1155 continue;
1156
1157 // This peer already exists. Just update the timestamp.
1158 peers[i].basetime = basetime;
1159 peers[i].timestamp = TIME;
1160 peers[i].uptodate = !p.undef;
1161 break;
1162 }
1163
1164 // Is this the master shutting down (or restarting)?
1165 if (peer == config->cluster_master_address) {
1166 LOG(3, 0, 0, "Master %s %s\n", fmtaddr(config->cluster_master_address, 0),
1167 basetime ? "has restarted!" : "shutting down...");
1168
1169 config->cluster_master_address = 0;
1170 config->cluster_last_hb = 0; // Force an election.
1171 cluster_check_master();
1172 }
1173
1174 if (i >= num_peers)
1175 {
1176 LOG(4, 0, 0, "Adding %s as a peer\n", fmtaddr(peer, 0));
1177
1178 // Not found. Is there a stale slot to re-use?
1179 for (i = 0; i < num_peers ; ++i)
1180 {
1181 if (!peers[i].basetime) // Shutdown
1182 break;
1183
1184 if ((peers[i].timestamp + config->cluster_hb_timeout * 10) < TIME) // Stale.
1185 break;
1186 }
1187
1188 if (i >= CLUSTER_MAX_SIZE)
1189 {
1190 // Too many peers!!
1191 LOG(0, 0, 0, "Tried to add %s as a peer, but I already have %d of them!\n", fmtaddr(peer, 0), i);
1192 return -1;
1193 }
1194
1195 peers[i].peer = peer;
1196 peers[i].basetime = basetime;
1197 peers[i].timestamp = TIME;
1198 peers[i].uptodate = !p.undef;
1199 if (i == num_peers)
1200 ++num_peers;
1201
1202 LOG(1, 0, 0, "Added %s as a new peer. Now %d peers\n", fmtaddr(peer, 0), num_peers);
1203 }
1204
1205 return 1;
1206 }
1207
1208 // A slave responds with C_MASTER when it gets a message which should have gone to a master.
1209 static int cluster_set_master(in_addr_t peer, in_addr_t master)
1210 {
1211 if (config->cluster_iam_master) // Sanity...
1212 return 0;
1213
1214 LOG(3, 0, 0, "Peer %s set the master to %s...\n", fmtaddr(peer, 0),
1215 fmtaddr(master, 1));
1216
1217 config->cluster_master_address = master;
1218 if (master)
1219 {
1220 // catchup with new master
1221 peer_send_message(master, C_LASTSEEN, config->cluster_seq_number, NULL, 0);
1222
1223 // delay next election
1224 config->cluster_last_hb = TIME;
1225 }
1226
1227 // run election (or reset "probed" if master was set)
1228 cluster_check_master();
1229 return 0;
1230 }
1231
1232 /* Handle the slave updating the byte counters for the master. */
1233 //
1234 // Note that we don't mark the session as dirty; we rely on
1235 // the slow table walk to propagate this back out to the slaves.
1236 //
1237 static int cluster_handle_bytes(uint8_t *data, int size)
1238 {
1239 bytest *b;
1240
1241 b = (bytest *) data;
1242
1243 LOG(3, 0, 0, "Got byte counter update (size %d)\n", size);
1244
1245 /* Loop around, adding the byte
1246 counts to each of the sessions. */
1247
1248 while (size >= sizeof(*b) ) {
1249 if (b->sid >= MAXSESSION) {
1250 LOG(0, 0, 0, "Got C_BYTES with session #%d!\n", b->sid);
1251 return -1; /* Abort processing */
1252 }
1253
1254 session[b->sid].pin += b->pin;
1255 session[b->sid].pout += b->pout;
1256
1257 increment_counter(&session[b->sid].cin, &session[b->sid].cin_wrap, b->cin);
1258 increment_counter(&session[b->sid].cout, &session[b->sid].cout_wrap, b->cout);
1259
1260 session[b->sid].cin_delta += b->cin;
1261 session[b->sid].cout_delta += b->cout;
1262
1263 if (b->cin)
1264 session[b->sid].last_packet = session[b->sid].last_data = time_now;
1265 else if (b->cout)
1266 session[b->sid].last_data = time_now;
1267
1268 size -= sizeof(*b);
1269 ++b;
1270 }
1271
1272 if (size != 0)
1273 LOG(0, 0, 0, "Got C_BYTES with %d bytes of trailing junk!\n", size);
1274
1275 return size;
1276 }
1277
1278 //
1279 // Handle receiving a session structure in a heartbeat packet.
1280 //
1281 static int cluster_recv_session(int more, uint8_t *p)
1282 {
1283 if (more >= MAXSESSION) {
1284 LOG(0, 0, 0, "DANGER: Received a heartbeat session id > MAXSESSION!\n");
1285 return -1;
1286 }
1287
1288 if (session[more].tunnel == T_UNDEF) {
1289 if (config->cluster_iam_uptodate) { // Sanity.
1290 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined session!\n");
1291 } else {
1292 --config->cluster_undefined_sessions;
1293 }
1294 }
1295
1296 load_session(more, (sessiont *) p); // Copy session into session table..
1297
1298 LOG(5, more, 0, "Received session update (%d undef)\n", config->cluster_undefined_sessions);
1299
1300 if (!config->cluster_iam_uptodate)
1301 cluster_uptodate(); // Check to see if we're up to date.
1302
1303 return 0;
1304 }
1305
1306 static int cluster_recv_bundle(int more, uint8_t *p)
1307 {
1308 if (more >= MAXBUNDLE) {
1309 LOG(0, 0, 0, "DANGER: Received a bundle id > MAXBUNDLE!\n");
1310 return -1;
1311 }
1312
1313 if (bundle[more].state == BUNDLEUNDEF) {
1314 if (config->cluster_iam_uptodate) { // Sanity.
1315 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined bundle!\n");
1316 } else {
1317 --config->cluster_undefined_bundles;
1318 }
1319 }
1320
1321 memcpy(&bundle[more], p, sizeof(bundle[more]) );
1322
1323 LOG(5, 0, more, "Received bundle update\n");
1324
1325 if (!config->cluster_iam_uptodate)
1326 cluster_uptodate(); // Check to see if we're up to date.
1327
1328 return 0;
1329 }
1330
1331 static int cluster_recv_tunnel(int more, uint8_t *p)
1332 {
1333 if (more >= MAXTUNNEL) {
1334 LOG(0, 0, 0, "DANGER: Received a heartbeat tunnel id > MAXTUNNEL!\n");
1335 return -1;
1336 }
1337
1338 if (tunnel[more].state == TUNNELUNDEF) {
1339 if (config->cluster_iam_uptodate) { // Sanity.
1340 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined tunnel!\n");
1341 } else {
1342 --config->cluster_undefined_tunnels;
1343 }
1344 }
1345
1346 memcpy(&tunnel[more], p, sizeof(tunnel[more]) );
1347
1348 //
1349 // Clear tunnel control messages. These are dynamically allocated.
1350 // If we get unlucky, this may cause the tunnel to drop!
1351 //
1352 tunnel[more].controls = tunnel[more].controle = NULL;
1353 tunnel[more].controlc = 0;
1354
1355 LOG(5, 0, more, "Received tunnel update\n");
1356
1357 if (!config->cluster_iam_uptodate)
1358 cluster_uptodate(); // Check to see if we're up to date.
1359
1360 return 0;
1361 }
1362
1363
1364 // pre v6 heartbeat session structure
1365 struct oldsession {
1366 sessionidt next;
1367 sessionidt far;
1368 tunnelidt tunnel;
1369 uint8_t flags;
1370 struct {
1371 uint8_t phase;
1372 uint8_t lcp:4;
1373 uint8_t ipcp:4;
1374 uint8_t ipv6cp:4;
1375 uint8_t ccp:4;
1376 } ppp;
1377 char reserved_1[2];
1378 in_addr_t ip;
1379 int ip_pool_index;
1380 uint32_t unique_id;
1381 char reserved_2[4];
1382 uint32_t magic;
1383 uint32_t pin, pout;
1384 uint32_t cin, cout;
1385 uint32_t cin_wrap, cout_wrap;
1386 uint32_t cin_delta, cout_delta;
1387 uint16_t throttle_in;
1388 uint16_t throttle_out;
1389 uint8_t filter_in;
1390 uint8_t filter_out;
1391 uint16_t mru;
1392 clockt opened;
1393 clockt die;
1394 uint32_t session_timeout;
1395 uint32_t idle_timeout;
1396 time_t last_packet;
1397 time_t last_data;
1398 in_addr_t dns1, dns2;
1399 routet route[MAXROUTE];
1400 uint16_t tbf_in;
1401 uint16_t tbf_out;
1402 int random_vector_length;
1403 uint8_t random_vector[MAXTEL];
1404 char user[MAXUSER];
1405 char called[MAXTEL];
1406 char calling[MAXTEL];
1407 uint32_t tx_connect_speed;
1408 uint32_t rx_connect_speed;
1409 clockt timeout;
1410 uint32_t mrru;
1411 uint8_t mssf;
1412 epdist epdis;
1413 bundleidt bundle;
1414 in_addr_t snoop_ip;
1415 uint16_t snoop_port;
1416 uint8_t walled_garden;
1417 uint8_t ipv6prefixlen;
1418 struct in6_addr ipv6route;
1419 char reserved_3[11];
1420 };
1421
1422 static uint8_t *convert_session(struct oldsession *old)
1423 {
1424 static sessiont new;
1425 int i;
1426
1427 memset(&new, 0, sizeof(new));
1428
1429 new.next = old->next;
1430 new.far = old->far;
1431 new.tunnel = old->tunnel;
1432 new.flags = old->flags;
1433 new.ppp.phase = old->ppp.phase;
1434 new.ppp.lcp = old->ppp.lcp;
1435 new.ppp.ipcp = old->ppp.ipcp;
1436 new.ppp.ipv6cp = old->ppp.ipv6cp;
1437 new.ppp.ccp = old->ppp.ccp;
1438 new.ip = old->ip;
1439 new.ip_pool_index = old->ip_pool_index;
1440 new.unique_id = old->unique_id;
1441 new.magic = old->magic;
1442 new.pin = old->pin;
1443 new.pout = old->pout;
1444 new.cin = old->cin;
1445 new.cout = old->cout;
1446 new.cin_wrap = old->cin_wrap;
1447 new.cout_wrap = old->cout_wrap;
1448 new.cin_delta = old->cin_delta;
1449 new.cout_delta = old->cout_delta;
1450 new.throttle_in = old->throttle_in;
1451 new.throttle_out = old->throttle_out;
1452 new.filter_in = old->filter_in;
1453 new.filter_out = old->filter_out;
1454 new.mru = old->mru;
1455 new.opened = old->opened;
1456 new.die = old->die;
1457 new.session_timeout = old->session_timeout;
1458 new.idle_timeout = old->idle_timeout;
1459 new.last_packet = old->last_packet;
1460 new.last_data = old->last_data;
1461 new.dns1 = old->dns1;
1462 new.dns2 = old->dns2;
1463 new.tbf_in = old->tbf_in;
1464 new.tbf_out = old->tbf_out;
1465 new.random_vector_length = old->random_vector_length;
1466 new.tx_connect_speed = old->tx_connect_speed;
1467 new.rx_connect_speed = old->rx_connect_speed;
1468 new.timeout = old->timeout;
1469 new.mrru = old->mrru;
1470 new.mssf = old->mssf;
1471 new.epdis = old->epdis;
1472 new.bundle = old->bundle;
1473 new.snoop_ip = old->snoop_ip;
1474 new.snoop_port = old->snoop_port;
1475 new.walled_garden = old->walled_garden;
1476 new.ipv6prefixlen = old->ipv6prefixlen;
1477 new.ipv6route = old->ipv6route;
1478
1479 memcpy(new.random_vector, old->random_vector, sizeof(new.random_vector));
1480 memcpy(new.user, old->user, sizeof(new.user));
1481 memcpy(new.called, old->called, sizeof(new.called));
1482 memcpy(new.calling, old->calling, sizeof(new.calling));
1483
1484 for (i = 0; i < MAXROUTE; i++)
1485 memcpy(&new.route[i], &old->route[i], sizeof(new.route[i]));
1486
1487 return (uint8_t *) &new;
1488 }
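// Fields that only exist in the current sessiont (for example the RADIUS
// class attribute added with heartbeat v6) aren't present in the old
// structure, so they are simply left zeroed by the memset above.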
1489
1490 //
1491 // Process a heartbeat..
1492 //
1493 // v6: added RADIUS class attribute, re-ordered session structure
1494 // v7: added tunnelt attribute at the end of struct (tunnelt size change)
1495 static int cluster_process_heartbeat(uint8_t *data, int size, int more, uint8_t *p, in_addr_t addr)
1496 {
1497 heartt *h;
1498 int s = size - (p-data);
1499 int i, type;
1500 int hb_ver = more;
1501
1502 #ifdef LAC
1503 #if HB_VERSION != 7
1504 # error "need to update cluster_process_heartbeat()"
1505 #endif
1506 #else
1507 #if HB_VERSION != 6
1508 # error "need to update cluster_process_heartbeat()"
1509 #endif
1510 #endif
1511
1512
1513 // we handle versions 5 through HB_VERSION
1514 if (hb_ver < 5 || hb_ver > HB_VERSION) {
1515 LOG(0, 0, 0, "Received a heartbeat version that I don't support (%d)!\n", hb_ver);
1516 return -1; // Ignore it??
1517 }
1518
1519 if (size > sizeof(past_hearts[0].data)) {
1520 LOG(0, 0, 0, "Received an oversize heartbeat from %s (%d)!\n", fmtaddr(addr, 0), size);
1521 return -1;
1522 }
1523
1524 if (s < sizeof(*h))
1525 goto shortpacket;
1526
1527 h = (heartt *) p;
1528 p += sizeof(*h);
1529 s -= sizeof(*h);
1530
1531 if (h->clusterid != config->bind_address)
1532 return -1; // It's not part of our cluster.
1533
1534 if (config->cluster_iam_master) { // Sanity...
1535 // Note that this MUST match the election process above!
1536
1537 LOG(0, 0, 0, "I just got a heartbeat from master %s, but _I_ am the master!\n", fmtaddr(addr, 0));
1538 if (!h->basetime) {
1539 LOG(0, 0, 0, "Heartbeat with zero basetime! Ignoring\n");
1540 return -1; // Skip it.
1541 }
1542
1543 if (h->table_version > config->cluster_table_version) {
1544 LOG(0, 0, 0, "They've seen more state changes (%" PRIu64 " vs my %" PRIu64 ") so I'm gone!\n",
1545 h->table_version, config->cluster_table_version);
1546
1547 kill(0, SIGTERM);
1548 exit(1);
1549 }
1550
1551 if (h->table_version < config->cluster_table_version)
1552 return -1;
1553
1554 if (basetime > h->basetime) {
1555 LOG(0, 0, 0, "They're an older master than me so I'm gone!\n");
1556 kill(0, SIGTERM);
1557 exit(1);
1558 }
1559
1560 if (basetime < h->basetime)
1561 return -1;
1562
1563 if (my_address < addr) { // Tie breaker.
1564 LOG(0, 0, 0, "They're a higher IP address than me, so I'm gone!\n");
1565 kill(0, SIGTERM);
1566 exit(1);
1567 }
1568
1569 //
1570 // Send it a unicast heartbeat to give it a chance to die.
1571 // NOTE: It's actually safe to do seq-number - 1 without checking
1572 // for wrap around.
1573 //
1574 cluster_catchup_slave(config->cluster_seq_number - 1, addr);
1575
1576 return -1; // Skip it.
1577 }
1578
1579 //
1580 // Try and guard against a stray master appearing.
1581 //
1582 // Ignore heartbeats received from another master before the
1583 // timeout (less a smidgen) for the old master has elapsed.
1584 //
1585 // Note that after a clean failover, the cluster_master_address
1586 // is cleared, so this doesn't run.
1587 //
1588 if (config->cluster_master_address && addr != config->cluster_master_address) {
1589 LOG(0, 0, 0, "Ignoring stray heartbeat from %s, current master %s has not yet timed out (last heartbeat %.1f seconds ago).\n",
1590 fmtaddr(addr, 0), fmtaddr(config->cluster_master_address, 1),
1591 0.1 * (TIME - config->cluster_last_hb));
1592 return -1; // ignore
1593 }
1594
1595 if (config->cluster_seq_number == -1) // Don't have one. Just align to the master...
1596 config->cluster_seq_number = h->seq;
1597
1598 config->cluster_last_hb = TIME; // Reset to ensure that we don't become master!!
1599 config->cluster_last_hb_ver = hb_ver; // remember what cluster version the master is using
1600
1601 if (config->cluster_seq_number != h->seq) { // Out of sequence heartbeat!
1602 static int lastseen_seq = 0;
1603 static time_t lastseen_time = 0;
1604
1605 // limit to once per second for a particular seq#
1606 int ask = (config->cluster_seq_number != lastseen_seq || time_now != lastseen_time);
1607
1608 LOG(1, 0, 0, "HB: Got seq# %d but was expecting %d. %s.\n",
1609 h->seq, config->cluster_seq_number,
1610 ask ? "Asking for resend" : "Ignoring");
1611
1612 if (ask)
1613 {
1614 lastseen_seq = config->cluster_seq_number;
1615 lastseen_time = time_now;
1616 peer_send_message(addr, C_LASTSEEN, config->cluster_seq_number, NULL, 0);
1617 }
1618
1619 config->cluster_last_hb = TIME; // Reset to ensure that we don't become master!!
1620
1621 // Just drop the packet. The master will resend it as part of the catchup.
1622
1623 return 0;
1624 }
1625 // Save the packet in our buffer.
1626 // This is needed in case we become the master.
1627 config->cluster_seq_number = (h->seq+1)%HB_MAX_SEQ;
1628 i = h->seq % HB_HISTORY_SIZE;
1629 past_hearts[i].seq = h->seq;
1630 past_hearts[i].size = size;
1631 memcpy(&past_hearts[i].data, data, size); // Save it.
1632
1633
1634 // Check that we don't have too many undefined sessions, and
1635 // that the free session pointer is correct.
1636 cluster_check_sessions(h->highsession, h->freesession, h->highbundle, h->hightunnel);
1637
1638 if (h->interval != config->cluster_hb_interval)
1639 {
1640 LOG(2, 0, 0, "Master set ping/heartbeat interval to %u (was %u)\n",
1641 h->interval, config->cluster_hb_interval);
1642
1643 config->cluster_hb_interval = h->interval;
1644 }
1645
1646 if (h->timeout != config->cluster_hb_timeout)
1647 {
1648 LOG(2, 0, 0, "Master set heartbeat timeout to %u (was %u)\n",
1649 h->timeout, config->cluster_hb_timeout);
1650
1651 config->cluster_hb_timeout = h->timeout;
1652 }
1653
1654 // Ok. process the packet...
1655 while ( s > 0) {
1656
1657 type = *((uint32_t *) p);
1658 p += sizeof(uint32_t);
1659 s -= sizeof(uint32_t);
1660
1661 more = *((uint32_t *) p);
1662 p += sizeof(uint32_t);
1663 s -= sizeof(uint32_t);
1664
1665 switch (type) {
1666 case C_CSESSION: { // Compressed session structure.
1667 uint8_t c[ sizeof(sessiont) + 2];
1668 int size;
1669 uint8_t *orig_p = p;
1670
1671 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c) );
1672 s -= (p - orig_p);
1673
1674 // session struct changed with v6; convert older (pre-v6) sessions
1675 if (hb_ver < 6)
1676 {
1677 if (size != sizeof(struct oldsession)) {
1678 LOG(0, 0, 0, "DANGER: Received a v%d CSESSION that didn't decompress correctly!\n", hb_ver);
1679 // Now what? Should exit! No-longer up to date!
1680 break;
1681 }
1682 cluster_recv_session(more, convert_session((struct oldsession *) c));
1683 break;
1684 }
1685
1686 if (size != sizeof(sessiont) ) { // Ouch! Very very bad!
1687 LOG(0, 0, 0, "DANGER: Received a CSESSION that didn't decompress correctly!\n");
1688 // Now what? Should exit! No-longer up to date!
1689 break;
1690 }
1691
1692 cluster_recv_session(more, c);
1693 break;
1694 }
1695 case C_SESSION:
1696 if (hb_ver < 6)
1697 {
1698 if (s < sizeof(struct oldsession))
1699 goto shortpacket;
1700
1701 cluster_recv_session(more, convert_session((struct oldsession *) p));
1702
1703 p += sizeof(struct oldsession);
1704 s -= sizeof(struct oldsession);
1705 break;
1706 }
1707
1708 if ( s < sizeof(session[more]))
1709 goto shortpacket;
1710
1711 cluster_recv_session(more, p);
1712
1713 p += sizeof(session[more]);
1714 s -= sizeof(session[more]);
1715 break;
1716
1717 case C_CTUNNEL: { // Compressed tunnel structure.
1718 uint8_t c[ sizeof(tunnelt) + 2];
1719 int size;
1720 uint8_t *orig_p = p;
1721
1722 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c));
1723 s -= (p - orig_p);
1724
1725 #ifdef LAC
1726 if ( ((hb_ver >= HB_VERSION) && (size != sizeof(tunnelt))) ||
1727 ((hb_ver < HB_VERSION) && (size > sizeof(tunnelt))) )
1728 #else
1729 if (size != sizeof(tunnelt) )
1730 #endif
1731 { // Ouch! Very very bad!
1732 LOG(0, 0, 0, "DANGER: Received a CTUNNEL that didn't decompress correctly!\n");
1733 // Now what? Should exit! No-longer up to date!
1734 break;
1735 }
1736
1737 cluster_recv_tunnel(more, c);
1738 break;
1739
1740 }
1741 case C_TUNNEL:
1742 if ( s < sizeof(tunnel[more]))
1743 goto shortpacket;
1744
1745 cluster_recv_tunnel(more, p);
1746
1747 p += sizeof(tunnel[more]);
1748 s -= sizeof(tunnel[more]);
1749 break;
1750
1751 case C_CBUNDLE: { // Compressed bundle structure.
1752 uint8_t c[ sizeof(bundlet) + 2];
1753 int size;
1754 uint8_t *orig_p = p;
1755
1756 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c));
1757 s -= (p - orig_p);
1758
1759 if (size != sizeof(bundlet) ) { // Ouch! Very very bad!
1760 LOG(0, 0, 0, "DANGER: Received a CBUNDLE that didn't decompress correctly!\n");
1761 // Now what? Should exit! No-longer up to date!
1762 break;
1763 }
1764
1765 cluster_recv_bundle(more, c);
1766 break;
1767
1768 }
1769 case C_BUNDLE:
1770 if ( s < sizeof(bundle[more]))
1771 goto shortpacket;
1772
1773 cluster_recv_bundle(more, p);
1774
1775 p += sizeof(bundle[more]);
1776 s -= sizeof(bundle[more]);
1777 break;
1778 default:
1779 LOG(0, 0, 0, "DANGER: I received a heartbeat element where I didn't understand the type! (%d)\n", type);
1780 return -1; // can't process any more of the packet!!
1781 }
1782 }
1783
1784 if (config->cluster_master_address != addr)
1785 {
1786 LOG(0, 0, 0, "My master just changed from %s to %s!\n",
1787 fmtaddr(config->cluster_master_address, 0), fmtaddr(addr, 1));
1788
1789 config->cluster_master_address = addr;
1790 }
1791
1792 config->cluster_last_hb = TIME; // Successfully received a heartbeat!
1793 config->cluster_table_version = h->table_version;
1794 return 0;
1795
1796 shortpacket:
1797 LOG(0, 0, 0, "I got an incomplete heartbeat packet! This means I'm probably out of sync!!\n");
1798 return -1;
1799 }
1800
1801 //
1802 // We got a packet on the cluster port!
1803 // Handle pings, lastseens, and heartbeats!
1804 //
1805 int processcluster(uint8_t *data, int size, in_addr_t addr)
1806 {
1807 int type, more;
1808 uint8_t *p = data;
1809 int s = size;
1810
1811 if (addr == my_address)
1812 return -1; // Ignore it. Something looped back the multicast!
1813
1814 LOG(5, 0, 0, "Process cluster: %d bytes from %s\n", size, fmtaddr(addr, 0));
1815
1816 if (s <= 0) // Any data there??
1817 return -1;
1818
1819 if (s < 8)
1820 goto shortpacket;
1821
1822 type = *((uint32_t *) p);
1823 p += sizeof(uint32_t);
1824 s -= sizeof(uint32_t);
1825
1826 more = *((uint32_t *) p);
1827 p += sizeof(uint32_t);
1828 s -= sizeof(uint32_t);
1829
1830 switch (type)
1831 {
1832 case C_PING: // Update the peers table.
1833 return cluster_add_peer(addr, more, (pingt *) p, s);
1834
1835 case C_MASTER: // Our master is wrong
1836 return cluster_set_master(addr, more);
1837
1838 case C_LASTSEEN: // Catch up a slave (slave missed a packet).
1839 return cluster_catchup_slave(more, addr);
1840
1841 case C_FORWARD: // Forwarded control packet. pass off to processudp.
1842 case C_FORWARD_DAE: // Forwarded DAE packet. pass off to processdae.
1843 if (!config->cluster_iam_master)
1844 {
1845 LOG(0, 0, 0, "I'm not the master, but I got a C_FORWARD%s from %s?\n",
1846 type == C_FORWARD_DAE ? "_DAE" : "", fmtaddr(addr, 0));
1847
1848 return -1;
1849 }
1850 else
1851 {
1852 struct sockaddr_in a;
1853 a.sin_addr.s_addr = more;
1854
1855 a.sin_port = *(int *) p;
1856 s -= sizeof(int);
1857 p += sizeof(int);
1858
1859 LOG(4, 0, 0, "Got a forwarded %spacket... (%s:%d)\n",
1860 type == C_FORWARD_DAE ? "DAE " : "", fmtaddr(more, 0), a.sin_port);
1861
1862 STAT(recv_forward);
1863 if (type == C_FORWARD_DAE)
1864 {
1865 struct in_addr local;
1866 local.s_addr = config->bind_address ? config->bind_address : my_address;
1867 processdae(p, s, &a, sizeof(a), &local);
1868 }
1869 else
1870 processudp(p, s, &a);
1871
1872 return 0;
1873 }
1874
1875 case C_MPPP_FORWARD:
1876 // Receive a MPPP packet from a slave.
1877 if (!config->cluster_iam_master) {
1878 LOG(0, 0, 0, "I'm not the master, but I got a C_MPPP_FORWARD from %s?\n", fmtaddr(addr, 0));
1879 return -1;
1880 }
1881
1882 processipout(p, s);
1883 return 0;
1884
1885 case C_THROTTLE: { // Receive a forwarded packet from a slave.
1886 if (!config->cluster_iam_master) {
1887 LOG(0, 0, 0, "I'm not the master, but I got a C_THROTTLE from %s?\n", fmtaddr(addr, 0));
1888 return -1;
1889 }
1890
1891 tbf_queue_packet(more, p, s); // The TBF id tells whether it goes in or out.
1892 return 0;
1893 }
1894 case C_GARDEN:
1895 // Receive a walled garden packet from a slave.
1896 if (!config->cluster_iam_master) {
1897 LOG(0, 0, 0, "I'm not the master, but I got a C_GARDEN from %s?\n", fmtaddr(addr, 0));
1898 return -1;
1899 }
1900
1901 tun_write(p, s);
1902 return 0;
1903
1904 case C_BYTES:
1905 if (!config->cluster_iam_master) {
1906 LOG(0, 0, 0, "I'm not the master, but I got a C_BYTES from %s?\n", fmtaddr(addr, 0));
1907 return -1;
1908 }
1909
1910 return cluster_handle_bytes(p, s);
1911
1912 case C_KILL: // The master asked us to die!? (usually because we're too out of date).
1913 if (config->cluster_iam_master) {
1914 LOG(0, 0, 0, "_I_ am master, but I received a C_KILL from %s! (Seq# %d)\n", fmtaddr(addr, 0), more);
1915 return -1;
1916 }
1917 if (more != config->cluster_seq_number) {
1918 LOG(0, 0, 0, "The master asked us to die but the seq number didn't match!?\n");
1919 return -1;
1920 }
1921
1922 if (addr != config->cluster_master_address) {
1923 LOG(0, 0, 0, "Received a C_KILL from %s which doesn't match config->cluster_master_address (%s)\n",
1924 fmtaddr(addr, 0), fmtaddr(config->cluster_master_address, 1));
1925 // We can only warn about it. The master might really have switched!
1926 }
1927
1928 LOG(0, 0, 0, "Received a valid C_KILL: I'm going to die now.\n");
1929 kill(0, SIGTERM);
1930 exit(0); // Let's be paranoid.
1931 return -1; // Just to keep the compiler happy.
1932
1933 case C_HEARTBEAT:
1934 LOG(4, 0, 0, "Got a heartbeat from %s\n", fmtaddr(addr, 0));
1935 return cluster_process_heartbeat(data, size, more, p, addr);
1936
1937 default:
1938 LOG(0, 0, 0, "Strange type packet received on cluster socket (%d)\n", type);
1939 return -1;
1940 }
1941 return 0;
1942
1943 shortpacket:
1944 LOG(0, 0, 0, "I got a _short_ cluster packet! This means I'm probably out of sync!!\n");
1945 return -1;
1946 }
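//
// Illustration only (inferred from the C_FORWARD handling above, not a
// definitive sender-side layout): a slave relaying an L2TP control packet
// up to the master would have laid the datagram out as
//
//	uint32_t type;		// C_FORWARD (or C_FORWARD_DAE)
//	uint32_t more;		// the remote peer's IP address
//	int      port;		// read back above with *(int *) p
//	uint8_t  payload[];	// the raw UDP packet passed to processudp()/processdae()
//
// The matching sender is not shown here; this layout is only what the
// receiver-side parsing above implies.
//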
1947
1948 //====================================================================================================
1949
1950 int cmd_show_cluster(struct cli_def *cli, char *command, char **argv, int argc)
1951 {
1952 int i;
1953
1954 if (CLI_HELP_REQUESTED)
1955 return CLI_HELP_NO_ARGS;
1956
1957 cli_print(cli, "Cluster status : %s", config->cluster_iam_master ? "Master" : "Slave" );
1958 cli_print(cli, "My address : %s", fmtaddr(my_address, 0));
1959 cli_print(cli, "VIP address : %s", fmtaddr(config->bind_address, 0));
1960 cli_print(cli, "Multicast address: %s", fmtaddr(config->cluster_address, 0));
1961 cli_print(cli, "Multicast i'face : %s", config->cluster_interface);
1962
1963 if (!config->cluster_iam_master) {
1964 cli_print(cli, "My master : %s (last heartbeat %.1f seconds old)",
1965 config->cluster_master_address
1966 ? fmtaddr(config->cluster_master_address, 0)
1967 : "Not defined",
1968 0.1 * (TIME - config->cluster_last_hb));
1969 cli_print(cli, "Uptodate : %s", config->cluster_iam_uptodate ? "Yes" : "No");
1970 cli_print(cli, "Table version # : %" PRIu64, config->cluster_table_version);
1971 cli_print(cli, "Next sequence number expected: %d", config->cluster_seq_number);
1972 cli_print(cli, "%d sessions undefined of %d", config->cluster_undefined_sessions, config->cluster_highest_sessionid);
1973 cli_print(cli, "%d bundles undefined of %d", config->cluster_undefined_bundles, config->cluster_highest_bundleid);
1974 cli_print(cli, "%d tunnels undefined of %d", config->cluster_undefined_tunnels, config->cluster_highest_tunnelid);
1975 } else {
1976 cli_print(cli, "Table version # : %" PRIu64, config->cluster_table_version);
1977 cli_print(cli, "Next heartbeat # : %d", config->cluster_seq_number);
1978 cli_print(cli, "Highest session : %d", config->cluster_highest_sessionid);
1979 cli_print(cli, "Highest bundle : %d", config->cluster_highest_bundleid);
1980 cli_print(cli, "Highest tunnel : %d", config->cluster_highest_tunnelid);
1981 cli_print(cli, "%d changes queued for sending", config->cluster_num_changes);
1982 }
1983 cli_print(cli, "%d peers.", num_peers);
1984
1985 if (num_peers)
1986 cli_print(cli, "%20s %10s %8s", "Address", "Basetime", "Age");
1987 for (i = 0; i < num_peers; ++i) {
1988 cli_print(cli, "%20s %10u %8d", fmtaddr(peers[i].peer, 0),
1989 peers[i].basetime, TIME - peers[i].timestamp);
1990 }
1991 return CLI_OK;
1992 }
1993
1994 //
1995 // Simple run-length-encoding compression.
1996 // Format is
1997 // 1 byte < 128 = count of non-zero bytes following. // Not legal to be zero.
1998 // n non-zero bytes;
1999 // or
2000 // 1 byte > 128 = (count - 128) run of zero bytes. //
2001 // repeat.
2002 // count == 0 indicates end of compressed stream.
2003 //
2004 // Compress from '*src_p' into 'dst'. Returns the number of
2005 // bytes used in 'dst'.
2006 // Updates *src_p to point one past the last source byte consumed.
2007 //
2008 // We could get an extra byte in the zero runs by storing (count-1)
2009 // but I'm playing it safe.
2010 //
2011 // Worst case is a 50% expansion in space required (trying to
2012 // compress { 0x00, 0x01 } * N )
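//
// Worked example (illustrative only, following the format above):
// the 8-byte input
//
//	05 06 00 00 00 07 00 00
//
// compresses to
//
//	02 05 06	run of 2 non-zero bytes
//	83		run of 3 zero bytes (0x80 | 3)
//	01 07		run of 1 non-zero byte
//	82		run of 2 zero bytes
//	00		stop byte
//
// The pathological { 0x00, 0x01 } input encodes every 2-byte pair as the
// 3-byte triple { 81 01 01 }, which is the 50% expansion quoted above.
//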
2013 static int rle_compress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize)
2014 {
2015 int count;
2016 int orig_dsize = dsize;
2017 uint8_t *x, *src;
2018 src = *src_p;
2019
2020 while (ssize > 0 && dsize > 2) {
2021 count = 0;
2022 x = dst++; --dsize; // Reserve space for count byte..
2023
2024 if (*src) { // Copy a run of non-zero bytes.
2025 while (ssize > 0 && *src && count < 127 && dsize > 1) { // Count non-zero bytes; test ssize before dereferencing src.
2026 *dst++ = *src++;
2027 --dsize; --ssize;
2028 ++count;
2029 }
2030 *x = count; // Store number of non-zero bytes. Guaranteed to be non-zero!
2031
2032 } else { // Compress a run of zero bytes.
2033 while (ssize > 0 && *src == 0 && count < 127) { // Count zero bytes; test ssize before dereferencing src.
2034 ++src;
2035 --ssize;
2036 ++count;
2037 }
2038 *x = count | 0x80 ;
2039 }
2040 }
2041
2042 *dst++ = 0x0; // Add Stop byte.
2043 --dsize;
2044
2045 *src_p = src;
2046 return (orig_dsize - dsize);
2047 }
2048
2049 //
2050 // Decompress from '*src_p' into 'dst'.
2051 // 'dsize' is the size of the decompression buffer available.
2052 //
2053 // Returns the number of bytes decompressed
2054 // (i.e. the number of 'dst' bytes used).
2055 //
2056 // Updates the '*src_p' pointer to point to the
2057 // first unused source byte.
2058 //
2059 static int rle_decompress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize)
2060 {
2061 int count;
2062 int orig_dsize = dsize;
2063 uint8_t *src = *src_p;
2064
2065 while (ssize > 0 && dsize > 0) { // While there's more to decompress, and there's room in the decompress buffer...
2066 count = *src++; --ssize; // get the count byte from the source.
2067 if (count == 0x0) // End marker reached? If so, finish.
2068 break;
2069
2070 if (count & 0x80) { // Decompress a run of zeros
2071 for (count &= 0x7f ; count > 0 && dsize > 0; --count) {
2072 *dst++ = 0x0;
2073 --dsize;
2074 }
2075 } else { // Copy run of non-zero bytes.
2076 for ( ; count > 0 && ssize && dsize; --count) { // Copy non-zero bytes across.
2077 *dst++ = *src++;
2078 --ssize; --dsize;
2079 }
2080 }
2081 }
2082 *src_p = src;
2083 return (orig_dsize - dsize);
2084 }
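
#if 0
// Round-trip sketch, excluded from the build: shows how rle_compress()
// and rle_decompress() pair up.  Buffer sizes are assumptions based on
// the 50% worst-case expansion and the "+ 2" slack used by the callers
// above, not values taken from any real call site.
static void rle_roundtrip_example(void)
{
	uint8_t in[256] = { 0 };		// stand-in for a table entry; mostly zeros
	uint8_t comp[256 + 128 + 1];		// worst-case expansion plus the stop byte
	uint8_t out[256 + 2];			// decompression buffer with a little slack
	uint8_t *src = in;
	uint8_t *cp = comp;
	int clen, dlen;

	clen = rle_compress(&src, sizeof(in), comp, sizeof(comp));	// src now points past the consumed input
	dlen = rle_decompress(&cp, clen, out, sizeof(out));		// cp now points past the consumed compressed data

	// On success dlen == sizeof(in) and out matches in byte-for-byte.
	LOG(5, 0, 0, "rle round trip: %d in, %d compressed, %d out\n",
		(int) sizeof(in), clen, dlen);
}
#endif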