1 // L2TPNS Clustering Stuff
2
3 #include <stdio.h>
4 #include <stdlib.h>
5 #include <stdarg.h>
6 #include <unistd.h>
7 #include <inttypes.h>
8 #include <sys/file.h>
9 #include <sys/stat.h>
10 #include <sys/socket.h>
11 #include <netinet/in.h>
12 #include <arpa/inet.h>
13 #include <sys/ioctl.h>
14 #include <net/if.h>
15 #include <string.h>
16 #include <malloc.h>
17 #include <errno.h>
18 #include <libcli.h>
19
20 #include "l2tpns.h"
21 #include "cluster.h"
22 #include "util.h"
23 #include "tbf.h"
24
25 #ifdef BGP
26 #include "bgp.h"
27 #endif
28 /*
29 * All cluster packets have the same format.
30 *
31 * One or more instances of
32 * a 32 bit 'type' id.
33 * a 32 bit 'extra' data dependent on the 'type'.
34 * zero or more bytes of structure data, dependent on the type.
35 *
36 */
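//
// A minimal sketch (not used anywhere in this file) of how add_type() below
// frames a single element; the real code assembles the two words and the
// payload by hand rather than through a struct, so this is illustration only.
//
struct cluster_element_sketch {
	uint32_t type;		// C_PING, C_HEARTBEAT, C_CSESSION, ...
	uint32_t more;		// type-dependent 'extra' word (e.g. a session id or basetime)
	uint8_t data[];		// zero or more bytes of type-dependent payload
};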
37
38 // Module variables.
39 extern int cluster_sockfd; // The filedescriptor for the cluster communications port.
40
41 in_addr_t my_address = 0; // The network address of my ethernet port.
42 static int walk_session_number = 0; // The next session to send when doing the slow table walk.
43 static int walk_bundle_number = 0; // The next bundle to send when doing the slow table walk.
44 static int walk_tunnel_number = 0; // The next tunnel to send when doing the slow table walk.
45 int forked = 0; // Sanity check: CLI must not diddle with heartbeat table
46
47 #define MAX_HEART_SIZE (8192) // Maximum size of heartbeat packet. Must be less than max IP packet size :)
48 #define MAX_CHANGES (MAX_HEART_SIZE/(sizeof(sessiont) + sizeof(int) ) - 2) // Assumes a session is the biggest type!
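// Rough feel for the value above (illustrative numbers only; the real figure
// depends on sizeof(sessiont) for your build): with the 8192-byte heartbeat
// and a session structure of about 1600 bytes, MAX_CHANGES works out to
// roughly 8192/1604 - 2 = 3 queued changes before type_changed() forces an
// early heartbeat to flush the queue.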
49
50 static struct {
51 int type;
52 int id;
53 } cluster_changes[MAX_CHANGES]; // Queue of changed structures that need to go out when next heartbeat.
54
55 static struct {
56 int seq;
57 int size;
58 uint8_t data[MAX_HEART_SIZE];
59 } past_hearts[HB_HISTORY_SIZE]; // Ring buffer of heartbeats that we've recently sent out. Needed so
60 // we can re-transmit if needed.
61
62 static struct {
63 in_addr_t peer;
64 uint32_t basetime;
65 clockt timestamp;
66 int uptodate;
67 } peers[CLUSTER_MAX_SIZE]; // List of all the peers we've heard from.
68 static int num_peers; // Number of peers in list.
69
70 static int rle_decompress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize);
71 static int rle_compress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize);
72
73 //
74 // Create a listening socket
75 //
76 // This joins the cluster multi-cast group.
77 //
78 int cluster_init()
79 {
80 struct sockaddr_in addr;
81 struct sockaddr_in interface_addr;
82 struct ip_mreq mreq;
83 struct ifreq ifr;
84 int opt;
85
86 config->cluster_undefined_sessions = MAXSESSION-1;
87 config->cluster_undefined_bundles = MAXBUNDLE-1;
88 config->cluster_undefined_tunnels = MAXTUNNEL-1;
89
90 if (!config->cluster_address)
91 return 0;
92 if (!*config->cluster_interface)
93 return 0;
94
95 cluster_sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
96
97 memset(&addr, 0, sizeof(addr));
98 addr.sin_family = AF_INET;
99 addr.sin_port = htons(CLUSTERPORT);
100 addr.sin_addr.s_addr = INADDR_ANY;
101 opt = 1; setsockopt(cluster_sockfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)); // enable address reuse (previously passed &addr here, which only worked by accident)
102
103 opt = fcntl(cluster_sockfd, F_GETFL, 0);
104 fcntl(cluster_sockfd, F_SETFL, opt | O_NONBLOCK);
105
106 if (bind(cluster_sockfd, (void *) &addr, sizeof(addr)) < 0)
107 {
108 LOG(0, 0, 0, "Failed to bind cluster socket: %s\n", strerror(errno));
109 return -1;
110 }
111
112 strcpy(ifr.ifr_name, config->cluster_interface);
113 if (ioctl(cluster_sockfd, SIOCGIFADDR, &ifr) < 0)
114 {
115 LOG(0, 0, 0, "Failed to get interface address for (%s): %s\n", config->cluster_interface, strerror(errno));
116 return -1;
117 }
118
119 memcpy(&interface_addr, &ifr.ifr_addr, sizeof(interface_addr));
120 my_address = interface_addr.sin_addr.s_addr;
121
122 // Join multicast group.
123 mreq.imr_multiaddr.s_addr = config->cluster_address;
124 mreq.imr_interface = interface_addr.sin_addr;
125
126
127 opt = 0; // Turn off multicast loopback.
128 setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_LOOP, &opt, sizeof(opt));
129
130 if (config->cluster_mcast_ttl != 1)
131 {
132 uint8_t ttl = 0;
133 if (config->cluster_mcast_ttl > 0)
134 ttl = config->cluster_mcast_ttl < 256 ? config->cluster_mcast_ttl : 255;
135
136 setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl));
137 }
138
139 if (setsockopt(cluster_sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
140 {
141 LOG(0, 0, 0, "Failed to setsockopt (join mcast group): %s\n", strerror(errno));
142 return -1;
143 }
144
145 if (setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_IF, &interface_addr, sizeof(interface_addr)) < 0)
146 {
147 LOG(0, 0, 0, "Failed to setsockopt (set mcast interface): %s\n", strerror(errno));
148 return -1;
149 }
150
151 config->cluster_last_hb = TIME;
152 config->cluster_seq_number = -1;
153
154 return cluster_sockfd;
155 }
156
157
158 //
159 // Send a chunk of data to the entire cluster (usually via the multicast
160 // address ).
161 //
162
163 static int cluster_send_data(void *data, int datalen)
164 {
165 struct sockaddr_in addr = {0};
166
167 if (!cluster_sockfd) return -1;
168 if (!config->cluster_address) return 0;
169
170 addr.sin_addr.s_addr = config->cluster_address;
171 addr.sin_port = htons(CLUSTERPORT);
172 addr.sin_family = AF_INET;
173
174 LOG(5, 0, 0, "Cluster send data: %d bytes\n", datalen);
175
176 if (sendto(cluster_sockfd, data, datalen, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
177 {
178 LOG(0, 0, 0, "sendto: %s\n", strerror(errno));
179 return -1;
180 }
181
182 return 0;
183 }
184
185 //
186 // Add a chunk of data to a heartbeat packet.
187 // Maintains the format. Assumes that the caller
188 // has passed in a big enough buffer!
189 //
190 static void add_type(uint8_t **p, int type, int more, uint8_t *data, int size)
191 {
192 *((uint32_t *) (*p)) = type;
193 *p += sizeof(uint32_t);
194
195 *((uint32_t *)(*p)) = more;
196 *p += sizeof(uint32_t);
197
198 if (data && size > 0) {
199 memcpy(*p, data, size);
200 *p += size;
201 }
202 }
203
204 // advertise our presence via BGP or gratuitous ARP
205 static void advertise_routes(void)
206 {
207 #ifdef BGP
208 if (bgp_configured)
209 bgp_enable_routing(1);
210 else
211 #endif /* BGP */
212 if (config->send_garp)
213 send_garp(config->bind_address); // Start taking traffic.
214 }
215
216 // withdraw our routes (BGP only)
217 static void withdraw_routes(void)
218 {
219 #ifdef BGP
220 if (bgp_configured)
221 bgp_enable_routing(0);
222 #endif /* BGP */
223 }
224
225 static void cluster_uptodate(void)
226 {
227 if (config->cluster_iam_uptodate)
228 return;
229
230 if (config->cluster_undefined_sessions || config->cluster_undefined_tunnels || config->cluster_undefined_bundles)
231 return;
232
233 config->cluster_iam_uptodate = 1;
234
235 LOG(0, 0, 0, "Now uptodate with master.\n");
236 advertise_routes();
237 }
238
239 //
240 // Send a unicast UDP packet to a peer with 'data' as the
241 // contents.
242 //
243 static int peer_send_data(in_addr_t peer, uint8_t *data, int size)
244 {
245 struct sockaddr_in addr = {0};
246
247 if (!cluster_sockfd) return -1;
248 if (!config->cluster_address) return 0;
249
250 if (!peer) // Odd??
251 return -1;
252
253 addr.sin_addr.s_addr = peer;
254 addr.sin_port = htons(CLUSTERPORT);
255 addr.sin_family = AF_INET;
256
257 LOG_HEX(5, "Peer send", data, size);
258
259 if (sendto(cluster_sockfd, data, size, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
260 {
261 LOG(0, 0, 0, "sendto: %s\n", strerror(errno));
262 return -1;
263 }
264
265 return 0;
266 }
267
268 //
269 // Send a structured message to a peer with a single element of type 'type'.
270 //
271 static int peer_send_message(in_addr_t peer, int type, int more, uint8_t *data, int size)
272 {
273 uint8_t buf[65536]; // Vast overkill.
274 uint8_t *p = buf;
275
276 LOG(4, 0, 0, "Sending message to peer (type %d, more %d, size %d)\n", type, more, size);
277 add_type(&p, type, more, data, size);
278
279 return peer_send_data(peer, buf, (p-buf) );
280 }
281
282 // send a packet to the master
283 static int _forward_packet(uint8_t *data, int size, in_addr_t addr, int port, int type)
284 {
285 uint8_t buf[65536]; // Vast overkill.
286 uint8_t *p = buf;
287
288 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
289 return -1;
290
291 LOG(4, 0, 0, "Forwarding packet from %s to master (size %d)\n", fmtaddr(addr, 0), size);
292
293 STAT(c_forwarded);
294 add_type(&p, type, addr, (uint8_t *) &port, sizeof(port)); // ick. should be uint16_t
295 memcpy(p, data, size);
296 p += size;
297
298 return peer_send_data(config->cluster_master_address, buf, (p - buf));
299 }
300
301 //
302 // Forward a state changing packet to the master.
303 //
304 // The master just processes the payload as if it had
305 // received it off the tun device.
306 //
307 int master_forward_packet(uint8_t *data, int size, in_addr_t addr, int port)
308 {
309 return _forward_packet(data, size, addr, port, C_FORWARD);
310 }
311
312 // Forward a DAE RADIUS packet to the master.
313 int master_forward_dae_packet(uint8_t *data, int size, in_addr_t addr, int port)
314 {
315 return _forward_packet(data, size, addr, port, C_FORWARD_DAE);
316 }
317
318 //
319 // Forward a throttled packet to the master for handling.
320 //
321 // The master just drops the packet into the appropriate
322 // token bucket queue, and lets normal processing take care
323 // of it.
324 //
325 int master_throttle_packet(int tbfid, uint8_t *data, int size)
326 {
327 uint8_t buf[65536]; // Vast overkill.
328 uint8_t *p = buf;
329
330 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
331 return -1;
332
333 LOG(4, 0, 0, "Throttling packet master (size %d, tbfid %d)\n", size, tbfid);
334
335 add_type(&p, C_THROTTLE, tbfid, data, size);
336
337 return peer_send_data(config->cluster_master_address, buf, (p-buf) );
338
339 }
340
341 //
342 // Forward a walled garden packet to the master for handling.
343 //
344 // The master just writes the packet straight to the tun
345 // device (where it will normally loop through the
346 // firewall rules, and come back in on the tun device)
347 //
348 // (Note that this must be called with the tun header
349 // as the start of the data).
350 int master_garden_packet(sessionidt s, uint8_t *data, int size)
351 {
352 uint8_t buf[65536]; // Vast overkill.
353 uint8_t *p = buf;
354
355 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
356 return -1;
357
358 LOG(4, 0, 0, "Walled garden packet to master (size %d)\n", size);
359
360 add_type(&p, C_GARDEN, s, data, size);
361
362 return peer_send_data(config->cluster_master_address, buf, (p-buf));
363
364 }
365
366 //
367 // Forward a MPPP packet to the master for handling.
368 //
369 // (Note that this must be called with the tun header
370 // as the start of the data).
371 // (i.e. this routine writes to data[-8]).
372 int master_forward_mppp_packet(sessionidt s, uint8_t *data, int size)
373 {
374 uint8_t *p = data - (2 * sizeof(uint32_t));
375 uint8_t *psave = p;
376
377 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
378 return -1;
379
380 LOG(4, 0, 0, "Forward MPPP packet to master (size %d)\n", size);
381
382 add_type(&p, C_MPPP_FORWARD, s, NULL, 0);
383
384 return peer_send_data(config->cluster_master_address, psave, size + (2 * sizeof(uint32_t)));
385
386 }
387
388 //
389 // Send a chunk of data as a heartbeat..
390 // We save it in the history buffer as we do so.
391 //
392 static void send_heartbeat(int seq, uint8_t *data, int size)
393 {
394 int i;
395
396 if (size > sizeof(past_hearts[0].data))
397 {
398 LOG(0, 0, 0, "Tried to heartbeat something larger than the maximum packet!\n");
399 kill(0, SIGTERM);
400 exit(1);
401 }
402 i = seq % HB_HISTORY_SIZE;
403 past_hearts[i].seq = seq;
404 past_hearts[i].size = size;
405 memcpy(&past_hearts[i].data, data, size); // Save it.
406 cluster_send_data(data, size);
407 }
408
409 //
410 // Send an 'i am alive' message to every machine in the cluster.
411 //
412 void cluster_send_ping(time_t basetime)
413 {
414 uint8_t buff[100 + sizeof(pingt)];
415 uint8_t *p = buff;
416 pingt x;
417
418 if (config->cluster_iam_master && basetime) // We're heartbeating so no need to ping.
419 return;
420
421 LOG(5, 0, 0, "Sending cluster ping...\n");
422
423 x.ver = 1;
424 x.addr = config->bind_address;
425 x.undef = config->cluster_undefined_sessions + config->cluster_undefined_tunnels + config->cluster_undefined_bundles;
426 x.basetime = basetime;
427
428 add_type(&p, C_PING, basetime, (uint8_t *) &x, sizeof(x));
429 cluster_send_data(buff, (p-buff) );
430 }
431
432 //
433 // Walk the session counters looking for non-zero ones to send
434 // to the master. We send up to 600 of them at one time.
435 // We examine a maximum of 3000 sessions.
436 // (a 50k session maximum should mean that we normally
437 // examine the entire session table every 25 seconds).
438
439 #define MAX_B_RECS (600)
440 void master_update_counts(void)
441 {
442 int i, c;
443 bytest b[MAX_B_RECS+1];
444
445 if (config->cluster_iam_master) // Only happens on the slaves.
446 return;
447
448 if (!config->cluster_master_address) // If we don't have a master, skip it for a while.
449 return;
450
451 i = MAX_B_RECS * 5; // Examine max 3000 sessions;
452 if (config->cluster_highest_sessionid > i)
453 i = config->cluster_highest_sessionid;
454
455 for ( c = 0; i > 0 ; --i) {
456 // Next session to look at.
457 walk_session_number++;
458 if ( walk_session_number > config->cluster_highest_sessionid)
459 walk_session_number = 1;
460
461 if (!sess_local[walk_session_number].cin && !sess_local[walk_session_number].cout)
462 continue; // Unchanged. Skip it.
463
464 b[c].sid = walk_session_number;
465 b[c].pin = sess_local[walk_session_number].pin;
466 b[c].pout = sess_local[walk_session_number].pout;
467 b[c].cin = sess_local[walk_session_number].cin;
468 b[c].cout = sess_local[walk_session_number].cout;
469
470 // Reset counters.
471 sess_local[walk_session_number].pin = sess_local[walk_session_number].pout = 0;
472 sess_local[walk_session_number].cin = sess_local[walk_session_number].cout = 0;
473
474 if (++c > MAX_B_RECS) // Send a max of 600 elements in a packet.
475 break;
476 }
477
478 if (!c) // Didn't find any that changed. Get out of here!
479 return;
480
481
482 // Forward the data to the master.
483 LOG(4, 0, 0, "Sending byte counters to master (%d elements)\n", c);
484 peer_send_message(config->cluster_master_address, C_BYTES, c, (uint8_t *) &b, sizeof(b[0]) * c);
485 return;
486 }
487
488 //
489 // On the master, check how our slaves are going. If
490 // one of them's not up-to-date we'll heartbeat faster.
491 // If we don't have any of them, then we need to turn
492 // on our own packet handling!
493 //
494 void cluster_check_slaves(void)
495 {
496 int i;
497 static int have_peers = 0;
498 int had_peers = have_peers;
499 clockt t = TIME;
500
501 if (!config->cluster_iam_master)
502 return; // Only runs on the master...
503
504 config->cluster_iam_uptodate = 1; // cleared in loop below
505
506 for (i = have_peers = 0; i < num_peers; i++)
507 {
508 if ((peers[i].timestamp + config->cluster_hb_timeout) < t)
509 continue; // Stale peer! Skip them.
510
511 if (!peers[i].basetime)
512 continue; // Shutdown peer! Skip them.
513
514 if (peers[i].uptodate)
515 have_peers++;
516 else
517 config->cluster_iam_uptodate = 0; // Start fast heartbeats
518 }
519
520 // in a cluster, withdraw/add routes when we get a peer/lose peers
521 if (have_peers != had_peers)
522 {
523 if (had_peers < config->cluster_master_min_adv &&
524 have_peers >= config->cluster_master_min_adv)
525 withdraw_routes();
526
527 else if (had_peers >= config->cluster_master_min_adv &&
528 have_peers < config->cluster_master_min_adv)
529 advertise_routes();
530 }
531 }
532
533 //
534 // Check that we have a master. If it's been too
535 // long since we heard from a master then hold an election.
536 //
537 void cluster_check_master(void)
538 {
539 int i, count, high_unique_id = 0;
540 int last_free = 0;
541 clockt t = TIME;
542 static int probed = 0;
543 int have_peers;
544
545 if (config->cluster_iam_master)
546 return; // Only runs on the slaves...
547
548 // If the master is late (missed 2 heartbeats by a second and a
549 // hair) it may be that the switch has dropped us from the
550 // multicast group, try unicasting probes to the master
551 // which will hopefully respond with a unicast heartbeat that
552 // will allow us to limp along until the querier next runs.
553 if (config->cluster_master_address
554 && TIME > (config->cluster_last_hb + 2 * config->cluster_hb_interval + 11))
555 {
556 if (!probed || (TIME > (probed + 2 * config->cluster_hb_interval)))
557 {
558 probed = TIME;
559 LOG(1, 0, 0, "Heartbeat from master %.1fs late, probing...\n",
560 0.1 * (TIME - (config->cluster_last_hb + config->cluster_hb_interval)));
561
562 peer_send_message(config->cluster_master_address,
563 C_LASTSEEN, config->cluster_seq_number, NULL, 0);
564 }
565 } else { // We got a recent heartbeat; reset the probe flag.
566 probed = 0;
567 }
568
569 if (TIME < (config->cluster_last_hb + config->cluster_hb_timeout))
570 return; // Everything's ok!
571
572 config->cluster_last_hb = TIME + 1; // Just the one election thanks.
573 config->cluster_master_address = 0;
574
575 LOG(0, 0, 0, "Master timed out! Holding election...\n");
576
577 // In the process of shutting down, can't be master
578 if (main_quit)
579 return;
580
581 for (i = have_peers = 0; i < num_peers; i++)
582 {
583 if ((peers[i].timestamp + config->cluster_hb_timeout) < t)
584 continue; // Stale peer! Skip them.
585
586 if (!peers[i].basetime)
587 continue; // Shutdown peer! Skip them.
588
589 if (peers[i].basetime < basetime) {
590 LOG(1, 0, 0, "Expecting %s to become master\n", fmtaddr(peers[i].peer, 0));
591 return; // They'll win the election. Get out of here.
592 }
593
594 if (peers[i].basetime == basetime &&
595 peers[i].peer > my_address) {
596 LOG(1, 0, 0, "Expecting %s to become master\n", fmtaddr(peers[i].peer, 0));
597 return; // They'll win the election. Wait for them to come up.
598 }
599
600 if (peers[i].uptodate)
601 have_peers++;
602 }
603
604 // Wow. It's been ages since I last heard a heartbeat
605 // and I'm better than any of my peers so it's time
606 // to become a master!!!
607
608 config->cluster_iam_master = 1;
609
610 LOG(0, 0, 0, "I am declaring myself the master!\n");
611
612 if (have_peers < config->cluster_master_min_adv)
613 advertise_routes();
614 else
615 withdraw_routes();
616
617 if (config->cluster_seq_number == -1)
618 config->cluster_seq_number = 0;
619
620 //
621 // Go through and mark all the tunnels as defined.
622 // Count the highest used tunnel number as well.
623 //
624 config->cluster_highest_tunnelid = 0;
625 for (i = 0; i < MAXTUNNEL; ++i) {
626 if (tunnel[i].state == TUNNELUNDEF)
627 tunnel[i].state = TUNNELFREE;
628
629 if (tunnel[i].state != TUNNELFREE && i > config->cluster_highest_tunnelid)
630 config->cluster_highest_tunnelid = i;
631 }
632
633 //
634 // Go through and mark all the bundles as defined.
635 // Count the highest used bundle number as well.
636 //
637 config->cluster_highest_bundleid = 0;
638 for (i = 0; i < MAXBUNDLE; ++i) {
639 if (bundle[i].state == BUNDLEUNDEF)
640 bundle[i].state = BUNDLEFREE;
641
642 if (bundle[i].state != BUNDLEFREE && i > config->cluster_highest_bundleid)
643 config->cluster_highest_bundleid = i;
644 }
645
646 //
647 // Go through and mark all the sessions as being defined.
648 // reset the idle timeouts.
649 // add temporary byte counters to permanent ones.
650 // Re-string the free list.
651 // Find the ID of the highest session.
652 last_free = 0;
653 high_unique_id = 0;
654 config->cluster_highest_sessionid = 0;
655 for (i = 0, count = 0; i < MAXSESSION; ++i) {
656 if (session[i].tunnel == T_UNDEF) {
657 session[i].tunnel = T_FREE;
658 ++count;
659 }
660
661 if (!session[i].opened) { // Unused session. Add to free list.
662 memset(&session[i], 0, sizeof(session[i]));
663 session[i].tunnel = T_FREE;
664 session[last_free].next = i;
665 session[i].next = 0;
666 last_free = i;
667 continue;
668 }
669
670 // Reset idle timeouts..
671 session[i].last_packet = session[i].last_data = time_now;
672
673 // Reset die relative to our uptime rather than the old master's
674 if (session[i].die) session[i].die = TIME;
675
676 // Accumulate un-sent byte/packet counters.
677 increment_counter(&session[i].cin, &session[i].cin_wrap, sess_local[i].cin);
678 increment_counter(&session[i].cout, &session[i].cout_wrap, sess_local[i].cout);
679 session[i].cin_delta += sess_local[i].cin;
680 session[i].cout_delta += sess_local[i].cout;
681
682 session[i].pin += sess_local[i].pin;
683 session[i].pout += sess_local[i].pout;
684
685 sess_local[i].cin = sess_local[i].cout = 0;
686 sess_local[i].pin = sess_local[i].pout = 0;
687
688 sess_local[i].radius = 0; // Reset authentication as the radius blocks aren't up to date.
689
690 if (session[i].unique_id >= high_unique_id) // This is different to the index into the session table!!!
691 high_unique_id = session[i].unique_id+1;
692
693 session[i].tbf_in = session[i].tbf_out = 0; // Remove stale pointers from old master.
694 throttle_session(i, session[i].throttle_in, session[i].throttle_out);
695
696 config->cluster_highest_sessionid = i;
697 }
698
699 session[last_free].next = 0; // End of chain.
700 last_id = high_unique_id; // Keep track of the highest used session ID.
701
702 become_master();
703
704 rebuild_address_pool();
705
706 // If we're not the very first master, this is a big issue!
707 if (count > 0)
708 LOG(0, 0, 0, "Warning: Fixed %d uninitialized sessions in becoming master!\n", count);
709
710 config->cluster_undefined_sessions = 0;
711 config->cluster_undefined_bundles = 0;
712 config->cluster_undefined_tunnels = 0;
713 config->cluster_iam_uptodate = 1; // assume all peers are up-to-date
714
715 // FIXME. We need to fix up the tunnel control message
716 // queue here! There's a number of other variables we
717 // should also update.
718 }
719
720
721 //
722 // Check that our session table validly matches what the
723 // master has in mind.
724 //
725 // In particular, if we have too many sessions marked 'undefined'
726 // we fix it up here, and we ensure that the 'first free session'
727 // pointer is valid.
728 //
729 static void cluster_check_sessions(int highsession, int freesession_ptr, int highbundle, int hightunnel)
730 {
731 int i;
732
733 sessionfree = freesession_ptr; // Keep the freesession ptr valid.
734
735 if (config->cluster_iam_uptodate)
736 return;
737
738 if (highsession > config->cluster_undefined_sessions && highbundle > config->cluster_undefined_bundles && hightunnel > config->cluster_undefined_tunnels)
739 return;
740
741 // Clear out defined sessions, counting the number of
742 // undefs remaining.
743 config->cluster_undefined_sessions = 0;
744 for (i = 1 ; i < MAXSESSION; ++i) {
745 if (i > highsession) {
746 if (session[i].tunnel == T_UNDEF) session[i].tunnel = T_FREE; // Defined.
747 continue;
748 }
749
750 if (session[i].tunnel == T_UNDEF)
751 ++config->cluster_undefined_sessions;
752 }
753
754 // Clear out defined bundles, counting the number of
755 // undefs remaining.
756 config->cluster_undefined_bundles = 0;
757 for (i = 1 ; i < MAXBUNDLE; ++i) {
758 if (i > highbundle) {
759 if (bundle[i].state == BUNDLEUNDEF) bundle[i].state = BUNDLEFREE; // Defined.
760 continue;
761 }
762
763 if (bundle[i].state == BUNDLEUNDEF)
764 ++config->cluster_undefined_bundles;
765 }
766
767 // Clear out defined tunnels, counting the number of
768 // undefs remaining.
769 config->cluster_undefined_tunnels = 0;
770 for (i = 1 ; i < MAXTUNNEL; ++i) {
771 if (i > hightunnel) {
772 if (tunnel[i].state == TUNNELUNDEF) tunnel[i].state = TUNNELFREE; // Defined.
773 continue;
774 }
775
776 if (tunnel[i].state == TUNNELUNDEF)
777 ++config->cluster_undefined_tunnels;
778 }
779
780
781 if (config->cluster_undefined_sessions || config->cluster_undefined_tunnels || config->cluster_undefined_bundles) {
782 LOG(2, 0, 0, "Cleared undefined sessions/bundles/tunnels. %d sess (high %d), %d bund (high %d), %d tunn (high %d)\n",
783 config->cluster_undefined_sessions, highsession, config->cluster_undefined_bundles, highbundle, config->cluster_undefined_tunnels, hightunnel);
784 return;
785 }
786
787 // Are we up to date?
788
789 if (!config->cluster_iam_uptodate)
790 cluster_uptodate();
791 }
792
793 static int hb_add_type(uint8_t **p, int type, int id)
794 {
795 switch (type) {
796 case C_CSESSION: { // Compressed C_SESSION.
797 uint8_t c[sizeof(sessiont) * 2]; // Bigger than worst case.
798 uint8_t *d = (uint8_t *) &session[id];
799 uint8_t *orig = d;
800 int size;
801
802 size = rle_compress( &d, sizeof(sessiont), c, sizeof(c) );
803
804 // Did we compress the full structure, and is the size actually
805 // reduced??
806 if ( (d - orig) == sizeof(sessiont) && size < sizeof(sessiont) ) {
807 add_type(p, C_CSESSION, id, c, size);
808 break;
809 }
810 // Failed to compress : Fall through.
811 }
812 case C_SESSION:
813 add_type(p, C_SESSION, id, (uint8_t *) &session[id], sizeof(sessiont));
814 break;
815
816 case C_CBUNDLE: { // Compressed C_BUNDLE
817 uint8_t c[sizeof(bundlet) * 2]; // Bigger than worst case.
818 uint8_t *d = (uint8_t *) &bundle[id];
819 uint8_t *orig = d;
820 int size;
821
822 size = rle_compress( &d, sizeof(bundlet), c, sizeof(c) );
823
824 // Did we compress the full structure, and is the size actually
825 // reduced??
826 if ( (d - orig) == sizeof(bundlet) && size < sizeof(bundlet) ) {
827 add_type(p, C_CBUNDLE, id, c, size);
828 break;
829 }
830 // Failed to compress : Fall through.
831 }
832
833 case C_BUNDLE:
834 add_type(p, C_BUNDLE, id, (uint8_t *) &bundle[id], sizeof(bundlet));
835 break;
836
837 case C_CTUNNEL: { // Compressed C_TUNNEL
838 uint8_t c[sizeof(tunnelt) * 2]; // Bigger than worst case.
839 uint8_t *d = (uint8_t *) &tunnel[id];
840 uint8_t *orig = d;
841 int size;
842
843 size = rle_compress( &d, sizeof(tunnelt), c, sizeof(c) );
844
845 // Did we compress the full structure, and is the size actually
846 // reduced??
847 if ( (d - orig) == sizeof(tunnelt) && size < sizeof(tunnelt) ) {
848 add_type(p, C_CTUNNEL, id, c, size);
849 break;
850 }
851 // Failed to compress : Fall through.
852 }
853 case C_TUNNEL:
854 add_type(p, C_TUNNEL, id, (uint8_t *) &tunnel[id], sizeof(tunnelt));
855 break;
856 default:
857 LOG(0, 0, 0, "Found an invalid type in heart queue! (%d)\n", type);
858 kill(0, SIGTERM);
859 exit(1);
860 }
861 return 0;
862 }
863
864 //
865 // Send a heartbeat, incidentally sending out any queued changes.
866 //
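// For reference, the packet assembled below looks roughly like this on the
// wire (an informal sketch of the element sequence, not a literal struct):
//
//   [ C_HEARTBEAT | HB_VERSION | heartt header           ]
//   [ C_CSESSION  | session id | RLE-compressed sessiont ]  <- queued changes first,
//   [ C_CTUNNEL   | tunnel id  | RLE-compressed tunnelt  ]     then a slow walk of the
//   [ C_CBUNDLE   | bundle id  | RLE-compressed bundlet  ]     tables to pad out to
//   ...                                                        MAX_HEART_SIZE
//
// Uncompressed C_SESSION/C_TUNNEL/C_BUNDLE elements are used instead whenever
// RLE fails to shrink the structure (see hb_add_type()).
//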
867 void cluster_heartbeat()
868 {
869 int i, count = 0, tcount = 0, bcount = 0;
870 uint8_t buff[MAX_HEART_SIZE + sizeof(heartt) + sizeof(int) ];
871 heartt h;
872 uint8_t *p = buff;
873
874 if (!config->cluster_iam_master) // Only the master does this.
875 return;
876
877 config->cluster_table_version += config->cluster_num_changes;
878
879 // Fill out the heartbeat header.
880 memset(&h, 0, sizeof(h));
881
882 h.version = HB_VERSION;
883 h.seq = config->cluster_seq_number;
884 h.basetime = basetime;
885 h.clusterid = config->bind_address; // Will this do??
886 h.basetime = basetime;
887 h.highsession = config->cluster_highest_sessionid;
888 h.freesession = sessionfree;
889 h.hightunnel = config->cluster_highest_tunnelid;
890 h.highbundle = config->cluster_highest_bundleid;
891 h.size_sess = sizeof(sessiont); // Just in case.
892 h.size_bund = sizeof(bundlet);
893 h.size_tunn = sizeof(tunnelt);
894 h.interval = config->cluster_hb_interval;
895 h.timeout = config->cluster_hb_timeout;
896 h.table_version = config->cluster_table_version;
897
898 add_type(&p, C_HEARTBEAT, HB_VERSION, (uint8_t *) &h, sizeof(h));
899
900 for (i = 0; i < config->cluster_num_changes; ++i) {
901 hb_add_type(&p, cluster_changes[i].type, cluster_changes[i].id);
902 }
903
904 if (p > (buff + sizeof(buff))) { // Did we somehow manage to overrun the buffer?
905 LOG(0, 0, 0, "FATAL: Overran the heartbeat buffer! This is fatal. Exiting. (size %d)\n", (int) (p - buff));
906 kill(0, SIGTERM);
907 exit(1);
908 }
909
910 //
911 // Fill out the packet with sessions from the session table...
912 // (not forgetting to leave space so we can get some tunnels in too )
913 while ( (p + sizeof(uint32_t) * 2 + sizeof(sessiont) * 2 ) < (buff + MAX_HEART_SIZE) ) {
914
915 if (!walk_session_number) // session #0 isn't valid.
916 ++walk_session_number;
917
918 if (count >= config->cluster_highest_sessionid) // If we're a small cluster, don't go wild.
919 break;
920
921 hb_add_type(&p, C_CSESSION, walk_session_number);
922 walk_session_number = (1+walk_session_number)%(config->cluster_highest_sessionid+1); // +1 avoids divide by zero.
923
924 ++count; // Count the number of extra sessions we're sending.
925 }
926
927 //
928 // Fill out the packet with tunnels from the tunnel table...
929 // This effectively means we walk the tunnel table more quickly
930 // than the session table. This is good because stuffing up a
931 // tunnel is a much bigger deal than stuffing up a session.
932 //
933 while ( (p + sizeof(uint32_t) * 2 + sizeof(tunnelt) ) < (buff + MAX_HEART_SIZE) ) {
934
935 if (!walk_tunnel_number) // tunnel #0 isn't valid.
936 ++walk_tunnel_number;
937
938 if (tcount >= config->cluster_highest_tunnelid)
939 break;
940
941 hb_add_type(&p, C_CTUNNEL, walk_tunnel_number);
942 walk_tunnel_number = (1+walk_tunnel_number)%(config->cluster_highest_tunnelid+1); // +1 avoids divide by zero.
943
944 ++tcount;
945 }
946
947 //
948 // Fill out the packet with bundles from the bundle table...
949 while ( (p + sizeof(uint32_t) * 2 + sizeof(bundlet) ) < (buff + MAX_HEART_SIZE) ) {
950
951 if (!walk_bundle_number) // bundle #0 isn't valid.
952 ++walk_bundle_number;
953
954 if (bcount >= config->cluster_highest_bundleid)
955 break;
956
957 hb_add_type(&p, C_CBUNDLE, walk_bundle_number);
958 walk_bundle_number = (1+walk_bundle_number)%(config->cluster_highest_bundleid+1); // +1 avoids divide by zero.
959 ++bcount;
960 }
961
962 //
963 // Did we do something wrong?
964 if (p > (buff + sizeof(buff))) { // Did we somehow manage to overrun the buffer?
965 LOG(0, 0, 0, "Overran the heartbeat buffer now! This is fatal. Exiting. (size %d)\n", (int) (p - buff));
966 kill(0, SIGTERM);
967 exit(1);
968 }
969
970 LOG(4, 0, 0, "Sending v%d heartbeat #%d, change #%" PRIu64 " with %d changes "
971 "(%d x-sess, %d x-bundles, %d x-tunnels, %d highsess, %d highbund, %d hightun, size %d)\n",
972 HB_VERSION, h.seq, h.table_version, config->cluster_num_changes,
973 count, bcount, tcount, config->cluster_highest_sessionid, config->cluster_highest_bundleid,
974 config->cluster_highest_tunnelid, (int) (p - buff));
975
976 config->cluster_num_changes = 0;
977
978 send_heartbeat(h.seq, buff, (p-buff) ); // Send out the heartbeat to the cluster, keeping a copy of it.
979
980 config->cluster_seq_number = (config->cluster_seq_number+1)%HB_MAX_SEQ; // Next seq number to use.
981 }
982
983 //
984 // A structure of type 'type' has changed; Add it to the queue to send.
985 //
986 static int type_changed(int type, int id)
987 {
988 int i;
989
990 for (i = 0 ; i < config->cluster_num_changes ; ++i)
991 if ( cluster_changes[i].id == id &&
992 cluster_changes[i].type == type)
993 return 0; // Already marked for change.
994
995 cluster_changes[i].type = type;
996 cluster_changes[i].id = id;
997 ++config->cluster_num_changes;
998
999 if (config->cluster_num_changes >= MAX_CHANGES) // >= : the next change would be written past the end of cluster_changes[]
1000 cluster_heartbeat(); // flush now
1001
1002 return 1;
1003 }
1004
1005
1006 // A particular session has been changed!
1007 int cluster_send_session(int sid)
1008 {
1009 if (!config->cluster_iam_master) {
1010 LOG(0, sid, 0, "I'm not a master, but I just tried to change a session!\n");
1011 return -1;
1012 }
1013
1014 if (forked) {
1015 LOG(0, sid, 0, "cluster_send_session called from child process!\n");
1016 return -1;
1017 }
1018
1019 return type_changed(C_CSESSION, sid);
1020 }
1021
1022 // A particular bundle has been changed!
1023 int cluster_send_bundle(int bid)
1024 {
1025 if (!config->cluster_iam_master) {
1026 LOG(0, 0, bid, "I'm not a master, but I just tried to change a bundle!\n");
1027 return -1;
1028 }
1029
1030 return type_changed(C_CBUNDLE, bid);
1031 }
1032
1033 // A particular tunnel has been changed!
1034 int cluster_send_tunnel(int tid)
1035 {
1036 if (!config->cluster_iam_master) {
1037 LOG(0, 0, tid, "I'm not a master, but I just tried to change a tunnel!\n");
1038 return -1;
1039 }
1040
1041 return type_changed(C_CTUNNEL, tid);
1042 }
1043
1044
1045 //
1046 // We're a master, and a slave has just told us that it's
1047 // missed a packet. We'll resend every packet since
1048 // the last one it's seen.
1049 //
1050 static int cluster_catchup_slave(int seq, in_addr_t slave)
1051 {
1052 int s;
1053 int diff;
1054
1055 LOG(1, 0, 0, "Slave %s sent LASTSEEN with seq %d\n", fmtaddr(slave, 0), seq);
1056 if (!config->cluster_iam_master) {
1057 LOG(1, 0, 0, "Got LASTSEEN but I'm not a master! Redirecting it to %s.\n",
1058 fmtaddr(config->cluster_master_address, 0));
1059
1060 peer_send_message(slave, C_MASTER, config->cluster_master_address, NULL, 0);
1061 return 0;
1062 }
1063
1064 diff = config->cluster_seq_number - seq; // How many packets do we need to send?
1065 if (diff < 0)
1066 diff += HB_MAX_SEQ;
1067
1068 if (diff >= HB_HISTORY_SIZE) { // Ouch. We don't have the packet to send it!
1069 LOG(0, 0, 0, "A slave asked for message %d when our seq number is %d. Killing it.\n",
1070 seq, config->cluster_seq_number);
1071 return peer_send_message(slave, C_KILL, seq, NULL, 0);// Kill the slave. Nothing else to do.
1072 }
1073
1074 LOG(1, 0, 0, "Sending %d catchup packets to slave %s\n", diff, fmtaddr(slave, 0) );
1075
1076 // Now resend every packet that it missed, in order.
1077 while (seq != config->cluster_seq_number) {
1078 s = seq % HB_HISTORY_SIZE;
1079 if (seq != past_hearts[s].seq) {
1080 LOG(0, 0, 0, "Tried to re-send heartbeat for %s but %d doesn't match %d! (%d,%d)\n",
1081 fmtaddr(slave, 0), seq, past_hearts[s].seq, s, config->cluster_seq_number);
1082 return -1; // What to do here!?
1083 }
1084 peer_send_data(slave, past_hearts[s].data, past_hearts[s].size);
1085 seq = (seq+1)%HB_MAX_SEQ; // Increment to next seq number.
1086 }
1087 return 0; // All good!
1088 }
1089
1090 //
1091 // We've heard from another peer! Add it to the list
1092 // that we select from at election time.
1093 //
1094 static int cluster_add_peer(in_addr_t peer, time_t basetime, pingt *pp, int size)
1095 {
1096 int i;
1097 in_addr_t clusterid;
1098 pingt p;
1099
1100 // Allow for backward compatibility.
1101 // Copy the ping packet into a new structure to allow
1102 // for the possibility that we might have received
1103 // more or fewer elements than we were expecting.
1104 if (size > sizeof(p))
1105 size = sizeof(p);
1106
1107 memset( (void *) &p, 0, sizeof(p) );
1108 memcpy( (void *) &p, (void *) pp, size);
1109
1110 clusterid = p.addr;
1111 if (clusterid != config->bind_address)
1112 {
1113 // Is this for us?
1114 LOG(4, 0, 0, "Skipping ping from %s (different cluster)\n", fmtaddr(peer, 0));
1115 return 0;
1116 }
1117
1118 for (i = 0; i < num_peers ; ++i)
1119 {
1120 if (peers[i].peer != peer)
1121 continue;
1122
1123 // This peer already exists. Just update the timestamp.
1124 peers[i].basetime = basetime;
1125 peers[i].timestamp = TIME;
1126 peers[i].uptodate = !p.undef;
1127 break;
1128 }
1129
1130 // Is this the master shutting down??
1131 if (peer == config->cluster_master_address) {
1132 LOG(3, 0, 0, "Master %s %s\n", fmtaddr(config->cluster_master_address, 0),
1133 basetime ? "has restarted!" : "shutting down...");
1134
1135 config->cluster_master_address = 0;
1136 config->cluster_last_hb = 0; // Force an election.
1137 cluster_check_master();
1138 }
1139
1140 if (i >= num_peers)
1141 {
1142 LOG(4, 0, 0, "Adding %s as a peer\n", fmtaddr(peer, 0));
1143
1144 // Not found. Is there a stale slot to re-use?
1145 for (i = 0; i < num_peers ; ++i)
1146 {
1147 if (!peers[i].basetime) // Shutdown
1148 break;
1149
1150 if ((peers[i].timestamp + config->cluster_hb_timeout * 10) < TIME) // Stale.
1151 break;
1152 }
1153
1154 if (i >= CLUSTER_MAX_SIZE)
1155 {
1156 // Too many peers!!
1157 LOG(0, 0, 0, "Tried to add %s as a peer, but I already have %d of them!\n", fmtaddr(peer, 0), i);
1158 return -1;
1159 }
1160
1161 peers[i].peer = peer;
1162 peers[i].basetime = basetime;
1163 peers[i].timestamp = TIME;
1164 peers[i].uptodate = !p.undef;
1165 if (i == num_peers)
1166 ++num_peers;
1167
1168 LOG(1, 0, 0, "Added %s as a new peer. Now %d peers\n", fmtaddr(peer, 0), num_peers);
1169 }
1170
1171 return 1;
1172 }
1173
1174 // A slave responds with C_MASTER when it gets a message which should have gone to a master.
1175 static int cluster_set_master(in_addr_t peer, in_addr_t master)
1176 {
1177 if (config->cluster_iam_master) // Sanity...
1178 return 0;
1179
1180 LOG(3, 0, 0, "Peer %s set the master to %s...\n", fmtaddr(peer, 0),
1181 fmtaddr(master, 1));
1182
1183 config->cluster_master_address = master;
1184 if (master)
1185 {
1186 // catchup with new master
1187 peer_send_message(master, C_LASTSEEN, config->cluster_seq_number, NULL, 0);
1188
1189 // delay next election
1190 config->cluster_last_hb = TIME;
1191 }
1192
1193 // run election (or reset "probed" if master was set)
1194 cluster_check_master();
1195 return 0;
1196 }
1197
1198 /* Handle the slave updating the byte counters for the master. */
1199 //
1200 // Note that we don't mark the session as dirty; We rely on
1201 // the slow table walk to propagate this back out to the slaves.
1202 //
1203 static int cluster_handle_bytes(uint8_t *data, int size)
1204 {
1205 bytest *b;
1206
1207 b = (bytest *) data;
1208
1209 LOG(3, 0, 0, "Got byte counter update (size %d)\n", size);
1210
1211 /* Loop around, adding the byte
1212 counts to each of the sessions. */
1213
1214 while (size >= sizeof(*b) ) {
1215 if (b->sid >= MAXSESSION) { // >= : session[] has MAXSESSION entries
1216 LOG(0, 0, 0, "Got C_BYTES with session #%d!\n", b->sid);
1217 return -1; /* Abort processing */
1218 }
1219
1220 session[b->sid].pin += b->pin;
1221 session[b->sid].pout += b->pout;
1222
1223 increment_counter(&session[b->sid].cin, &session[b->sid].cin_wrap, b->cin);
1224 increment_counter(&session[b->sid].cout, &session[b->sid].cout_wrap, b->cout);
1225
1226 session[b->sid].cin_delta += b->cin;
1227 session[b->sid].cout_delta += b->cout;
1228
1229 if (b->cin)
1230 session[b->sid].last_packet = session[b->sid].last_data = time_now;
1231 else if (b->cout)
1232 session[b->sid].last_data = time_now;
1233
1234 size -= sizeof(*b);
1235 ++b;
1236 }
1237
1238 if (size != 0)
1239 LOG(0, 0, 0, "Got C_BYTES with %d bytes of trailing junk!\n", size);
1240
1241 return size;
1242 }
1243
1244 //
1245 // Handle receiving a session structure in a heartbeat packet.
1246 //
1247 static int cluster_recv_session(int more, uint8_t *p)
1248 {
1249 if (more >= MAXSESSION) {
1250 LOG(0, 0, 0, "DANGER: Received a heartbeat session id > MAXSESSION!\n");
1251 return -1;
1252 }
1253
1254 if (session[more].tunnel == T_UNDEF) {
1255 if (config->cluster_iam_uptodate) { // Sanity.
1256 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined session!\n");
1257 } else {
1258 --config->cluster_undefined_sessions;
1259 }
1260 }
1261
1262 load_session(more, (sessiont *) p); // Copy session into session table..
1263
1264 LOG(5, more, 0, "Received session update (%d undef)\n", config->cluster_undefined_sessions);
1265
1266 if (!config->cluster_iam_uptodate)
1267 cluster_uptodate(); // Check to see if we're up to date.
1268
1269 return 0;
1270 }
1271
1272 static int cluster_recv_bundle(int more, uint8_t *p)
1273 {
1274 if (more >= MAXBUNDLE) {
1275 LOG(0, 0, 0, "DANGER: Received a bundle id > MAXBUNDLE!\n");
1276 return -1;
1277 }
1278
1279 if (bundle[more].state == BUNDLEUNDEF) {
1280 if (config->cluster_iam_uptodate) { // Sanity.
1281 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined bundle!\n");
1282 } else {
1283 --config->cluster_undefined_bundles;
1284 }
1285 }
1286
1287 memcpy(&bundle[more], p, sizeof(bundle[more]) );
1288
1289 LOG(5, 0, more, "Received bundle update\n");
1290
1291 if (!config->cluster_iam_uptodate)
1292 cluster_uptodate(); // Check to see if we're up to date.
1293
1294 return 0;
1295 }
1296
1297 static int cluster_recv_tunnel(int more, uint8_t *p)
1298 {
1299 if (more >= MAXTUNNEL) {
1300 LOG(0, 0, 0, "DANGER: Received a tunnel session id > MAXTUNNEL!\n");
1301 return -1;
1302 }
1303
1304 if (tunnel[more].state == TUNNELUNDEF) {
1305 if (config->cluster_iam_uptodate) { // Sanity.
1306 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined tunnel!\n");
1307 } else {
1308 --config->cluster_undefined_tunnels;
1309 }
1310 }
1311
1312 memcpy(&tunnel[more], p, sizeof(tunnel[more]) );
1313
1314 //
1315 // Clear tunnel control messages. These are dynamically allocated.
1316 // If we get unlucky, this may cause the tunnel to drop!
1317 //
1318 tunnel[more].controls = tunnel[more].controle = NULL;
1319 tunnel[more].controlc = 0;
1320
1321 LOG(5, 0, more, "Received tunnel update\n");
1322
1323 if (!config->cluster_iam_uptodate)
1324 cluster_uptodate(); // Check to see if we're up to date.
1325
1326 return 0;
1327 }
1328
1329
1330 // pre v6 heartbeat session structure
1331 struct oldsession {
1332 sessionidt next;
1333 sessionidt far;
1334 tunnelidt tunnel;
1335 uint8_t flags;
1336 struct {
1337 uint8_t phase;
1338 uint8_t lcp:4;
1339 uint8_t ipcp:4;
1340 uint8_t ipv6cp:4;
1341 uint8_t ccp:4;
1342 } ppp;
1343 char reserved_1[2];
1344 in_addr_t ip;
1345 int ip_pool_index;
1346 uint32_t unique_id;
1347 char reserved_2[4];
1348 uint32_t magic;
1349 uint32_t pin, pout;
1350 uint32_t cin, cout;
1351 uint32_t cin_wrap, cout_wrap;
1352 uint32_t cin_delta, cout_delta;
1353 uint16_t throttle_in;
1354 uint16_t throttle_out;
1355 uint8_t filter_in;
1356 uint8_t filter_out;
1357 uint16_t mru;
1358 clockt opened;
1359 clockt die;
1360 uint32_t session_timeout;
1361 uint32_t idle_timeout;
1362 time_t last_packet;
1363 time_t last_data;
1364 in_addr_t dns1, dns2;
1365 routet route[MAXROUTE];
1366 uint16_t tbf_in;
1367 uint16_t tbf_out;
1368 int random_vector_length;
1369 uint8_t random_vector[MAXTEL];
1370 char user[MAXUSER];
1371 char called[MAXTEL];
1372 char calling[MAXTEL];
1373 uint32_t tx_connect_speed;
1374 uint32_t rx_connect_speed;
1375 clockt timeout;
1376 uint32_t mrru;
1377 uint8_t mssf;
1378 epdist epdis;
1379 bundleidt bundle;
1380 in_addr_t snoop_ip;
1381 uint16_t snoop_port;
1382 uint8_t walled_garden;
1383 uint8_t ipv6prefixlen;
1384 struct in6_addr ipv6route;
1385 char reserved_3[11];
1386 };
1387
1388 static uint8_t *convert_session(struct oldsession *old)
1389 {
1390 static sessiont new;
1391 int i;
1392
1393 memset(&new, 0, sizeof(new));
1394
1395 new.next = old->next;
1396 new.far = old->far;
1397 new.tunnel = old->tunnel;
1398 new.flags = old->flags;
1399 new.ppp.phase = old->ppp.phase;
1400 new.ppp.lcp = old->ppp.lcp;
1401 new.ppp.ipcp = old->ppp.ipcp;
1402 new.ppp.ipv6cp = old->ppp.ipv6cp;
1403 new.ppp.ccp = old->ppp.ccp;
1404 new.ip = old->ip;
1405 new.ip_pool_index = old->ip_pool_index;
1406 new.unique_id = old->unique_id;
1407 new.magic = old->magic;
1408 new.pin = old->pin;
1409 new.pout = old->pout;
1410 new.cin = old->cin;
1411 new.cout = old->cout;
1412 new.cin_wrap = old->cin_wrap;
1413 new.cout_wrap = old->cout_wrap;
1414 new.cin_delta = old->cin_delta;
1415 new.cout_delta = old->cout_delta;
1416 new.throttle_in = old->throttle_in;
1417 new.throttle_out = old->throttle_out;
1418 new.filter_in = old->filter_in;
1419 new.filter_out = old->filter_out;
1420 new.mru = old->mru;
1421 new.opened = old->opened;
1422 new.die = old->die;
1423 new.session_timeout = old->session_timeout;
1424 new.idle_timeout = old->idle_timeout;
1425 new.last_packet = old->last_packet;
1426 new.last_data = old->last_data;
1427 new.dns1 = old->dns1;
1428 new.dns2 = old->dns2;
1429 new.tbf_in = old->tbf_in;
1430 new.tbf_out = old->tbf_out;
1431 new.random_vector_length = old->random_vector_length;
1432 new.tx_connect_speed = old->tx_connect_speed;
1433 new.rx_connect_speed = old->rx_connect_speed;
1434 new.timeout = old->timeout;
1435 new.mrru = old->mrru;
1436 new.mssf = old->mssf;
1437 new.epdis = old->epdis;
1438 new.bundle = old->bundle;
1439 new.snoop_ip = old->snoop_ip;
1440 new.snoop_port = old->snoop_port;
1441 new.walled_garden = old->walled_garden;
1442 new.ipv6prefixlen = old->ipv6prefixlen;
1443 new.ipv6route = old->ipv6route;
1444
1445 memcpy(new.random_vector, old->random_vector, sizeof(new.random_vector));
1446 memcpy(new.user, old->user, sizeof(new.user));
1447 memcpy(new.called, old->called, sizeof(new.called));
1448 memcpy(new.calling, old->calling, sizeof(new.calling));
1449
1450 for (i = 0; i < MAXROUTE; i++)
1451 memcpy(&new.route[i], &old->route[i], sizeof(new.route[i]));
1452
1453 return (uint8_t *) &new;
1454 }
1455
1456 //
1457 // Process a heartbeat..
1458 //
1459 // v6: added RADIUS class attribute, re-ordered session structure
1460 static int cluster_process_heartbeat(uint8_t *data, int size, int more, uint8_t *p, in_addr_t addr)
1461 {
1462 heartt *h;
1463 int s = size - (p-data);
1464 int i, type;
1465 int hb_ver = more;
1466
1467 #if HB_VERSION != 6
1468 # error "need to update cluster_process_heartbeat()"
1469 #endif
1470
1471 // we handle versions 5 through 6
1472 if (hb_ver < 5 || hb_ver > HB_VERSION) {
1473 LOG(0, 0, 0, "Received a heartbeat version that I don't support (%d)!\n", hb_ver);
1474 return -1; // Ignore it??
1475 }
1476
1477 if (size > sizeof(past_hearts[0].data)) {
1478 LOG(0, 0, 0, "Received an oversize heartbeat from %s (%d)!\n", fmtaddr(addr, 0), size);
1479 return -1;
1480 }
1481
1482 if (s < sizeof(*h))
1483 goto shortpacket;
1484
1485 h = (heartt *) p;
1486 p += sizeof(*h);
1487 s -= sizeof(*h);
1488
1489 if (h->clusterid != config->bind_address)
1490 return -1; // It's not part of our cluster.
1491
1492 if (config->cluster_iam_master) { // Sanity...
1493 // Note that this MUST match the election process above!
1494
1495 LOG(0, 0, 0, "I just got a heartbeat from master %s, but _I_ am the master!\n", fmtaddr(addr, 0));
1496 if (!h->basetime) {
1497 LOG(0, 0, 0, "Heartbeat with zero basetime! Ignoring\n");
1498 return -1; // Skip it.
1499 }
1500
1501 if (h->table_version > config->cluster_table_version) {
1502 LOG(0, 0, 0, "They've seen more state changes (%" PRIu64 " vs my %" PRIu64 ") so I'm gone!\n",
1503 h->table_version, config->cluster_table_version);
1504
1505 kill(0, SIGTERM);
1506 exit(1);
1507 }
1508
1509 if (h->table_version < config->cluster_table_version)
1510 return -1;
1511
1512 if (basetime > h->basetime) {
1513 LOG(0, 0, 0, "They're an older master than me so I'm gone!\n");
1514 kill(0, SIGTERM);
1515 exit(1);
1516 }
1517
1518 if (basetime < h->basetime)
1519 return -1;
1520
1521 if (my_address < addr) { // Tie breaker.
1522 LOG(0, 0, 0, "They're a higher IP address than me, so I'm gone!\n");
1523 kill(0, SIGTERM);
1524 exit(1);
1525 }
1526
1527 //
1528 // Send it a unicast heartbeat to give it a chance to die.
1529 // NOTE: It's actually safe to do seq-number - 1 without checking
1530 // for wrap around.
1531 //
1532 cluster_catchup_slave(config->cluster_seq_number - 1, addr);
1533
1534 return -1; // Skip it.
1535 }
1536
1537 //
1538 // Try and guard against a stray master appearing.
1539 //
1540 // Ignore heartbeats received from another master before the
1541 // timeout (less a smidgen) for the old master has elapsed.
1542 //
1543 // Note that after a clean failover, the cluster_master_address
1544 // is cleared, so this doesn't run.
1545 //
1546 if (config->cluster_master_address && addr != config->cluster_master_address) {
1547 LOG(0, 0, 0, "Ignoring stray heartbeat from %s, current master %s has not yet timed out (last heartbeat %.1f seconds ago).\n",
1548 fmtaddr(addr, 0), fmtaddr(config->cluster_master_address, 1),
1549 0.1 * (TIME - config->cluster_last_hb));
1550 return -1; // ignore
1551 }
1552
1553 if (config->cluster_seq_number == -1) // Don't have one. Just align to the master...
1554 config->cluster_seq_number = h->seq;
1555
1556 config->cluster_last_hb = TIME; // Reset to ensure that we don't become master!!
1557 config->cluster_last_hb_ver = hb_ver; // remember what cluster version the master is using
1558
1559 if (config->cluster_seq_number != h->seq) { // Out of sequence heartbeat!
1560 static int lastseen_seq = 0;
1561 static time_t lastseen_time = 0;
1562
1563 // limit to once per second for a particular seq#
1564 int ask = (config->cluster_seq_number != lastseen_seq || time_now != lastseen_time);
1565
1566 LOG(1, 0, 0, "HB: Got seq# %d but was expecting %d. %s.\n",
1567 h->seq, config->cluster_seq_number,
1568 ask ? "Asking for resend" : "Ignoring");
1569
1570 if (ask)
1571 {
1572 lastseen_seq = config->cluster_seq_number;
1573 lastseen_time = time_now;
1574 peer_send_message(addr, C_LASTSEEN, config->cluster_seq_number, NULL, 0);
1575 }
1576
1577 config->cluster_last_hb = TIME; // Reset to ensure that we don't become master!!
1578
1579 // Just drop the packet. The master will resend it as part of the catchup.
1580
1581 return 0;
1582 }
1583 // Save the packet in our buffer.
1584 // This is needed in case we become the master.
1585 config->cluster_seq_number = (h->seq+1)%HB_MAX_SEQ;
1586 i = h->seq % HB_HISTORY_SIZE;
1587 past_hearts[i].seq = h->seq;
1588 past_hearts[i].size = size;
1589 memcpy(&past_hearts[i].data, data, size); // Save it.
1590
1591
1592 // Check that we don't have too many undefined sessions, and
1593 // that the free session pointer is correct.
1594 cluster_check_sessions(h->highsession, h->freesession, h->highbundle, h->hightunnel);
1595
1596 if (h->interval != config->cluster_hb_interval)
1597 {
1598 LOG(2, 0, 0, "Master set ping/heartbeat interval to %u (was %u)\n",
1599 h->interval, config->cluster_hb_interval);
1600
1601 config->cluster_hb_interval = h->interval;
1602 }
1603
1604 if (h->timeout != config->cluster_hb_timeout)
1605 {
1606 LOG(2, 0, 0, "Master set heartbeat timeout to %u (was %u)\n",
1607 h->timeout, config->cluster_hb_timeout);
1608
1609 config->cluster_hb_timeout = h->timeout;
1610 }
1611
1612 // Ok. process the packet...
1613 while ( s > 0) {
1614
1615 type = *((uint32_t *) p);
1616 p += sizeof(uint32_t);
1617 s -= sizeof(uint32_t);
1618
1619 more = *((uint32_t *) p);
1620 p += sizeof(uint32_t);
1621 s -= sizeof(uint32_t);
1622
1623 switch (type) {
1624 case C_CSESSION: { // Compressed session structure.
1625 uint8_t c[ sizeof(sessiont) + 2];
1626 int size;
1627 uint8_t *orig_p = p;
1628
1629 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c) );
1630 s -= (p - orig_p);
1631
1632 // session struct changed with v6
1633 if (hb_ver < 6)
1634 {
1635 if (size != sizeof(struct oldsession)) {
1636 LOG(0, 0, 0, "DANGER: Received a v%d CSESSION that didn't decompress correctly!\n", hb_ver);
1637 // Now what? Should exit! No longer up to date!
1638 break;
1639 }
1640 cluster_recv_session(more, convert_session((struct oldsession *) c));
1641 break;
1642 }
1643
1644 if (size != sizeof(sessiont) ) { // Ouch! Very very bad!
1645 LOG(0, 0, 0, "DANGER: Received a CSESSION that didn't decompress correctly!\n");
1646 // Now what? Should exit! No longer up to date!
1647 break;
1648 }
1649
1650 cluster_recv_session(more, c);
1651 break;
1652 }
1653 case C_SESSION:
1654 if (hb_ver < 6)
1655 {
1656 if (s < sizeof(struct oldsession))
1657 goto shortpacket;
1658
1659 cluster_recv_session(more, convert_session((struct oldsession *) p));
1660
1661 p += sizeof(struct oldsession);
1662 s -= sizeof(struct oldsession);
1663 break;
1664 }
1665
1666 if ( s < sizeof(session[more]))
1667 goto shortpacket;
1668
1669 cluster_recv_session(more, p);
1670
1671 p += sizeof(session[more]);
1672 s -= sizeof(session[more]);
1673 break;
1674
1675 case C_CTUNNEL: { // Compressed tunnel structure.
1676 uint8_t c[ sizeof(tunnelt) + 2];
1677 int size;
1678 uint8_t *orig_p = p;
1679
1680 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c));
1681 s -= (p - orig_p);
1682
1683 if (size != sizeof(tunnelt) ) { // Ouch! Very very bad!
1684 LOG(0, 0, 0, "DANGER: Received a CTUNNEL that didn't decompress correctly!\n");
1685 // Now what? Should exit! No longer up to date!
1686 break;
1687 }
1688
1689 cluster_recv_tunnel(more, c);
1690 break;
1691
1692 }
1693 case C_TUNNEL:
1694 if ( s < sizeof(tunnel[more]))
1695 goto shortpacket;
1696
1697 cluster_recv_tunnel(more, p);
1698
1699 p += sizeof(tunnel[more]);
1700 s -= sizeof(tunnel[more]);
1701 break;
1702
1703 case C_CBUNDLE: { // Compressed bundle structure.
1704 uint8_t c[ sizeof(bundlet) + 2];
1705 int size;
1706 uint8_t *orig_p = p;
1707
1708 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c));
1709 s -= (p - orig_p);
1710
1711 if (size != sizeof(bundlet) ) { // Ouch! Very very bad!
1712 LOG(0, 0, 0, "DANGER: Received a CBUNDLE that didn't decompress correctly!\n");
1713 // Now what? Should exit! No longer up to date!
1714 break;
1715 }
1716
1717 cluster_recv_bundle(more, c);
1718 break;
1719
1720 }
1721 case C_BUNDLE:
1722 if ( s < sizeof(bundle[more]))
1723 goto shortpacket;
1724
1725 cluster_recv_bundle(more, p);
1726
1727 p += sizeof(bundle[more]);
1728 s -= sizeof(bundle[more]);
1729 break;
1730 default:
1731 LOG(0, 0, 0, "DANGER: I received a heartbeat element where I didn't understand the type! (%d)\n", type);
1732 return -1; // can't process any more of the packet!!
1733 }
1734 }
1735
1736 if (config->cluster_master_address != addr)
1737 {
1738 LOG(0, 0, 0, "My master just changed from %s to %s!\n",
1739 fmtaddr(config->cluster_master_address, 0), fmtaddr(addr, 1));
1740
1741 config->cluster_master_address = addr;
1742 }
1743
1744 config->cluster_last_hb = TIME; // Successfully received a heartbeat!
1745 config->cluster_table_version = h->table_version;
1746 return 0;
1747
1748 shortpacket:
1749 LOG(0, 0, 0, "I got an incomplete heartbeat packet! This means I'm probably out of sync!!\n");
1750 return -1;
1751 }
1752
1753 //
1754 // We got a packet on the cluster port!
1755 // Handle pings, lastseens, and heartbeats!
1756 //
1757 int processcluster(uint8_t *data, int size, in_addr_t addr)
1758 {
1759 int type, more;
1760 uint8_t *p = data;
1761 int s = size;
1762
1763 if (addr == my_address)
1764 return -1; // Ignore it. Something looped back the multicast!
1765
1766 LOG(5, 0, 0, "Process cluster: %d bytes from %s\n", size, fmtaddr(addr, 0));
1767
1768 if (s <= 0) // Any data there??
1769 return -1;
1770
1771 if (s < 8)
1772 goto shortpacket;
1773
1774 type = *((uint32_t *) p);
1775 p += sizeof(uint32_t);
1776 s -= sizeof(uint32_t);
1777
1778 more = *((uint32_t *) p);
1779 p += sizeof(uint32_t);
1780 s -= sizeof(uint32_t);
1781
1782 switch (type)
1783 {
1784 case C_PING: // Update the peers table.
1785 return cluster_add_peer(addr, more, (pingt *) p, s);
1786
1787 case C_MASTER: // Our master is wrong
1788 return cluster_set_master(addr, more);
1789
1790 case C_LASTSEEN: // Catch up a slave (slave missed a packet).
1791 return cluster_catchup_slave(more, addr);
1792
1793 case C_FORWARD: // Forwarded control packet. pass off to processudp.
1794 case C_FORWARD_DAE: // Forwarded DAE packet. pass off to processdae.
1795 if (!config->cluster_iam_master)
1796 {
1797 LOG(0, 0, 0, "I'm not the master, but I got a C_FORWARD%s from %s?\n",
1798 type == C_FORWARD_DAE ? "_DAE" : "", fmtaddr(addr, 0));
1799
1800 return -1;
1801 }
1802 else
1803 {
1804 struct sockaddr_in a;
1805 a.sin_addr.s_addr = more;
1806
1807 a.sin_port = *(int *) p;
1808 s -= sizeof(int);
1809 p += sizeof(int);
1810
1811 LOG(4, 0, 0, "Got a forwarded %spacket... (%s:%d)\n",
1812 type == C_FORWARD_DAE ? "DAE " : "", fmtaddr(more, 0), a.sin_port);
1813
1814 STAT(recv_forward);
1815 if (type == C_FORWARD_DAE)
1816 {
1817 struct in_addr local;
1818 local.s_addr = config->bind_address ? config->bind_address : my_address;
1819 processdae(p, s, &a, sizeof(a), &local);
1820 }
1821 else
1822 processudp(p, s, &a);
1823
1824 return 0;
1825 }
1826
1827 case C_MPPP_FORWARD:
1828 // Receive a MPPP packet from a slave.
1829 if (!config->cluster_iam_master) {
1830 LOG(0, 0, 0, "I'm not the master, but I got a C_MPPP_FORWARD from %s?\n", fmtaddr(addr, 0));
1831 return -1;
1832 }
1833
1834 processipout(p, s);
1835 return 0;
1836
1837 case C_THROTTLE: { // Receive a forwarded packet from a slave.
1838 if (!config->cluster_iam_master) {
1839 LOG(0, 0, 0, "I'm not the master, but I got a C_THROTTLE from %s?\n", fmtaddr(addr, 0));
1840 return -1;
1841 }
1842
1843 tbf_queue_packet(more, p, s); // The TBF id tells whether it goes in or out.
1844 return 0;
1845 }
1846 case C_GARDEN:
1847 // Receive a walled garden packet from a slave.
1848 if (!config->cluster_iam_master) {
1849 LOG(0, 0, 0, "I'm not the master, but I got a C_GARDEN from %s?\n", fmtaddr(addr, 0));
1850 return -1;
1851 }
1852
1853 tun_write(p, s);
1854 return 0;
1855
1856 case C_BYTES:
1857 if (!config->cluster_iam_master) {
1858 LOG(0, 0, 0, "I'm not the master, but I got a C_BYTES from %s?\n", fmtaddr(addr, 0));
1859 return -1;
1860 }
1861
1862 return cluster_handle_bytes(p, s);
1863
1864 case C_KILL: // The master asked us to die!? (usually because we're too out of date).
1865 if (config->cluster_iam_master) {
1866 LOG(0, 0, 0, "_I_ am master, but I received a C_KILL from %s! (Seq# %d)\n", fmtaddr(addr, 0), more);
1867 return -1;
1868 }
1869 if (more != config->cluster_seq_number) {
1870 LOG(0, 0, 0, "The master asked us to die but the seq number didn't match!?\n");
1871 return -1;
1872 }
1873
1874 if (addr != config->cluster_master_address) {
1875 LOG(0, 0, 0, "Received a C_KILL from %s which doesn't match config->cluster_master_address (%s)\n",
1876 fmtaddr(addr, 0), fmtaddr(config->cluster_master_address, 1));
1877 // We can only warn about it. The master might really have switched!
1878 }
1879
1880 LOG(0, 0, 0, "Received a valid C_KILL: I'm going to die now.\n");
1881 kill(0, SIGTERM);
1882 exit(0); // Let's be paranoid.
1883 return -1; // Just signalling the compiler.
1884
1885 case C_HEARTBEAT:
1886 LOG(4, 0, 0, "Got a heartbeat from %s\n", fmtaddr(addr, 0));
1887 return cluster_process_heartbeat(data, size, more, p, addr);
1888
1889 default:
1890 LOG(0, 0, 0, "Strange type packet received on cluster socket (%d)\n", type);
1891 return -1;
1892 }
1893 return 0;
1894
1895 shortpacket:
1896 LOG(0, 0, 0, "I got a _short_ cluster heartbeat packet! This means I'm probably out of sync!!\n");
1897 return -1;
1898 }
1899
1900 //====================================================================================================
1901
1902 int cmd_show_cluster(struct cli_def *cli, char *command, char **argv, int argc)
1903 {
1904 int i;
1905
1906 if (CLI_HELP_REQUESTED)
1907 return CLI_HELP_NO_ARGS;
1908
1909 cli_print(cli, "Cluster status : %s", config->cluster_iam_master ? "Master" : "Slave" );
1910 cli_print(cli, "My address : %s", fmtaddr(my_address, 0));
1911 cli_print(cli, "VIP address : %s", fmtaddr(config->bind_address, 0));
1912 cli_print(cli, "Multicast address: %s", fmtaddr(config->cluster_address, 0));
1913 cli_print(cli, "Multicast i'face : %s", config->cluster_interface);
1914
1915 if (!config->cluster_iam_master) {
1916 cli_print(cli, "My master : %s (last heartbeat %.1f seconds old)",
1917 config->cluster_master_address
1918 ? fmtaddr(config->cluster_master_address, 0)
1919 : "Not defined",
1920 0.1 * (TIME - config->cluster_last_hb));
1921 cli_print(cli, "Uptodate : %s", config->cluster_iam_uptodate ? "Yes" : "No");
1922 cli_print(cli, "Table version # : %" PRIu64, config->cluster_table_version);
1923 cli_print(cli, "Next sequence number expected: %d", config->cluster_seq_number);
1924 cli_print(cli, "%d sessions undefined of %d", config->cluster_undefined_sessions, config->cluster_highest_sessionid);
1925 cli_print(cli, "%d bundles undefined of %d", config->cluster_undefined_bundles, config->cluster_highest_bundleid);
1926 cli_print(cli, "%d tunnels undefined of %d", config->cluster_undefined_tunnels, config->cluster_highest_tunnelid);
1927 } else {
1928 cli_print(cli, "Table version # : %" PRIu64, config->cluster_table_version);
1929 cli_print(cli, "Next heartbeat # : %d", config->cluster_seq_number);
1930 cli_print(cli, "Highest session : %d", config->cluster_highest_sessionid);
1931 cli_print(cli, "Highest bundle : %d", config->cluster_highest_bundleid);
1932 cli_print(cli, "Highest tunnel : %d", config->cluster_highest_tunnelid);
1933 cli_print(cli, "%d changes queued for sending", config->cluster_num_changes);
1934 }
1935 cli_print(cli, "%d peers.", num_peers);
1936
1937 if (num_peers)
1938 cli_print(cli, "%20s %10s %8s", "Address", "Basetime", "Age");
1939 for (i = 0; i < num_peers; ++i) {
1940 cli_print(cli, "%20s %10u %8d", fmtaddr(peers[i].peer, 0),
1941 peers[i].basetime, TIME - peers[i].timestamp);
1942 }
1943 return CLI_OK;
1944 }
1945
1946 //
1947 // Simple run-length-encoding compression.
1948 // Format is
1949 // 1 byte < 128 = count of non-zero bytes following. // Not legal to be zero.
1950 // n non-zero bytes;
1951 // or
1952 // 1 byte > 128 = (count - 128) run of zero bytes. //
1953 // repeat.
1954 // count == 0 indicates end of compressed stream.
1955 //
1956 // Compress from 'src' into 'dst'. return number of bytes
1957 // used from 'dst'.
1958 // Updates *src_p to indicate 1 past last bytes used.
1959 //
1960 // We could get an extra byte in the zero runs by storing (count-1)
1961 // but I'm playing it safe.
1962 //
1963 // Worst case is a 50% expansion in space required (trying to
1964 // compress { 0x00, 0x01 } * N )
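//
// Worked example (for illustration): the 8-byte input
//   05 00 00 00 00 00 00 07
// compresses to the 6 bytes
//   01 05    (1 literal byte: 0x05)
//   86       (0x80 | 6: a run of 6 zero bytes)
//   01 07    (1 literal byte: 0x07)
//   00       (stop marker)
// and rle_decompress() below reverses this exactly.
//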
1965 static int rle_compress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize)
1966 {
1967 int count;
1968 int orig_dsize = dsize;
1969 uint8_t *x, *src;
1970 src = *src_p;
1971
1972 while (ssize > 0 && dsize > 2) {
1973 count = 0;
1974 x = dst++; --dsize; // Reserve space for count byte..
1975
1976 if (*src) { // Copy a run of non-zero bytes.
1977 while (*src && count < 127 && ssize > 0 && dsize > 1) { // Count number of non-zero bytes.
1978 *dst++ = *src++;
1979 --dsize; --ssize;
1980 ++count;
1981 }
1982 *x = count; // Store number of non-zero bytes. Guaranteed to be non-zero!
1983
1984 } else { // Compress a run of zero bytes.
1985 while (*src == 0 && count < 127 && ssize > 0) {
1986 ++src;
1987 --ssize;
1988 ++count;
1989 }
1990 *x = count | 0x80 ;
1991 }
1992 }
1993
1994 *dst++ = 0x0; // Add Stop byte.
1995 --dsize;
1996
1997 *src_p = src;
1998 return (orig_dsize - dsize);
1999 }
2000
2001 //
2002 // Decompress from '*src_p' into 'dst'.
2003 // 'dsize' is the size of the decompression buffer available.
2004 //
2005 // Returns the number of bytes decompressed.
2006 //
2007 // Decompresses from '*src_p' into 'dst'.
2008 // Return the number of dst bytes used.
2009 // Updates the 'src_p' pointer to point to the
2010 // first un-used byte.
2011 static int rle_decompress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize)
2012 {
2013 int count;
2014 int orig_dsize = dsize;
2015 uint8_t *src = *src_p;
2016
2017 while (ssize >0 && dsize > 0) { // While there's more to decompress, and there's room in the decompress buffer...
2018 count = *src++; --ssize; // get the count byte from the source.
2019 if (count == 0x0) // End marker reached? If so, finish.
2020 break;
2021
2022 if (count & 0x80) { // Decompress a run of zeros
2023 for (count &= 0x7f ; count > 0 && dsize > 0; --count) {
2024 *dst++ = 0x0;
2025 --dsize;
2026 }
2027 } else { // Copy run of non-zero bytes.
2028 for ( ; count > 0 && ssize && dsize; --count) { // Copy non-zero bytes across.
2029 *dst++ = *src++;
2030 --ssize; --dsize;
2031 }
2032 }
2033 }
2034 *src_p = src;
2035 return (orig_dsize - dsize);
2036 }