1 // L2TPNS Clustering Stuff
2
3 char const *cvs_id_cluster = "$Id: cluster.c,v 1.55 2009/12/08 14:49:28 bodea Exp $";
4
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <stdarg.h>
8 #include <unistd.h>
9 #include <inttypes.h>
10 #include <sys/file.h>
#include <fcntl.h>	// fcntl(), F_GETFL, O_NONBLOCK used in cluster_init()
11 #include <sys/stat.h>
12 #include <sys/socket.h>
13 #include <netinet/in.h>
14 #include <arpa/inet.h>
15 #include <sys/ioctl.h>
16 #include <net/if.h>
17 #include <string.h>
18 #include <malloc.h>
19 #include <errno.h>
20 #include <libcli.h>
21
22 #include "l2tpns.h"
23 #include "cluster.h"
24 #include "util.h"
25 #include "tbf.h"
26
27 #ifdef BGP
28 #include "bgp.h"
29 #endif
30 /*
31 * All cluster packets have the same format.
32 *
33 * One or more instances of
34 * a 32 bit 'type' id.
35 * a 32 bit 'extra' value, dependent on the 'type'.
36 * zero or more bytes of structure data, dependent on the type.
37 *
38 */
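//
// Illustrative sketch (not from the original source): one element could be
// pictured as the struct below, assuming the host byte order that add_type()
// writes with. The struct name is hypothetical; the code builds and parses
// these fields by hand (see add_type() and processcluster()).
//
//	struct cluster_element {
//		uint32_t type;   // C_PING, C_HEARTBEAT, C_SESSION, ...
//		uint32_t more;   // 'extra' word; meaning depends on 'type'
//		uint8_t  data[]; // zero or more bytes of type-specific payload
//	};
//
// Elements are simply laid end to end within a packet.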
39
40 // Module variables.
41 extern int cluster_sockfd; // The file descriptor for the cluster communications port.
42
43 in_addr_t my_address = 0; // The network address of my ethernet port.
44 static int walk_session_number = 0; // The next session to send when doing the slow table walk.
45 static int walk_bundle_number = 0; // The next bundle to send when doing the slow table walk.
46 static int walk_tunnel_number = 0; // The next tunnel to send when doing the slow table walk.
47 int forked = 0; // Sanity check: CLI must not diddle with heartbeat table
48
49 #define MAX_HEART_SIZE (8192) // Maximum size of heartbeat packet. Must be less than max IP packet size :)
50 #define MAX_CHANGES (MAX_HEART_SIZE/(sizeof(sessiont) + sizeof(int) ) - 2) // Assumes a session is the biggest type!
51
52 static struct {
53 int type;
54 int id;
55 } cluster_changes[MAX_CHANGES]; // Queue of changed structures that need to go out when next heartbeat.
56
57 static struct {
58 int seq;
59 int size;
60 uint8_t data[MAX_HEART_SIZE];
61 } past_hearts[HB_HISTORY_SIZE]; // Ring buffer of heartbeats that we've recently sent out. Needed so
62 // we can re-transmit if needed.
63
64 static struct {
65 in_addr_t peer;
66 uint32_t basetime;
67 clockt timestamp;
68 int uptodate;
69 } peers[CLUSTER_MAX_SIZE]; // List of all the peers we've heard from.
70 static int num_peers; // Number of peers in list.
71
72 static int rle_decompress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize);
73 static int rle_compress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize);
74
75 //
76 // Create a listening socket
77 //
78 // This joins the cluster multi-cast group.
79 //
80 int cluster_init()
81 {
82 struct sockaddr_in addr;
83 struct sockaddr_in interface_addr;
84 struct ip_mreq mreq;
85 struct ifreq ifr;
86 int opt;
87
88 config->cluster_undefined_sessions = MAXSESSION-1;
89 config->cluster_undefined_bundles = MAXBUNDLE-1;
90 config->cluster_undefined_tunnels = MAXTUNNEL-1;
91
92 if (!config->cluster_address)
93 return 0;
94 if (!*config->cluster_interface)
95 return 0;
96
97 cluster_sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
98
99 memset(&addr, 0, sizeof(addr));
100 addr.sin_family = AF_INET;
101 addr.sin_port = htons(CLUSTERPORT);
102 addr.sin_addr.s_addr = INADDR_ANY;
103 opt = 1;
setsockopt(cluster_sockfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)); // SO_REUSEADDR takes an int flag
104
105 opt = fcntl(cluster_sockfd, F_GETFL, 0);
106 fcntl(cluster_sockfd, F_SETFL, opt | O_NONBLOCK);
107
108 if (bind(cluster_sockfd, (void *) &addr, sizeof(addr)) < 0)
109 {
110 LOG(0, 0, 0, "Failed to bind cluster socket: %s\n", strerror(errno));
111 return -1;
112 }
113
114 strcpy(ifr.ifr_name, config->cluster_interface);
115 if (ioctl(cluster_sockfd, SIOCGIFADDR, &ifr) < 0)
116 {
117 LOG(0, 0, 0, "Failed to get interface address for (%s): %s\n", config->cluster_interface, strerror(errno));
118 return -1;
119 }
120
121 memcpy(&interface_addr, &ifr.ifr_addr, sizeof(interface_addr));
122 my_address = interface_addr.sin_addr.s_addr;
123
124 // Join multicast group.
125 mreq.imr_multiaddr.s_addr = config->cluster_address;
126 mreq.imr_interface = interface_addr.sin_addr;
127
128
129 opt = 0; // Turn off multicast loopback.
130 setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_LOOP, &opt, sizeof(opt));
131
132 if (config->cluster_mcast_ttl != 1)
133 {
134 uint8_t ttl = 0;
135 if (config->cluster_mcast_ttl > 0)
136 ttl = config->cluster_mcast_ttl < 256 ? config->cluster_mcast_ttl : 255;
137
138 setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl));
139 }
140
141 if (setsockopt(cluster_sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
142 {
143 LOG(0, 0, 0, "Failed to setsockopt (join mcast group): %s\n", strerror(errno));
144 return -1;
145 }
146
147 if (setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_IF, &interface_addr, sizeof(interface_addr)) < 0)
148 {
149 LOG(0, 0, 0, "Failed to setsockopt (set mcast interface): %s\n", strerror(errno));
150 return -1;
151 }
152
153 config->cluster_last_hb = TIME;
154 config->cluster_seq_number = -1;
155
156 return cluster_sockfd;
157 }
158
159
160 //
161 // Send a chunk of data to the entire cluster (usually via the multicast
162 // address ).
163 //
164
165 static int cluster_send_data(void *data, int datalen)
166 {
167 struct sockaddr_in addr = {0};
168
169 if (!cluster_sockfd) return -1;
170 if (!config->cluster_address) return 0;
171
172 addr.sin_addr.s_addr = config->cluster_address;
173 addr.sin_port = htons(CLUSTERPORT);
174 addr.sin_family = AF_INET;
175
176 LOG(5, 0, 0, "Cluster send data: %d bytes\n", datalen);
177
178 if (sendto(cluster_sockfd, data, datalen, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
179 {
180 LOG(0, 0, 0, "sendto: %s\n", strerror(errno));
181 return -1;
182 }
183
184 return 0;
185 }
186
187 //
188 // Add a chunk of data to a heartbeat packet.
189 // Maintains the format. Assumes that the caller
190 // has passed in a big enough buffer!
191 //
192 static void add_type(uint8_t **p, int type, int more, uint8_t *data, int size)
193 {
194 *((uint32_t *) (*p)) = type;
195 *p += sizeof(uint32_t);
196
197 *((uint32_t *)(*p)) = more;
198 *p += sizeof(uint32_t);
199
200 if (data && size > 0) {
201 memcpy(*p, data, size);
202 *p += size;
203 }
204 }
205
206 // advertise our presence via BGP or gratuitous ARP
207 static void advertise_routes(void)
208 {
209 #ifdef BGP
210 if (bgp_configured)
211 bgp_enable_routing(1);
212 else
213 #endif /* BGP */
214 if (config->send_garp)
215 send_garp(config->bind_address); // Start taking traffic.
216 }
217
218 // withdraw our routes (BGP only)
219 static void withdraw_routes(void)
220 {
221 #ifdef BGP
222 if (bgp_configured)
223 bgp_enable_routing(0);
224 #endif /* BGP */
225 }
226
227 static void cluster_uptodate(void)
228 {
229 if (config->cluster_iam_uptodate)
230 return;
231
232 if (config->cluster_undefined_sessions || config->cluster_undefined_tunnels || config->cluster_undefined_bundles)
233 return;
234
235 config->cluster_iam_uptodate = 1;
236
237 LOG(0, 0, 0, "Now uptodate with master.\n");
238 advertise_routes();
239 }
240
241 //
242 // Send a unicast UDP packet to a peer with 'data' as the
243 // contents.
244 //
245 static int peer_send_data(in_addr_t peer, uint8_t *data, int size)
246 {
247 struct sockaddr_in addr = {0};
248
249 if (!cluster_sockfd) return -1;
250 if (!config->cluster_address) return 0;
251
252 if (!peer) // Odd??
253 return -1;
254
255 addr.sin_addr.s_addr = peer;
256 addr.sin_port = htons(CLUSTERPORT);
257 addr.sin_family = AF_INET;
258
259 LOG_HEX(5, "Peer send", data, size);
260
261 if (sendto(cluster_sockfd, data, size, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
262 {
263 LOG(0, 0, 0, "sendto: %s\n", strerror(errno));
264 return -1;
265 }
266
267 return 0;
268 }
269
270 //
271 // Send a structured message to a peer with a single element of type 'type'.
272 //
273 static int peer_send_message(in_addr_t peer, int type, int more, uint8_t *data, int size)
274 {
275 uint8_t buf[65536]; // Vast overkill.
276 uint8_t *p = buf;
277
278 LOG(4, 0, 0, "Sending message to peer (type %d, more %d, size %d)\n", type, more, size);
279 add_type(&p, type, more, data, size);
280
281 return peer_send_data(peer, buf, (p-buf) );
282 }
283
284 // send a packet to the master
285 static int _forward_packet(uint8_t *data, int size, in_addr_t addr, int port, int type)
286 {
287 uint8_t buf[65536]; // Vast overkill.
288 uint8_t *p = buf;
289
290 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
291 return -1;
292
293 LOG(4, 0, 0, "Forwarding packet from %s to master (size %d)\n", fmtaddr(addr, 0), size);
294
295 STAT(c_forwarded);
296 add_type(&p, type, addr, (uint8_t *) &port, sizeof(port)); // ick. should be uint16_t
297 memcpy(p, data, size);
298 p += size;
299
300 return peer_send_data(config->cluster_master_address, buf, (p - buf));
301 }
302
303 //
304 // Forward a state changing packet to the master.
305 //
306 // The master just processes the payload as if it had
307 // received it off the tun device.
308 //
309 int master_forward_packet(uint8_t *data, int size, in_addr_t addr, int port)
310 {
311 return _forward_packet(data, size, addr, port, C_FORWARD);
312 }
313
314 // Forward a DAE RADIUS packet to the master.
315 int master_forward_dae_packet(uint8_t *data, int size, in_addr_t addr, int port)
316 {
317 return _forward_packet(data, size, addr, port, C_FORWARD_DAE);
318 }
319
320 //
321 // Forward a throttled packet to the master for handling.
322 //
323 // The master just drops the packet into the appropriate
324 // token bucket queue, and lets normal processing take care
325 // of it.
326 //
327 int master_throttle_packet(int tbfid, uint8_t *data, int size)
328 {
329 uint8_t buf[65536]; // Vast overkill.
330 uint8_t *p = buf;
331
332 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
333 return -1;
334
335 LOG(4, 0, 0, "Throttling packet master (size %d, tbfid %d)\n", size, tbfid);
336
337 add_type(&p, C_THROTTLE, tbfid, data, size);
338
339 return peer_send_data(config->cluster_master_address, buf, (p-buf) );
340
341 }
342
343 //
344 // Forward a walled garden packet to the master for handling.
345 //
346 // The master just writes the packet straight to the tun
347 // device (where it will normally loop through the
348 // firewall rules, and come back in on the tun device)
349 //
350 // (Note that this must be called with the tun header
351 // as the start of the data).
352 int master_garden_packet(sessionidt s, uint8_t *data, int size)
353 {
354 uint8_t buf[65536]; // Vast overkill.
355 uint8_t *p = buf;
356
357 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
358 return -1;
359
360 LOG(4, 0, 0, "Walled garden packet to master (size %d)\n", size);
361
362 add_type(&p, C_GARDEN, s, data, size);
363
364 return peer_send_data(config->cluster_master_address, buf, (p-buf));
365
366 }
367
368 //
369 // Send a chunk of data as a heartbeat..
370 // We save it in the history buffer as we do so.
371 //
372 static void send_heartbeat(int seq, uint8_t *data, int size)
373 {
374 int i;
375
376 if (size > sizeof(past_hearts[0].data))
377 {
378 LOG(0, 0, 0, "Tried to heartbeat something larger than the maximum packet!\n");
379 kill(0, SIGTERM);
380 exit(1);
381 }
382 i = seq % HB_HISTORY_SIZE;
383 past_hearts[i].seq = seq;
384 past_hearts[i].size = size;
385 memcpy(&past_hearts[i].data, data, size); // Save it.
386 cluster_send_data(data, size);
387 }
388
389 //
390 // Send an 'i am alive' message to every machine in the cluster.
391 //
392 void cluster_send_ping(time_t basetime)
393 {
394 uint8_t buff[100 + sizeof(pingt)];
395 uint8_t *p = buff;
396 pingt x;
397
398 if (config->cluster_iam_master && basetime) // We're heartbeating so no need to ping.
399 return;
400
401 LOG(5, 0, 0, "Sending cluster ping...\n");
402
403 x.ver = 1;
404 x.addr = config->bind_address;
405 x.undef = config->cluster_undefined_sessions + config->cluster_undefined_tunnels + config->cluster_undefined_bundles;
406 x.basetime = basetime;
407
408 add_type(&p, C_PING, basetime, (uint8_t *) &x, sizeof(x));
409 cluster_send_data(buff, (p-buff) );
410 }
411
412 //
413 // Walk the session counters looking for non-zero ones to send
414 // to the master. We send up to 600 of them at one time.
415 // We examine a maximum of 3000 sessions.
416 // (50k max sessions should mean that we normally
417 // examine the entire session table every 25 seconds).
418
419 #define MAX_B_RECS (600)
420 void master_update_counts(void)
421 {
422 int i, c;
423 bytest b[MAX_B_RECS+1];
424
425 if (config->cluster_iam_master) // Only happens on the slaves.
426 return;
427
428 if (!config->cluster_master_address) // If we don't have a master, skip it for a while.
429 return;
430
431 // C_BYTES format changed in 2.1.0 (cluster version 5)
432 // during upgrade from previous versions, hang onto our counters
433 // for a bit until the new master comes up
434 if (config->cluster_last_hb_ver < 5)
435 return;
436
437 i = MAX_B_RECS * 5; // Examine max 3000 sessions;
438 if (config->cluster_highest_sessionid > i)
439 i = config->cluster_highest_sessionid;
440
441 for ( c = 0; i > 0 ; --i) {
442 // Next session to look at.
443 walk_session_number++;
444 if ( walk_session_number > config->cluster_highest_sessionid)
445 walk_session_number = 1;
446
447 if (!sess_local[walk_session_number].cin && !sess_local[walk_session_number].cout)
448 continue; // Unchanged. Skip it.
449
450 b[c].sid = walk_session_number;
451 b[c].pin = sess_local[walk_session_number].pin;
452 b[c].pout = sess_local[walk_session_number].pout;
453 b[c].cin = sess_local[walk_session_number].cin;
454 b[c].cout = sess_local[walk_session_number].cout;
455
456 // Reset counters.
457 sess_local[walk_session_number].pin = sess_local[walk_session_number].pout = 0;
458 sess_local[walk_session_number].cin = sess_local[walk_session_number].cout = 0;
459
460 if (++c > MAX_B_RECS) // Send a max of 600 elements in a packet.
461 break;
462 }
463
464 if (!c) // Didn't find any that changed. Get out of here!
465 return;
466
467
468 // Forward the data to the master.
469 LOG(4, 0, 0, "Sending byte counters to master (%d elements)\n", c);
470 peer_send_message(config->cluster_master_address, C_BYTES, c, (uint8_t *) &b, sizeof(b[0]) * c);
471 return;
472 }
473
474 //
475 // On the master, check how our slaves are going. If
476 // one of them's not up-to-date we'll heartbeat faster.
477 // If we don't have any of them, then we need to turn
478 // on our own packet handling!
479 //
480 void cluster_check_slaves(void)
481 {
482 int i;
483 static int have_peers = 0;
484 int had_peers = have_peers;
485 clockt t = TIME;
486
487 if (!config->cluster_iam_master)
488 return; // Only runs on the master...
489
490 config->cluster_iam_uptodate = 1; // cleared in loop below
491
492 for (i = have_peers = 0; i < num_peers; i++)
493 {
494 if ((peers[i].timestamp + config->cluster_hb_timeout) < t)
495 continue; // Stale peer! Skip them.
496
497 if (!peers[i].basetime)
498 continue; // Shutdown peer! Skip them.
499
500 if (peers[i].uptodate)
501 have_peers++;
502 else
503 config->cluster_iam_uptodate = 0; // Start fast heartbeats
504 }
505
506 // in a cluster, withdraw/add routes when we get a peer/lose peers
507 if (have_peers != had_peers)
508 {
509 if (had_peers < config->cluster_master_min_adv &&
510 have_peers >= config->cluster_master_min_adv)
511 withdraw_routes();
512
513 else if (had_peers >= config->cluster_master_min_adv &&
514 have_peers < config->cluster_master_min_adv)
515 advertise_routes();
516 }
517 }
518
519 //
520 // Check that we have a master. If it's been too
521 // long since we heard from a master then hold an election.
522 //
523 void cluster_check_master(void)
524 {
525 int i, count, high_unique_id = 0;
526 int last_free = 0;
527 clockt t = TIME;
528 static int probed = 0;
529 int have_peers;
530
531 if (config->cluster_iam_master)
532 return; // Only runs on the slaves...
533
534 // If the master is late (missed 2 heartbeats by a second and a
535 // hair) it may be that the switch has dropped us from the
536 // multicast group; try unicasting probes to the master,
537 // which will hopefully respond with a unicast heartbeat that
538 // will allow us to limp along until the querier next runs.
539 if (config->cluster_master_address
540 && TIME > (config->cluster_last_hb + 2 * config->cluster_hb_interval + 11))
541 {
542 if (!probed || (TIME > (probed + 2 * config->cluster_hb_interval)))
543 {
544 probed = TIME;
545 LOG(1, 0, 0, "Heartbeat from master %.1fs late, probing...\n",
546 0.1 * (TIME - (config->cluster_last_hb + config->cluster_hb_interval)));
547
548 peer_send_message(config->cluster_master_address,
549 C_LASTSEEN, config->cluster_seq_number, NULL, 0);
550 }
551 } else { // We got a recent heartbeat; reset the probe flag.
552 probed = 0;
553 }
554
555 if (TIME < (config->cluster_last_hb + config->cluster_hb_timeout))
556 return; // Everything's ok!
557
558 config->cluster_last_hb = TIME + 1; // Just the one election thanks.
559 config->cluster_master_address = 0;
560
561 LOG(0, 0, 0, "Master timed out! Holding election...\n");
562
563 // In the process of shutting down, can't be master
564 if (main_quit)
565 return;
566
567 for (i = have_peers = 0; i < num_peers; i++)
568 {
569 if ((peers[i].timestamp + config->cluster_hb_timeout) < t)
570 continue; // Stale peer! Skip them.
571
572 if (!peers[i].basetime)
573 continue; // Shutdown peer! Skip them.
574
575 if (peers[i].basetime < basetime) {
576 LOG(1, 0, 0, "Expecting %s to become master\n", fmtaddr(peers[i].peer, 0));
577 return; // They'll win the election. Get out of here.
578 }
579
580 if (peers[i].basetime == basetime &&
581 peers[i].peer > my_address) {
582 LOG(1, 0, 0, "Expecting %s to become master\n", fmtaddr(peers[i].peer, 0));
583 return; // They'll win the election. Wait for them to come up.
584 }
585
586 if (peers[i].uptodate)
587 have_peers++;
588 }
589
590 // Wow. It's been ages since I last heard a heartbeat
591 // and I'm better than any of my peers, so it's time
592 // to become a master!!!
593
594 config->cluster_iam_master = 1;
595
596 LOG(0, 0, 0, "I am declaring myself the master!\n");
597
598 if (have_peers < config->cluster_master_min_adv)
599 advertise_routes();
600 else
601 withdraw_routes();
602
603 if (config->cluster_seq_number == -1)
604 config->cluster_seq_number = 0;
605
606 //
607 // Go through and mark all the tunnels as defined.
608 // Count the highest used tunnel number as well.
609 //
610 config->cluster_highest_tunnelid = 0;
611 for (i = 0; i < MAXTUNNEL; ++i) {
612 if (tunnel[i].state == TUNNELUNDEF)
613 tunnel[i].state = TUNNELFREE;
614
615 if (tunnel[i].state != TUNNELFREE && i > config->cluster_highest_tunnelid)
616 config->cluster_highest_tunnelid = i;
617 }
618
619 //
620 // Go through and mark all the bundles as defined.
621 // Count the highest used bundle number as well.
622 //
623 config->cluster_highest_bundleid = 0;
624 for (i = 0; i < MAXBUNDLE; ++i) {
625 if (bundle[i].state == BUNDLEUNDEF)
626 bundle[i].state = BUNDLEFREE;
627
628 if (bundle[i].state != BUNDLEFREE && i > config->cluster_highest_bundleid)
629 config->cluster_highest_bundleid = i;
630 }
631
632 //
633 // Go through and mark all the sessions as being defined.
634 // reset the idle timeouts.
635 // add temporary byte counters to permanent ones.
636 // Re-string the free list.
637 // Find the ID of the highest session.
638 last_free = 0;
639 high_unique_id = 0;
640 config->cluster_highest_sessionid = 0;
641 for (i = 0, count = 0; i < MAXSESSION; ++i) {
642 if (session[i].tunnel == T_UNDEF) {
643 session[i].tunnel = T_FREE;
644 ++count;
645 }
646
647 if (!session[i].opened) { // Unused session. Add to free list.
648 memset(&session[i], 0, sizeof(session[i]));
649 session[i].tunnel = T_FREE;
650 session[last_free].next = i;
651 session[i].next = 0;
652 last_free = i;
653 continue;
654 }
655
656 // Reset idle timeouts..
657 session[i].last_packet = session[i].last_data = time_now;
658
659 // Reset die relative to our uptime rather than the old master's
660 if (session[i].die) session[i].die = TIME;
661
662 // Accumulate un-sent byte/packet counters.
663 increment_counter(&session[i].cin, &session[i].cin_wrap, sess_local[i].cin);
664 increment_counter(&session[i].cout, &session[i].cout_wrap, sess_local[i].cout);
665 session[i].cin_delta += sess_local[i].cin;
666 session[i].cout_delta += sess_local[i].cout;
667
668 session[i].pin += sess_local[i].pin;
669 session[i].pout += sess_local[i].pout;
670
671 sess_local[i].cin = sess_local[i].cout = 0;
672 sess_local[i].pin = sess_local[i].pout = 0;
673
674 sess_local[i].radius = 0; // Reset authentication as the radius blocks aren't up to date.
675
676 if (session[i].unique_id >= high_unique_id) // This is different to the index into the session table!!!
677 high_unique_id = session[i].unique_id+1;
678
679 session[i].tbf_in = session[i].tbf_out = 0; // Remove stale pointers from old master.
680 throttle_session(i, session[i].throttle_in, session[i].throttle_out);
681
682 config->cluster_highest_sessionid = i;
683 }
684
685 session[last_free].next = 0; // End of chain.
686 last_id = high_unique_id; // Keep track of the highest used session ID.
687
688 become_master();
689
690 rebuild_address_pool();
691
692 // If we're not the very first master, this is a big issue!
693 if (count > 0)
694 LOG(0, 0, 0, "Warning: Fixed %d uninitialized sessions in becoming master!\n", count);
695
696 config->cluster_undefined_sessions = 0;
697 config->cluster_undefined_bundles = 0;
698 config->cluster_undefined_tunnels = 0;
699 config->cluster_iam_uptodate = 1; // assume all peers are up-to-date
700
701 // FIXME. We need to fix up the tunnel control message
702 // queue here! There's a number of other variables we
703 // should also update.
704 }
705
706
707 //
708 // Check that our session table is validly matching what the
709 // master has in mind.
710 //
711 // In particular, if we have too many sessions marked 'undefined'
712 // we fix it up here, and we ensure that the 'first free session'
713 // pointer is valid.
714 //
715 static void cluster_check_sessions(int highsession, int freesession_ptr, int highbundle, int hightunnel)
716 {
717 int i;
718
719 sessionfree = freesession_ptr; // Keep the freesession ptr valid.
720
721 if (config->cluster_iam_uptodate)
722 return;
723
724 if (highsession > config->cluster_undefined_sessions && highbundle > config->cluster_undefined_bundles && hightunnel > config->cluster_undefined_tunnels)
725 return;
726
727 // Clear out defined sessions, counting the number of
728 // undefs remaining.
729 config->cluster_undefined_sessions = 0;
730 for (i = 1 ; i < MAXSESSION; ++i) {
731 if (i > highsession) {
732 if (session[i].tunnel == T_UNDEF) session[i].tunnel = T_FREE; // Defined.
733 continue;
734 }
735
736 if (session[i].tunnel == T_UNDEF)
737 ++config->cluster_undefined_sessions;
738 }
739
740 // Clear out defined bundles, counting the number of
741 // undefs remaining.
742 config->cluster_undefined_bundles = 0;
743 for (i = 1 ; i < MAXBUNDLE; ++i) {
744 if (i > highbundle) {
745 if (bundle[i].state == BUNDLEUNDEF) bundle[i].state = BUNDLEFREE; // Defined.
746 continue;
747 }
748
749 if (bundle[i].state == BUNDLEUNDEF)
750 ++config->cluster_undefined_bundles;
751 }
752
753 // Clear out defined tunnels, counting the number of
754 // undefs remaining.
755 config->cluster_undefined_tunnels = 0;
756 for (i = 1 ; i < MAXTUNNEL; ++i) {
757 if (i > hightunnel) {
758 if (tunnel[i].state == TUNNELUNDEF) tunnel[i].state = TUNNELFREE; // Defined.
759 continue;
760 }
761
762 if (tunnel[i].state == TUNNELUNDEF)
763 ++config->cluster_undefined_tunnels;
764 }
765
766
767 if (config->cluster_undefined_sessions || config->cluster_undefined_tunnels || config->cluster_undefined_bundles) {
768 LOG(2, 0, 0, "Cleared undefined sessions/bundles/tunnels. %d sess (high %d), %d bund (high %d), %d tunn (high %d)\n",
769 config->cluster_undefined_sessions, highsession, config->cluster_undefined_bundles, highbundle, config->cluster_undefined_tunnels, hightunnel);
770 return;
771 }
772
773 // Are we up to date?
774
775 if (!config->cluster_iam_uptodate)
776 cluster_uptodate();
777 }
778
779 static int hb_add_type(uint8_t **p, int type, int id)
780 {
781 switch (type) {
782 case C_CSESSION: { // Compressed C_SESSION.
783 uint8_t c[sizeof(sessiont) * 2]; // Bigger than worst case.
784 uint8_t *d = (uint8_t *) &session[id];
785 uint8_t *orig = d;
786 int size;
787
788 size = rle_compress( &d, sizeof(sessiont), c, sizeof(c) );
789
790 // Did we compress the full structure, and is the size actually
791 // reduced??
792 if ( (d - orig) == sizeof(sessiont) && size < sizeof(sessiont) ) {
793 add_type(p, C_CSESSION, id, c, size);
794 break;
795 }
796 // Failed to compress : Fall through.
797 }
798 case C_SESSION:
799 add_type(p, C_SESSION, id, (uint8_t *) &session[id], sizeof(sessiont));
800 break;
801
802 case C_CBUNDLE: { // Compressed C_BUNDLE
803 uint8_t c[sizeof(bundlet) * 2]; // Bigger than worst case.
804 uint8_t *d = (uint8_t *) &bundle[id];
805 uint8_t *orig = d;
806 int size;
807
808 size = rle_compress( &d, sizeof(bundlet), c, sizeof(c) );
809
810 // Did we compress the full structure, and is the size actually
811 // reduced??
812 if ( (d - orig) == sizeof(bundlet) && size < sizeof(bundlet) ) {
813 add_type(p, C_CBUNDLE, id, c, size);
814 break;
815 }
816 // Failed to compress : Fall through.
817 }
818
819 case C_BUNDLE:
820 add_type(p, C_BUNDLE, id, (uint8_t *) &bundle[id], sizeof(bundlet));
821 break;
822
823 case C_CTUNNEL: { // Compressed C_TUNNEL
824 uint8_t c[sizeof(tunnelt) * 2]; // Bigger than worst case.
825 uint8_t *d = (uint8_t *) &tunnel[id];
826 uint8_t *orig = d;
827 int size;
828
829 size = rle_compress( &d, sizeof(tunnelt), c, sizeof(c) );
830
831 // Did we compress the full structure, and is the size actually
832 // reduced??
833 if ( (d - orig) == sizeof(tunnelt) && size < sizeof(tunnelt) ) {
834 add_type(p, C_CTUNNEL, id, c, size);
835 break;
836 }
837 // Failed to compress : Fall through.
838 }
839 case C_TUNNEL:
840 add_type(p, C_TUNNEL, id, (uint8_t *) &tunnel[id], sizeof(tunnelt));
841 break;
842 default:
843 LOG(0, 0, 0, "Found an invalid type in heart queue! (%d)\n", type);
844 kill(0, SIGTERM);
845 exit(1);
846 }
847 return 0;
848 }
849
850 //
851 // Send a heartbeat, incidentally sending out any queued changes.
852 //
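// A rough sketch of what one heartbeat packet ends up containing (inferred
// from the code below; illustrative only, not a formal wire description):
//
//	C_HEARTBEAT element: type, HB_VERSION, heartt header
//	queued change elements (C_CSESSION/C_SESSION, C_CBUNDLE/C_BUNDLE, C_CTUNNEL/C_TUNNEL)
//	a slow-walk slice of C_CSESSION elements
//	a slow-walk slice of C_CTUNNEL elements
//	a slow-walk slice of C_CBUNDLE elements
//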
853 void cluster_heartbeat()
854 {
855 int i, count = 0, tcount = 0, bcount = 0;
856 uint8_t buff[MAX_HEART_SIZE + sizeof(heartt) + sizeof(int) ];
857 heartt h;
858 uint8_t *p = buff;
859
860 if (!config->cluster_iam_master) // Only the master does this.
861 return;
862
863 config->cluster_table_version += config->cluster_num_changes;
864
865 // Fill out the heartbeat header.
866 memset(&h, 0, sizeof(h));
867
868 h.version = HB_VERSION;
869 h.seq = config->cluster_seq_number;
870 h.basetime = basetime;
871 h.clusterid = config->bind_address; // Will this do??
873 h.highsession = config->cluster_highest_sessionid;
874 h.freesession = sessionfree;
875 h.hightunnel = config->cluster_highest_tunnelid;
876 h.highbundle = config->cluster_highest_bundleid;
877 h.size_sess = sizeof(sessiont); // Just in case.
878 h.size_bund = sizeof(bundlet);
879 h.size_tunn = sizeof(tunnelt);
880 h.interval = config->cluster_hb_interval;
881 h.timeout = config->cluster_hb_timeout;
882 h.table_version = config->cluster_table_version;
883
884 add_type(&p, C_HEARTBEAT, HB_VERSION, (uint8_t *) &h, sizeof(h));
885
886 for (i = 0; i < config->cluster_num_changes; ++i) {
887 hb_add_type(&p, cluster_changes[i].type, cluster_changes[i].id);
888 }
889
890 if (p > (buff + sizeof(buff))) { // Did we somehow manage to overrun the buffer?
891 LOG(0, 0, 0, "FATAL: Overran the heartbeat buffer! This is fatal. Exiting. (size %d)\n", (int) (p - buff));
892 kill(0, SIGTERM);
893 exit(1);
894 }
895
896 //
897 // Fill out the packet with sessions from the session table...
898 // (not forgetting to leave space so we can get some tunnels in too )
899 while ( (p + sizeof(uint32_t) * 2 + sizeof(sessiont) * 2 ) < (buff + MAX_HEART_SIZE) ) {
900
901 if (!walk_session_number) // session #0 isn't valid.
902 ++walk_session_number;
903
904 if (count >= config->cluster_highest_sessionid) // If we're a small cluster, don't go wild.
905 break;
906
907 hb_add_type(&p, C_CSESSION, walk_session_number);
908 walk_session_number = (1+walk_session_number)%(config->cluster_highest_sessionid+1); // +1 avoids divide by zero.
909
910 ++count; // Count the number of extra sessions we're sending.
911 }
912
913 //
914 // Fill out the packet with tunnels from the tunnel table...
915 // This effectively means we walk the tunnel table more quickly
916 // than the session table. This is good because stuffing up a
917 // tunnel is a much bigger deal than stuffing up a session.
918 //
919 while ( (p + sizeof(uint32_t) * 2 + sizeof(tunnelt) ) < (buff + MAX_HEART_SIZE) ) {
920
921 if (!walk_tunnel_number) // tunnel #0 isn't valid.
922 ++walk_tunnel_number;
923
924 if (tcount >= config->cluster_highest_tunnelid)
925 break;
926
927 hb_add_type(&p, C_CTUNNEL, walk_tunnel_number);
928 walk_tunnel_number = (1+walk_tunnel_number)%(config->cluster_highest_tunnelid+1); // +1 avoids divide by zero.
929
930 ++tcount;
931 }
932
933 //
934 // Fill out the packet with bundles from the bundle table...
935 while ( (p + sizeof(uint32_t) * 2 + sizeof(bundlet) ) < (buff + MAX_HEART_SIZE) ) {
936
937 if (!walk_bundle_number) // bundle #0 isn't valid.
938 ++walk_bundle_number;
939
940 if (bcount >= config->cluster_highest_bundleid)
941 break;
942
943 hb_add_type(&p, C_CBUNDLE, walk_bundle_number);
944 walk_bundle_number = (1+walk_bundle_number)%(config->cluster_highest_bundleid+1); // +1 avoids divide by zero.
945 ++bcount;
946 }
947
948 //
949 // Did we do something wrong?
950 if (p > (buff + sizeof(buff))) { // Did we somehow manage to overrun the buffer?
951 LOG(0, 0, 0, "Overran the heartbeat buffer now! This is fatal. Exiting. (size %d)\n", (int) (p - buff));
952 kill(0, SIGTERM);
953 exit(1);
954 }
955
956 LOG(3, 0, 0, "Sending v%d heartbeat #%d, change #%" PRIu64 " with %d changes "
957 "(%d x-sess, %d x-bundles, %d x-tunnels, %d highsess, %d highbund, %d hightun, size %d)\n",
958 HB_VERSION, h.seq, h.table_version, config->cluster_num_changes,
959 count, bcount, tcount, config->cluster_highest_sessionid, config->cluster_highest_bundleid,
960 config->cluster_highest_tunnelid, (int) (p - buff));
961
962 config->cluster_num_changes = 0;
963
964 send_heartbeat(h.seq, buff, (p-buff) ); // Send out the heartbeat to the cluster, keeping a copy of it.
965
966 config->cluster_seq_number = (config->cluster_seq_number+1)%HB_MAX_SEQ; // Next seq number to use.
967 }
968
969 //
970 // A structure of type 'type' has changed; Add it to the queue to send.
971 //
972 static int type_changed(int type, int id)
973 {
974 int i;
975
976 for (i = 0 ; i < config->cluster_num_changes ; ++i)
977 if ( cluster_changes[i].id == id &&
978 cluster_changes[i].type == type)
979 return 0; // Already marked for change.
980
981 cluster_changes[i].type = type;
982 cluster_changes[i].id = id;
983 ++config->cluster_num_changes;
984
985 if (config->cluster_num_changes >= MAX_CHANGES) // >= so the next entry can't run past the end of cluster_changes[]
986 cluster_heartbeat(); // flush now
987
988 return 1;
989 }
990
991
992 // A particular session has been changed!
993 int cluster_send_session(int sid)
994 {
995 if (!config->cluster_iam_master) {
996 LOG(0, sid, 0, "I'm not a master, but I just tried to change a session!\n");
997 return -1;
998 }
999
1000 if (forked) {
1001 LOG(0, sid, 0, "cluster_send_session called from child process!\n");
1002 return -1;
1003 }
1004
1005 return type_changed(C_CSESSION, sid);
1006 }
1007
1008 // A particular bundle has been changed!
1009 int cluster_send_bundle(int bid)
1010 {
1011 if (!config->cluster_iam_master) {
1012 LOG(0, 0, bid, "I'm not a master, but I just tried to change a bundle!\n");
1013 return -1;
1014 }
1015
1016 return type_changed(C_CBUNDLE, bid);
1017 }
1018
1019 // A particular tunnel has been changed!
1020 int cluster_send_tunnel(int tid)
1021 {
1022 if (!config->cluster_iam_master) {
1023 LOG(0, 0, tid, "I'm not a master, but I just tried to change a tunnel!\n");
1024 return -1;
1025 }
1026
1027 return type_changed(C_CTUNNEL, tid);
1028 }
1029
1030
1031 //
1032 // We're a master, and a slave has just told us that it's
1033 // missed a packet. We'll resend every packet since
1034 // the last one it's seen.
1035 //
1036 static int cluster_catchup_slave(int seq, in_addr_t slave)
1037 {
1038 int s;
1039 int diff;
1040
1041 LOG(1, 0, 0, "Slave %s sent LASTSEEN with seq %d\n", fmtaddr(slave, 0), seq);
1042 if (!config->cluster_iam_master) {
1043 LOG(1, 0, 0, "Got LASTSEEN but I'm not a master! Redirecting it to %s.\n",
1044 fmtaddr(config->cluster_master_address, 0));
1045
1046 peer_send_message(slave, C_MASTER, config->cluster_master_address, NULL, 0);
1047 return 0;
1048 }
1049
1050 diff = config->cluster_seq_number - seq; // How many packets do we need to send?
1051 if (diff < 0)
1052 diff += HB_MAX_SEQ;
1053
1054 if (diff >= HB_HISTORY_SIZE) { // Ouch. We don't have the packets to send!
1055 LOG(0, 0, 0, "A slave asked for message %d when our seq number is %d. Killing it.\n",
1056 seq, config->cluster_seq_number);
1057 return peer_send_message(slave, C_KILL, seq, NULL, 0);// Kill the slave. Nothing else to do.
1058 }
1059
1060 LOG(1, 0, 0, "Sending %d catchup packets to slave %s\n", diff, fmtaddr(slave, 0) );
1061
1062 // Now resend every packet that it missed, in order.
1063 while (seq != config->cluster_seq_number) {
1064 s = seq % HB_HISTORY_SIZE;
1065 if (seq != past_hearts[s].seq) {
1066 LOG(0, 0, 0, "Tried to re-send heartbeat for %s but %d doesn't match %d! (%d,%d)\n",
1067 fmtaddr(slave, 0), seq, past_hearts[s].seq, s, config->cluster_seq_number);
1068 return -1; // What to do here!?
1069 }
1070 peer_send_data(slave, past_hearts[s].data, past_hearts[s].size);
1071 seq = (seq+1)%HB_MAX_SEQ; // Increment to next seq number.
1072 }
1073 return 0; // All good!
1074 }
1075
1076 //
1077 // We've heard from another peer! Add it to the list
1078 // that we select from at election time.
1079 //
1080 static int cluster_add_peer(in_addr_t peer, time_t basetime, pingt *pp, int size)
1081 {
1082 int i;
1083 in_addr_t clusterid;
1084 pingt p;
1085
1086 // Allow for backward compatibility.
1087 // Copy the ping packet into a new structure to allow
1088 // for the possibility that we might have received
1089 // more or fewer elements than we were expecting.
1090 if (size > sizeof(p))
1091 size = sizeof(p);
1092
1093 memset( (void *) &p, 0, sizeof(p) );
1094 memcpy( (void *) &p, (void *) pp, size);
1095
1096 clusterid = p.addr;
1097 if (clusterid != config->bind_address)
1098 {
1099 // Is this for us?
1100 LOG(4, 0, 0, "Skipping ping from %s (different cluster)\n", fmtaddr(peer, 0));
1101 return 0;
1102 }
1103
1104 for (i = 0; i < num_peers ; ++i)
1105 {
1106 if (peers[i].peer != peer)
1107 continue;
1108
1109 // This peer already exists. Just update the timestamp.
1110 peers[i].basetime = basetime;
1111 peers[i].timestamp = TIME;
1112 peers[i].uptodate = !p.undef;
1113 break;
1114 }
1115
1116 // Is this the master shutting down??
1117 if (peer == config->cluster_master_address) {
1118 LOG(3, 0, 0, "Master %s %s\n", fmtaddr(config->cluster_master_address, 0),
1119 basetime ? "has restarted!" : "shutting down...");
1120
1121 config->cluster_master_address = 0;
1122 config->cluster_last_hb = 0; // Force an election.
1123 cluster_check_master();
1124 }
1125
1126 if (i >= num_peers)
1127 {
1128 LOG(4, 0, 0, "Adding %s as a peer\n", fmtaddr(peer, 0));
1129
1130 // Not found. Is there a stale slot to re-use?
1131 for (i = 0; i < num_peers ; ++i)
1132 {
1133 if (!peers[i].basetime) // Shutdown
1134 break;
1135
1136 if ((peers[i].timestamp + config->cluster_hb_timeout * 10) < TIME) // Stale.
1137 break;
1138 }
1139
1140 if (i >= CLUSTER_MAX_SIZE)
1141 {
1142 // Too many peers!!
1143 LOG(0, 0, 0, "Tried to add %s as a peer, but I already have %d of them!\n", fmtaddr(peer, 0), i);
1144 return -1;
1145 }
1146
1147 peers[i].peer = peer;
1148 peers[i].basetime = basetime;
1149 peers[i].timestamp = TIME;
1150 peers[i].uptodate = !p.undef;
1151 if (i == num_peers)
1152 ++num_peers;
1153
1154 LOG(1, 0, 0, "Added %s as a new peer. Now %d peers\n", fmtaddr(peer, 0), num_peers);
1155 }
1156
1157 return 1;
1158 }
1159
1160 // A slave responds with C_MASTER when it gets a message which should have gone to a master.
1161 static int cluster_set_master(in_addr_t peer, in_addr_t master)
1162 {
1163 if (config->cluster_iam_master) // Sanity...
1164 return 0;
1165
1166 LOG(3, 0, 0, "Peer %s set the master to %s...\n", fmtaddr(peer, 0),
1167 fmtaddr(master, 1));
1168
1169 config->cluster_master_address = master;
1170 if (master)
1171 {
1172 // catchup with new master
1173 peer_send_message(master, C_LASTSEEN, config->cluster_seq_number, NULL, 0);
1174
1175 // delay next election
1176 config->cluster_last_hb = TIME;
1177 }
1178
1179 // run election (or reset "probed" if master was set)
1180 cluster_check_master();
1181 return 0;
1182 }
1183
1184 /* Handle the slave updating the byte counters for the master. */
1185 //
1186 // Note that we don't mark the session as dirty; we rely on
1187 // the slow table walk to propagate this back out to the slaves.
1188 //
1189 static int cluster_handle_bytes(uint8_t *data, int size)
1190 {
1191 bytest *b;
1192
1193 b = (bytest *) data;
1194
1195 LOG(3, 0, 0, "Got byte counter update (size %d)\n", size);
1196
1197 /* Loop around, adding the byte
1198 counts to each of the sessions. */
1199
1200 while (size >= sizeof(*b) ) {
1201 if (b->sid >= MAXSESSION) { // >= : session[] has MAXSESSION entries, as in cluster_recv_session()
1202 LOG(0, 0, 0, "Got C_BYTES with session #%d!\n", b->sid);
1203 return -1; /* Abort processing */
1204 }
1205
1206 session[b->sid].pin += b->pin;
1207 session[b->sid].pout += b->pout;
1208
1209 increment_counter(&session[b->sid].cin, &session[b->sid].cin_wrap, b->cin);
1210 increment_counter(&session[b->sid].cout, &session[b->sid].cout_wrap, b->cout);
1211
1212 session[b->sid].cin_delta += b->cin;
1213 session[b->sid].cout_delta += b->cout;
1214
1215 if (b->cin)
1216 session[b->sid].last_packet = session[b->sid].last_data = time_now;
1217 else if (b->cout)
1218 session[b->sid].last_data = time_now;
1219
1220 size -= sizeof(*b);
1221 ++b;
1222 }
1223
1224 if (size != 0)
1225 LOG(0, 0, 0, "Got C_BYTES with %d bytes of trailing junk!\n", size);
1226
1227 return size;
1228 }
1229
1230 //
1231 // Handle receiving a session structure in a heartbeat packet.
1232 //
1233 static int cluster_recv_session(int more, uint8_t *p)
1234 {
1235 if (more >= MAXSESSION) {
1236 LOG(0, 0, 0, "DANGER: Received a heartbeat session id > MAXSESSION!\n");
1237 return -1;
1238 }
1239
1240 if (session[more].tunnel == T_UNDEF) {
1241 if (config->cluster_iam_uptodate) { // Sanity.
1242 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined session!\n");
1243 } else {
1244 --config->cluster_undefined_sessions;
1245 }
1246 }
1247
1248 load_session(more, (sessiont *) p); // Copy session into session table..
1249
1250 LOG(5, more, 0, "Received session update (%d undef)\n", config->cluster_undefined_sessions);
1251
1252 if (!config->cluster_iam_uptodate)
1253 cluster_uptodate(); // Check to see if we're up to date.
1254
1255 return 0;
1256 }
1257
1258 static int cluster_recv_bundle(int more, uint8_t *p)
1259 {
1260 if (more >= MAXBUNDLE) {
1261 LOG(0, 0, 0, "DANGER: Received a bundle id > MAXBUNDLE!\n");
1262 return -1;
1263 }
1264
1265 if (bundle[more].state == BUNDLEUNDEF) {
1266 if (config->cluster_iam_uptodate) { // Sanity.
1267 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined bundle!\n");
1268 } else {
1269 --config->cluster_undefined_bundles;
1270 }
1271 }
1272
1273 memcpy(&bundle[more], p, sizeof(bundle[more]) );
1274
1275 LOG(5, 0, more, "Received bundle update\n");
1276
1277 if (!config->cluster_iam_uptodate)
1278 cluster_uptodate(); // Check to see if we're up to date.
1279
1280 return 0;
1281 }
1282
1283 static int cluster_recv_tunnel(int more, uint8_t *p)
1284 {
1285 if (more >= MAXTUNNEL) {
1286 LOG(0, 0, 0, "DANGER: Received a tunnel session id > MAXTUNNEL!\n");
1287 return -1;
1288 }
1289
1290 if (tunnel[more].state == TUNNELUNDEF) {
1291 if (config->cluster_iam_uptodate) { // Sanity.
1292 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined tunnel!\n");
1293 } else {
1294 --config->cluster_undefined_tunnels;
1295 }
1296 }
1297
1298 memcpy(&tunnel[more], p, sizeof(tunnel[more]) );
1299
1300 //
1301 // Clear tunnel control messages. These are dynamically allocated.
1302 // If we get unlucky, this may cause the tunnel to drop!
1303 //
1304 tunnel[more].controls = tunnel[more].controle = NULL;
1305 tunnel[more].controlc = 0;
1306
1307 LOG(5, 0, more, "Received tunnel update\n");
1308
1309 if (!config->cluster_iam_uptodate)
1310 cluster_uptodate(); // Check to see if we're up to date.
1311
1312 return 0;
1313 }
1314
1315
1316 // pre v5 heartbeat session structure
1317 struct oldsession {
1318 sessionidt next;
1319 sessionidt far;
1320 tunnelidt tunnel;
1321 in_addr_t ip;
1322 int ip_pool_index;
1323 unsigned long unique_id;
1324 uint16_t nr;
1325 uint16_t ns;
1326 uint32_t magic;
1327 uint32_t cin, cout;
1328 uint32_t pin, pout;
1329 uint32_t total_cin;
1330 uint32_t total_cout;
1331 uint32_t id;
1332 uint16_t throttle_in;
1333 uint16_t throttle_out;
1334 clockt opened;
1335 clockt die;
1336 time_t last_packet;
1337 in_addr_t dns1, dns2;
1338 routet route[MAXROUTE];
1339 uint16_t radius;
1340 uint16_t mru;
1341 uint16_t tbf_in;
1342 uint16_t tbf_out;
1343 uint8_t l2tp_flags;
1344 uint8_t reserved_old_snoop;
1345 uint8_t walled_garden;
1346 uint8_t flags1;
1347 char random_vector[MAXTEL];
1348 int random_vector_length;
1349 char user[129];
1350 char called[MAXTEL];
1351 char calling[MAXTEL];
1352 uint32_t tx_connect_speed;
1353 uint32_t rx_connect_speed;
1354 uint32_t flags;
1355 #define SF_IPCP_ACKED 1 // Has this session seen an IPCP Ack?
1356 #define SF_LCP_ACKED 2 // LCP negotiated
1357 #define SF_CCP_ACKED 4 // CCP negotiated
1358 in_addr_t snoop_ip;
1359 uint16_t snoop_port;
1360 uint16_t sid;
1361 uint8_t filter_in;
1362 uint8_t filter_out;
1363 char reserved[18];
1364 };
1365
1366 static uint8_t *convert_session(struct oldsession *old)
1367 {
1368 static sessiont new;
1369 int i;
1370
1371 memset(&new, 0, sizeof(new));
1372
1373 new.next = old->next;
1374 new.far = old->far;
1375 new.tunnel = old->tunnel;
1376 new.flags = old->l2tp_flags;
1377 new.ip = old->ip;
1378 new.ip_pool_index = old->ip_pool_index;
1379 new.unique_id = old->unique_id;
1380 new.magic = old->magic;
1381 new.pin = old->pin;
1382 new.pout = old->pout;
1383 new.cin = old->total_cin;
1384 new.cout = old->total_cout;
1385 new.cin_delta = old->cin;
1386 new.cout_delta = old->cout;
1387 new.throttle_in = old->throttle_in;
1388 new.throttle_out = old->throttle_out;
1389 new.filter_in = old->filter_in;
1390 new.filter_out = old->filter_out;
1391 new.mru = old->mru;
1392 new.opened = old->opened;
1393 new.die = old->die;
1394 new.last_packet = old->last_packet;
1395 new.dns1 = old->dns1;
1396 new.dns2 = old->dns2;
1397 new.tbf_in = old->tbf_in;
1398 new.tbf_out = old->tbf_out;
1399 new.random_vector_length = old->random_vector_length;
1400 new.tx_connect_speed = old->tx_connect_speed;
1401 new.rx_connect_speed = old->rx_connect_speed;
1402 new.snoop_ip = old->snoop_ip;
1403 new.snoop_port = old->snoop_port;
1404 new.walled_garden = old->walled_garden;
1405
1406 memcpy(new.random_vector, old->random_vector, sizeof(new.random_vector));
1407 memcpy(new.user, old->user, sizeof(new.user));
1408 memcpy(new.called, old->called, sizeof(new.called));
1409 memcpy(new.calling, old->calling, sizeof(new.calling));
1410
1411 for (i = 0; i < MAXROUTE; i++)
1412 memcpy(&new.route[i], &old->route[i], sizeof(new.route[i]));
1413
1414 if (new.opened)
1415 {
1416 new.ppp.phase = Establish;
1417 if (old->flags & (SF_IPCP_ACKED|SF_LCP_ACKED))
1418 {
1419 new.ppp.phase = Network;
1420 new.ppp.lcp = Opened;
1421 new.ppp.ipcp = (old->flags & SF_IPCP_ACKED) ? Opened : Starting;
1422 new.ppp.ccp = (old->flags & SF_CCP_ACKED) ? Opened : Stopped;
1423 }
1424
1425 // no PPPv6 in old session
1426 new.ppp.ipv6cp = Stopped;
1427 }
1428
1429 return (uint8_t *) &new;
1430 }
1431
1432 //
1433 // Process a heartbeat..
1434 //
1435 // v3: added interval, timeout
1436 // v4: added table_version
1437 // v5: added ipv6, re-ordered session structure
1438 static int cluster_process_heartbeat(uint8_t *data, int size, int more, uint8_t *p, in_addr_t addr)
1439 {
1440 heartt *h;
1441 int s = size - (p-data);
1442 int i, type;
1443 int hb_ver = more;
1444
1445 #if HB_VERSION != 5
1446 # error "need to update cluster_process_heartbeat()"
1447 #endif
1448
1449 // we handle versions 3 through 5
1450 if (hb_ver < 3 || hb_ver > HB_VERSION) {
1451 LOG(0, 0, 0, "Received a heartbeat version that I don't support (%d)!\n", hb_ver);
1452 return -1; // Ignore it??
1453 }
1454
1455 if (size > sizeof(past_hearts[0].data)) {
1456 LOG(0, 0, 0, "Received an oversize heartbeat from %s (%d)!\n", fmtaddr(addr, 0), size);
1457 return -1;
1458 }
1459
1460 if (s < sizeof(*h))
1461 goto shortpacket;
1462
1463 h = (heartt *) p;
1464 p += sizeof(*h);
1465 s -= sizeof(*h);
1466
1467 if (h->clusterid != config->bind_address)
1468 return -1; // It's not part of our cluster.
1469
1470 if (config->cluster_iam_master) { // Sanity...
1471 // Note that this MUST match the election process above!
1472
1473 LOG(0, 0, 0, "I just got a heartbeat from master %s, but _I_ am the master!\n", fmtaddr(addr, 0));
1474 if (!h->basetime) {
1475 LOG(0, 0, 0, "Heartbeat with zero basetime! Ignoring\n");
1476 return -1; // Skip it.
1477 }
1478
1479 if (hb_ver >= 4) {
1480 if (h->table_version > config->cluster_table_version) {
1481 LOG(0, 0, 0, "They've seen more state changes (%" PRIu64 " vs my %" PRIu64 ") so I'm gone!\n",
1482 h->table_version, config->cluster_table_version);
1483
1484 kill(0, SIGTERM);
1485 exit(1);
1486 }
1487 if (h->table_version < config->cluster_table_version)
1488 return -1;
1489 }
1490
1491 if (basetime > h->basetime) {
1492 LOG(0, 0, 0, "They're an older master than me so I'm gone!\n");
1493 kill(0, SIGTERM);
1494 exit(1);
1495 }
1496
1497 if (basetime < h->basetime)
1498 return -1;
1499
1500 if (my_address < addr) { // Tie breaker.
1501 LOG(0, 0, 0, "They're a higher IP address than me, so I'm gone!\n");
1502 kill(0, SIGTERM);
1503 exit(1);
1504 }
1505
1506 //
1507 // Send it a unicast heartbeat to give it a chance to die.
1508 // NOTE: It's actually safe to do seq-number - 1 without checking
1509 // for wrap around.
1510 //
1511 cluster_catchup_slave(config->cluster_seq_number - 1, addr);
1512
1513 return -1; // Skip it.
1514 }
1515
1516 //
1517 // Try and guard against a stray master appearing.
1518 //
1519 // Ignore heartbeats received from another master before the
1520 // timeout (less a smidgen) for the old master has elapsed.
1521 //
1522 // Note that after a clean failover, the cluster_master_address
1523 // is cleared, so this doesn't run.
1524 //
1525 if (config->cluster_master_address && addr != config->cluster_master_address) {
1526 LOG(0, 0, 0, "Ignoring stray heartbeat from %s, current master %s has not yet timed out (last heartbeat %.1f seconds ago).\n",
1527 fmtaddr(addr, 0), fmtaddr(config->cluster_master_address, 1),
1528 0.1 * (TIME - config->cluster_last_hb));
1529 return -1; // ignore
1530 }
1531
1532 if (config->cluster_seq_number == -1) // Don't have one. Just align to the master...
1533 config->cluster_seq_number = h->seq;
1534
1535 config->cluster_last_hb = TIME; // Reset to ensure that we don't become master!!
1536 config->cluster_last_hb_ver = hb_ver; // remember what cluster version the master is using
1537
1538 if (config->cluster_seq_number != h->seq) { // Out of sequence heartbeat!
1539 static int lastseen_seq = 0;
1540 static time_t lastseen_time = 0;
1541
1542 // limit to once per second for a particular seq#
1543 int ask = (config->cluster_seq_number != lastseen_seq || time_now != lastseen_time);
1544
1545 LOG(1, 0, 0, "HB: Got seq# %d but was expecting %d. %s.\n",
1546 h->seq, config->cluster_seq_number,
1547 ask ? "Asking for resend" : "Ignoring");
1548
1549 if (ask)
1550 {
1551 lastseen_seq = config->cluster_seq_number;
1552 lastseen_time = time_now;
1553 peer_send_message(addr, C_LASTSEEN, config->cluster_seq_number, NULL, 0);
1554 }
1555
1556 config->cluster_last_hb = TIME; // Reset to ensure that we don't become master!!
1557
1558 // Just drop the packet. The master will resend it as part of the catchup.
1559
1560 return 0;
1561 }
1562 // Save the packet in our buffer.
1563 // This is needed in case we become the master.
1564 config->cluster_seq_number = (h->seq+1)%HB_MAX_SEQ;
1565 i = h->seq % HB_HISTORY_SIZE;
1566 past_hearts[i].seq = h->seq;
1567 past_hearts[i].size = size;
1568 memcpy(&past_hearts[i].data, data, size); // Save it.
1569
1570
1571 // Check that we don't have too many undefined sessions, and
1572 // that the free session pointer is correct.
1573 cluster_check_sessions(h->highsession, h->freesession, h->highbundle, h->hightunnel);
1574
1575 if (h->interval != config->cluster_hb_interval)
1576 {
1577 LOG(2, 0, 0, "Master set ping/heartbeat interval to %u (was %u)\n",
1578 h->interval, config->cluster_hb_interval);
1579
1580 config->cluster_hb_interval = h->interval;
1581 }
1582
1583 if (h->timeout != config->cluster_hb_timeout)
1584 {
1585 LOG(2, 0, 0, "Master set heartbeat timeout to %u (was %u)\n",
1586 h->timeout, config->cluster_hb_timeout);
1587
1588 config->cluster_hb_timeout = h->timeout;
1589 }
1590
1591 // Ok. process the packet...
1592 while ( s > 0) {
1593
1594 type = *((uint32_t *) p);
1595 p += sizeof(uint32_t);
1596 s -= sizeof(uint32_t);
1597
1598 more = *((uint32_t *) p);
1599 p += sizeof(uint32_t);
1600 s -= sizeof(uint32_t);
1601
1602 switch (type) {
1603 case C_CSESSION: { // Compressed session structure.
1604 uint8_t c[ sizeof(sessiont) + 2];
1605 int size;
1606 uint8_t *orig_p = p;
1607
1608 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c) );
1609 s -= (p - orig_p);
1610
1611 // session struct changed with v5
1612 if (hb_ver < 5)
1613 {
1614 if (size != sizeof(struct oldsession)) {
1615 LOG(0, 0, 0, "DANGER: Received a v%d CSESSION that didn't decompress correctly!\n", hb_ver);
1616 // Now what? Should exit! No-longer up to date!
1617 break;
1618 }
1619 cluster_recv_session(more, convert_session((struct oldsession *) c));
1620 break;
1621 }
1622
1623 if (size != sizeof(sessiont) ) { // Ouch! Very very bad!
1624 LOG(0, 0, 0, "DANGER: Received a CSESSION that didn't decompress correctly!\n");
1625 // Now what? Should exit! No-longer up to date!
1626 break;
1627 }
1628
1629 cluster_recv_session(more, c);
1630 break;
1631 }
1632 case C_SESSION:
1633 if (hb_ver < 5)
1634 {
1635 if (s < sizeof(struct oldsession))
1636 goto shortpacket;
1637
1638 cluster_recv_session(more, convert_session((struct oldsession *) p));
1639
1640 p += sizeof(struct oldsession);
1641 s -= sizeof(struct oldsession);
1642 break;
1643 }
1644
1645 if ( s < sizeof(session[more]))
1646 goto shortpacket;
1647
1648 cluster_recv_session(more, p);
1649
1650 p += sizeof(session[more]);
1651 s -= sizeof(session[more]);
1652 break;
1653
1654 case C_CTUNNEL: { // Compressed tunnel structure.
1655 uint8_t c[ sizeof(tunnelt) + 2];
1656 int size;
1657 uint8_t *orig_p = p;
1658
1659 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c));
1660 s -= (p - orig_p);
1661
1662 if (size != sizeof(tunnelt) ) { // Ouch! Very very bad!
1663 LOG(0, 0, 0, "DANGER: Received a CTUNNEL that didn't decompress correctly!\n");
1664 // Now what? Should exit! No-longer up to date!
1665 break;
1666 }
1667
1668 cluster_recv_tunnel(more, c);
1669 break;
1670
1671 }
1672 case C_TUNNEL:
1673 if ( s < sizeof(tunnel[more]))
1674 goto shortpacket;
1675
1676 cluster_recv_tunnel(more, p);
1677
1678 p += sizeof(tunnel[more]);
1679 s -= sizeof(tunnel[more]);
1680 break;
1681
1682 case C_CBUNDLE: { // Compressed bundle structure.
1683 uint8_t c[ sizeof(bundlet) + 2];
1684 int size;
1685 uint8_t *orig_p = p;
1686
1687 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c));
1688 s -= (p - orig_p);
1689
1690 if (size != sizeof(bundlet) ) { // Ouch! Very very bad!
1691 LOG(0, 0, 0, "DANGER: Received a CBUNDLE that didn't decompress correctly!\n");
1692 // Now what? Should exit! No-longer up to date!
1693 break;
1694 }
1695
1696 cluster_recv_bundle(more, c);
1697 break;
1698
1699 }
1700 case C_BUNDLE:
1701 if ( s < sizeof(bundle[more]))
1702 goto shortpacket;
1703
1704 cluster_recv_bundle(more, p);
1705
1706 p += sizeof(bundle[more]);
1707 s -= sizeof(bundle[more]);
1708 break;
1709 default:
1710 LOG(0, 0, 0, "DANGER: I received a heartbeat element where I didn't understand the type! (%d)\n", type);
1711 return -1; // can't process any more of the packet!!
1712 }
1713 }
1714
1715 if (config->cluster_master_address != addr)
1716 {
1717 LOG(0, 0, 0, "My master just changed from %s to %s!\n",
1718 fmtaddr(config->cluster_master_address, 0), fmtaddr(addr, 1));
1719
1720 config->cluster_master_address = addr;
1721 }
1722
1723 config->cluster_last_hb = TIME; // Successfully received a heartbeat!
1724 config->cluster_table_version = h->table_version;
1725 return 0;
1726
1727 shortpacket:
1728 LOG(0, 0, 0, "I got an incomplete heartbeat packet! This means I'm probably out of sync!!\n");
1729 return -1;
1730 }
1731
1732 //
1733 // We got a packet on the cluster port!
1734 // Handle pings, lastseens, and heartbeats!
1735 //
1736 int processcluster(uint8_t *data, int size, in_addr_t addr)
1737 {
1738 int type, more;
1739 uint8_t *p = data;
1740 int s = size;
1741
1742 if (addr == my_address)
1743 return -1; // Ignore it. Something looped back the multicast!
1744
1745 LOG(5, 0, 0, "Process cluster: %d bytes from %s\n", size, fmtaddr(addr, 0));
1746
1747 if (s <= 0) // Any data there??
1748 return -1;
1749
1750 if (s < 8)
1751 goto shortpacket;
1752
1753 type = *((uint32_t *) p);
1754 p += sizeof(uint32_t);
1755 s -= sizeof(uint32_t);
1756
1757 more = *((uint32_t *) p);
1758 p += sizeof(uint32_t);
1759 s -= sizeof(uint32_t);
1760
1761 switch (type)
1762 {
1763 case C_PING: // Update the peers table.
1764 return cluster_add_peer(addr, more, (pingt *) p, s);
1765
1766 case C_MASTER: // Our master is wrong
1767 return cluster_set_master(addr, more);
1768
1769 case C_LASTSEEN: // Catch up a slave (slave missed a packet).
1770 return cluster_catchup_slave(more, addr);
1771
1772 case C_FORWARD: // Forwarded control packet. pass off to processudp.
1773 case C_FORWARD_DAE: // Forwarded DAE packet. pass off to processdae.
1774 if (!config->cluster_iam_master)
1775 {
1776 LOG(0, 0, 0, "I'm not the master, but I got a C_FORWARD%s from %s?\n",
1777 type == C_FORWARD_DAE ? "_DAE" : "", fmtaddr(addr, 0));
1778
1779 return -1;
1780 }
1781 else
1782 {
1783 struct sockaddr_in a;
1784 a.sin_addr.s_addr = more;
1785
1786 a.sin_port = *(int *) p;
1787 s -= sizeof(int);
1788 p += sizeof(int);
1789
1790 LOG(4, 0, 0, "Got a forwarded %spacket... (%s:%d)\n",
1791 type == C_FORWARD_DAE ? "DAE " : "", fmtaddr(more, 0), a.sin_port);
1792
1793 STAT(recv_forward);
1794 if (type == C_FORWARD_DAE)
1795 {
1796 struct in_addr local;
1797 local.s_addr = config->bind_address ? config->bind_address : my_address;
1798 processdae(p, s, &a, sizeof(a), &local);
1799 }
1800 else
1801 processudp(p, s, &a);
1802
1803 return 0;
1804 }
1805
1806 case C_THROTTLE: { // Receive a forwarded packet from a slave.
1807 if (!config->cluster_iam_master) {
1808 LOG(0, 0, 0, "I'm not the master, but I got a C_THROTTLE from %s?\n", fmtaddr(addr, 0));
1809 return -1;
1810 }
1811
1812 tbf_queue_packet(more, p, s); // The TBF id tells whether it goes in or out.
1813 return 0;
1814 }
1815 case C_GARDEN:
1816 // Receive a walled garden packet from a slave.
1817 if (!config->cluster_iam_master) {
1818 LOG(0, 0, 0, "I'm not the master, but I got a C_GARDEN from %s?\n", fmtaddr(addr, 0));
1819 return -1;
1820 }
1821
1822 tun_write(p, s);
1823 return 0;
1824
1825 case C_BYTES:
1826 if (!config->cluster_iam_master) {
1827 LOG(0, 0, 0, "I'm not the master, but I got a C_BYTES from %s?\n", fmtaddr(addr, 0));
1828 return -1;
1829 }
1830
1831 return cluster_handle_bytes(p, s);
1832
1833 case C_KILL: // The master asked us to die!? (usually because we're too out of date).
1834 if (config->cluster_iam_master) {
1835 LOG(0, 0, 0, "_I_ am master, but I received a C_KILL from %s! (Seq# %d)\n", fmtaddr(addr, 0), more);
1836 return -1;
1837 }
1838 if (more != config->cluster_seq_number) {
1839 LOG(0, 0, 0, "The master asked us to die but the seq number didn't match!?\n");
1840 return -1;
1841 }
1842
1843 if (addr != config->cluster_master_address) {
1844 LOG(0, 0, 0, "Received a C_KILL from %s which doesn't match config->cluster_master_address (%s)\n",
1845 fmtaddr(addr, 0), fmtaddr(config->cluster_master_address, 1));
1846 // We can only warn about it. The master might really have switched!
1847 }
1848
1849 LOG(0, 0, 0, "Received a valid C_KILL: I'm going to die now.\n");
1850 kill(0, SIGTERM);
1851 exit(0); // Let's be paranoid;
1852 return -1; // Just signalling the compiler.
1853
1854 case C_HEARTBEAT:
1855 LOG(4, 0, 0, "Got a heartbeat from %s\n", fmtaddr(addr, 0));
1856 return cluster_process_heartbeat(data, size, more, p, addr);
1857
1858 default:
1859 LOG(0, 0, 0, "Strange type packet received on cluster socket (%d)\n", type);
1860 return -1;
1861 }
1862 return 0;
1863
1864 shortpacket:
1865 LOG(0, 0, 0, "I got a _short_ cluster heartbeat packet! This means I'm probably out of sync!!\n");
1866 return -1;
1867 }
1868
1869 //====================================================================================================
1870
1871 int cmd_show_cluster(struct cli_def *cli, char *command, char **argv, int argc)
1872 {
1873 int i;
1874
1875 if (CLI_HELP_REQUESTED)
1876 return CLI_HELP_NO_ARGS;
1877
1878 cli_print(cli, "Cluster status : %s", config->cluster_iam_master ? "Master" : "Slave" );
1879 cli_print(cli, "My address : %s", fmtaddr(my_address, 0));
1880 cli_print(cli, "VIP address : %s", fmtaddr(config->bind_address, 0));
1881 cli_print(cli, "Multicast address: %s", fmtaddr(config->cluster_address, 0));
1882 cli_print(cli, "Multicast i'face : %s", config->cluster_interface);
1883
1884 if (!config->cluster_iam_master) {
1885 cli_print(cli, "My master : %s (last heartbeat %.1f seconds old)",
1886 config->cluster_master_address
1887 ? fmtaddr(config->cluster_master_address, 0)
1888 : "Not defined",
1889 0.1 * (TIME - config->cluster_last_hb));
1890 cli_print(cli, "Uptodate : %s", config->cluster_iam_uptodate ? "Yes" : "No");
1891 cli_print(cli, "Table version # : %" PRIu64, config->cluster_table_version);
1892 cli_print(cli, "Next sequence number expected: %d", config->cluster_seq_number);
1893 cli_print(cli, "%d sessions undefined of %d", config->cluster_undefined_sessions, config->cluster_highest_sessionid);
1894 cli_print(cli, "%d bundles undefined of %d", config->cluster_undefined_bundles, config->cluster_highest_bundleid);
1895 cli_print(cli, "%d tunnels undefined of %d", config->cluster_undefined_tunnels, config->cluster_highest_tunnelid);
1896 } else {
1897 cli_print(cli, "Table version # : %" PRIu64, config->cluster_table_version);
1898 cli_print(cli, "Next heartbeat # : %d", config->cluster_seq_number);
1899 cli_print(cli, "Highest session : %d", config->cluster_highest_sessionid);
1900 cli_print(cli, "Highest bundle : %d", config->cluster_highest_bundleid);
1901 cli_print(cli, "Highest tunnel : %d", config->cluster_highest_tunnelid);
1902 cli_print(cli, "%d changes queued for sending", config->cluster_num_changes);
1903 }
1904 cli_print(cli, "%d peers.", num_peers);
1905
1906 if (num_peers)
1907 cli_print(cli, "%20s %10s %8s", "Address", "Basetime", "Age");
1908 for (i = 0; i < num_peers; ++i) {
1909 cli_print(cli, "%20s %10u %8d", fmtaddr(peers[i].peer, 0),
1910 peers[i].basetime, TIME - peers[i].timestamp);
1911 }
1912 return CLI_OK;
1913 }
1914
1915 //
1916 // Simple run-length-encoding compression.
1917 // Format is
1918 // 1 byte < 128 = count of non-zero bytes following. // Not legal to be zero.
1919 // n non-zero bytes;
1920 // or
1921 // 1 byte > 128 = (count - 128) run of zero bytes. //
1922 // repeat.
1923 // count == 0 indicates end of compressed stream.
1924 //
1925 // Compress from 'src' into 'dst'. return number of bytes
1926 // used from 'dst'.
1927 // Updates *src_p to indicate 1 past last bytes used.
1928 //
1929 // We could get an extra byte in the zero runs by storing (count-1)
1930 // but I'm playing it safe.
1931 //
1932 // Worst case is a 50% expansion in space required (trying to
1933 // compress { 0x00, 0x01 } * N )
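//
// Worked example (illustrative only, not from the original source): the
// 8-byte input
//	00 00 00 05 06 00 00 07
// is encoded by rle_compress() below as
//	83		run of 3 zero bytes (0x80 | 3)
//	02 05 06	2 literal non-zero bytes
//	82		run of 2 zero bytes
//	01 07		1 literal non-zero byte
//	00		end-of-stream marker
// so long zero runs (e.g. unused fields in sessiont) are where the win is.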
1934 static int rle_compress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize)
1935 {
1936 int count;
1937 int orig_dsize = dsize;
1938 uint8_t *x, *src;
1939 src = *src_p;
1940
1941 while (ssize > 0 && dsize > 2) {
1942 count = 0;
1943 x = dst++; --dsize; // Reserve space for count byte..
1944
1945 if (*src) { // Copy a run of non-zero bytes.
1946 while (*src && count < 127 && ssize > 0 && dsize > 1) { // Count number of non-zero bytes.
1947 *dst++ = *src++;
1948 --dsize; --ssize;
1949 ++count;
1950 }
1951 *x = count; // Store number of non-zero bytes. Guaranteed to be non-zero!
1952
1953 } else { // Compress a run of zero bytes.
1954 while (*src == 0 && count < 127 && ssize > 0) {
1955 ++src;
1956 --ssize;
1957 ++count;
1958 }
1959 *x = count | 0x80 ;
1960 }
1961 }
1962
1963 *dst++ = 0x0; // Add Stop byte.
1964 --dsize;
1965
1966 *src_p = src;
1967 return (orig_dsize - dsize);
1968 }
1969
1970 //
1971 // Decompress an RLE stream (as produced by rle_compress() above).
1972 //
1973 // Decompresses from '*src_p' (reading at most 'ssize' source bytes)
1974 // into 'dst'; 'dsize' is the size of the decompression buffer
1975 // available.
1976 //
1977 // Returns the number of 'dst' bytes used.
1978 // Updates the 'src_p' pointer to point to the
1979 // first un-used byte.
1980 static int rle_decompress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize)
1981 {
1982 int count;
1983 int orig_dsize = dsize;
1984 uint8_t *src = *src_p;
1985
1986 while (ssize >0 && dsize > 0) { // While there's more to decompress, and there's room in the decompress buffer...
1987 count = *src++; --ssize; // get the count byte from the source.
1988 if (count == 0x0) // End marker reached? If so, finish.
1989 break;
1990
1991 if (count & 0x80) { // Decompress a run of zeros
1992 for (count &= 0x7f ; count > 0 && dsize > 0; --count) {
1993 *dst++ = 0x0;
1994 --dsize;
1995 }
1996 } else { // Copy run of non-zero bytes.
1997 for ( ; count > 0 && ssize && dsize; --count) { // Copy non-zero bytes across.
1998 *dst++ = *src++;
1999 --ssize; --dsize;
2000 }
2001 }
2002 }
2003 *src_p = src;
2004 return (orig_dsize - dsize);
2005 }