+//
+// This joins the cluster multicast group.
+//
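+// Creates a non-blocking UDP socket bound to CLUSTERPORT, joins the
+// multicast group on cluster_interface (with multicast loopback
+// disabled), and records the interface address in my_address.
+// Returns the socket fd, 0 if clustering isn't configured, or -1 on error.
+//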
+int cluster_init()
+{
+ struct sockaddr_in addr;
+ struct sockaddr_in interface_addr;
+ struct ip_mreq mreq;
+ struct ifreq ifr;
+ int opt;
+
+ config->cluster_undefined_sessions = MAXSESSION-1;
+ config->cluster_undefined_tunnels = MAXTUNNEL-1;
+
+ if (!config->cluster_address)
+ return 0;
+ if (!*config->cluster_interface)
+ return 0;
+
+ if ((cluster_sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0)
+ {
+ LOG(0, 0, 0, "Failed to create cluster socket: %s\n", strerror(errno));
+ return -1;
+ }
+
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = AF_INET;
+ addr.sin_port = htons(CLUSTERPORT);
+ addr.sin_addr.s_addr = INADDR_ANY;
+ opt = 1;
+ setsockopt(cluster_sockfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
+
+ opt = fcntl(cluster_sockfd, F_GETFL, 0);
+ fcntl(cluster_sockfd, F_SETFL, opt | O_NONBLOCK);
+
+ if (bind(cluster_sockfd, (void *) &addr, sizeof(addr)) < 0)
+ {
+ LOG(0, 0, 0, "Failed to bind cluster socket: %s\n", strerror(errno));
+ return -1;
+ }
+
+ memset(&ifr, 0, sizeof(ifr));
+ strncpy(ifr.ifr_name, config->cluster_interface, sizeof(ifr.ifr_name) - 1);
+ if (ioctl(cluster_sockfd, SIOCGIFADDR, &ifr) < 0)
+ {
+ LOG(0, 0, 0, "Failed to get interface address for (%s): %s\n", config->cluster_interface, strerror(errno));
+ return -1;
+ }
+
+ memcpy(&interface_addr, &ifr.ifr_addr, sizeof(interface_addr));
+ my_address = interface_addr.sin_addr.s_addr;
+
+ // Join multicast group.
+ mreq.imr_multiaddr.s_addr = config->cluster_address;
+ mreq.imr_interface = interface_addr.sin_addr;
+
+ opt = 0; // Turn off multicast loopback.
+ setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_LOOP, &opt, sizeof(opt));
+
+ if (setsockopt(cluster_sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
+ {
+ LOG(0, 0, 0, "Failed to setsockopt (join mcast group): %s\n", strerror(errno));
+ return -1;
+ }
+
+ if (setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_IF, &interface_addr.sin_addr, sizeof(interface_addr.sin_addr)) < 0)
+ {
+ LOG(0, 0, 0, "Failed to setsockopt (set mcast interface): %s\n", strerror(errno));
+ return -1;
+ }
+
+ config->cluster_last_hb = TIME;
+ config->cluster_seq_number = -1;
+
+ return cluster_sockfd;
+}
+
+
+//
+// Send a chunk of data to the entire cluster (usually via the
+// multicast address).
+//
+
+static int cluster_send_data(void *data, int datalen)
+{
+ struct sockaddr_in addr = {0};
+
+ if (!cluster_sockfd) return -1;
+ if (!config->cluster_address) return 0;
+
+ addr.sin_addr.s_addr = config->cluster_address;
+ addr.sin_port = htons(CLUSTERPORT);
+ addr.sin_family = AF_INET;
+
+ LOG(5, 0, 0, "Cluster send data: %d bytes\n", datalen);
+
+ if (sendto(cluster_sockfd, data, datalen, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
+ {
+ LOG(0, 0, 0, "sendto: %s\n", strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+//
+// Add a chunk of data to a heartbeat packet.
+// Maintains the format. Assumes that the caller
+// has passed in a big enough buffer!
+//
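+// Each element is written as two host-byte-order uint32_t values
+// ('type' and 'more', whose meaning depends on the type), followed
+// by 'size' bytes of data; *p is advanced past everything written.
+//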
+static void add_type(char **p, int type, int more, char *data, int size)
+{
+ *((uint32_t *) (*p)) = type;
+ *p += sizeof(uint32_t);
+
+ *((uint32_t *)(*p)) = more;
+ *p += sizeof(uint32_t);
+
+ if (data && size > 0) {
+ memcpy(*p, data, size);
+ *p += size;
+ }
+}
+
+// advertise our presence via BGP or gratuitous ARP
+static void advertise(void)
+{
+#ifdef BGP
+ if (bgp_configured)
+ bgp_enable_routing(1);
+ else
+#endif /* BGP */
+ if (config->send_garp)
+ send_garp(config->bind_address); // Start taking traffic.
+}
+
+static void cluster_uptodate(void)
+{
+ if (config->cluster_iam_uptodate)
+ return;
+
+ if (config->cluster_undefined_sessions || config->cluster_undefined_tunnels)
+ return;
+
+ config->cluster_iam_uptodate = 1;
+
+ LOG(0, 0, 0, "Now uptodate with master.\n");
+ advertise();
+}
+
+//
+// Send a unicast UDP packet to a peer with 'data' as the
+// contents.
+//
+static int peer_send_data(in_addr_t peer, char *data, int size)
+{
+ struct sockaddr_in addr = {0};
+
+ if (!cluster_sockfd) return -1;
+ if (!config->cluster_address) return 0;
+
+ if (!peer) // No peer address? Shouldn't happen.
+ return -1;
+
+ addr.sin_addr.s_addr = peer;
+ addr.sin_port = htons(CLUSTERPORT);
+ addr.sin_family = AF_INET;
+
+ LOG_HEX(5, "Peer send", data, size);
+
+ if (sendto(cluster_sockfd, data, size, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
+ {
+ LOG(0, 0, 0, "sendto: %s\n", strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+//
+// Send a structured message to a peer with a single element of type 'type'.
+//
+static int peer_send_message(in_addr_t peer, int type, int more, char *data, int size)
+{
+ char buf[65536]; // Vast overkill.
+ char *p = buf;
+
+ LOG(4, 0, 0, "Sending message to peer (type %d, more %d, size %d)\n", type, more, size);
+ add_type(&p, type, more, data, size);
+
+ return peer_send_data(peer, buf, (p-buf) );
+}
+
+//
+// Forward a state changing packet to the master.
+//
+// The master just processes the payload as if it had
+// received it off the tun device.
+//
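+// The element sent is C_FORWARD: 'more' carries the packet's source
+// address, and the data is the source port followed by the raw
+// packet contents.
+//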
+int master_forward_packet(char *data, int size, in_addr_t addr, int port)
+{
+ char buf[65536]; // Vast overkill.
+ char *p = buf;
+
+ if (!config->cluster_master_address) // No election has been held yet. Just skip it.
+ return -1;
+
+ LOG(4, 0, 0, "Forwarding packet from %s to master (size %d)\n", fmtaddr(addr, 0), size);
+
+ STAT(c_forwarded);
+ add_type(&p, C_FORWARD, addr, (char *) &port, sizeof(port));
+ memcpy(p, data, size);
+ p += size;
+
+ return peer_send_data(config->cluster_master_address, buf, (p - buf));
+}
+
+//
+// Forward a throttled packet to the master for handling.
+//
+// The master just drops the packet into the appropriate
+// token bucket queue, and lets normal processing take care
+// of it.
+//
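+// The element sent is C_THROTTLE, with 'more' carrying the token
+// bucket filter id and the data holding the raw packet.
+//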
+int master_throttle_packet(int tbfid, char *data, int size)
+{
+ char buf[65536]; // Vast overkill.
+ char *p = buf;
+
+ if (!config->cluster_master_address) // No election has been held yet. Just skip it.
+ return -1;
+
+ LOG(4, 0, 0, "Throttling packet master (size %d, tbfid %d)\n", size, tbfid);
+
+ add_type(&p, C_THROTTLE, tbfid, data, size);
+
+ return peer_send_data(config->cluster_master_address, buf, (p - buf));
+}
+
+//
+// Forward a walled garden packet to the master for handling.
+//
+// The master just writes the packet straight to the tun
+// device (where it will normally loop through the
+// firewall rules, and come back in on the tun device)
+//
+// (Note that this must be called with the tun header
+// as the start of the data).
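+//
+// The element sent is C_GARDEN, with 'more' carrying the session id
+// and the data holding the tun-framed packet.
+//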
+int master_garden_packet(sessionidt s, char *data, int size)
+{
+ char buf[65536]; // Vast overkill.
+ char *p = buf;
+
+ if (!config->cluster_master_address) // No election has been held yet. Just skip it.
+ return -1;
+
+ LOG(4, 0, 0, "Walled garden packet to master (size %d)\n", size);
+
+ add_type(&p, C_GARDEN, s, data, size);
+
+ return peer_send_data(config->cluster_master_address, buf, (p - buf));
+}
+
+//
+// Send a chunk of data as a heartbeat.
+// We save it in the history buffer as we do so.
+//
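+// The history is indexed by sequence number modulo HB_HISTORY_SIZE,
+// presumably so that a recent heartbeat can be re-sent to a peer
+// that missed it (see the C_LASTSEEN probe in cluster_check_master).
+//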
+static void send_heartbeat(int seq, char *data, int size)
+{
+ int i;
+
+ if (size > sizeof(past_hearts[0].data))
+ {
+ LOG(0, 0, 0, "Tried to heartbeat something larger than the maximum packet!\n");
+ kill(0, SIGTERM);
+ exit(1);
+ }
+ i = seq % HB_HISTORY_SIZE;
+ past_hearts[i].seq = seq;
+ past_hearts[i].size = size;
+ memcpy(&past_hearts[i].data, data, size); // Save it.
+ cluster_send_data(data, size);
+}
+
+//
+// Send an 'i am alive' message to every machine in the cluster.
+//
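+// The master normally doesn't ping (it heartbeats instead). The ping
+// carries our address, our count of undefined sessions and tunnels,
+// and our basetime (a zero basetime apparently marks a shutting-down
+// peer; see cluster_check_slaves).
+//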
+void cluster_send_ping(time_t basetime)
+{
+ char buff[100 + sizeof(pingt)];
+ char *p = buff;
+ pingt x;
+
+ if (config->cluster_iam_master && basetime) // We're heartbeating so no need to ping.
+ return;
+
+ LOG(5, 0, 0, "Sending cluster ping...\n");
+
+ x.ver = 1;
+ x.addr = config->bind_address;
+ x.undef = config->cluster_undefined_sessions + config->cluster_undefined_tunnels;
+ x.basetime = basetime;
+
+ add_type(&p, C_PING, basetime, (char *) &x, sizeof(x));
+ cluster_send_data(buff, (p-buff) );
+}
+
+//
+// Walk the session counters looking for non-zero ones to send
+// to the master. We send up to MAX_B_RECS (600) of them at a time,
+// and examine at least 3000 sessions per call (or the whole table,
+// if the highest session id is larger), stopping early once a full
+// batch of changed sessions has been collected.
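+// walk_session_number persists between calls, so successive calls
+// work their way around the session table.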
+
+#define MAX_B_RECS (600)
+void master_update_counts(void)
+{
+ int i, c;
+ bytest b[MAX_B_RECS+1];
+
+ if (config->cluster_iam_master) // Only happens on the slaves.
+ return;
+
+ if (!config->cluster_master_address) // If we don't have a master, skip it for a while.
+ return;
+
+ i = MAX_B_RECS * 5; // Examine at least 3000 sessions each call.
+ if (config->cluster_highest_sessionid > i)
+ i = config->cluster_highest_sessionid;
+
+ for ( c = 0; i > 0 ; --i) {
+ // Next session to look at.
+ walk_session_number++;
+ if ( walk_session_number > config->cluster_highest_sessionid)
+ walk_session_number = 1;
+
+ if (!sess_local[walk_session_number].cin && !sess_local[walk_session_number].cout)
+ continue; // Unused. Skip it.
+
+ b[c].sid = walk_session_number;
+ b[c].in = sess_local[walk_session_number].cin;
+ b[c].out = sess_local[walk_session_number].cout;
+
+ // Reset counters now that they've been copied.
+ sess_local[walk_session_number].cin = sess_local[walk_session_number].cout = 0;
+
+ if (++c > MAX_B_RECS) // Send a max of MAX_B_RECS (600) elements in a packet.
+ break;
+ }
+
+ if (!c) // Didn't find any that changed. Get out of here!
+ return;
+
+ // Forward the data to the master.
+ LOG(4, 0, 0, "Sending byte counters to master (%d elements)\n", c);
+ peer_send_message(config->cluster_master_address, C_BYTES, c, (char *) &b, sizeof(b[0]) * c);
+ return;
+}
+
+//
+// On the master, check how our slaves are going. If
+// one of them is not up to date we'll heartbeat faster.
+// If we don't have any of them, then we need to turn
+// on our own packet handling!
+//
+void cluster_check_slaves(void)
+{
+ int i;
+ static int have_peers = 0;
+ int had_peers = have_peers;
+ clockt t = TIME;
+
+ if (!config->cluster_iam_master)
+ return; // Only runs on the master...
+
+ config->cluster_iam_uptodate = 1; // cleared in loop below
+
+ for (i = have_peers = 0; i < num_peers; i++)
+ {
+ if ((peers[i].timestamp + config->cluster_hb_timeout) < t)
+ continue; // Stale peer! Skip them.
+
+ if (!peers[i].basetime)
+ continue; // Shutdown peer! Skip them.
+
+ if (peers[i].uptodate)
+ have_peers = 1;
+ else
+ config->cluster_iam_uptodate = 0; // Start fast heartbeats.
+ }
+
+#ifdef BGP
+ // in a cluster, withdraw/add routes when we get a peer/lose all peers
+ if (bgp_configured && have_peers != had_peers)
+ bgp_enable_routing(!have_peers);
+#endif /* BGP */
+}
+
+//
+// Check that we have a master. If it's been too
+// long since we heard from a master then hold an election.
+//
+void cluster_check_master(void)
+{
+ int i, count, high_unique_id = 0;
+ int last_free = 0;
+ clockt t = TIME;
+ static int probed = 0;
+
+ if (config->cluster_iam_master)
+ return; // Only runs on the slaves...
+
+ // If the master is late (missed 2 heartbeats by a second and a
+ // hair) it may be that the switch has dropped us from the
+ // multicast group, try unicasting one probe to the master
+ // which will hopefully respond with a unicast heartbeat that
+ // will allow us to limp along until the querier next runs.
+ if (TIME > (config->cluster_last_hb + 2 * config->cluster_hb_interval + 11))
+ {
+ if (!probed && config->cluster_master_address)
+ {
+ probed = 1;
+ LOG(1, 0, 0, "Heartbeat from master %.1fs late, probing...\n",
+ 0.1 * (TIME - (config->cluster_last_hb + config->cluster_hb_interval)));
+
+ peer_send_message(config->cluster_master_address,
+ C_LASTSEEN, config->cluster_seq_number, NULL, 0);
+ }
+ }
+ else // We got a recent heartbeat; reset the probe flag.
+ probed = 0;
+
+ if (TIME < (config->cluster_last_hb + config->cluster_hb_timeout))
+ return; // Everything's ok!
+
+ config->cluster_last_hb = TIME + 1; // Just the one election thanks.
+
+ LOG(0, 0, 0, "Master timed out! Holding election...\n");
+
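+ // Election rule: the peer with the oldest basetime (longest uptime)
+ // wins; ties go to the numerically higher peer address. If any live
+ // peer beats us on that ordering, back off and let them take over.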
+ for (i = 0; i < num_peers; i++)
+ {
+ if ((peers[i].timestamp + config->cluster_hb_timeout) < t)
+ continue; // Stale peer! Skip them.
+
+ if (!peers[i].basetime)
+ continue; // Shutdown peer! Skip them.
+
+ if (peers[i].basetime < basetime) {
+ LOG(1, 0, 0, "Expecting %s to become master\n", fmtaddr(peers[i].peer, 0));
+ return; // They'll win the election. Get out of here.
+ }
+
+ if (peers[i].basetime == basetime &&
+ peers[i].peer > my_address) {
+ LOG(1, 0, 0, "Expecting %s to become master\n", fmtaddr(peers[i].peer, 0));
+ return; // They'll win the election. Wait for them to come up.
+ }
+ }
+
+ // Wow. It's been ages since I last heard a heartbeat
+ // and I'm better than any of my peers, so it's time
+ // to become a master!!!
+
+ config->cluster_iam_master = 1;
+ config->cluster_master_address = 0;
+
+ LOG(0, 0, 0, "I am declaring myself the master!\n");
+
+ if (config->cluster_seq_number == -1)
+ config->cluster_seq_number = 0;
+
+ //
+ // Go through and mark all the tunnels as defined.
+ // Count the highest used tunnel number as well.
+ //
+ config->cluster_highest_tunnelid = 0;
+ for (i = 0; i < MAXTUNNEL; ++i) {
+ if (tunnel[i].state == TUNNELUNDEF)
+ tunnel[i].state = TUNNELFREE;
+
+ if (tunnel[i].state != TUNNELFREE && i > config->cluster_highest_tunnelid)
+ config->cluster_highest_tunnelid = i;
+ }
+
+ //
+ // Go through and mark all the sessions as being defined.
+ // Reset the idle timeouts.
+ // Add temporary byte counters to permanent ones.
+ // Re-string the free list.
+ // Find the ID of the highest session.
+ //
+ last_free = 0;
+ high_unique_id = 0;
+ config->cluster_highest_sessionid = 0;
+ for (i = 0, count = 0; i < MAXSESSION; ++i) {
+ if (session[i].tunnel == T_UNDEF) {
+ session[i].tunnel = T_FREE;
+ ++count;
+ }
+
+ if (!session[i].opened) { // Unused session. Add to free list.
+ memset(&session[i], 0, sizeof(session[i]));
+ session[i].tunnel = T_FREE;
+ session[last_free].next = i;
+ session[i].next = 0;
+ last_free = i;
+ continue;
+ }
+
+ // Reset all the idle timeouts..
+ session[i].last_packet = time_now;
+
+ // Accumulate un-sent byte counters.
+ session[i].cin += sess_local[i].cin;
+ session[i].cout += sess_local[i].cout;
+ session[i].total_cin += sess_local[i].cin;
+ session[i].total_cout += sess_local[i].cout;
+
+ sess_local[i].cin = sess_local[i].cout = 0;
+
+ session[i].radius = 0; // Reset authentication as the radius blocks aren't up to date.
+
+ if (session[i].unique_id >= high_unique_id) // This is different to the index into the session table!!!
+ high_unique_id = session[i].unique_id+1;
+
+ session[i].tbf_in = session[i].tbf_out = 0; // Remove stale pointers from old master.
+ throttle_session(i, session[i].throttle_in, session[i].throttle_out);
+
+ config->cluster_highest_sessionid = i;
+ }
+
+ session[last_free].next = 0; // End of chain.
+ last_id = high_unique_id; // Keep track of the highest used session ID.
+
+ become_master();
+
+ rebuild_address_pool();
+
+ // If we're not the very first master, this is a big issue!
+ if (count > 0)
+ LOG(0, 0, 0, "Warning: Fixed %d uninitialized sessions in becoming master!\n", count);
+
+ config->cluster_undefined_sessions = 0;
+ config->cluster_undefined_tunnels = 0;
+ config->cluster_iam_uptodate = 1; // assume all peers are up-to-date
+
+ if (!num_peers) // lone master
+ advertise();
+
+ // FIXME. We need to fix up the tunnel control message
+ // queue here! There's a number of other variables we
+ // should also update.
+}
+
+
+//
+// Check that our session table validly matches what the
+// master has in mind.
+//
+// In particular, if we have too many sessions marked 'undefined'
+// we fix it up here, and we ensure that the 'first free session'
+// pointer is valid.
+//
+static void cluster_check_sessions(int highsession, int freesession_ptr, int hightunnel)