+ else if (*(uint16_t *) (buf + 2) == htons(PKTIPV6) // IPV6
+ && config->ipv6_prefix.s6_addr[0])
+ processipv6out(buf, len);
+
+ // Else discard.
+}
+
+// Handle retries, timeouts. Runs every 1/10th sec, want to ensure
+// that we look at the whole of the tunnel, radius and session tables
+// every second
+//
+// period is the elapsed time (fractional seconds) since the previous
+// run; it sizes the slice of each table scanned on this call so that a
+// complete pass over each table takes roughly one second.  The static
+// cursors below carry the scan position from one call to the next.
+static void regular_cleanups(double period)
+{
+	// Next tunnel, radius and session to check for actions on.
+	static tunnelidt t = 0;
+	static int r = 0;
+	static sessionidt s = 0;
+
+	int t_actions = 0;
+	int r_actions = 0;
+	int s_actions = 0;
+
+	int t_slice;
+	int r_slice;
+	int s_slice;
+
+	int i;
+	int a;
+
+	// divide up tables into slices based on the last run
+	t_slice = config->cluster_highest_tunnelid * period;
+	r_slice = (MAXRADIUS - 1) * period;
+	s_slice = config->cluster_highest_sessionid * period;
+
+	// clamp each slice to [1, table size]: always make some progress,
+	// never visit an entry twice within one call
+	if (t_slice < 1)
+		t_slice = 1;
+	else if (t_slice > config->cluster_highest_tunnelid)
+		t_slice = config->cluster_highest_tunnelid;
+
+	if (r_slice < 1)
+		r_slice = 1;
+	else if (r_slice > (MAXRADIUS - 1))
+		r_slice = MAXRADIUS - 1;
+
+	if (s_slice < 1)
+		s_slice = 1;
+	else if (s_slice > config->cluster_highest_sessionid)
+		s_slice = config->cluster_highest_sessionid;
+
+	LOG(4, 0, 0, "Begin regular cleanup (last %f seconds ago)\n", period);
+
+	// ---- tunnel table slice (entries are 1-based; cursor wraps to 1) ----
+	for (i = 0; i < t_slice; i++)
+	{
+		t++;
+		if (t > config->cluster_highest_tunnelid)
+			t = 1;
+
+		// check for expired tunnels
+		// NOTE(review): tunnel/radius timers (die/retry) compare against TIME
+		// while the session code below uses time_now — presumably different
+		// clocks/units; confirm before changing either comparison.
+		if (tunnel[t].die && tunnel[t].die <= TIME)
+		{
+			STAT(tunnel_timeout);
+			tunnelkill(t, "Expired");
+			t_actions++;
+			continue;
+		}
+		// check for message resend
+		if (tunnel[t].retry && tunnel[t].controlc)
+		{
+			// resend pending messages as timeout on reply
+			if (tunnel[t].retry <= TIME)
+			{
+				controlt *c = tunnel[t].controls;
+				uint8_t w = tunnel[t].window;
+				tunnel[t].try++; // another try
+				if (tunnel[t].try > 5)
+					tunnelkill(t, "Timeout on control message"); // game over
+				else
+					// retransmit up to a window's worth of queued control messages
+					while (c && w--)
+					{
+						tunnelsend(c->buf, c->length, t);
+						c = c->next;
+					}
+
+				t_actions++;
+			}
+		}
+		// Send hello (keepalive) when the tunnel is open, has nothing
+		// queued, and has been quiet for more than 60 seconds
+		if (tunnel[t].state == TUNNELOPEN && !tunnel[t].controlc && (time_now - tunnel[t].lastrec) > 60)
+		{
+			controlt *c = controlnew(6); // sending HELLO
+			controladd(c, 0, t); // send the message
+			LOG(3, 0, t, "Sending HELLO message\n");
+			t_actions++;
+		}
+
+		// Check for tunnel changes requested from the CLI
+		if ((a = cli_tunnel_actions[t].action))
+		{
+			cli_tunnel_actions[t].action = 0; // consume the request
+			if (a & CLI_TUN_KILL)
+			{
+				LOG(2, 0, t, "Dropping tunnel by CLI\n");
+				tunnelshutdown(t, "Requested by administrator", 1, 0, 0);
+				t_actions++;
+			}
+		}
+	}
+
+	// ---- radius table slice (slot 0 unused; cursor wraps to 1) ----
+	for (i = 0; i < r_slice; i++)
+	{
+		r++;
+		if (r >= MAXRADIUS)
+			r = 1;
+
+		if (!radius[r].state)	// slot not in use
+			continue;
+
+		if (radius[r].retry <= TIME)
+		{
+			radiusretry(r);
+			r_actions++;
+		}
+	}
+
+	// ---- session table slice (entries are 1-based; cursor wraps to 1) ----
+	for (i = 0; i < s_slice; i++)
+	{
+		s++;
+		if (s > config->cluster_highest_sessionid)
+			s = 1;
+
+		if (!session[s].opened)	// Session isn't in use
+			continue;
+
+		// check for expired sessions
+		if (session[s].die)
+		{
+			if (session[s].die <= TIME)
+			{
+				sessionkill(s, "Expired");
+				s_actions++;
+			}
+			continue;	// dying session: skip all remaining checks
+		}
+
+		// check for timed out sessions
+		if (session[s].timeout)
+		{
+			bundleidt bid = session[s].bundle;
+			if (bid)
+			{
+				// MP bundle member: accumulate per-link online time (at most
+				// once per second) and shut down every member link once the
+				// bundle's total reaches this session's timeout
+				clockt curr_time = time_now;
+				if (curr_time - bundle[bid].last_check >= 1)
+				{
+					bundle[bid].online_time += (curr_time-bundle[bid].last_check)*bundle[bid].num_of_links;
+					bundle[bid].last_check = curr_time;
+					if (bundle[bid].online_time >= session[s].timeout)
+					{
+						int ses;
+						for (ses = bundle[bid].num_of_links - 1; ses >= 0; ses--)
+						{
+							sessionshutdown(bundle[bid].members[ses], "Session timeout", CDN_ADMIN_DISC, TERM_SESSION_TIMEOUT);
+							s_actions++;
+							continue;	// NOTE(review): affects only this inner loop — a no-op as the last statement
+						}
+					}
+				}
+			}
+			else if (session[s].timeout <= time_now - session[s].opened)
+			{
+				// non-bundled session: timeout measured from open time
+				sessionshutdown(s, "Session timeout", CDN_ADMIN_DISC, TERM_SESSION_TIMEOUT);
+				s_actions++;
+				continue;
+			}
+		}
+
+		// PPP timeouts: LCP Configure-Request retransmission
+		if (sess_local[s].lcp.restart <= time_now)
+		{
+			int next_state = session[s].ppp.lcp;
+			switch (session[s].ppp.lcp)
+			{
+				case RequestSent:
+				case AckReceived:
+					next_state = RequestSent;
+					// fall through: shares the resend logic below, but drops
+					// back to RequestSent (any received ack is forgotten)
+
+				case AckSent:
+					if (sess_local[s].lcp.conf_sent < config->ppp_max_configure)
+					{
+						LOG(3, s, session[s].tunnel, "No ACK for LCP ConfigReq... resending\n");
+						sendlcp(s, session[s].tunnel);
+						change_state(s, lcp, next_state);
+					}
+					else
+					{
+						// out of retries: LCP failure is fatal for the session
+						sessionshutdown(s, "No response to LCP ConfigReq.", CDN_ADMIN_DISC, TERM_LOST_SERVICE);
+						STAT(session_timeout);
+					}
+
+					s_actions++;
+			}
+
+			if (session[s].die)
+				continue;	// shutdown above marked the session for death
+		}
+
+		// IPCP Configure-Request retransmission (same pattern as LCP)
+		if (sess_local[s].ipcp.restart <= time_now)
+		{
+			int next_state = session[s].ppp.ipcp;
+			switch (session[s].ppp.ipcp)
+			{
+				case RequestSent:
+				case AckReceived:
+					next_state = RequestSent;
+					// fall through: shared resend logic below
+
+				case AckSent:
+					if (sess_local[s].ipcp.conf_sent < config->ppp_max_configure)
+					{
+						LOG(3, s, session[s].tunnel, "No ACK for IPCP ConfigReq... resending\n");
+						sendipcp(s, session[s].tunnel);
+						change_state(s, ipcp, next_state);
+					}
+					else
+					{
+						// out of retries: IPCP failure is fatal for the session
+						sessionshutdown(s, "No response to IPCP ConfigReq.", CDN_ADMIN_DISC, TERM_LOST_SERVICE);
+						STAT(session_timeout);
+					}
+
+					s_actions++;
+			}
+
+			if (session[s].die)
+				continue;	// shutdown above marked the session for death
+		}
+
+		// IPV6CP Configure-Request retransmission; unlike LCP/IPCP,
+		// giving up here only stops IPV6CP, it does not drop the session
+		if (sess_local[s].ipv6cp.restart <= time_now)
+		{
+			int next_state = session[s].ppp.ipv6cp;
+			switch (session[s].ppp.ipv6cp)
+			{
+				case RequestSent:
+				case AckReceived:
+					next_state = RequestSent;
+					// fall through: shared resend logic below
+
+				case AckSent:
+					if (sess_local[s].ipv6cp.conf_sent < config->ppp_max_configure)
+					{
+						LOG(3, s, session[s].tunnel, "No ACK for IPV6CP ConfigReq... resending\n");
+						sendipv6cp(s, session[s].tunnel);
+						change_state(s, ipv6cp, next_state);
+					}
+					else
+					{
+						LOG(3, s, session[s].tunnel, "No ACK for IPV6CP ConfigReq\n");
+						change_state(s, ipv6cp, Stopped);
+					}
+
+					s_actions++;
+			}
+		}
+
+		// CCP Configure-Request retransmission; like IPV6CP, giving up
+		// only stops CCP rather than dropping the session
+		if (sess_local[s].ccp.restart <= time_now)
+		{
+			int next_state = session[s].ppp.ccp;
+			switch (session[s].ppp.ccp)
+			{
+				case RequestSent:
+				case AckReceived:
+					next_state = RequestSent;
+					// fall through: shared resend logic below
+
+				case AckSent:
+					if (sess_local[s].ccp.conf_sent < config->ppp_max_configure)
+					{
+						LOG(3, s, session[s].tunnel, "No ACK for CCP ConfigReq... resending\n");
+						sendccp(s, session[s].tunnel);
+						change_state(s, ccp, next_state);
+					}
+					else
+					{
+						LOG(3, s, session[s].tunnel, "No ACK for CCP ConfigReq\n");
+						change_state(s, ccp, Stopped);
+					}
+
+					s_actions++;
+			}
+		}
+
+		// Drop sessions who have not responded within IDLE_TIMEOUT seconds
+		if (session[s].last_packet && (time_now - session[s].last_packet >= IDLE_TIMEOUT))
+		{
+			sessionshutdown(s, "No response to LCP ECHO requests.", CDN_ADMIN_DISC, TERM_LOST_SERVICE);
+			STAT(session_timeout);
+			s_actions++;
+			continue;
+		}
+
+		// No data in ECHO_TIMEOUT seconds, send LCP ECHO
+		// (rate-limited by last_echo so we send at most one per ECHO_TIMEOUT)
+		if (session[s].ppp.phase >= Establish && (time_now - session[s].last_packet >= ECHO_TIMEOUT) &&
+			(time_now - sess_local[s].last_echo >= ECHO_TIMEOUT))
+		{
+			uint8_t b[MAXETHER];
+
+			uint8_t *q = makeppp(b, sizeof(b), 0, 0, s, session[s].tunnel, PPPLCP, 1, 0, 0);
+			if (!q) continue;	// could not build the PPP frame; try again next pass
+
+			*q = EchoReq;
+			*(uint8_t *)(q + 1) = (time_now % 255); // ID
+			*(uint16_t *)(q + 2) = htons(8); // Length
+			*(uint32_t *)(q + 4) = session[s].ppp.lcp == Opened ? htonl(session[s].magic) : 0; // Magic Number
+
+			LOG(4, s, session[s].tunnel, "No data in %d seconds, sending LCP ECHO\n",
+				(int)(time_now - session[s].last_packet));
+			// NOTE(review): 24 is presumably makeppp's header size plus the
+			// 8-byte echo payload — confirm against makeppp before changing
+			tunnelsend(b, 24, session[s].tunnel); // send it
+			sess_local[s].last_echo = time_now;
+			s_actions++;
+		}
+
+		// Drop sessions who have reached session_timeout seconds
+		if (session[s].session_timeout && (time_now - session[s].opened >= session[s].session_timeout))
+		{
+			sessionshutdown(s, "Session Timeout Reached", CDN_ADMIN_DISC, TERM_SESSION_TIMEOUT);
+			STAT(session_timeout);
+			s_actions++;
+			continue;
+		}
+
+		// Drop sessions who have reached idle_timeout seconds
+		if (session[s].last_data && session[s].idle_timeout && (time_now - session[s].last_data >= session[s].idle_timeout))
+		{
+			sessionshutdown(s, "Idle Timeout Reached", CDN_ADMIN_DISC, TERM_IDLE_TIMEOUT);
+			STAT(session_timeout);
+			s_actions++;
+			continue;
+		}
+
+		// Check for actions requested from the CLI
+		if ((a = cli_session_actions[s].action))
+		{
+			int send = 0;	// set when a change must be propagated to the cluster
+
+			cli_session_actions[s].action = 0; // consume the request
+			if (a & CLI_SESS_KILL)
+			{
+				LOG(2, s, session[s].tunnel, "Dropping session by CLI\n");
+				sessionshutdown(s, "Requested by administrator.", CDN_ADMIN_DISC, TERM_ADMIN_RESET);
+				a = 0; // dead, no need to check for other actions
+				s_actions++;
+			}
+
+			// snoop on/off are mutually exclusive; off wins
+			if (a & CLI_SESS_NOSNOOP)
+			{
+				LOG(2, s, session[s].tunnel, "Unsnooping session by CLI\n");
+				session[s].snoop_ip = 0;
+				session[s].snoop_port = 0;
+				s_actions++;
+				send++;
+			}
+			else if (a & CLI_SESS_SNOOP)
+			{
+				LOG(2, s, session[s].tunnel, "Snooping session by CLI (to %s:%u)\n",
+					fmtaddr(cli_session_actions[s].snoop_ip, 0),
+					cli_session_actions[s].snoop_port);
+
+				session[s].snoop_ip = cli_session_actions[s].snoop_ip;
+				session[s].snoop_port = cli_session_actions[s].snoop_port;
+				s_actions++;
+				send++;
+			}
+
+			// throttle on/off are mutually exclusive; off wins
+			if (a & CLI_SESS_NOTHROTTLE)
+			{
+				LOG(2, s, session[s].tunnel, "Un-throttling session by CLI\n");
+				throttle_session(s, 0, 0);
+				s_actions++;
+				send++;
+			}
+			else if (a & CLI_SESS_THROTTLE)
+			{
+				LOG(2, s, session[s].tunnel, "Throttling session by CLI (to %dkb/s up and %dkb/s down)\n",
+					cli_session_actions[s].throttle_in,
+					cli_session_actions[s].throttle_out);
+
+				throttle_session(s, cli_session_actions[s].throttle_in, cli_session_actions[s].throttle_out);
+				s_actions++;
+				send++;
+			}
+
+			// filter on/off are mutually exclusive; off wins
+			if (a & CLI_SESS_NOFILTER)
+			{
+				LOG(2, s, session[s].tunnel, "Un-filtering session by CLI\n");
+				filter_session(s, 0, 0);
+				s_actions++;
+				send++;
+			}
+			else if (a & CLI_SESS_FILTER)
+			{
+				LOG(2, s, session[s].tunnel, "Filtering session by CLI (in=%d, out=%d)\n",
+					cli_session_actions[s].filter_in,
+					cli_session_actions[s].filter_out);
+
+				filter_session(s, cli_session_actions[s].filter_in, cli_session_actions[s].filter_out);
+				s_actions++;
+				send++;
+			}
+
+			if (send)
+				cluster_send_session(s);
+		}
+
+		// RADIUS interim accounting
+		if (config->radius_accounting && config->radius_interim > 0
+			    && session[s].ip && !session[s].walled_garden
+			    && !sess_local[s].radius // RADIUS already in progress
+			    && time_now - sess_local[s].last_interim >= config->radius_interim)
+		{
+			int rad = radiusnew(s);
+			if (!rad)
+			{
+				// no free slot; last_interim unchanged, so we retry next pass
+				LOG(1, s, session[s].tunnel, "No free RADIUS sessions for Interim message\n");
+				STAT(radius_overflow);
+				continue;
+			}
+
+			LOG(3, s, session[s].tunnel, "Sending RADIUS Interim for %s (%u)\n",
+				session[s].user, session[s].unique_id);
+
+			radiussend(rad, RADIUSINTERIM);
+			sess_local[s].last_interim = time_now;
+			s_actions++;
+		}
+	}
+
+	LOG(4, 0, 0, "End regular cleanup: checked %d/%d/%d tunnels/radius/sessions; %d/%d/%d actions\n",
+		t_slice, r_slice, s_slice, t_actions, r_actions, s_actions);
+}
+
+//
+// Are we in the middle of a tunnel update, or radius
+// requests??
+//
+static int still_busy(void)
+{
+ int i;
+ static clockt last_talked = 0;
+ static clockt start_busy_wait = 0;
+
+ if (!config->cluster_iam_master)