+
+ for (i = 0; i < s_slice; i++)
+ {
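+ // Advance to the next session slot, wrapping past the highest id in use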
+ s++;
+ if (s > config->cluster_highest_sessionid)
+ s = 1;
+
+ if (!session[s].opened) // Session isn't in use
+ continue;
+
+ // Check for expired sessions
+ if (session[s].die)
+ {
+ if (session[s].die <= TIME)
+ {
+ sessionkill(s, "Expired");
+ s_actions++;
+ }
+ continue;
+ }
+
+ // PPP timeouts
+ if (sess_local[s].lcp.restart <= time_now)
+ {
+ int next_state = session[s].ppp.lcp;
+ switch (session[s].ppp.lcp)
+ {
+ case RequestSent:
+ case AckReceived:
+ next_state = RequestSent;
+
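+ // fall through to the shared resend logic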
+ case AckSent:
+ if (sess_local[s].lcp.conf_sent < config->ppp_max_configure)
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for LCP ConfigReq... resending\n");
+ sendlcp(s, session[s].tunnel);
+ change_state(s, lcp, next_state);
+ }
+ else
+ {
+ sessionshutdown(s, "No response to LCP ConfigReq.", CDN_ADMIN_DISC, TERM_LOST_SERVICE);
+ STAT(session_timeout);
+ }
+
+ s_actions++;
+ }
+
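+ // if the shutdown above scheduled the session to die, skip the remaining checks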
+ if (session[s].die)
+ continue;
+ }
+
+ if (sess_local[s].ipcp.restart <= time_now)
+ {
+ int next_state = session[s].ppp.ipcp;
+ switch (session[s].ppp.ipcp)
+ {
+ case RequestSent:
+ case AckReceived:
+ next_state = RequestSent;
+
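+ // fall through to the shared resend logic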
+ case AckSent:
+ if (sess_local[s].ipcp.conf_sent < config->ppp_max_configure)
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for IPCP ConfigReq... resending\n");
+ sendipcp(s, session[s].tunnel);
+ change_state(s, ipcp, next_state);
+ }
+ else
+ {
+ sessionshutdown(s, "No response to IPCP ConfigReq.", CDN_ADMIN_DISC, TERM_LOST_SERVICE);
+ STAT(session_timeout);
+ }
+
+ s_actions++;
+ }
+
+ if (session[s].die)
+ continue;
+ }
+
+ if (sess_local[s].ipv6cp.restart <= time_now)
+ {
+ int next_state = session[s].ppp.ipv6cp;
+ switch (session[s].ppp.ipv6cp)
+ {
+ case RequestSent:
+ case AckReceived:
+ next_state = RequestSent;
+
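+ // fall through to the shared resend logic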
+ case AckSent:
+ if (sess_local[s].ipv6cp.conf_sent < config->ppp_max_configure)
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for IPV6CP ConfigReq... resending\n");
+ sendipv6cp(s, session[s].tunnel);
+ change_state(s, ipv6cp, next_state);
+ }
+ else
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for IPV6CP ConfigReq\n");
+ change_state(s, ipv6cp, Stopped);
+ }
+
+ s_actions++;
+ }
+ }
+
+ if (sess_local[s].ccp.restart <= time_now)
+ {
+ int next_state = session[s].ppp.ccp;
+ switch (session[s].ppp.ccp)
+ {
+ case RequestSent:
+ case AckReceived:
+ next_state = RequestSent;
+
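+ // fall through to the shared resend logic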
+ case AckSent:
+ if (sess_local[s].ccp.conf_sent < config->ppp_max_configure)
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for CCP ConfigReq... resending\n");
+ sendccp(s, session[s].tunnel);
+ change_state(s, ccp, next_state);
+ }
+ else
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for CCP ConfigReq\n");
+ change_state(s, ccp, Stopped);
+ }
+
+ s_actions++;
+ }
+ }
+
+ // Drop sessions that have not responded within IDLE_TIMEOUT seconds
+ if (session[s].last_packet && (time_now - session[s].last_packet >= IDLE_TIMEOUT))
+ {
+ sessionshutdown(s, "No response to LCP ECHO requests.", CDN_ADMIN_DISC, TERM_LOST_SERVICE);
+ STAT(session_timeout);
+ s_actions++;
+ continue;
+ }
+
+ // No data in ECHO_TIMEOUT seconds, send LCP ECHO
+ if (session[s].ppp.phase >= Establish && (time_now - session[s].last_packet >= ECHO_TIMEOUT) &&
+ (time_now - sess_local[s].last_echo >= ECHO_TIMEOUT))
+ {
+ uint8_t b[MAXETHER];
+
+ uint8_t *q = makeppp(b, sizeof(b), 0, 0, s, session[s].tunnel, PPPLCP, 1, 0, 0);
+ if (!q) continue;
+
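+ // Build an 8-byte LCP Echo-Request; include our magic number only once LCP is Opened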
+ *q = EchoReq;
+ *(uint8_t *)(q + 1) = (time_now % 255); // ID
+ *(uint16_t *)(q + 2) = htons(8); // Length
+ *(uint32_t *)(q + 4) = session[s].ppp.lcp == Opened ? htonl(session[s].magic) : 0; // Magic Number
+
+ LOG(4, s, session[s].tunnel, "No data in %d seconds, sending LCP ECHO\n",
+ (int)(time_now - session[s].last_packet));
+ tunnelsend(b, 24, session[s].tunnel); // send it
+ sess_local[s].last_echo = time_now;
+ s_actions++;
+ }
+
+ // Drop sessions that have reached their session_timeout
+ if (session[s].session_timeout)
+ {
+ bundleidt bid = session[s].bundle;
+ if (bid)
+ {
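+ // Charge online time for every link in the bundle since the last check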
+ if (time_now - bundle[bid].last_check >= 1)
+ {
+ bundle[bid].online_time += (time_now - bundle[bid].last_check) * bundle[bid].num_of_links;
+ bundle[bid].last_check = time_now;
+ if (bundle[bid].online_time >= session[s].session_timeout)
+ {
+ int ses;
+ for (ses = bundle[bid].num_of_links - 1; ses >= 0; ses--)
+ {
+ sessionshutdown(bundle[bid].members[ses], "Session timeout", CDN_ADMIN_DISC, TERM_SESSION_TIMEOUT);
+ s_actions++;
+ }
+ }
+ }
+ }
+ else if (time_now - session[s].opened >= session[s].session_timeout)
+ {
+ sessionshutdown(s, "Session timeout", CDN_ADMIN_DISC, TERM_SESSION_TIMEOUT);
+ s_actions++;
+ continue;
+ }
+ }
+
+ // Drop sessions that have been idle for idle_timeout seconds
+ if (session[s].last_data && session[s].idle_timeout && (time_now - session[s].last_data >= session[s].idle_timeout))
+ {
+ sessionshutdown(s, "Idle Timeout Reached", CDN_ADMIN_DISC, TERM_IDLE_TIMEOUT);
+ STAT(session_timeout);
+ s_actions++;
+ continue;
+ }
+
+ // Check for actions requested from the CLI
+ if ((a = cli_session_actions[s].action))
+ {
+ int send = 0; // set when the changed session must be pushed to the cluster
+
+ cli_session_actions[s].action = 0;
+ if (a & CLI_SESS_KILL)
+ {
+ LOG(2, s, session[s].tunnel, "Dropping session by CLI\n");
+ sessionshutdown(s, "Requested by administrator.", CDN_ADMIN_DISC, TERM_ADMIN_RESET);
+ a = 0; // dead, no need to check for other actions
+ s_actions++;
+ }
+
+ if (a & CLI_SESS_NOSNOOP)
+ {
+ LOG(2, s, session[s].tunnel, "Unsnooping session by CLI\n");
+ session[s].snoop_ip = 0;
+ session[s].snoop_port = 0;
+ s_actions++;
+ send++;
+ }
+ else if (a & CLI_SESS_SNOOP)
+ {
+ LOG(2, s, session[s].tunnel, "Snooping session by CLI (to %s:%u)\n",
+ fmtaddr(cli_session_actions[s].snoop_ip, 0),
+ cli_session_actions[s].snoop_port);
+
+ session[s].snoop_ip = cli_session_actions[s].snoop_ip;
+ session[s].snoop_port = cli_session_actions[s].snoop_port;
+ s_actions++;
+ send++;
+ }
+
+ if (a & CLI_SESS_NOTHROTTLE)
+ {
+ LOG(2, s, session[s].tunnel, "Un-throttling session by CLI\n");
+ throttle_session(s, 0, 0);
+ s_actions++;
+ send++;
+ }
+ else if (a & CLI_SESS_THROTTLE)
+ {
+ LOG(2, s, session[s].tunnel, "Throttling session by CLI (to %dkb/s up and %dkb/s down)\n",
+ cli_session_actions[s].throttle_in,
+ cli_session_actions[s].throttle_out);
+
+ throttle_session(s, cli_session_actions[s].throttle_in, cli_session_actions[s].throttle_out);
+ s_actions++;
+ send++;
+ }
+
+ if (a & CLI_SESS_NOFILTER)
+ {
+ LOG(2, s, session[s].tunnel, "Un-filtering session by CLI\n");
+ filter_session(s, 0, 0);
+ s_actions++;
+ send++;
+ }
+ else if (a & CLI_SESS_FILTER)
+ {
+ LOG(2, s, session[s].tunnel, "Filtering session by CLI (in=%d, out=%d)\n",
+ cli_session_actions[s].filter_in,
+ cli_session_actions[s].filter_out);
+
+ filter_session(s, cli_session_actions[s].filter_in, cli_session_actions[s].filter_out);
+ s_actions++;
+ send++;
+ }
+
+ if (send)
+ cluster_send_session(s);
+ }
+
+ // RADIUS interim accounting
+ if (config->radius_accounting && config->radius_interim > 0
+ && session[s].ip && !session[s].walled_garden
+ && !sess_local[s].radius // skip if a RADIUS request is already in progress
+ && time_now - sess_local[s].last_interim >= config->radius_interim)
+ {
+ int rad = radiusnew(s);
+ if (!rad)
+ {
+ LOG(1, s, session[s].tunnel, "No free RADIUS sessions for Interim message\n");
+ STAT(radius_overflow);
+ continue;
+ }
+
+ LOG(3, s, session[s].tunnel, "Sending RADIUS Interim for %s (%u)\n",
+ session[s].user, session[s].unique_id);
+
+ radiussend(rad, RADIUSINTERIM);
+ sess_local[s].last_interim = time_now;
+ s_actions++;
+ }
+ }
+
+ LOG(4, 0, 0, "End regular cleanup: checked %d/%d/%d tunnels/radius/sessions; %d/%d/%d actions\n",
+ t_slice, r_slice, s_slice, t_actions, r_actions, s_actions);
+}
+
+//
+// Are we still in the middle of a tunnel update or RADIUS requests?
+//
+static int still_busy(void)
+{
+ int i;
+ static clockt last_talked = 0;
+ static clockt start_busy_wait = 0;
+
+ if (!config->cluster_iam_master)
+ {
+#ifdef BGP
+ static time_t stopped_bgp = 0;
+ if (bgp_configured)
+ {
+ if (!stopped_bgp)
+ {
+ LOG(1, 0, 0, "Shutting down in %d seconds, stopping BGP...\n", QUIT_DELAY);
+
+ for (i = 0; i < BGP_NUM_PEERS; i++)
+ if (bgp_peers[i].state == Established)
+ bgp_stop(&bgp_peers[i]);
+
+ stopped_bgp = time_now;
+
+ // We don't want to become master while we're shutting down
+ cluster_send_ping(0);
+
+ return 1;
+ }
+
+ if (time_now < (stopped_bgp + QUIT_DELAY))
+ return 1;
+ }
+#endif /* BGP */
+
+ return 0;
+ }
+
+ if (main_quit == QUIT_SHUTDOWN)
+ {
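+ // On the first pass, shut down every tunnel (and with it, its sessions)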
+ static int dropped = 0;
+ if (!dropped)
+ {
+ LOG(1, 0, 0, "Dropping sessions and tunnels\n");
+ for (i = 1; i < MAXTUNNEL; i++)
+ if (tunnel[i].ip || tunnel[i].state)
+ tunnelshutdown(i, "L2TPNS Closing", 6, 0, 0);
+
+ dropped = 1;
+ }
+ }
+
+ if (start_busy_wait == 0)
+ start_busy_wait = TIME;
+
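+ // Stay busy while any tunnel still has unacknowledged control messages queued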
+ for (i = config->cluster_highest_tunnelid ; i > 0 ; --i)
+ {
+ if (!tunnel[i].controlc)
+ continue;
+
+ if (last_talked != TIME)
+ {
+ LOG(2, 0, 0, "Tunnel %u still has un-acked control messages.\n", i);
+ last_talked = TIME;
+ }
+ return 1;
+ }
+
+ // Stop waiting for RADIUS after BUSY_WAIT_TIME tenths of a second
+ if (abs(TIME - start_busy_wait) > BUSY_WAIT_TIME)
+ {
+ LOG(1, 0, 0, "Giving up waiting for RADIUS to be empty. Shutting down anyway.\n");
+ return 0;
+ }
+
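+ // Stay busy while any RADIUS request is still in flight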
+ for (i = 1; i < MAXRADIUS; i++)
+ {
+ if (radius[i].state == RADIUSNULL)
+ continue;
+ if (radius[i].state == RADIUSWAIT)
+ continue;
+
+ if (last_talked != TIME)
+ {
+ LOG(2, 0, 0, "Radius session %u is still busy (sid %u)\n", i, radius[i].session);
+ last_talked = TIME;
+ }
+ return 1;
+ }
+
+ return 0;