+ for (i = 1; i < MAXRADIUS; i++)
+ {
+ if (radius[i].state == RADIUSNULL)
+ continue;
+ if (radius[i].state == RADIUSWAIT)
+ continue;
+
+ if (last_talked != TIME)
+ {
+ LOG(2, 0, 0, "Radius session %u is still busy (sid %u)\n", i, radius[i].session);
+ last_talked = TIME;
+ }
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef HAVE_EPOLL
+# include <sys/epoll.h>
+#else
+# define FAKE_EPOLL_IMPLEMENTATION /* include the functions */
+# include "fake_epoll.h"
+#endif
+
+// the base set of fds polled: cli, cluster, tun, udp, control, dae, netlink
+#define BASE_FDS 7
+
+// additional polled fds
+#ifdef BGP
+# define EXTRA_FDS BGP_NUM_PEERS
+#else
+# define EXTRA_FDS 0
+#endif
+
+// main loop - gets packets on tun or udp and processes them
+static void mainloop(void)
+{
+ int i;
+ uint8_t buf[65536];
+ clockt next_cluster_ping = 0; // send initial ping immediately
+ struct epoll_event events[BASE_FDS + RADIUS_FDS + EXTRA_FDS];
+ int maxevent = sizeof(events)/sizeof(*events);
+
+ if ((epollfd = epoll_create(maxevent)) < 0)
+ {
+ LOG(0, 0, 0, "epoll_create failed: %s\n", strerror(errno));
+ exit(1);
+ }
+
+ LOG(4, 0, 0, "Beginning of main loop. clifd=%d, cluster_sockfd=%d, tunfd=%d, udpfd=%d, controlfd=%d, daefd=%d, nlfd=%d\n",
+ clifd, cluster_sockfd, tunfd, udpfd, controlfd, daefd, nlfd);
+
+ /* setup our fds to poll for input */
+ {
+ static struct event_data d[BASE_FDS];
+ struct epoll_event e;
+
+ e.events = EPOLLIN;
+ i = 0;
+
+ if (clifd >= 0)
+ {
+ d[i].type = FD_TYPE_CLI;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, clifd, &e);
+ }
+
+ d[i].type = FD_TYPE_CLUSTER;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, cluster_sockfd, &e);
+
+ d[i].type = FD_TYPE_TUN;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, tunfd, &e);
+
+ d[i].type = FD_TYPE_UDP;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, udpfd, &e);
+
+ d[i].type = FD_TYPE_CONTROL;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, controlfd, &e);
+
+ d[i].type = FD_TYPE_DAE;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, daefd, &e);
+
+ d[i].type = FD_TYPE_NETLINK;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, nlfd, &e);
+ }
+
+#ifdef BGP
+ signal(SIGPIPE, SIG_IGN);
+ bgp_setup(config->as_number);
+ if (config->bind_address)
+ bgp_add_route(config->bind_address, 0xffffffff);
+
+ for (i = 0; i < BGP_NUM_PEERS; i++)
+ {
+ if (config->neighbour[i].name[0])
+ bgp_start(&bgp_peers[i], config->neighbour[i].name,
+ config->neighbour[i].as, config->neighbour[i].keepalive,
+ config->neighbour[i].hold, config->neighbour[i].update_source,
+ 0); /* 0 = routing disabled */
+ }
+#endif /* BGP */
+
+ while (!main_quit || still_busy())
+ {
+ int more = 0;
+ int n;
+
+
+ if (main_reload)
+ {
+ main_reload = 0;
+ read_config_file();
+ config->reload_config++;
+ }
+
+ if (config->reload_config)
+ {
+ config->reload_config = 0;
+ update_config();
+ }
+
+#ifdef BGP
+ bgp_set_poll();
+#endif /* BGP */
+
+ n = epoll_wait(epollfd, events, maxevent, 100); // timeout 100ms (1/10th sec)
+ STAT(select_called);
+
+ TIME = now(NULL);
+ if (n < 0)
+ {
+ if (errno == EINTR ||
+ errno == ECHILD) // EINTR was clobbered by sigchild_handler()
+ continue;
+
+ LOG(0, 0, 0, "Error returned from select(): %s\n", strerror(errno));
+ break; // exit
+ }
+
+ if (n)
+ {
+ struct sockaddr_in addr;
+ struct in_addr local;
+ socklen_t alen;
+ int c, s;
+ int udp_ready = 0;
+ int tun_ready = 0;
+ int cluster_ready = 0;
+ int udp_pkts = 0;
+ int tun_pkts = 0;
+ int cluster_pkts = 0;
+#ifdef BGP
+ uint32_t bgp_events[BGP_NUM_PEERS];
+ memset(bgp_events, 0, sizeof(bgp_events));
+#endif /* BGP */
+
+ for (c = n, i = 0; i < c; i++)
+ {
+ struct event_data *d = events[i].data.ptr;
+
+ switch (d->type)
+ {
+ case FD_TYPE_CLI: // CLI connections
+ {
+ int cli;
+
+ alen = sizeof(addr);
+ if ((cli = accept(clifd, (struct sockaddr *)&addr, &alen)) >= 0)
+ {
+ cli_do(cli);
+ close(cli);
+ }
+ else
+ LOG(0, 0, 0, "accept error: %s\n", strerror(errno));
+
+ n--;
+ break;
+ }
+
+ // these are handled below, with multiple interleaved reads
+ case FD_TYPE_CLUSTER: cluster_ready++; break;
+ case FD_TYPE_TUN: tun_ready++; break;
+ case FD_TYPE_UDP: udp_ready++; break;
+
+ case FD_TYPE_CONTROL: // nsctl commands
+ alen = sizeof(addr);
+ s = recvfromto(controlfd, buf, sizeof(buf), MSG_WAITALL, (struct sockaddr *) &addr, &alen, &local);
+ if (s > 0) processcontrol(buf, s, &addr, alen, &local);
+ n--;
+ break;
+
+ case FD_TYPE_DAE: // DAE requests
+ alen = sizeof(addr);
+ s = recvfromto(daefd, buf, sizeof(buf), MSG_WAITALL, (struct sockaddr *) &addr, &alen, &local);
+ if (s > 0) processdae(buf, s, &addr, alen, &local);
+ n--;
+ break;
+
+ case FD_TYPE_RADIUS: // RADIUS response
+ alen = sizeof(addr);
+ s = recvfrom(radfds[d->index], buf, sizeof(buf), MSG_WAITALL, (struct sockaddr *) &addr, &alen);
+ if (s >= 0 && config->cluster_iam_master)
+ {
+ if (addr.sin_addr.s_addr == config->radiusserver[0] ||
+ addr.sin_addr.s_addr == config->radiusserver[1])
+ processrad(buf, s, d->index);
+ else
+ LOG(3, 0, 0, "Dropping RADIUS packet from unknown source %s\n",
+ fmtaddr(addr.sin_addr.s_addr, 0));
+ }
+
+ n--;
+ break;
+
+#ifdef BGP
+ case FD_TYPE_BGP:
+ bgp_events[d->index] = events[i].events;
+ n--;
+ break;
+#endif /* BGP */
+
+ case FD_TYPE_NETLINK:
+ {
+ struct nlmsghdr *nh = (struct nlmsghdr *)buf;
+ s = netlink_recv(buf, sizeof(buf));
+ if (nh->nlmsg_type == NLMSG_ERROR)
+ {
+ struct nlmsgerr *errmsg = NLMSG_DATA(nh);
+ if (errmsg->error)
+ {
+ if (errmsg->msg.nlmsg_seq < min_initok_nlseqnum)
+ {
+ LOG(0, 0, 0, "Got a fatal netlink error (while %s): %s\n", tun_nl_phase_msg[nh->nlmsg_seq], strerror(-errmsg->error));
+ exit(1);
+ }
+ else
+
+ LOG(0, 0, 0, "Got a netlink error: %s\n", strerror(-errmsg->error));
+ }
+ // else it's a ack
+ }
+ else
+ LOG(1, 0, 0, "Got a unknown netlink message: type %d seq %d flags %d\n", nh->nlmsg_type, nh->nlmsg_seq, nh->nlmsg_flags);
+ n--;
+ break;
+ }
+
+ default:
+ LOG(0, 0, 0, "Unexpected fd type returned from epoll_wait: %d\n", d->type);
+ }
+ }
+
+#ifdef BGP
+ bgp_process(bgp_events);
+#endif /* BGP */
+
+ for (c = 0; n && c < config->multi_read_count; c++)
+ {
+ // L2TP
+ if (udp_ready)
+ {
+ alen = sizeof(addr);
+ if ((s = recvfrom(udpfd, buf, sizeof(buf), 0, (void *) &addr, &alen)) > 0)
+ {
+ processudp(buf, s, &addr);
+ udp_pkts++;
+ }
+ else
+ {
+ udp_ready = 0;
+ n--;
+ }
+ }
+
+ // incoming IP
+ if (tun_ready)
+ {
+ if ((s = read(tunfd, buf, sizeof(buf))) > 0)
+ {
+ processtun(buf, s);
+ tun_pkts++;
+ }
+ else
+ {
+ tun_ready = 0;
+ n--;
+ }
+ }
+
+ // cluster
+ if (cluster_ready)
+ {
+ alen = sizeof(addr);
+ if ((s = recvfrom(cluster_sockfd, buf, sizeof(buf), MSG_WAITALL, (void *) &addr, &alen)) > 0)
+ {
+ processcluster(buf, s, addr.sin_addr.s_addr);
+ cluster_pkts++;
+ }
+ else
+ {
+ cluster_ready = 0;
+ n--;
+ }
+ }
+ }
+
+ if (udp_pkts > 1 || tun_pkts > 1 || cluster_pkts > 1)
+ STAT(multi_read_used);
+
+ if (c >= config->multi_read_count)
+ {
+ LOG(3, 0, 0, "Reached multi_read_count (%d); processed %d udp, %d tun and %d cluster packets\n",
+ config->multi_read_count, udp_pkts, tun_pkts, cluster_pkts);
+
+ STAT(multi_read_exceeded);
+ more++;
+ }
+ }
+#ifdef BGP
+ else
+ /* no event received, but timers could still have expired */
+ bgp_process_peers_timers();
+#endif /* BGP */
+
+ if (time_changed)
+ {
+ double Mbps = 1024.0 * 1024.0 / 8 * time_changed;
+
+ // Log current traffic stats
+ snprintf(config->bandwidth, sizeof(config->bandwidth),
+ "UDP-ETH:%1.0f/%1.0f ETH-UDP:%1.0f/%1.0f TOTAL:%0.1f IN:%u OUT:%u",
+ (udp_rx / Mbps), (eth_tx / Mbps), (eth_rx / Mbps), (udp_tx / Mbps),
+ ((udp_tx + udp_rx + eth_tx + eth_rx) / Mbps),
+ udp_rx_pkt / time_changed, eth_rx_pkt / time_changed);
+
+ udp_tx = udp_rx = 0;
+ udp_rx_pkt = eth_rx_pkt = 0;
+ eth_tx = eth_rx = 0;
+ time_changed = 0;
+
+ if (config->dump_speed)
+ printf("%s\n", config->bandwidth);
+
+ // Update the internal time counter
+ strftime(time_now_string, sizeof(time_now_string), "%Y-%m-%d %H:%M:%S", localtime(&time_now));
+
+ {
+ // Run timer hooks
+ struct param_timer p = { time_now };
+ run_plugins(PLUGIN_TIMER, &p);
+ }
+ }
+
+ // Runs on every machine (master and slaves).
+ if (next_cluster_ping <= TIME)
+ {
+ // Check to see which of the cluster is still alive..
+
+ cluster_send_ping(basetime); // Only does anything if we're a slave
+ cluster_check_master(); // ditto.
+
+ cluster_heartbeat(); // Only does anything if we're a master.
+ cluster_check_slaves(); // ditto.
+
+ master_update_counts(); // If we're a slave, send our byte counters to our master.
+
+ if (config->cluster_iam_master && !config->cluster_iam_uptodate)
+ next_cluster_ping = TIME + 1; // out-of-date slaves, do fast updates
+ else
+ next_cluster_ping = TIME + config->cluster_hb_interval;
+ }
+
+ if (!config->cluster_iam_master)
+ continue;
+
+ // Run token bucket filtering queue..
+ // Only run it every 1/10th of a second.
+ {
+ static clockt last_run = 0;
+ if (last_run != TIME)
+ {
+ last_run = TIME;
+ tbf_run_timer();
+ }
+ }
+
+ // Handle timeouts, retries etc.
+ {
+ static double last_clean = 0;
+ double this_clean;
+ double diff;
+
+ TIME = now(&this_clean);
+ diff = this_clean - last_clean;
+
+ // Run during idle time (after we've handled
+ // all incoming packets) or every 1/10th sec
+ if (!more || diff > 0.1)
+ {
+ regular_cleanups(diff);
+ last_clean = this_clean;
+ }
+ }
+
+ if (*config->accounting_dir)
+ {
+ static clockt next_acct = 0;
+ static clockt next_shut_acct = 0;
+
+ if (next_acct <= TIME)
+ {
+ // Dump accounting data
+ next_acct = TIME + ACCT_TIME;
+ next_shut_acct = TIME + ACCT_SHUT_TIME;
+ dump_acct_info(1);
+ }
+ else if (next_shut_acct <= TIME)
+ {
+ // Dump accounting data for shutdown sessions
+ next_shut_acct = TIME + ACCT_SHUT_TIME;
+ if (shut_acct_n)
+ dump_acct_info(0);
+ }
+ }
+ }
+
+ // Are we the master and shutting down??
+ if (config->cluster_iam_master)
+ cluster_heartbeat(); // Flush any queued changes..
+
+ // Ok. Notify everyone we're shutting down. If we're
+ // the master, this will force an election.
+ cluster_send_ping(0);
+
+ //
+ // Important!!! We MUST not process any packets past this point!
+ LOG(1, 0, 0, "Shutdown complete\n");
+}
+
// Strip the DNS domain suffix from host, in place.
//
// If /etc/resolv.conf yields a local domain ("domain" is canonical and wins
// over the first "search" entry), only that suffix is removed; otherwise
// everything after the first dot is dropped.
//
// Fix: the canonical "domain" branch used to keep a pointer into buf, which
// goes out of scope before the pointer is used below — undefined behavior.
// Both branches now copy the name into _domain, which outlives the loop.
static void stripdomain(char *host)
{
	char *p;

	if ((p = strchr(host, '.')))
	{
		char *domain = 0;
		char _domain[1024];

		// strip off domain
		FILE *resolv = fopen("/etc/resolv.conf", "r");
		if (resolv)
		{
			char buf[1024];
			char *b;

			while (fgets(buf, sizeof(buf), resolv))
			{
				if (strncmp(buf, "domain", 6) && strncmp(buf, "search", 6))
					continue;

				// keyword must be followed by whitespace
				// (cast: isspace on a negative char is UB)
				if (!isspace((unsigned char) buf[6]))
					continue;

				b = buf + 7;
				while (isspace((unsigned char) *b)) b++;

				if (*b)
				{
					char *d = b;
					while (*b && !isspace((unsigned char) *b)) b++;
					*b = 0;

					// copy out of buf: it goes out of scope before
					// domain is used below
					if (buf[0] == 'd') // domain is canonical
					{
						snprintf(_domain, sizeof(_domain), "%s", d);
						domain = _domain;
						break;
					}

					// first search line
					if (!domain)
					{
						// hold, may be overridden by a later domain line
						snprintf(_domain, sizeof(_domain), "%s", d);
						domain = _domain;
					}
				}
			}

			fclose(resolv);
		}

		if (domain)
		{
			int hl = strlen(host);
			int dl = strlen(domain);

			// strip only a proper ".domain" suffix
			if (dl < hl && host[hl - dl - 1] == '.' && !strcmp(host + hl - dl, domain))
				host[hl - dl - 1] = 0;
		}
		else
		{
			*p = 0; // everything after first dot
		}
	}
}
+
+// Init data structures
+static void initdata(int optdebug, char *optconfig)
+{
+ int i;
+
+ if (!(config = shared_malloc(sizeof(configt))))
+ {
+ fprintf(stderr, "Error doing malloc for configuration: %s\n", strerror(errno));
+ exit(1);
+ }
+
+ memset(config, 0, sizeof(configt));
+ time(&config->start_time);
+ strncpy(config->config_file, optconfig, strlen(optconfig));
+ config->debug = optdebug;
+ config->num_tbfs = MAXTBFS;
+ config->rl_rate = 28; // 28kbps
+ config->cluster_mcast_ttl = 1;
+ config->cluster_master_min_adv = 1;
+ config->ppp_restart_time = 3;
+ config->ppp_max_configure = 10;
+ config->ppp_max_failure = 5;
+ config->kill_timedout_sessions = 1;
+ strcpy(config->random_device, RANDOMDEVICE);
+
+ log_stream = stderr;
+
+#ifdef RINGBUFFER
+ if (!(ringbuffer = shared_malloc(sizeof(struct Tringbuffer))))
+ {
+ LOG(0, 0, 0, "Error doing malloc for ringbuffer: %s\n", strerror(errno));
+ exit(1);
+ }
+ memset(ringbuffer, 0, sizeof(struct Tringbuffer));
+#endif
+
+ if (!(_statistics = shared_malloc(sizeof(struct Tstats))))
+ {
+ LOG(0, 0, 0, "Error doing malloc for _statistics: %s\n", strerror(errno));
+ exit(1);
+ }
+ if (!(tunnel = shared_malloc(sizeof(tunnelt) * MAXTUNNEL)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for tunnels: %s\n", strerror(errno));
+ exit(1);
+ }
+ if (!(bundle = shared_malloc(sizeof(bundlet) * MAXBUNDLE)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for bundles: %s\n", strerror(errno));
+ exit(1);
+ }
+ if (!(frag = shared_malloc(sizeof(fragmentationt) * MAXBUNDLE)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for fragmentations: %s\n", strerror(errno));
+ exit(1);
+ }
+ if (!(session = shared_malloc(sizeof(sessiont) * MAXSESSION)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for sessions: %s\n", strerror(errno));
+ exit(1);
+ }
+
+ if (!(sess_local = shared_malloc(sizeof(sessionlocalt) * MAXSESSION)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for sess_local: %s\n", strerror(errno));
+ exit(1);
+ }
+
+ if (!(radius = shared_malloc(sizeof(radiust) * MAXRADIUS)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for radius: %s\n", strerror(errno));
+ exit(1);
+ }
+
+ if (!(ip_address_pool = shared_malloc(sizeof(ippoolt) * MAXIPPOOL)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for ip_address_pool: %s\n", strerror(errno));
+ exit(1);
+ }
+
+ if (!(ip_filters = shared_malloc(sizeof(ip_filtert) * MAXFILTER)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for ip_filters: %s\n", strerror(errno));
+ exit(1);
+ }
+ memset(ip_filters, 0, sizeof(ip_filtert) * MAXFILTER);
+
+ if (!(cli_session_actions = shared_malloc(sizeof(struct cli_session_actions) * MAXSESSION)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for cli session actions: %s\n", strerror(errno));
+ exit(1);
+ }
+ memset(cli_session_actions, 0, sizeof(struct cli_session_actions) * MAXSESSION);
+
+ if (!(cli_tunnel_actions = shared_malloc(sizeof(struct cli_tunnel_actions) * MAXSESSION)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for cli tunnel actions: %s\n", strerror(errno));
+ exit(1);
+ }
+ memset(cli_tunnel_actions, 0, sizeof(struct cli_tunnel_actions) * MAXSESSION);
+
+ memset(tunnel, 0, sizeof(tunnelt) * MAXTUNNEL);
+ memset(bundle, 0, sizeof(bundlet) * MAXBUNDLE);
+ memset(session, 0, sizeof(sessiont) * MAXSESSION);
+ memset(radius, 0, sizeof(radiust) * MAXRADIUS);
+ memset(ip_address_pool, 0, sizeof(ippoolt) * MAXIPPOOL);
+
+ // Put all the sessions on the free list marked as undefined.
+ for (i = 1; i < MAXSESSION; i++)
+ {
+ session[i].next = i + 1;
+ session[i].tunnel = T_UNDEF; // mark it as not filled in.
+ }
+ session[MAXSESSION - 1].next = 0;
+ sessionfree = 1;
+
+ // Mark all the tunnels as undefined (waiting to be filled in by a download).
+ for (i = 1; i < MAXTUNNEL; i++)
+ tunnel[i].state = TUNNELUNDEF; // mark it as not filled in.
+
+ for (i = 1; i < MAXBUNDLE; i++) {
+ bundle[i].state = BUNDLEUNDEF;
+ }
+
+ if (!*hostname)
+ {
+ if (!*config->hostname)
+ {
+ // Grab my hostname unless it's been specified
+ gethostname(hostname, sizeof(hostname));
+ stripdomain(hostname);
+ }
+ else
+ strcpy(hostname, config->hostname);
+ }
+
+ _statistics->start_time = _statistics->last_reset = time(NULL);
+
+#ifdef BGP
+ if (!(bgp_peers = shared_malloc(sizeof(struct bgp_peer) * BGP_NUM_PEERS)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for bgp: %s\n", strerror(errno));
+ exit(1);
+ }
+#endif /* BGP */
+}
+
+static int assign_ip_address(sessionidt s)
+{
+ uint32_t i;
+ int best = -1;
+ time_t best_time = time_now;
+ char *u = session[s].user;
+ char reuse = 0;
+
+
+ CSTAT(assign_ip_address);
+
+ for (i = 1; i < ip_pool_size; i++)
+ {
+ if (!ip_address_pool[i].address || ip_address_pool[i].assigned)
+ continue;
+
+ if (!session[s].walled_garden && ip_address_pool[i].user[0] && !strcmp(u, ip_address_pool[i].user))
+ {
+ best = i;
+ reuse = 1;
+ break;
+ }
+
+ if (ip_address_pool[i].last < best_time)
+ {
+ best = i;
+ if (!(best_time = ip_address_pool[i].last))
+ break; // never used, grab this one
+ }
+ }
+
+ if (best < 0)
+ {
+ LOG(0, s, session[s].tunnel, "assign_ip_address(): out of addresses\n");
+ return 0;
+ }
+
+ session[s].ip = ip_address_pool[best].address;