+ if (!radfds[i]) continue;
+ FD_SET(radfds[i], &cr);
+ if (radfds[i] > cn)
+ cn = radfds[i];
+ }
+
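+	// Main select() loop: multiplex all I/O through one descriptor set,
+	// waking at least every 1/10th of a second so that the periodic
+	// housekeeping below always gets a chance to run.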
+ while (!main_quit || still_busy())
+ {
+ fd_set r;
+ int n = cn;
+#ifdef BGP
+ fd_set w;
+ int bgp_set[BGP_NUM_PEERS];
+#endif /* BGP */
+
+ if (config->reload_config)
+ {
+			// Apply any changed configuration settings.
+ update_config();
+ }
+
+ memcpy(&r, &cr, sizeof(fd_set));
+ to.tv_sec = 0;
+ to.tv_usec = 100000; // 1/10th of a second.
+
+#ifdef BGP
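+		// See which BGP peers want to read (bit 0) and/or write (bit 1),
+		// and add their sockets to the corresponding select() sets.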
+ FD_ZERO(&w);
+ for (i = 0; i < BGP_NUM_PEERS; i++)
+ {
+ bgp_set[i] = bgp_select_state(&bgp_peers[i]);
+ if (bgp_set[i] & 1)
+ {
+ FD_SET(bgp_peers[i].sock, &r);
+ if (bgp_peers[i].sock > n)
+ n = bgp_peers[i].sock;
+ }
+
+ if (bgp_set[i] & 2)
+ {
+ FD_SET(bgp_peers[i].sock, &w);
+ if (bgp_peers[i].sock > n)
+ n = bgp_peers[i].sock;
+ }
+ }
+
+ n = select(n + 1, &r, &w, 0, &to);
+#else /* BGP */
+ n = select(n + 1, &r, 0, 0, &to);
+#endif /* BGP */
+
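+		// Refresh the cached timestamp now that select() has returned.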
+ TIME = now();
+ if (n < 0)
+ {
+ if (errno == EINTR)
+ continue;
+
+ log(0, 0, 0, 0, "Error returned from select(): %s\n", strerror(errno));
+ main_quit++;
+ break;
+ }
+ else if (n)
+ {
+ struct sockaddr_in addr;
+			socklen_t alen = sizeof(addr);
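+			// Tunnel traffic from the network (UDP). Read up to
+			// multi_read_count packets per wakeup so one busy
+			// descriptor can't monopolise the loop.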
+ if (FD_ISSET(udpfd, &r))
+ {
+				int c, len;
+				for (c = 0; c < config->multi_read_count; c++)
+				{
+					if ((len = recvfrom(udpfd, buf, sizeof(buf), 0, (void *) &addr, &alen)) > 0)
+						processudp(buf, len, &addr);
+ else
+ break;
+ }
+ }
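+			// Traffic from the tun interface, using the same bounded
+			// multi-read strategy.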
+ if (FD_ISSET(tunfd, &r))
+ {
+				int c, len;
+				for (c = 0; c < config->multi_read_count; c++)
+				{
+					if ((len = read(tunfd, buf, sizeof(buf))) > 0)
+						processtun(buf, len);
+ else
+ break;
+ }
+ }
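+			// Replies on the RADIUS sockets.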
+ for (i = 0; i < config->num_radfds; i++)
+ if (FD_ISSET(radfds[i], &r))
+ processrad(buf, recv(radfds[i], buf, sizeof(buf), 0), i);
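+			// Cluster protocol traffic from our peers.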
+ if (FD_ISSET(cluster_sockfd, &r))
+ {
+				int size;
+				size = recvfrom(cluster_sockfd, buf, sizeof(buf), MSG_WAITALL, (void *) &addr, &alen);
+				if (size > 0)
+					processcluster(buf, size, addr.sin_addr.s_addr);
+ }
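+			// Requests on the control socket.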
+ if (FD_ISSET(controlfd, &r))
+ processcontrol(buf, recvfrom(controlfd, buf, sizeof(buf), MSG_WAITALL, (void *) &addr, &alen), &addr);
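+			// A new CLI connection; handle it synchronously, then close.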
+ if (FD_ISSET(clifd, &r))
+ {
+ struct sockaddr_in addr;
+ int sockfd;
+				socklen_t len = sizeof(addr);
+
+				// accept() returns -1 on error; don't treat fd 0 as a
+				// failure, and don't skip the housekeeping below when
+				// accept() fails.
+				if ((sockfd = accept(clifd, (struct sockaddr *) &addr, &len)) < 0)
+					log(0, 0, 0, 0, "accept error: %s\n", strerror(errno));
+				else
+				{
+					cli_do(sockfd);
+					close(sockfd);
+				}
+ }
+ }
+
+ // Runs on every machine (master and slaves).
+ if (cluster_sockfd && next_cluster_ping <= TIME)
+ {
+			// Check which cluster members are still alive.
+
+ cluster_send_ping(basetime); // Only does anything if we're a slave
+ cluster_check_master(); // ditto.
+
+ cluster_heartbeat(); // Only does anything if we're a master.
+ cluster_check_slaves(); // ditto.
+
+ master_update_counts(); // If we're a slave, send our byte counters to our master.
+
+ if (config->cluster_iam_master && !config->cluster_iam_uptodate)
+ next_cluster_ping = TIME + 1; // out-of-date slaves, do fast updates
+ else
+ next_cluster_ping = TIME + config->cluster_hb_interval;
+ }
+
+		// Run the token bucket filtering queue, at most once per 1/10th
+		// of a second. Runs on all machines, master and slave.
+ {
+ static clockt last_run = 0;
+ if (last_run != TIME)
+ {
+ last_run = TIME;
+ tbf_run_timer();
+ }
+ }
+
+		/* Handle timeouts. Make sure this runs on every pass, even when
+		 * there was something to read, else under load it would never
+		 * actually run.
+		 */
+ if (config->cluster_iam_master && next_clean <= time_now)
+ {
+			if (regular_cleanups())
+				next_clean = time_now + 1;	// Didn't finish; check again quickly.
+			else
+				next_clean = time_now + config->cleanup_interval;	// Finished; wait for the next interval.
+ }
+
+#ifdef BGP
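+		// Give each BGP peer a chance to process whatever I/O
+		// select() reported on its socket.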
+ for (i = 0; i < BGP_NUM_PEERS; i++)
+ {
+ bgp_process(&bgp_peers[i],
+ bgp_set[i] ? FD_ISSET(bgp_peers[i].sock, &r) : 0,
+ bgp_set[i] ? FD_ISSET(bgp_peers[i].sock, &w) : 0);
+ }
+#endif /* BGP */
+ }
+
+	// Shutting down. If we're the master, flush any queued cluster changes.
+	if (config->cluster_iam_master)
+		cluster_heartbeat();
+
+	// Notify everyone that we're shutting down. If we're the master,
+	// this will force an election.
+	cluster_send_ping(0);
+
+	// Important: we must NOT process any packets past this point!
+}
+
+// Init data structures
+void initdata(void)
+{
+ int i;
+ char *p;
+
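+	// shared_malloc() is mmap()-based, hence the MAP_FAILED (not NULL)
+	// check on failure.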
+ if ((_statistics = shared_malloc(sizeof(struct Tstats))) == MAP_FAILED)
+ {
+ log(0, 0, 0, 0, "Error doing malloc for _statistics: %s\n", strerror(errno));