From 4977497c68b486883ee1ea9b7fa641a592d2079f Mon Sep 17 00:00:00 2001 From: DL6ER Date: Sat, 20 Jan 2018 11:01:04 +0100 Subject: [PATCH 1/8] Move MAGICBYTE into FTL.h to have it available everywhere (we will need it in database.c) Signed-off-by: DL6ER --- FTL.h | 3 +++ parser.c | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/FTL.h b/FTL.h index 24f51a53a..9a11faa5c 100644 --- a/FTL.h +++ b/FTL.h @@ -210,6 +210,9 @@ enum { QUERIES, FORWARDED, CLIENTS, DOMAINS, OVERTIME, WILDCARD }; enum { SOCKET }; enum { DNSSEC_UNSPECIFIED, DNSSEC_SECURE, DNSSEC_INSECURE, DNSSEC_BOGUS, DNSSEC_ABANDONED, DNSSEC_UNKNOWN }; +// Used to check memory integrity in various structs +#define MAGICBYTE 0x57 + logFileNamesStruct files; FTLFileNamesStruct FTLfiles; countersStruct counters; diff --git a/parser.c b/parser.c index 113f4c52b..ef61f97f3 100644 --- a/parser.c +++ b/parser.c @@ -9,7 +9,6 @@ * Please see LICENSE file for your rights under this license. */ #include "FTL.h" -#define MAGICBYTE 0x57 char *resolveHostname(const char *addr); void extracttimestamp(const char *readbuffer, int *querytimestamp, int *overTimetimestamp); From b58731d9f3de0e2d2c8ebe6d5ee7a5b29006c73e Mon Sep 17 00:00:00 2001 From: DL6ER Date: Sat, 20 Jan 2018 14:03:41 +0100 Subject: [PATCH 2/8] Import queries from long-term database (if available) on startup of FTL. 
Fall back to log interpretation if database is not available Signed-off-by: DL6ER --- FTL.h | 1 + database.c | 165 +++++++++++++++++++++++++++++++++++++++++++++++++++++ main.c | 6 +- parser.c | 123 +++++++++++++++++++++++++-------------- routines.h | 9 ++- structs.c | 2 + 6 files changed, 260 insertions(+), 46 deletions(-) diff --git a/FTL.h b/FTL.h index 9a11faa5c..4a5c7f0c3 100644 --- a/FTL.h +++ b/FTL.h @@ -255,3 +255,4 @@ long int lastdbindex; bool travis; bool DBdeleteoldqueries; bool rereadgravity; +long int lastDBimportedtimestamp; diff --git a/database.c b/database.c index f2de98480..2e322d9be 100644 --- a/database.c +++ b/database.c @@ -14,6 +14,7 @@ sqlite3 *db; bool database = false; bool DBdeleteoldqueries = false; long int lastdbindex = 0; +long int lastDBimportedtimestamp = 0; pthread_mutex_t dblock; @@ -480,3 +481,167 @@ void *DB_thread(void *val) return NULL; } + +// Get most recent 24 hours data from long-term database +void read_data_from_DB(void) +{ + // Open database file + if(!dbopen()) + { + logg("read_data_from_DB() - Failed to open DB"); + return; + } + + // Prepare request + char *rstr = NULL; + // Get time stamp 24 hours in the past + int differencetofullhour = time(NULL) % GCinterval; + long int mintime = ((long)time(NULL) - GCdelay - differencetofullhour) - MAXLOGAGE; + int rc = asprintf(&rstr, "SELECT * FROM queries WHERE timestamp >= %li", mintime); + if(rc < 1) + { + logg("read_data_from_DB() - Allocation error (%i): %s", rc, sqlite3_errmsg(db)); + return; + } + // Log DB query string in debug mode + if(debug) logg(rstr); + + // Prepare SQLite3 statement + sqlite3_stmt* stmt; + rc = sqlite3_prepare_v2(db, rstr, -1, &stmt, NULL); + if( rc ){ + logg("read_data_from_DB() - SQL error prepare (%i): %s", rc, sqlite3_errmsg(db)); + dbclose(); + check_database(rc); + return; + } + + // Loop through returned database rows + while((rc = sqlite3_step(stmt)) == SQLITE_ROW) + { + // Ensure we have enough space in the queries struct + 
memory_check(QUERIES); + memory_check(DOMAINS); + memory_check(CLIENTS); + + // Set ID for this query + int queryID = counters.queries; + + int queryTimeStamp = sqlite3_column_int(stmt, 1); + int type = sqlite3_column_int(stmt, 2); + int status = sqlite3_column_int(stmt, 3); + int domainID = findDomainID((const char *)sqlite3_column_text(stmt, 4)); + int clientID = findClientID((const char *)sqlite3_column_text(stmt, 5)); + int forwardID = findForwardID((const char *)sqlite3_column_text(stmt, 6), true); + + int overTimeTimeStamp = queryTimeStamp - (queryTimeStamp % 600 + 300); + int timeidx = findOverTimeID(overTimeTimeStamp); + validate_access("overTime", timeidx, true, __LINE__, __FUNCTION__, __FILE__); + + // Store this query in memory + validate_access("queries", queryID, false, __LINE__, __FUNCTION__, __FILE__); + queries[queryID].magic = MAGICBYTE; + queries[queryID].timestamp = queryTimeStamp; + queries[queryID].type = type; + queries[queryID].status = status; + queries[queryID].domainID = domainID; + queries[queryID].clientID = clientID; + queries[queryID].forwardID = forwardID; + queries[queryID].timeidx = timeidx; + queries[queryID].valid = true; // Mark this as a valid query (false = it has been garbage collected and should be skipped) + queries[queryID].db = true; // Mark this as already present in the database + queries[queryID].id = 0; // This is dnsmasq's internal ID. 
We don't store it in the database + queries[queryID].complete = true; // Mark as all information is avaiable + queries[queryID].reply = 0; // Reply type is not stored in database + queries[queryID].generation = 0; // Log generation is neither available nor important if reading from the database + lastDBimportedtimestamp = queryTimeStamp; + + // Handle type counters + if(type == 1) + { + counters.IPv4++; + overTime[timeidx].querytypedata[0]++; + } + else if(type == 2) + { + counters.IPv6++; + overTime[timeidx].querytypedata[1]++; + } + + // Update overTime data + overTime[timeidx].total++; + + // Update overTime data structure with the new client + validate_access_oTcl(timeidx, clientID, __LINE__, __FUNCTION__, __FILE__); + overTime[timeidx].clientdata[clientID]++; + + // Increase DNS queries counter + counters.queries++; + + // Increment status counters + switch(status) + { + case 0: // Unknown + counters.unknown++; + break; + + case 1: // Blocked by gravity.list + counters.blocked++; + overTime[timeidx].blocked++; + validate_access("domains", domainID, true, __LINE__, __FUNCTION__, __FILE__); + domains[domainID].blockedcount++; + break; + + case 2: // Forwarded + counters.forwardedqueries++; + // Update overTime data structure + validate_access_oTfd(timeidx, forwardID, __LINE__, __FUNCTION__, __FILE__); + overTime[timeidx].forwarddata[forwardID]++; + break; + + case 3: // Cached or local config + counters.cached++; + // Update overTime data structure + overTime[timeidx].cached++; + break; + + case 4: // Wildcard blocked + counters.wildcardblocked++; + + // Update overTime data structure + overTime[timeidx].blocked++; + validate_access("domains", domainID, true, __LINE__, __FUNCTION__, __FILE__); + domains[domainID].blockedcount++; + domains[domainID].wildcard = true; + break; + + case 5: // black.list + counters.blocked++; + + // Update overTime data structure + overTime[timeidx].blocked++; + validate_access("domains", domainID, true, __LINE__, __FUNCTION__, 
__FILE__); + domains[domainID].blockedcount++; + break; + + default: + logg("Error: Found unknown status %i in long term database!", status); + logg(" Timestamp: %i", queryTimeStamp); + logg(" Continuing anyway..."); + break; + } + } + logg("Imported %i queries from the long-term database", counters.queries); + + if( rc != SQLITE_DONE ){ + logg("read_data_from_DB() - SQL error step (%i): %s", rc, sqlite3_errmsg(db)); + dbclose(); + check_database(rc); + return; + } + + // Finalize SQLite3 statement + sqlite3_finalize(stmt); + dbclose(); + free(rstr); +} diff --git a/main.c b/main.c index a63cb1769..d231889af 100644 --- a/main.c +++ b/main.c @@ -49,6 +49,10 @@ int main (int argc, char* argv[]) { if(config.maxDBdays != 0) db_init(); + // Try to import queries from long-term database if available + if(database) + read_data_from_DB(); + logg("Starting initial log file parsing"); initial_log_parsing(); logg("Finished initial log file parsing"); @@ -181,7 +185,7 @@ int main (int argc, char* argv[]) { save_to_DB(); logg("Finished final database update"); } - + // Close sockets close_telnet_socket(); close_unix_socket(); diff --git a/parser.c b/parser.c index ef61f97f3..257ea36e2 100644 --- a/parser.c +++ b/parser.c @@ -296,6 +296,9 @@ void process_pihole_log(int file) // Skip parsing of log entries that are too old altogether if 24h window is requested if(config.rolling_24h && querytimestamp < mintime) continue; + // Check if this query has already been imported from the database + if(querytimestamp < lastDBimportedtimestamp) continue; + // Ensure we have enough space in the queries struct memory_check(QUERIES); int queryID = counters.queries; @@ -441,23 +444,6 @@ void process_pihole_log(int file) validate_access("overTime", timeidx, true, __LINE__, __FUNCTION__, __FILE__); overTime[timeidx].total++; - // Determine if there is enough space for saving the current - // clientID in the overTime data structure, allocate space otherwise - validate_access("overTime", timeidx, 
true, __LINE__, __FUNCTION__, __FILE__); - if(overTime[timeidx].clientnum <= clientID) - { - // Reallocate more space for clientdata - overTime[timeidx].clientdata = realloc(overTime[timeidx].clientdata, (clientID+1)*sizeof(*overTime[timeidx].clientdata)); - // Initialize new data fields with zeroes - for(i = overTime[timeidx].clientnum; i <= clientID; i++) - { - overTime[timeidx].clientdata[i] = 0; - memory.clientdata++; - } - // Update counter - overTime[timeidx].clientnum = clientID + 1; - } - // Update overTime data structure with the new client validate_access_oTcl(timeidx, clientID, __LINE__, __FUNCTION__, __FILE__); overTime[timeidx].clientdata[clientID]++; @@ -473,6 +459,11 @@ void process_pihole_log(int file) if(!checkQuery(readbuffer, "gravity.list")) continue; + // Check if this query has already been imported from the database + int querytimestamp, overTimetimestamp; + extracttimestamp(readbuffer, &querytimestamp, &overTimetimestamp); + if(querytimestamp < lastDBimportedtimestamp) continue; + // Get dnsmasq's ID for this transaction int dnsmasqID = getdnsmasqID(readbuffer); // Skip invalid lines @@ -522,6 +513,11 @@ void process_pihole_log(int file) if(!checkQuery(readbuffer, "forwarded")) continue; + // Check if this query has already been imported from the database + int querytimestamp, overTimetimestamp; + extracttimestamp(readbuffer, &querytimestamp, &overTimetimestamp); + if(querytimestamp < lastDBimportedtimestamp) continue; + // Get dnsmasq's ID for this transaction int dnsmasqID = getdnsmasqID(readbuffer); // Skip invalid lines @@ -589,24 +585,6 @@ void process_pihole_log(int file) // Get time index int timeidx = getTimeIndex(readbuffer); - // Determine if there is enough space for saving the current - // forwardID in the overTime data structure, allocate space otherwise - validate_access("overTime", timeidx, true, __LINE__, __FUNCTION__, __FILE__); - if(overTime[timeidx].forwardnum <= forwardID) - { - // Reallocate more space for forwarddata - 
overTime[timeidx].forwarddata = realloc(overTime[timeidx].forwarddata, (forwardID+1)*sizeof(*overTime[timeidx].forwarddata)); - // Initialize new data fields with zeroes - int j; - for(j = overTime[timeidx].forwardnum; j <= forwardID; j++) - { - overTime[timeidx].forwarddata[j] = 0; - memory.forwarddata++; - } - // Update counter - overTime[timeidx].forwardnum = forwardID + 1; - } - // Update overTime data structure with the new forwarder validate_access_oTfd(timeidx, forwardID, __LINE__, __FUNCTION__, __FILE__); overTime[timeidx].forwarddata[forwardID]++; @@ -633,6 +611,11 @@ void process_pihole_log(int file) if(!checkQuery(readbuffer, "cached")) continue; + // Check if this query has already been imported from the database + int querytimestamp, overTimetimestamp; + extracttimestamp(readbuffer, &querytimestamp, &overTimetimestamp); + if(querytimestamp < lastDBimportedtimestamp) continue; + // Get dnsmasq's ID for this transaction int dnsmasqID = getdnsmasqID(readbuffer); // Skip invalid lines @@ -680,6 +663,11 @@ void process_pihole_log(int file) if(!checkQuery(readbuffer, "config")) continue; + // Check if this query has already been imported from the database + int querytimestamp, overTimetimestamp; + extracttimestamp(readbuffer, &querytimestamp, &overTimetimestamp); + if(querytimestamp < lastDBimportedtimestamp) continue; + // Get dnsmasq's ID for this transaction int dnsmasqID = getdnsmasqID(readbuffer); // Skip invalid lines @@ -727,7 +715,7 @@ void process_pihole_log(int file) domains[queries[i].domainID].blockedcount++; domains[queries[i].domainID].wildcard = true; } - else + else if(queries[i].status == 3) { // Answered from a custom (user provided) cache file counters.cached++; @@ -745,6 +733,11 @@ void process_pihole_log(int file) if(!checkQuery(readbuffer, "black.list")) continue; + // Check if this query has already been imported from the database + int querytimestamp, overTimetimestamp; + extracttimestamp(readbuffer, &querytimestamp, 
&overTimetimestamp); + if(querytimestamp < lastDBimportedtimestamp) continue; + // Get dnsmasq's ID for this transaction int dnsmasqID = getdnsmasqID(readbuffer); // Skip invalid lines @@ -805,6 +798,11 @@ void process_pihole_log(int file) if(!checkQuery(readbuffer, "reply")) continue; + // Check if this query has already been imported from the database + int querytimestamp, overTimetimestamp; + extracttimestamp(readbuffer, &querytimestamp, &overTimetimestamp); + if(querytimestamp < lastDBimportedtimestamp) continue; + // Get dnsmasq's ID for this transaction int dnsmasqID = getdnsmasqID(readbuffer); // Skip invalid lines @@ -874,6 +872,11 @@ void process_pihole_log(int file) if(!checkQuery(readbuffer, "DNSSEC")) continue; + // Check if this query has already been imported from the database + int querytimestamp, overTimetimestamp; + extracttimestamp(readbuffer, &querytimestamp, &overTimetimestamp); + if(querytimestamp < lastDBimportedtimestamp) continue; + // Get dnsmasq's ID for this transaction int dnsmasqID = getdnsmasqID(readbuffer); // Skip invalid lines @@ -1296,24 +1299,58 @@ void validate_access(const char * name, int pos, bool testmagic, int line, const } } -void validate_access_oTfd(int timeidx, int pos, int line, const char * function, const char * file) +void validate_access_oTfd(int timeidx, int forwardID, int line, const char * function, const char * file) { + // Determine if there is enough space for saving the current + // forwardID in the overTime data structure, allocate space otherwise + validate_access("overTime", timeidx, true, __LINE__, __FUNCTION__, __FILE__); + if(overTime[timeidx].forwardnum <= forwardID) + { + // Reallocate more space for forwarddata + overTime[timeidx].forwarddata = realloc(overTime[timeidx].forwarddata, (forwardID+1)*sizeof(*overTime[timeidx].forwarddata)); + // Initialize new data fields with zeroes + int j; + for(j = overTime[timeidx].forwardnum; j <= forwardID; j++) + { + overTime[timeidx].forwarddata[j] = 0; + 
memory.forwarddata++; + } + // Update counter + overTime[timeidx].forwardnum = forwardID + 1; + } + int limit = overTime[timeidx].forwardnum; - if(pos >= limit || pos < 0) + if(forwardID >= limit || forwardID < 0) { logg("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"); - logg("FATAL ERROR: Trying to access overTime.forwarddata[%i], but maximum is %i", pos, limit); + logg("FATAL ERROR: Trying to access overTime.forwarddata[%i], but maximum is %i", forwardID, limit); logg(" found in %s() (line %i) in %s", function, line, file); } } -void validate_access_oTcl(int timeidx, int pos, int line, const char * function, const char * file) +void validate_access_oTcl(int timeidx, int clientID, int line, const char * function, const char * file) { + // Determine if there is enough space for saving the current + // clientID in the overTime data structure, allocate space otherwise + if(overTime[timeidx].clientnum <= clientID) + { + // Reallocate more space for clientdata + overTime[timeidx].clientdata = realloc(overTime[timeidx].clientdata, (clientID+1)*sizeof(*overTime[timeidx].clientdata)); + // Initialize new data fields with zeroes + int i; + for(i = overTime[timeidx].clientnum; i <= clientID; i++) + { + overTime[timeidx].clientdata[i] = 0; + memory.clientdata++; + } + // Update counter + overTime[timeidx].clientnum = clientID + 1; + } int limit = overTime[timeidx].clientnum; - if(pos >= limit || pos < 0) + if(clientID >= limit || clientID < 0) { logg("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"); - logg("FATAL ERROR: Trying to access overTime.clientdata[%i], but maximum is %i", pos, limit); + logg("FATAL ERROR: Trying to access overTime.clientdata[%i], but maximum is %i", clientID, limit); logg(" found in %s() (line %i) in %s", function, line, file); } } @@ -1349,8 +1386,8 @@ void reresolveHostnames(void) free(hostname); } } -int findOverTimeID(int overTimetimestamp) +int findOverTimeID(int 
overTimetimestamp) { int timeidx = -1, i; // Check struct size diff --git a/routines.h b/routines.h index 54f5e53a7..4629fdcce 100644 --- a/routines.h +++ b/routines.h @@ -30,9 +30,13 @@ void handle_signals(void); void process_pihole_log(int file); void *pihole_log_thread(void *val); void validate_access(const char * name, int pos, bool testmagic, int line, const char * function, const char * file); -void validate_access_oTfd(int timeidx, int pos, int line, const char * function, const char * file); -void validate_access_oTcl(int timeidx, int pos, int line, const char * function, const char * file); +void validate_access_oTfd(int timeidx, int forwardID, int line, const char * function, const char * file); +void validate_access_oTcl(int timeidx, int clientID, int line, const char * function, const char * file); void reresolveHostnames(void); +int findClientID(const char *client); +int findDomainID(const char *domain); +int findForwardID(const char * forward, bool count); +int findOverTimeID(int overTimetimestamp); void pihole_log_flushed(bool message); @@ -83,3 +87,4 @@ void db_init(void); void *DB_thread(void *val); int get_number_of_queries_in_DB(void); void save_to_DB(void); +void read_data_from_DB(void); diff --git a/structs.c b/structs.c index dd5fe2176..aa78aa9cb 100644 --- a/structs.c +++ b/structs.c @@ -120,6 +120,8 @@ void memory_check(int which) break; default: /* That cannot happen */ + logg("Fatal error in memory_check(%i)", which); + exit(EXIT_FAILURE); break; } } From 0f07c9e553e5b1ece3c21629193daefcadd73088 Mon Sep 17 00:00:00 2001 From: DL6ER Date: Sat, 20 Jan 2018 14:07:32 +0100 Subject: [PATCH 3/8] Remove unnecessarily duplicated validate_access() calls Signed-off-by: DL6ER --- database.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/database.c b/database.c index 2e322d9be..67b2b2fdd 100644 --- a/database.c +++ b/database.c @@ -588,7 +588,6 @@ void read_data_from_DB(void) case 1: // Blocked by gravity.list counters.blocked++; 
overTime[timeidx].blocked++; - validate_access("domains", domainID, true, __LINE__, __FUNCTION__, __FILE__); domains[domainID].blockedcount++; break; @@ -610,7 +609,6 @@ void read_data_from_DB(void) // Update overTime data structure overTime[timeidx].blocked++; - validate_access("domains", domainID, true, __LINE__, __FUNCTION__, __FILE__); domains[domainID].blockedcount++; domains[domainID].wildcard = true; break; @@ -620,7 +618,6 @@ void read_data_from_DB(void) // Update overTime data structure overTime[timeidx].blocked++; - validate_access("domains", domainID, true, __LINE__, __FUNCTION__, __FILE__); domains[domainID].blockedcount++; break; From 2443b231e194acbb2e34c8e3fa912dad5ef798bb Mon Sep 17 00:00:00 2001 From: DL6ER Date: Sat, 20 Jan 2018 14:38:45 +0100 Subject: [PATCH 4/8] Detect if we cached the <CNAME> but still need to ask the upstream servers for the actual IPs. In this case we have to decrease the "cached" counter Signed-off-by: DL6ER --- parser.c | 45 ++++++++++++++++----------------------------- 1 file changed, 16 insertions(+), 29 deletions(-) diff --git a/parser.c b/parser.c index 257ea36e2..b8c56e449 100644 --- a/parser.c +++ b/parser.c @@ -274,6 +274,13 @@ void process_pihole_log(int file) break; } + // Get timestamp + int querytimestamp, overTimetimestamp; + extracttimestamp(readbuffer, &querytimestamp, &overTimetimestamp); + + // Check if this query has already been imported from the database + if(querytimestamp < lastDBimportedtimestamp) continue; + // Test if the read line is a query line if(strstr(readbuffer," query[A") != NULL) { @@ -286,10 +293,6 @@ void process_pihole_log(int file) continue; } - // Get timestamp - int querytimestamp, overTimetimestamp; - extracttimestamp(readbuffer, &querytimestamp, &overTimetimestamp); - // Get minimum time stamp to analyze int differencetofullhour = time(NULL) % GCinterval; int mintime = (time(NULL) - GCdelay - differencetofullhour) - MAXLOGAGE; @@ -459,11 +462,6 @@ void process_pihole_log(int file)
if(!checkQuery(readbuffer, "gravity.list")) continue; - // Check if this query has already been imported from the database - int querytimestamp, overTimetimestamp; - extracttimestamp(readbuffer, &querytimestamp, &overTimetimestamp); - if(querytimestamp < lastDBimportedtimestamp) continue; - // Get dnsmasq's ID for this transaction int dnsmasqID = getdnsmasqID(readbuffer); // Skip invalid lines @@ -513,11 +511,6 @@ void process_pihole_log(int file) if(!checkQuery(readbuffer, "forwarded")) continue; - // Check if this query has already been imported from the database - int querytimestamp, overTimetimestamp; - extracttimestamp(readbuffer, &querytimestamp, &overTimetimestamp); - if(querytimestamp < lastDBimportedtimestamp) continue; - // Get dnsmasq's ID for this transaction int dnsmasqID = getdnsmasqID(readbuffer); // Skip invalid lines @@ -569,6 +562,15 @@ void process_pihole_log(int file) // Check both UUID and generation of this query if(queries[i].id == dnsmasqID && queries[i].generation == loggeneration) { + // Detect if we cached the but need to ask the upstream + // servers for the actual IPs now + if(queries[i].status == 2) + { + // Fix counters + counters.cached--; + validate_access("overTime", queries[i].timeidx, true, __LINE__, __FUNCTION__, __FILE__); + overTime[queries[i].timeidx].cached--; + } queries[i].status = 2; queries[i].forwardID = forwardID; found = true; @@ -611,11 +613,6 @@ void process_pihole_log(int file) if(!checkQuery(readbuffer, "cached")) continue; - // Check if this query has already been imported from the database - int querytimestamp, overTimetimestamp; - extracttimestamp(readbuffer, &querytimestamp, &overTimetimestamp); - if(querytimestamp < lastDBimportedtimestamp) continue; - // Get dnsmasq's ID for this transaction int dnsmasqID = getdnsmasqID(readbuffer); // Skip invalid lines @@ -663,11 +660,6 @@ void process_pihole_log(int file) if(!checkQuery(readbuffer, "config")) continue; - // Check if this query has already been imported 
from the database - int querytimestamp, overTimetimestamp; - extracttimestamp(readbuffer, &querytimestamp, &overTimetimestamp); - if(querytimestamp < lastDBimportedtimestamp) continue; - // Get dnsmasq's ID for this transaction int dnsmasqID = getdnsmasqID(readbuffer); // Skip invalid lines @@ -733,11 +725,6 @@ void process_pihole_log(int file) if(!checkQuery(readbuffer, "black.list")) continue; - // Check if this query has already been imported from the database - int querytimestamp, overTimetimestamp; - extracttimestamp(readbuffer, &querytimestamp, &overTimetimestamp); - if(querytimestamp < lastDBimportedtimestamp) continue; - // Get dnsmasq's ID for this transaction int dnsmasqID = getdnsmasqID(readbuffer); // Skip invalid lines From bc77d6e67a0c51035cdbd81dc3e56c239dbeba92 Mon Sep 17 00:00:00 2001 From: DL6ER Date: Sat, 20 Jan 2018 15:00:03 +0100 Subject: [PATCH 5/8] Ensure that only the first forward destination a query is sent to gets counted Signed-off-by: DL6ER --- parser.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/parser.c b/parser.c index b8c56e449..48b80e152 100644 --- a/parser.c +++ b/parser.c @@ -548,13 +548,6 @@ void process_pihole_log(int file) // Convert forward to lower case strtolower(forward); - // Get ID of forward destination, create new forward destination record - // if not found in current data structure - int forwardID = findForwardID(forward, true); - - // Release allocated memory - free(forward); - // Save status and forwardID in corresponding query indentified by dnsmasq's ID bool found = false; for(i=0; i but need to ask the upstream // servers for the actual IPs now - if(queries[i].status == 2) + if(queries[i].status == 3) { // Fix counters counters.cached--; validate_access("overTime", queries[i].timeidx, true, __LINE__, __FUNCTION__, __FILE__); overTime[queries[i].timeidx].cached--; + + queries[i].complete = false; } queries[i].status = 2; - queries[i].forwardID = forwardID; found = 
true; break; } @@ -584,6 +578,18 @@ void process_pihole_log(int file) continue; } + // Count only if current query has not been counted so far + if(queries[i].complete) + continue; + + // Get ID of forward destination, create new forward destination record + // if not found in current data structure + int forwardID = findForwardID(forward, true); + queries[i].forwardID = forwardID; + + // Release allocated memory + free(forward); + // Get time index int timeidx = getTimeIndex(readbuffer); From 933bc5223776b8062f493b42b43fd188028f69d5 Mon Sep 17 00:00:00 2001 From: DL6ER Date: Sat, 20 Jan 2018 15:18:33 +0100 Subject: [PATCH 6/8] Fix tests due to corrected counting of forward destinations Signed-off-by: DL6ER --- test/test_suite.sh | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/test/test_suite.sh b/test/test_suite.sh index c9f01b28e..c42e44f48 100644 --- a/test/test_suite.sh +++ b/test/test_suite.sh @@ -105,22 +105,18 @@ load 'libs/bats-support/load' [[ ${lines[0]} == "Connection to 127.0.0.1 4711 port [tcp/*] succeeded!" ]] [[ ${lines[1]} =~ "0 57.14 ::1 local" ]] [[ ${lines[2]} =~ "1 14.29 2620:0:ccd::2 resolver2.ipv6-sandbox.opendns.com" ]] - [[ ${lines[3]} =~ "2 9.52 2001:1608:10:25::9249:d69b" ]] - [[ ${lines[4]} =~ "3 9.52 2001:1608:10:25::1c04:b12f" ]] - [[ ${lines[5]} =~ "4 9.52 2620:0:ccc::2 resolver1.ipv6-sandbox.opendns.com" ]] - [[ ${lines[6]} == "---EOM---" ]] + [[ ${lines[3]} =~ "2 14.29 2620:0:ccd::2 resolver2.ipv6-sandbox.opendns.com" ]] + [[ ${lines[4]} == "---EOM---" ]] } @test "Forward Destinations (unsorted)" { run bash -c 'echo ">forward-dest unsorted" | nc -v 127.0.0.1 4711' echo "output: ${lines[@]}" [[ ${lines[0]} == "Connection to 127.0.0.1 4711 port [tcp/*] succeeded!" 
]] - [[ ${lines[1]} =~ "0 9.52 2001:1608:10:25::9249:d69b" ]] - [[ ${lines[2]} =~ "1 9.52 2001:1608:10:25::1c04:b12f" ]] - [[ ${lines[3]} =~ "2 14.29 2620:0:ccd::2 resolver2.ipv6-sandbox.opendns.com" ]] - [[ ${lines[4]} =~ "3 9.52 2620:0:ccc::2 resolver1.ipv6-sandbox.opendns.com" ]] - [[ ${lines[5]} =~ "4 57.14 ::1 local" ]] - [[ ${lines[6]} == "---EOM---" ]] + [[ ${lines[1]} =~ "0 28.57 2001:1608:10:25::9249:d69b" ]] + [[ ${lines[2]} =~ "1 14.29 2620:0:ccd::2 resolver2.ipv6-sandbox.opendns.com" ]] + [[ ${lines[3]} =~ "2 57.14 ::1 local" ]] + [[ ${lines[4]} == "---EOM---" ]] } @test "Query Types" { From 7c10244e4d494c6d19f8fa82063d3cd128b425cc Mon Sep 17 00:00:00 2001 From: DL6ER Date: Sat, 20 Jan 2018 15:21:16 +0100 Subject: [PATCH 7/8] Another small tweak for the test suite Signed-off-by: DL6ER --- test/test_suite.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_suite.sh b/test/test_suite.sh index c42e44f48..ed05c68de 100644 --- a/test/test_suite.sh +++ b/test/test_suite.sh @@ -104,7 +104,7 @@ load 'libs/bats-support/load' echo "output: ${lines[@]}" [[ ${lines[0]} == "Connection to 127.0.0.1 4711 port [tcp/*] succeeded!" 
]] [[ ${lines[1]} =~ "0 57.14 ::1 local" ]] - [[ ${lines[2]} =~ "1 14.29 2620:0:ccd::2 resolver2.ipv6-sandbox.opendns.com" ]] + [[ ${lines[2]} =~ "1 28.57 2001:1608:10:25::9249:d69b" ]] [[ ${lines[3]} =~ "2 14.29 2620:0:ccd::2 resolver2.ipv6-sandbox.opendns.com" ]] [[ ${lines[4]} == "---EOM---" ]] } From 5fcae6664c4085971a5a99581e6f8de386777fee Mon Sep 17 00:00:00 2001 From: DL6ER Date: Sat, 20 Jan 2018 19:47:49 +0100 Subject: [PATCH 8/8] Added missing free() and removed redundant code Signed-off-by: DL6ER --- parser.c | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/parser.c b/parser.c index 48b80e152..735b6cc47 100644 --- a/parser.c +++ b/parser.c @@ -299,15 +299,11 @@ void process_pihole_log(int file) // Skip parsing of log entries that are too old altogether if 24h window is requested if(config.rolling_24h && querytimestamp < mintime) continue; - // Check if this query has already been imported from the database - if(querytimestamp < lastDBimportedtimestamp) continue; - // Ensure we have enough space in the queries struct memory_check(QUERIES); int queryID = counters.queries; int timeidx = findOverTimeID(overTimetimestamp); - // Detect time travel events if(timeidx < 0) { @@ -575,12 +571,16 @@ void process_pihole_log(int file) { // This may happen e.g. 
if the original query was a PTR query or "pi.hole" // as we ignore them altogether + free(forward); continue; } // Count only if current query has not been counted so far if(queries[i].complete) + { + free(forward); continue; + } // Get ID of forward destination, create new forward destination record // if not found in current data structure @@ -791,11 +791,6 @@ void process_pihole_log(int file) if(!checkQuery(readbuffer, "reply")) continue; - // Check if this query has already been imported from the database - int querytimestamp, overTimetimestamp; - extracttimestamp(readbuffer, &querytimestamp, &overTimetimestamp); - if(querytimestamp < lastDBimportedtimestamp) continue; - // Get dnsmasq's ID for this transaction int dnsmasqID = getdnsmasqID(readbuffer); // Skip invalid lines @@ -865,11 +860,6 @@ void process_pihole_log(int file) if(!checkQuery(readbuffer, "DNSSEC")) continue; - // Check if this query has already been imported from the database - int querytimestamp, overTimetimestamp; - extracttimestamp(readbuffer, &querytimestamp, &overTimetimestamp); - if(querytimestamp < lastDBimportedtimestamp) continue; - // Get dnsmasq's ID for this transaction int dnsmasqID = getdnsmasqID(readbuffer); // Skip invalid lines