#ADD_DEFINITIONS("-pedantic-errors")
# TODO(sangwan.kwon): Get version from packing spec.
-SET(OSQUERY_BUILD_VERSION "1.4.4")
+SET(OSQUERY_BUILD_VERSION "1.4.5")
# Set various platform/platform-version/build version/etc defines.
ADD_DEFINITIONS(-DOSQUERY_BUILD_VERSION=${OSQUERY_BUILD_VERSION}
std::map<std::string, ScheduledQuery> schedule;
std::map<std::string, std::string> options;
std::map<std::string, std::vector<std::string> > files;
+ /// All data catches optional/plugin-parsed configuration keys.
pt::ptree all_data;
};
+class ConfigParserPlugin;
+typedef std::shared_ptr<ConfigParserPlugin> ConfigPluginRef;
+
/**
* @brief A singleton that exposes accessors to osquery's configuration data.
*
/// Merge a retrieved config source JSON into a working ConfigData.
static void mergeConfig(const std::string& source, ConfigData& conf);
- private:
+ public:
/**
- * @brief the private member that stores the raw osquery config data in a
- * native format
+ * @brief Record performance (monitoring) information about a scheduled query.
+ *
+ * The daemon and query scheduler will optionally record process metadata
+ * before and after executing each query. This can be compared and reported
+ * on an interval or within the osquery_schedule table.
+ *
+ * The config consumes and calculates the optional performance differentials.
+ * It would also be possible to store this in the RocksDB backing store or
+ * report directly to a LoggerPlugin sink. The Config is the most appropriate
+ * as the metrics are transient to the process running the schedule and apply
+ * to the updates/changes reflected in the schedule, from the config.
+ *
+ * @param name The unique name of the scheduled item
+ * @param delay Number of seconds (wall time) taken by the query
+ * @param size Number of characters generated by query
+ * @param t0 the process row before the query
+ * @param t1 the process row after the query
*/
+ static void recordQueryPerformance(const std::string& name,
+ size_t delay,
+ size_t size,
+ const Row& t0,
+ const Row& t1);
+
+ private:
+ /// The raw osquery config data in a native format
ConfigData data_;
+
/// The raw JSON source map from the config plugin.
std::map<std::string, std::string> raw_;
boost::shared_mutex mutex_;
private:
+ static const pt::ptree& getParsedData(const std::string& parser);
+ static const ConfigPluginRef getParser(const std::string& parser);
+
+ /// A default, empty property tree used when a missing parser is requested.
+ pt::ptree empty_data_;
+
+ private:
/// Config accessors, `ConfigDataInstance`, are the forced use of the config
/// data. This forces the caller to use a shared read lock.
friend class ConfigDataInstance;
/**
* @brief All accesses to the Config's data must request a ConfigDataInstance.
*
- * This class will request a read-only lock of the config's changable internal
+ * This class will request a read-only lock of the config's changeable internal
* data structures such as query schedule, options, monitored files, etc.
*
* Since a variable config plugin may implement `update` calls, internal uses
~ConfigDataInstance() { lock_.unlock(); }
/// Helper accessor for Config::data_.schedule.
- const std::map<std::string, ScheduledQuery> schedule() {
+ const std::map<std::string, ScheduledQuery> schedule() const {
return Config::getInstance().data_.schedule;
}
/// Helper accessor for Config::data_.options.
- const std::map<std::string, std::string>& options() {
+ const std::map<std::string, std::string>& options() const {
return Config::getInstance().data_.options;
}
/// Helper accessor for Config::data_.files.
- const std::map<std::string, std::vector<std::string> >& files() {
+ const std::map<std::string, std::vector<std::string> >& files() const {
return Config::getInstance().data_.files;
}
+ const pt::ptree& getParsedData(const std::string& parser) const {
+ return Config::getParsedData(parser);
+ }
+
+ const ConfigPluginRef getParser(const std::string& parser) const {
+ return Config::getParser(parser);
+ }
+
/// Helper accessor for Config::data_.all_data.
- const pt::ptree& data() { return Config::getInstance().data_.all_data; }
+ const pt::ptree& data() const { return Config::getInstance().data_.all_data; }
private:
/// A read lock on the reader/writer config data accessor/update mutex.
Status call(const PluginRequest& request, PluginResponse& response);
};
+/// Helper merged and parsed property tree.
+typedef pt::ptree ConfigTree;
+
+/**
+ * @brief A pluggable configuration parser.
+ *
+ * An osquery config instance is populated from JSON using a ConfigPlugin.
+ * That plugin may update the config data asynchronously and read from
+ * several sources, as is the case with "filesystem" and reading multiple files.
+ *
+ * A ConfigParserPlugin will receive the merged configuration at osquery start
+ * and the updated (still merged) config if any ConfigPlugin updates the
+ * instance asynchronously. Each parser specifies a set of top-level JSON
+ * keys to receive. The config instance will auto-merge the key values
+ * from multiple sources if they are dictionaries or lists.
+ *
+ * If a top-level key is a dictionary, each source with the top-level key
+ * will have its own dictionary keys merged and replaced based on the lexical
+ * order of sources. For the "filesystem" config plugin this is the lexical
+ * sorting of filenames. If the top-level key is a list, each source with the
+ * top-level key will have its contents appended.
+ *
+ * Each config parser plugin will live alongside the config instance for the
+ * life of the osquery process. The parser may perform actions at config load
+ * and config update "time" as well as keep its own data members and be
+ * accessible through the Config class API.
+ */
+class ConfigParserPlugin : public Plugin {
+ protected:
+ /**
+ * @brief Return a list of top-level config keys to receive in updates.
+ *
+ * The ::update method will receive a map of these keys with a JSON-parsed
+ * property tree of configuration data.
+ *
+ * @return A list of string top-level JSON keys.
+ */
+ virtual std::vector<std::string> keys() = 0;
+
+ /**
+ * @brief Receive a merged property tree for each top-level config key.
+ *
+ * Called when the Config instance is initially loaded with data from the
+ * active config plugin and when it is updated via an async ConfigPlugin
+ * update. Every config parser will receive a map of merged data for each key
+ * they requested in keys().
+ *
+ * @param config A JSON-parsed property tree map.
+ * @return Failure if the parser should no longer receive updates.
+ */
+ virtual Status update(const std::map<std::string, ConfigTree>& config) = 0;
+
+ protected:
+ /// Allow the config parser to keep some global state.
+ pt::ptree data_;
+
+ private:
+ Status setUp();
+
+ private:
+ /// Config::update will call all appropriate parser updates.
+ friend class Config;
+ /// A config data instance implements a read/write lock around data_ access.
+ friend class ConfigDataInstance;
+};
+
/**
* @brief Calculate a splayed integer based on a variable splay percentage
*
* after reading JSON data in the plugin implementation.
*/
CREATE_REGISTRY(ConfigPlugin, "config");
+
+/**
+ * @brief ConfigParser plugin registry.
+ *
+ * This creates an osquery registry for "config_parser" which may implement
+ * ConfigParserPlugin. A ConfigParserPlugin should not export any call actions
+ * but rather have a simple property tree-accessor API through Config.
+ */
+CREATE_LAZY_REGISTRY(ConfigParserPlugin, "config_parser");
}
#endif
// clang-format on
+#ifndef __constructor__
+#define __constructor__ __attribute__((constructor))
+#endif
+
/// A configuration error is catastrophic and should exit the watcher.
#define EXIT_CATASTROPHIC 78
void initWorker(const std::string& name);
/// Initialize the osquery watcher, optionally spawn a worker.
void initWatcher();
- /// Set the config and logger plugins, optionally depend on an extension.
- void initConfigLogger();
+ /// Set and wait for an active plugin optionally broadcasted.
+ void initActivePlugin(const std::string& type, const std::string& name);
private:
int* argc_;
* @param delim the delimiter which you'd like to split the string by.
* @param occurences the number of times to split by delim.
*
- * @return a vector of strings split by delim for occurences.
+ * @return a vector of strings split by delim for occurrences.
*/
std::vector<std::string> split(const std::string& s,
const std::string& delim,
// DBHandle RAII singleton
/////////////////////////////////////////////////////////////////////////////
+class DBHandle;
+typedef std::shared_ptr<DBHandle> DBHandleRef;
+
/**
* @brief RAII singleton around RocksDB database access.
*
*
* @return a shared pointer to an instance of DBHandle
*/
- static std::shared_ptr<DBHandle> getInstance();
+ static DBHandleRef getInstance();
/**
* @brief Check the sanity of the database configuration options
*
* @return a shared pointer to an instance of DBHandle
*/
- static std::shared_ptr<DBHandle> getInstanceAtPath(const std::string& path);
+ static DBHandleRef getInstanceAtPath(const std::string& path);
/**
* @brief A method which gets you an in-memory RocksDB instance.
*
* @return a shared pointer to an instance of DBHandle
*/
- static std::shared_ptr<DBHandle> getInstanceInMemory();
+ static DBHandleRef getInstanceInMemory();
/**
* @brief A method which allows you to configure various aspects of RocksDB
*
* @return a shared pointer to an instance of DBHandle
*/
- static std::shared_ptr<DBHandle> getInstance(const std::string& path,
- bool in_memory);
+ static DBHandleRef getInstance(const std::string& path, bool in_memory);
/**
* @brief Private helper around accessing the column family handle for a
#include <string>
#include <vector>
-#include <osquery/core.h>
+#include <osquery/status.h>
#include <osquery/database/db_handle.h>
#include <osquery/database/results.h>
* Given a query, this constructor calculates the value of columnFamily_,
* which can be accessed via the getColumnFamilyName getter method.
*
- * @param q a SheduledQuery struct which represents the query which
- * you would like to interact with
+ * @param q a SheduledQuery struct
*/
- explicit Query(const std::string& name, ScheduledQuery q)
+ explicit Query(const std::string& name, const ScheduledQuery& q)
: query_(q), name_(name) {}
/////////////////////////////////////////////////////////////////////////////
* This method retrieves the data from RocksDB and returns the data in a
* HistoricalQueryResults struct.
*
- * @param hQR a reference to a HistoricalQueryResults struct which will be
- * populated with results if the osquery::Status indicates the operation was
- * successful
+ * @param hQR the output HistoricalQueryResults struct
*
- * @return an instance of osquery::Status indicating the success or failure
- * of the operation
+ * @return the success or failure of the operation
*/
- Status getHistoricalQueryResults(HistoricalQueryResults& hQR);
+ // Status getHistoricalQueryResults(HistoricalQueryResults& hQR);
+ Status getPreviousQueryResults(QueryData& results);
private:
/**
* @brief Serialize the data in RocksDB into a useful data structure using a
* custom database handle
*
- * This method is the same as getHistoricalQueryResults(), but with the
+ * This method is the same as getHistoricalQueryResults, but with the
* addition of a parameter which allows you to pass a custom RocksDB
- * database handle. This version of getHistoricalQueryResults should only be
- * used internally and by unit tests.
- *
- * @param hQR a reference to a HistoricalQueryResults struct which will be
- * populated with results if the osquery::Status indicates the operation was
- * successful @param db the RocksDB database handle to use to acquire the
- * relevant data
+ * database handle.
*
+ * @param hQR the output HistoricalQueryResults struct
* @param db a shared pointer to a custom DBHandle
*
- * @return an instance of osquery::Status indicating the success or failure
- * of the operation
- *
+ * @return the success or failure of the operation
* @see getHistoricalQueryResults
*/
- Status getHistoricalQueryResults(HistoricalQueryResults& hQR,
- std::shared_ptr<DBHandle> db);
+ // Status getHistoricalQueryResults(HistoricalQueryResults& hQR,
+ // std::shared_ptr<DBHandle> db);
+ Status getPreviousQueryResults(QueryData& results, DBHandleRef db);
public:
/**
* @param db a custom RocksDB database handle
*
* @return a vector containing the string names of all scheduled queries
- * which currently exist in the database
*
* @see getStoredQueryNames()
*/
- static std::vector<std::string> getStoredQueryNames(
- std::shared_ptr<DBHandle> db);
+ static std::vector<std::string> getStoredQueryNames(DBHandleRef db);
public:
/**
* @brief Accessor method for checking if a given scheduled query exists in
* the database
*
- * @return a boolean indicating whether or not the scheduled query which is
- * being operated on already exists in the database
+ * @return does the scheduled query which is already exists in the database
*/
bool isQueryNameInDatabase();
*
* @param db a custom RocksDB database handle
*
- * @return a boolean indicating whether or not the scheduled query which is
- * being operated on already exists in the database
+ * @return does the scheduled query which is already exists in the database
*/
- bool isQueryNameInDatabase(std::shared_ptr<DBHandle> db);
+ bool isQueryNameInDatabase(DBHandleRef db);
public:
/**
* @return an instance of osquery::Status indicating the success or failure
* of the operation
*/
- Status addNewResults(const QueryData& qd, int unix_time);
+ Status addNewResults(const QueryData& qd);
private:
/**
* @return an instance of osquery::Status indicating the success or failure
* of the operation
*/
- Status addNewResults(const QueryData& qd,
- int unix_time,
- std::shared_ptr<DBHandle> db);
+ Status addNewResults(const QueryData& qd, DBHandleRef db);
public:
/**
- * @brief Add a new set of results to the persistant storage and get back
- * the diff results.
+ * @brief Add a new set of results to the persistent storage and get back
+ * the differential results.
*
- * Given the results of the execution of a scheduled query, add the results
+ * Given the results of an execution of a scheduled query, add the results
* to the database using addNewResults and get back a data structure
* indicating what rows in the query's results have changed.
*
- * @param qd the QueryData object, which has the results of the query which
- * you would like to store
- * @param dr a reference to a DiffResults object, which will be populated
- * with the difference of the execution which is currently in the database
- * and the execution you just put in the database
- * @param unix_time the time that the query was executed
+ * @param qd the QueryData object containing query results to store
+ * @param dr an output to a DiffResults object populated based on last run
*
- * @return an instance of osquery::Status indicating the success or failure
- * of the operation
+ * @return the success or failure of the operation
*/
- Status addNewResults(const QueryData& qd, DiffResults& dr, int unix_time);
+ Status addNewResults(const QueryData& qd, DiffResults& dr);
private:
/**
- * @brief Add a new set of results to the persistant storage and get back
- * the diff results, using a custom database handle.
+ * @brief Add a new set of results to the persistent storage and get back
+ * the differential results, using a custom database handle.
*
- * This method is the same as addNewResults(), but with the addition of a
+ * This method is the same as Query::addNewResults, but with the addition of a
* parameter which allows you to pass a custom RocksDB database handle
*
- * @param qd the QueryData object, which has the results of the query which
- * you would like to store
- * @param dr a reference to a DiffResults object, which will be populated
- * with the difference of the execution which is currently in the database
- * and the execution you just put in the database
- * @param calculate_diff a boolean indicating whether or not you'd like to
- * calculate the diff result to be stored in the dr parameter.
- * @param unix_time the time that the query was executed
+ * @param qd the QueryData object containing query results to store
+ * @param dr an output to a DiffResults object populated based on last run
*
- * @return an instance of osquery::Status indicating the success or failure
- * of the operation
+ * @return the success or failure of the operation
*/
Status addNewResults(const QueryData& qd,
DiffResults& dr,
bool calculate_diff,
- int unix_time,
- std::shared_ptr<DBHandle> db);
+ DBHandleRef db);
public:
/**
* @brief A getter for the most recent result set for a scheduled query
*
- * @param qd the QueryData object which will be populated if all operations
- * are successful
+ * @param qd the output QueryData object
*
- * @return an instance of osquery::Status indicating the success or failure
- * of the operation
+ * @return the success or failure of the operation
*/
- osquery::Status getCurrentResults(QueryData& qd);
+ Status getCurrentResults(QueryData& qd);
private:
/**
* @brief A getter for the most recent result set for a scheduled query,
* but with the addition of a parameter which allows you to pass a custom
- * RocksDB database handle
+ * RocksDB database handle.
*
- * This method is the same as getCurrentResults(), but with addition of a
- * parameter which allows you to pass a custom RocksDB database handle
+ * This method is the same as Query::getCurrentResults, but with addition of a
+ * parameter which allows you to pass a custom RocksDB database handle.
*
- * @param qd the QueryData object which will be populated if all operations
- * are successful
+ * @param qd the output QueryData object
* @param db a custom RocksDB database handle
*
- * @return an instance of osquery::Status indicating the success or failure
- * of the operation
+ * @return the success or failure of the operation
*/
- Status getCurrentResults(QueryData& qd, std::shared_ptr<DBHandle> db);
+ Status getCurrentResults(QueryData& qd, DBHandleRef db);
private:
/////////////////////////////////////////////////////////////////////////////
FRIEND_TEST(QueryTests, test_is_query_name_in_database);
FRIEND_TEST(QueryTests, test_get_stored_query_names);
FRIEND_TEST(QueryTests, test_get_executions);
- FRIEND_TEST(QueryTests, test_get_current_results);
- FRIEND_TEST(QueryTests, test_get_historical_query_results);
+ FRIEND_TEST(QueryTests, test_get_query_results);
FRIEND_TEST(QueryTests, test_query_name_not_found_in_db);
};
}
#include <osquery/status.h>
+namespace pt = boost::property_tree;
+
namespace osquery {
/////////////////////////////////////////////////////////////////////////////
* @brief Serialize a Row into a property tree
*
* @param r the Row to serialize
- * @param tree a reference to a property tree which, if all operations are
- * completed successfully, the contents of Row will be serialized into
+ * @param tree the output property tree
*
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation
+ * @return Status indicating the success or failure of the operation
*/
Status serializeRow(const Row& r, boost::property_tree::ptree& tree);
* @brief Serialize a Row object into a JSON string
*
* @param r the Row to serialize
- * @param json a reference to a string which, if all operations are completed
- * successfully, the contents of Row will be serialized into
+ * @param json the output JSON string
*
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation
+ * @return Status indicating the success or failure of the operation
*/
Status serializeRowJSON(const Row& r, std::string& json);
+/**
+ * @brief Deserialize a Row object from a property tree
+ *
+ * @param tree the input property tree
+ * @param r the output Row structure
+ *
+ * @return Status indicating the success or failure of the operation
+ */
Status deserializeRow(const boost::property_tree::ptree& tree, Row& r);
+
+/**
+ * @brief Deserialize a Row object from a JSON string
+ *
+ * @param json the input JSON string
+ * @param r the output Row structure
+ *
+ * @return Status indicating the success or failure of the operation
+ */
Status deserializeRowJSON(const std::string& json, Row& r);
/////////////////////////////////////////////////////////////////////////////
* @brief Serialize a QueryData object into a property tree
*
* @param q the QueryData to serialize
- * @param tree a reference to a property tree which, if all operations are
- * completed successfully, the contents of QueryData will be serialized into
+ * @param tree the output property tree
*
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation
+ * @return Status indicating the success or failure of the operation
+ */
+Status serializeQueryData(const QueryData& q, pt::ptree& tree);
+
+/**
+ * @brief Serialize a QueryData object into a JSON string
+ *
+ * @param q the QueryData to serialize
+ * @param json the output JSON string
+ *
+ * @return Status indicating the success or failure of the operation
*/
-Status serializeQueryData(const QueryData& q,
- boost::property_tree::ptree& tree);
+Status serializeQueryDataJSON(const QueryData& q, std::string& json);
+
+Status deserializeQueryData(const pt::ptree& tree, QueryData& qd);
+Status deserializeQueryDataJSON(const std::string& json, QueryData& qd);
/////////////////////////////////////////////////////////////////////////////
// DiffResults
* @brief Serialize a DiffResults object into a property tree
*
* @param d the DiffResults to serialize
- * @param tree a reference to a property tree which, if all operations are
- * completed successfully, the contents of DiffResults will be serialized into
+ * @param tree the output property tree
*
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation
+ * @return Status indicating the success or failure of the operation
*/
-Status serializeDiffResults(const DiffResults& d,
- boost::property_tree::ptree& tree);
+Status serializeDiffResults(const DiffResults& d, pt::ptree& tree);
/**
* @brief Serialize a DiffResults object into a JSON string
*
* @param d the DiffResults to serialize
- * @param json a reference to a string which, if all operations are completed
- * successfully, the contents of DiffResults will be serialized into
+ * @param json the output JSON string
*
* @return an instance of osquery::Status, indicating the success or failure
* of the operation
*/
DiffResults diff(const QueryData& old_, const QueryData& new_);
-/////////////////////////////////////////////////////////////////////////////
-// HistoricalQueryResults
-/////////////////////////////////////////////////////////////////////////////
-
-/**
- * @brief A representation of scheduled query's historical results on disk
- *
- * In practice, a HistoricalQueryResults object is generated after inspecting
- * the persistent data storage.
- */
-struct HistoricalQueryResults {
- /**
- * @brief the most recent results in the database
- *
- * mostRecentResults->first is the timestamp of the most recent results and
- * mostRecentResults->second is the query result data of the most recent
- */
- std::pair<int, QueryData> mostRecentResults;
-
- /// equals operator
- bool operator==(const HistoricalQueryResults& comp) const {
- return (comp.mostRecentResults == mostRecentResults);
- }
-
- /// not equals operator
- bool operator!=(const HistoricalQueryResults& comp) const {
- return !(*this == comp);
- }
-};
-
-/**
- * @brief Serialize a HistoricalQueryResults object into a property tree
- *
- * @param r the HistoricalQueryResults to serialize
- * @param tree a reference to a property tree which, if all operations are
- * completed successfully, the contents of HistoricalQueryResults will be
- * serialized into
- *
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation
- */
-Status serializeHistoricalQueryResults(const HistoricalQueryResults& r,
- boost::property_tree::ptree& tree);
-
-/**
- * @brief Serialize a HistoricalQueryResults object into a JSON string
- *
- * @param r the HistoricalQueryResults to serialize
- * @param json a reference to a string which, if all operations are completed
- * successfully, the contents of HistoricalQueryResults will be serialized
- * into
- *
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation
- */
-Status serializeHistoricalQueryResultsJSON(const HistoricalQueryResults& r,
- std::string& json);
-
-/**
- * @brief Deserialize a property tree into a HistoricalQueryResults object
- *
- * @param tree a property tree which contains a serialized
- * HistoricalQueryResults
- * @param r a reference to a HistoricalQueryResults object which, if all
- * operations are completed successfully, the contents of tree will be
- * serialized into
- *
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation
- */
-Status deserializeHistoricalQueryResults(
- const boost::property_tree::ptree& tree, HistoricalQueryResults& r);
-
-/**
- * @brief Deserialize JSON into a HistoricalQueryResults object
- *
- * @param json a string which contains a serialized HistoricalQueryResults
- * @param r a reference to a HistoricalQueryResults object which, if all
- * operations are completed successfully, the contents of json will be
- * serialized into
- *
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation
- */
-Status deserializeHistoricalQueryResultsJSON(const std::string& json,
- HistoricalQueryResults& r);
-
/**
* @brief Add a Row to a QueryData if the Row hasn't appeared in the QueryData
* already
* @param q the QueryData list to append to
* @param r the Row to add to q
*
- * @return true if the Row was added to the QueryData, false if it wasn't
+ * @return true if the Row was added to the QueryData, false if it was not
*/
bool addUniqueRowToQueryData(QueryData& q, const Row& r);
/**
* @brief Construct a new QueryData from an existing one, replacing all
- * non-ascii characters with their \u encoding.
+ * non-ASCII characters with their \u encoding.
*
* This function is intended as a workaround for
* https://svn.boost.org/trac/boost/ticket/8883,
- * and will allow rows containing data with non-ascii characters to be stored in
+ * and will allow rows containing data with non-ASCII characters to be stored in
* the database and parsed back into a property tree.
*
* @param oldData the old QueryData to copy
* @param newData the new escaped QueryData object
*/
-void escapeQueryData(const osquery::QueryData &oldData, osquery::QueryData &newData);
+void escapeQueryData(const QueryData& oldData, QueryData& newData);
/**
* @brief represents the relevant parameters of a scheduled query.
struct ScheduledQuery {
/// The SQL query.
std::string query;
+
/// How often the query should be executed, in second.
- int interval;
+ size_t interval;
+
/// A temporary splayed internal.
- int splayed_interval;
+ size_t splayed_interval;
+
+ /// Number of executions.
+ size_t executions;
+
+ /// Total wall time taken
+ size_t wall_time;
+
+ /// Total user time (cycles)
+ size_t user_time;
+
+ /// Total system time (cycles)
+ size_t system_time;
+
+ /// Average memory differentials. This should be near 0.
+ size_t memory;
+
+ /// Total characters, bytes, generated by query.
+ size_t output_size;
+
+ /// Set of query options.
+ std::map<std::string, bool> options;
+
+ ScheduledQuery()
+ : interval(0),
+ splayed_interval(0),
+ executions(0),
+ wall_time(0),
+ user_time(0),
+ system_time(0),
+ memory(0),
+ output_size(0) {}
/// equals operator
bool operator==(const ScheduledQuery& comp) const {
};
/////////////////////////////////////////////////////////////////////////////
-// ScheduledQueryLogItem
+// QueryLogItem
/////////////////////////////////////////////////////////////////////////////
/**
- * @brief A data structure which represents data to log in the event of an
- * operating system state change
+ * @brief Query results from a schedule, snapshot, or ad-hoc execution.
*
* When a scheduled query yields new results, we need to log that information
- * to our upstream logging receiver. The data that needs to be logged is the
- * entire DiffResults set as well as some additional metadata.
+ * to our upstream logging receiver. A QueryLogItem contains metadata and
+ * results in potentially-differential form for a logger.
*/
-struct ScheduledQueryLogItem {
- /// The data which was changed as a result of the scheduled query
- DiffResults diffResults;
+struct QueryLogItem {
+ /// Differential results from the query.
+ DiffResults results;
- /// The name of the scheduled query
+ /// Optional snapshot results, no differential applied.
+ QueryData snapshot_results;
+
+ /// The name of the scheduled query.
std::string name;
- /// The identifier (hostname, or uuid) of the host on which the query was
- /// executed
- std::string hostIdentifier;
+ /// The identifier (hostname, or uuid) of the host.
+ std::string identifier;
- /// The time that the query was executed, in unix time
- int unixTime;
+ /// The time that the query was executed, seconds as UNIX time.
+ int time;
- /// The time that the query was executed, in ASCII
- std::string calendarTime;
+ /// The time that the query was executed, an ASCII string.
+ std::string calendar_time;
/// equals operator
- bool operator==(const ScheduledQueryLogItem& comp) const {
- return (comp.diffResults == diffResults) && (comp.name == name);
+ bool operator==(const QueryLogItem& comp) const {
+ return (comp.results == results) && (comp.name == name);
}
/// not equals operator
- bool operator!=(const ScheduledQueryLogItem& comp) const {
- return !(*this == comp);
- }
+ bool operator!=(const QueryLogItem& comp) const { return !(*this == comp); }
};
/**
- * @brief Serialize a ScheduledQueryLogItem object into a property tree
+ * @brief Serialize a QueryLogItem object into a property tree
*
- * @param i the ScheduledQueryLogItem to serialize
- * @param tree a reference to a property tree which, if all operations are
- * completed successfully, the contents of ScheduledQueryLogItem will be
- * serialized into
+ * @param item the QueryLogItem to serialize
+ * @param tree the output property tree
*
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation
+ * @return Status indicating the success or failure of the operation
*/
-Status serializeScheduledQueryLogItem(const ScheduledQueryLogItem& i,
- boost::property_tree::ptree& tree);
+Status serializeQueryLogItem(const QueryLogItem& item, pt::ptree& tree);
/**
- * @brief Serialize a ScheduledQueryLogItem object into a JSON string
+ * @brief Serialize a QueryLogItem object into a JSON string
*
- * @param i the ScheduledQueryLogItem to serialize
- * @param json a reference to a string which, if all operations are completed
- * successfully, the contents of ScheduledQueryLogItem will be serialized into
+ * @param item the QueryLogItem to serialize
+ * @param json the output JSON string
*
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation
+ * @return Status indicating the success or failure of the operation
*/
-Status serializeScheduledQueryLogItemJSON(const ScheduledQueryLogItem& i,
- std::string& json);
+Status serializeQueryLogItemJSON(const QueryLogItem& item, std::string& json);
+
+Status deserializeQueryLogItem(const pt::ptree& tree, QueryLogItem& item);
+Status deserializeQueryLogItemJSON(const std::string& json, QueryLogItem& item);
/**
- * @brief Serialize a ScheduledQueryLogItem object into a property tree
+ * @brief Serialize a QueryLogItem object into a property tree
* of events, a list of actions.
*
- * @param item the ScheduledQueryLogItem to serialize
- * @param tree a reference to a property tree which, if all operations are
- * completed successfully, the contents of ScheduledQueryLogItem will be
- * serialized into
+ * @param item the QueryLogItem to serialize
+ * @param tree the output property tree
*
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation
+ * @return Status indicating the success or failure of the operation
*/
-Status serializeScheduledQueryLogItemAsEvents(
- const ScheduledQueryLogItem& item, boost::property_tree::ptree& tree);
+Status serializeQueryLogItemAsEvents(const QueryLogItem& item, pt::ptree& tree);
/**
- * @brief Serialize a ScheduledQueryLogItem object into a JSON string of events,
+ * @brief Serialize a QueryLogItem object into a JSON string of events,
* a list of actions.
*
- * @param i the ScheduledQueryLogItem to serialize
- * @param json a reference to a string which, if all operations are completed
- * successfully, the contents of ScheduledQueryLogItem will be serialized into
+ * @param i the QueryLogItem to serialize
+ * @param json the output JSON string
*
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation
+ * @return Status indicating the success or failure of the operation
*/
-Status serializeScheduledQueryLogItemAsEventsJSON(
- const ScheduledQueryLogItem& i, std::string& json);
+Status serializeQueryLogItemAsEventsJSON(const QueryLogItem& i,
+ std::string& json);
}
#include <boost/thread/locks.hpp>
#include <boost/thread/mutex.hpp>
-#include <osquery/database.h>
#include <osquery/registry.h>
#include <osquery/status.h>
#include <osquery/tables.h>
* @brief An EventPublisher will define a SubscriptionContext for
* EventSubscriber%s to use.
*
- * Most EventPublisher%s will reqire specific information for interacting with
+ * Most EventPublisher%s will require specific information for interacting with
* an OS to receive events. The SubscriptionContext contains information the
* EventPublisher will use to register OS API callbacks, create
* subscriptioning/listening handles, etc.
* @brief An EventSubscriber EventCallback method will receive an EventContext.
*
* The EventContext contains the event-related data supplied by an
- * EventPublisher when the event occures. If a subscribing EventSubscriber
+ * EventPublisher when the event occurs. If a subscribing EventSubscriber
* would be called for the event, the EventSubscriber%'s EventCallback is
* passed an EventContext.
*/
EventTime time;
/// The string representation of the time, often used for indexing.
std::string time_string;
+
+ EventContext() : id(0), time(0) {}
};
typedef std::shared_ptr<Subscription> SubscriptionRef;
typedef EventSubscriber<BaseEventPublisher> BaseEventSubscriber;
typedef std::shared_ptr<EventSubscriber<BaseEventPublisher>> EventSubscriberRef;
+/**
+ * @brief EventSubscriber%s may exist in various states.
+ *
+ * The subscriber will move through states when osquery is initializing the
+ * registry, starting event publisher loops, and requesting initialization of
+ * each subscriber and the optional set of subscriptions it creates. If this
+ * initialization fails the publishers or EventFactory may eject, warn, or
+ * otherwise not use the subscriber's subscriptions.
+ *
+ * The supported states are:
+ * - None: The default state, uninitialized.
+ * - Running: Subscriber is ready for events.
+ * - Paused: Subscriber was successfully initialized but not currently accepting
+ * events.
+ * - Failed: Subscriber failed to initialize or is otherwise offline.
+ */
+enum EventSubscriberState {
+ SUBSCRIBER_NONE,
+ SUBSCRIBER_RUNNING,
+ SUBSCRIBER_PAUSED,
+ SUBSCRIBER_FAILED,
+};
+
/// Use a single placeholder for the EventContextRef passed to EventCallback.
using std::placeholders::_1;
using std::placeholders::_2;
EventPublisherID type() const { return TYPE; }
/**
- * @brief DECLARE_SUBSCRIBER supplies needed boilerplate code that applies a
- * string-type EventSubscriberID to identify the subscriber declaration.
- */
-#define DECLARE_SUBSCRIBER(NAME) \
- public: \
- EventSubscriberID name() const { return NAME; }
-
-/**
* @brief A Subscription is used to configure an EventPublisher and bind a
* callback to a SubscriptionContext.
*
*/
struct Subscription {
public:
+ // EventSubscriber name.
+ std::string subscriber_name;
+
/// An EventPublisher%-specific SubscriptionContext.
SubscriptionContextRef context;
/// An EventSubscription member EventCallback method.
/// A pointer to possible extra data
void* user_data;
- static SubscriptionRef create() { return std::make_shared<Subscription>(); }
+ explicit Subscription(EventSubscriberID& name)
+ : subscriber_name(name), user_data(nullptr) {}
+
+ static SubscriptionRef create(EventSubscriberID& name) {
+ auto subscription = std::make_shared<Subscription>(name);
+ return subscription;
+ }
- static SubscriptionRef create(const SubscriptionContextRef& mc,
+ static SubscriptionRef create(EventSubscriberID& name,
+ const SubscriptionContextRef& mc,
EventCallback ec = 0,
void* user_data = nullptr) {
- auto subscription = std::make_shared<Subscription>();
+ auto subscription = std::make_shared<Subscription>(name);
subscription->context = mc;
subscription->callback = ec;
subscription->user_data = user_data;
* subscriptions. An example is Linux `inotify` where multiple
* EventSubscription%s will subscription identical paths, e.g., /etc for
* config changes. Since Linux `inotify` has a subscription limit, `configure`
- * can depup paths.
+ * can dedup paths.
*/
virtual void configure() {}
/**
* @brief Perform handle opening, OS API callback registration.
*
- * `setUp` is the event framework's EventPublisher constructor equivilent.
+ * `setUp` is the event framework's EventPublisher constructor equivalent.
* When `setUp` is called the EventPublisher is running in a dedicated thread
* and may manage/allocate/wait for resources.
*/
* @brief The generic check loop to call SubscriptionContext callback methods.
*
* It is NOT recommended to override `fire`. The simple logic of enumerating
- * the Subscription%s and using `shouldFire` is more appropraite.
+ * the Subscription%s and using `shouldFire` is more appropriate.
*
* @param ec The EventContext created and fired by the EventPublisher.
* @param time The most accurate time associated with the event.
private:
EventPublisherPlugin(EventPublisherPlugin const&);
- void operator=(EventPublisherPlugin const&);
+ EventPublisherPlugin& operator=(EventPublisherPlugin const&);
private:
/// Set ending to True to cause event type run loops to finish.
*
* This is a template-generated method that up-casts the generic fired
* event/subscription contexts, and calls the callback if the event should
- * fire given a scription.
+ * fire given a subscription.
*
* @param sub The SubscriptionContext and optional EventCallback.
* @param ec The event that was fired.
/**
* @brief Store parsed event data from an EventCallback in a backing store.
*
- * Within a EventCallback the EventSubscriber has an opprotunity to create
+ * Within a EventCallback the EventSubscriber has an opportunity to create
* an osquery Row element, add the relevant table data for the EventSubscriber
* and store that element in the osquery backing store. At query-time
* the added data will apply selection criteria and return these elements.
private:
/*
- * @brief When `get`ting event results, return EventID%s from time indexes.
+ * @brief When `get`ing event results, return EventID%s from time indexes.
*
* Used by EventSubscriber::get to retrieve EventID, EventTime indexes. This
* applies the lookup-efficiency checks for time list appropriate bins.
*
* An EventID is an index/element-identifier for the backing store.
* Each EventPublisher maintains a fired EventContextID to identify the many
- * events that may or may not be fired to subscriptioning criteria for this
+ * events that may or may not be fired to 'subscriptioning' criteria for this
* EventSubscriber. This EventContextID is NOT the same as an EventID.
* EventSubscriber development should not require use of EventID%s, if this
* indexing is required within-EventCallback consider an
* @brief Suggested entrypoint for table generation.
*
* The EventSubscriber is a convention that removes a lot of boilerplate event
- * subscriptioning and acting. The `genTable` static entrypoint is the
+ * 'subscriptioning' and acting. The `genTable` static entrypoint is the
* suggested method for table specs.
*
* @return The query-time table data, retrieved from a backing store.
return get(0, 0);
}
- /// The string name identifying this EventSubscriber.
- virtual EventSubscriberID name() const { return "subscriber"; }
-
protected:
/// Backing storage indexing namespace definition methods.
- EventPublisherID dbNamespace() const { return type() + "." + name(); }
+ EventPublisherID dbNamespace() const { return type() + "." + getName(); }
/// The string EventPublisher identifying this EventSubscriber.
virtual EventPublisherID type() const = 0;
private:
EventSubscriberPlugin(EventSubscriberPlugin const&);
- void operator=(EventSubscriberPlugin const&);
+ EventSubscriberPlugin& operator=(EventSubscriberPlugin const&);
private:
Status setUp() { return Status(0, "Setup never used"); }
* @brief A factory for associating event generators to EventPublisherID%s.
*
* This factory both registers new event types and the subscriptions that use
- * them. An EventPublisher is also a factory, the single event factory arbitates
- * Subscription creatating and management for each associated EventPublisher.
+ * them. An EventPublisher is also a factory, the single event factory
+ * arbitrates Subscription creation and management for each associated
+ * EventPublisher.
*
* Since event types may be plugins, they are created using the factory.
* Since subscriptions may be configured/disabled they are also factory-managed.
*/
-class EventFactory {
+class EventFactory : private boost::noncopyable {
public:
/// Access to the EventFactory instance.
static EventFactory& getInstance();
/**
* @brief Add an EventPublisher to the factory.
*
- * The registration is mostly abstracted using osquery's registery.
+ * The registration is mostly abstracted using osquery's registry.
*
* @param event_pub If for some reason the caller needs access to the
* EventPublisher instance they can register-by-instance.
/**
* @brief Add a SubscriptionContext and EventCallback Subscription to an
- *EventPublisher.
+ * EventPublisher.
*
* Create a Subscription from a given SubscriptionContext and EventCallback
* and add that Subscription to the EventPublisher associated identifier.
* @return Was the SubscriptionContext appropriate for the EventPublisher.
*/
static Status addSubscription(EventPublisherID& type_id,
+ EventSubscriberID& name_id,
const SubscriptionContextRef& mc,
EventCallback cb = 0,
void* user_data = nullptr);
static EventPublisherRef getEventPublisher(EventPublisherID& pub);
/// Return an instance to a registered EventSubscriber.
- static EventSubscriberRef getEventSubscriber(EventSubscriberID& pub);
+ static EventSubscriberRef getEventSubscriber(EventSubscriberID& sub);
+ static bool exists(EventSubscriberID& sub);
static std::vector<std::string> publisherTypes();
static std::vector<std::string> subscriberNames();
/// An EventFactory will exist for the lifetime of the application.
EventFactory() {}
EventFactory(EventFactory const&);
- void operator=(EventFactory const&);
+ EventFactory& operator=(EventFactory const&);
~EventFactory() {}
private:
* When the EventSubscriber%'s `init` method is called you are assured the
* EventPublisher has `setUp` and is ready to subscription for events.
*/
- virtual void init() {}
+ virtual Status init() { return Status(0, "OK"); }
/// Helper function to call the publisher's templated subscription generator.
SCRef createSubscriptionContext() const {
// EventSubscriber and a single parameter placeholder (the EventContext).
auto cb = std::bind(base_entry, self, _1, _2);
// Add a subscription using the callable and SubscriptionContext.
- EventFactory::addSubscription(type(), sc, cb, user_data);
+ EventFactory::addSubscription(type(), self->getName(), sc, cb, user_data);
}
/// Helper EventPublisher string type accessor.
EventPublisherID type() const { return BaseEventPublisher::getType<PUB>(); }
+ /**
+ * @brief Request the subscriber's initialization state.
+ *
+ * When event subscribers are created (initialized) they are expected to emit
+ * a set of subscriptions to their publisher "type". If the subscriber fails
+ * to initialize then the publisher may remove any intermediate subscriptions.
+ */
+ EventSubscriberState state() const { return state_; }
+
+ /// Set the subscriber state.
+ void state(EventSubscriberState state) { state_ = state; }
+
+ public:
+ EventSubscriber() : EventSubscriberPlugin(), state_(SUBSCRIBER_NONE) {}
+
+ private:
+ /// The event subscriber's run state.
+ EventSubscriberState state_;
+
private:
FRIEND_TEST(EventsTests, test_event_sub);
FRIEND_TEST(EventsTests, test_event_sub_subscribe);
/// the event factory.
void attachEvents();
-/// Sleep in a boost::thread interruptable state.
+/// Sleep in a boost::thread interruptible state.
void publisherSleep(size_t milli);
CREATE_REGISTRY(EventPublisherPlugin, "event_publisher");
namespace osquery {
-/**
- * Our wildcard directory traversal function will not resolve more than
- * this many wildcards.
- */
+/// Globbing directory traversal function recursive limit.
const unsigned int kMaxDirectoryTraversalDepth = 40;
typedef unsigned int ReturnSetting;
+
enum {
- REC_LIST_FILES = 0x1, // Return only files
- REC_LIST_FOLDERS = 0x2, // Return only folders
- REC_EVENT_OPT = 0x4, // Enable optimizations for file event resolutions
+ /// Return only files
+ REC_LIST_FILES = 0x1,
+ /// Return only folders
+ REC_LIST_FOLDERS = 0x2,
+ /// Enable optimizations for file event resolutions
+ REC_EVENT_OPT = 0x4,
REC_LIST_ALL = REC_LIST_FILES | REC_LIST_FOLDERS
};
+/// Globbing wildcard character.
const std::string kWildcardCharacter = "%";
+/// Globbing wildcard recursive character (double wildcard).
const std::string kWildcardCharacterRecursive =
kWildcardCharacter + kWildcardCharacter;
/// Return bit-mask-style permissions.
std::string lsperms(int mode);
+/**
+ * @brief Parse a JSON file on disk into a property tree.
+ *
+ * @param path the path of the JSON file
+ * @param tree output property tree
+ *
+ * @return an instance of Status, indicating the success or failure
+ */
+Status parseJSON(const boost::filesystem::path& path,
+ boost::property_tree::ptree& tree);
+
+/**
+ * @brief Parse JSON content into a property tree.
+ *
+ * @param path JSON string data
+ * @param tree output property tree
+ *
+ * @return an instance of Status, indicating the success or failure
+ */
+Status parseJSONContent(const std::string& content,
+ boost::property_tree::ptree& tree);
+
#ifdef __APPLE__
/**
* @brief Parse a property list on disk into a property tree.
/**
* @brief Parse property list content into a property tree.
*
- * @param fileContent a string reference to the content of a plist
+ * @param content a string reference to the content of a plist
* @param tree a non-const reference to a Boost property tree, which will be
* populated with the results of the property list
*
* @return an instance of Status, indicating the success or failure
* of the operation.
*/
-Status parsePlistContent(const std::string& fileContent,
+Status parsePlistContent(const std::string& content,
boost::property_tree::ptree& tree);
#endif
DECLARE_string(logger_plugin);
/**
- * @breif An internal severity set mapping to Glog's LogSeverity levels.
+ * @brief An internal severity set mapping to Glog's LogSeverity levels.
*/
enum StatusLogSeverity {
O_INFO = 0,
* logs generated between program launch and logger start.
*
* The logger initialization is called once CLI flags have been parsed, the
- * registry items are constructed, extension routes broadcased and extension
+ * registry items are constructed, extension routes broadcasted and extension
* plugins discovered (as a logger may be an extension plugin) and the config
* has been loaded (which may include additional CLI flag-options).
*
* osquery logger's `init` method is called.
*
* The return status of `init` is very important. If a success is returned
- * then the glog log sink stays active and now forwards every status log
+ * then the Glog log sink stays active and now forwards every status log
* to the logger's `logStatus` method. If a failure is returned this means
- * the logger does not support status logging and glog should continue
+ * the logger does not support status logging and Glog should continue
* as the only status log sink.
*
* @param binary_name The string name of the process (argv[0]).
}
/**
- * @brief If the active logger's `init` method returned success then glog
+ * @brief If the active logger's `init` method returned success then Glog
* log lines will be collected, and forwarded to `logStatus`.
*
* `logStatus` and `init` are tightly coupled. Glog log lines will ONLY be
* forwarded to `logStatus` if the logger's `init` method returned success.
*
- * @param log A vector of parsed glog log lines.
+ * @param log A vector of parsed Glog log lines.
* @return Status non-op indicating success or failure.
*/
virtual Status logStatus(const std::vector<StatusLogLine>& log) {
return Status(1, "Not enabled");
}
+
+ /**
+ * @brief Optionally handle snapshot query results separately from events.
+ *
+ * If a logger plugin wants to write snapshot query results (potentially
+ * large amounts of data) to a specific sink it should implement logSnapshot.
+ * Otherwise the serialized log item data will be forwarded to logString.
+ *
+ * @param s A special log item will complete results from a query.
+ * @return log status
+ */
+ virtual Status logSnapshot(const std::string& s) { return logString(s); }
+
+ /// An optional health logging facility.
+ virtual Status logHealth(const std::string& s) {
+ return Status(1, "Not used");
+ }
};
+/// Set the verbose mode, changes Glog's sinking logic and will affect plugins.
+void setVerboseLevel();
+
/// Start status logging to a buffer until the logger plugin is online.
void initStatusLogger(const std::string& name);
* log normal osquery operations, use Google Logging.
*
* @param s the string to log
+ * @param category a category/metadata key
*
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation.
+ * @return Status indicating the success or failure of the operation
*/
-Status logString(const std::string& s);
+Status logString(const std::string& message, const std::string& category);
/**
* @brief Log a string using a specific logger receiver.
* Note that this method should only be used to log results. If you'd like to
* log normal osquery operations, use Google Logging.
*
- * @param s the string to log
+ * @param message the string to log
+ * @param category a category/metadata key
* @param receiver a string representing the log receiver to use
*
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation.
+ * @return Status indicating the success or failure of the operation
*/
-Status logString(const std::string& s, const std::string& receiver);
+Status logString(const std::string& message,
+ const std::string& category,
+ const std::string& receiver);
/**
- * @brief Directly log results of scheduled queries to the default receiver
+ * @brief Log results of scheduled queries to the default receiver
*
* @param item a struct representing the results of a scheduled query
*
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation.
+ * @return Status indicating the success or failure of the operation
*/
-Status logScheduledQueryLogItem(const ScheduledQueryLogItem& item);
+Status logQueryLogItem(const QueryLogItem& item);
/**
- * @brief Directly log results of scheduled queries to a specified receiver
+ * @brief Log results of scheduled queries to a specified receiver
*
* @param item a struct representing the results of a scheduled query
* @param receiver a string representing the log receiver to use
*
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation.
+ * @return Status indicating the success or failure of the operation
+ */
+Status logQueryLogItem(const QueryLogItem& item, const std::string& receiver);
+
+/**
+ * @brief Log raw results from a query (or a snapshot scheduled query).
+ *
+ * @param results the unmangled results from the query planner.
+ *
+ * @return Status indicating the success or failure of the operation
+ */
+Status logSnapshotQuery(const QueryLogItem& item);
+
+/**
+ * @brief Log the worker's health along with health of each query.
+ *
+ * @param results the query results from the osquery schedule appended with a
+ * row of health from the worker.
+ *
+ * @return Status indicating the success or failure of the operation
*/
-Status logScheduledQueryLogItem(const ScheduledQueryLogItem& item,
- const std::string& receiver);
+Status logHealthStatus(const QueryLogItem& item);
/**
* @brief Logger plugin registry.
* @param type A typename that derives from Plugin.
* @param name A string identifier for the registry.
*/
-#define CREATE_REGISTRY(type, name) \
- namespace registry { \
- __attribute__((constructor)) static void type##Registry() { \
- Registry::create<type>(name); \
- } \
+#define CREATE_REGISTRY(type, name) \
+ namespace registry { \
+ __constructor__ static void type##Registry() { \
+ Registry::create<type>(name); \
+ } \
}
/**
* @param type A typename that derives from Plugin.
* @param name A string identifier for the registry.
*/
-#define CREATE_LAZY_REGISTRY(type, name) \
- namespace registry { \
- __attribute__((constructor)) static void type##Registry() { \
- Registry::create<type>(name, true); \
- } \
+#define CREATE_LAZY_REGISTRY(type, name) \
+ namespace registry { \
+ __constructor__ static void type##Registry() { \
+ Registry::create<type>(name, true); \
+ } \
}
/**
* @param registry The string name for the registry.
* @param name A string identifier for this registry item.
*/
-#define REGISTER(type, registry, name) \
- __attribute__((constructor)) static void type##RegistryItem() { \
- Registry::add<type>(registry, name); \
+#define REGISTER(type, registry, name) \
+ __constructor__ static void type##RegistryItem() { \
+ Registry::add<type>(registry, name); \
}
/// The same as REGISTER but prevents the plugin item from being broadcasted.
-#define REGISTER_INTERNAL(type, registry, name) \
- __attribute__((constructor)) static void type##RegistryItem() { \
- Registry::add<type>(registry, name, true); \
+#define REGISTER_INTERNAL(type, registry, name) \
+ __constructor__ static void type##RegistryItem() { \
+ Registry::add<type>(registry, name, true); \
}
/**
/// The call-in prototype for Registry modules.
typedef void (*ModuleInitalizer)(void);
-class Plugin {
+class Plugin : private boost::noncopyable {
public:
- Plugin() { name_ = "unnamed"; }
+ Plugin() : name_("unnamed") {}
virtual ~Plugin() {}
public:
/// Allow the plugin to introspect into the registered name (for logging).
void setName(const std::string& name) { name_ = name; }
+ const std::string& getName() const { return name_; }
+
/// Allow a specialized plugin type to act when an external plugin is
/// registered (e.g., a TablePlugin will attach the table name).
static Status addExternal(const std::string& name,
private:
Plugin(Plugin const&);
- void operator=(Plugin const&);
+ Plugin& operator=(Plugin const&);
};
-class RegistryHelperCore {
+class RegistryHelperCore : private boost::noncopyable {
public:
explicit RegistryHelperCore(bool auto_setup = false)
: auto_setup_(auto_setup) {}
class RegistryModuleLoader : private boost::noncopyable {
public:
/// Unlock the registry, open, construct, and allow the module to declare.
- RegistryModuleLoader(const std::string& path);
+ explicit RegistryModuleLoader(const std::string& path);
/// Keep the symbol resolution/calling out of construction.
void init();
protected:
RegistryFactory()
- : allow_duplicates_(false), locked_(false), external_(false) {}
+ : allow_duplicates_(false),
+ locked_(false),
+ module_uuid_(0),
+ external_(false) {}
RegistryFactory(RegistryFactory const&);
- void operator=(RegistryFactory const&);
+ RegistryFactory& operator=(RegistryFactory const&);
virtual ~RegistryFactory() {}
private:
#include <osquery/database/results.h>
#include <osquery/status.h>
+/// Allow Tables to use "tracked" deprecated OS APIs.
+#define OSQUERY_USE_DEPRECATED(expr) \
+ do { \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") \
+ expr; \
+ _Pragma("clang diagnostic pop") \
+ } while (0)
+
namespace osquery {
namespace tables {
explicit Constraint(unsigned char _op) { op = _op; }
// A constraint list in a context knows only the operator at creation.
- explicit Constraint(unsigned char _op, const std::string& _expr) {
- op = _op;
- expr = _expr;
- }
+ explicit Constraint(unsigned char _op, const std::string& _expr)
+ : op(_op), expr(_expr) {}
};
/**
* @brief Check if an expression matches the query constraints.
*
* Evaluate ALL constraints in this ConstraintList against the string
- * expression. The affinity of the constrait will be used as the affinite
+ * expression. The affinity of the constraint will be used as the affinite
* and lexical type of the expression and set of constraint expressions.
* If there are no predicate constraints in this list, all expression will
* match. Constraints are limitations.
bool exists() const { return (constraints_.size() > 0); }
/**
- * @brief Check if a constrait exist AND matches the type expression.
+ * @brief Check if a constraint exist AND matches the type expression.
*
* See ConstraintList::exists and ConstraintList::matches.
*
void serialize(boost::property_tree::ptree& tree) const;
void unserialize(const boost::property_tree::ptree& tree);
- ConstraintList() { affinity = "TEXT"; }
+ ConstraintList() : affinity("TEXT") {}
private:
/// List of constraint operator/expressions.
/// Pass a constraint map to the query request.
typedef std::map<std::string, struct ConstraintList> ConstraintMap;
-/// Populate a containst list from a query's parsed predicate.
+/// Populate a constraint list from a query's parsed predicate.
typedef std::vector<std::pair<std::string, struct Constraint> > ConstraintSet;
/**
*
* To attach a virtual table create a TablePlugin subclass and register the
* virtual table name as the plugin ID. osquery will enumerate all registered
- * TablePlugins and attempt to attach them to SQLite at instanciation.
+ * TablePlugins and attempt to attach them to SQLite at instantiation.
*
* Note: When updating this class, be sure to update the corresponding template
* in osquery/tables/templates/default.cpp.in
SET(TARGET_OSQUERY_LIB osquery)
SET(TARGET_OSQUERY_LIB_ADDITIONAL osquery_additional)
+SET(TARGET_OSQUERY_TEST osquery-test)
SET(TARGET_OSQUERY_SHELL osqueryi)
SET(TARGET_OSQUERY_DAEMON osqueryd)
udev)
SET(${TARGET_OSQUERY_LIB}_SRCS "")
SET(${TARGET_OSQUERY_LIB_ADDITIONAL}_SRCS "")
+SET(${TARGET_OSQUERY_TEST}_SRCS "")
SET(OSQUERY_CODEGEN_PATH "${CMAKE_SOURCE_DIR}/tools/codegen")
SET(OSQUERY_TABLES_PATH "${CMAKE_SOURCE_DIR}/osquery/tables")
ENDIF()
ENDMACRO(ADD_OSQUERY_LIBRARY TARGET)
-MACRO(ADD_OSQUERY_TEST IS_CORE TEST_NAME SOURCE)
- ADD_EXECUTABLE(${TEST_NAME} ${SOURCE})
-
- TARGET_LINK_LIBRARIES(${TEST_NAME} ${TARGET_OSQUERY_LIB})
- TARGET_LINK_LIBRARIES(${TEST_NAME} gtest)
-# SET_TARGET_PROPERTIES(${TARGET} PROPERTIES COMPILE_FLAGS "-DGTEST_HAS_TR1_TUPLE=0")
- ADD_TEST(${TEST_NAME} ${TEST_NAME})
- INSTALL(TARGETS ${TEST_NAME}
- DESTINATION ${CMAKE_INSTALL_BINDIR}
- PERMISSIONS OWNER_READ
- OWNER_WRITE
- OWNER_EXECUTE
- GROUP_READ
- GROUP_EXECUTE
- WORLD_READ
- WORLD_EXECUTE)
+MACRO(ADD_OSQUERY_TEST IS_CORE SOURCE)
+ LIST(APPEND ${TARGET_OSQUERY_TEST}_SRCS ${SOURCE})
+ SET(${TARGET_OSQUERY_TEST}_SRCS ${${TARGET_OSQUERY_TEST}_SRCS} PARENT_SCOPE)
ENDMACRO(ADD_OSQUERY_TEST)
MACRO(TARGET_OSQUERY_LINK_WHOLE TARGET LIBRARY)
ADD_SUBDIRECTORY(tables)
## Table generation #############################################################
-FILE(GLOB TABLE_FILES "tables/specs/x/*.table")
+FILE(GLOB TABLE_FILES "tables/specs/*.table")
FILE(GLOB TABLE_FILES_LINUX "tables/specs/linux/*.table")
+FILE(GLOB TABLE_FILES_UTILITY "tables/specs/utility/*.table")
LIST(APPEND TABLE_FILES ${TABLE_FILES_LINUX})
+LIST(APPEND TABLE_FILES ${TABLE_FILES_UTILITY})
SET(GENERATED_TABLES "")
TABLE_FILE_GEN
${TABLE_FILE_GEN})
STRING(REPLACE "linux/" "" TABLE_FILE_GEN ${TABLE_FILE_GEN})
- STRING(REPLACE "x/" "" TABLE_FILE_GEN ${TABLE_FILE_GEN})
+ STRING(REPLACE "" "" TABLE_FILE_GEN ${TABLE_FILE_GEN})
STRING(REPLACE ".table" ".cpp" TABLE_FILE_GEN ${TABLE_FILE_GEN})
ADD_CUSTOM_COMMAND(
OUTPUT ${TABLE_FILE_GEN}
WORLD_READ
WORLD_EXECUTE)
+## osquery-test generation ##########################################################
+ADD_EXECUTABLE(${TARGET_OSQUERY_TEST} ${${TARGET_OSQUERY_TEST}_SRCS} main/tests.cpp)
+
+TARGET_LINK_LIBRARIES(${TARGET_OSQUERY_TEST} ${TARGET_OSQUERY_LIB})
+TARGET_LINK_LIBRARIES(${TARGET_OSQUERY_TEST} gtest)
+# SET_TARGET_PROPERTIES(${TARGET} PROPERTIES COMPILE_FLAGS "-DGTEST_HAS_TR1_TUPLE=0")
+ADD_TEST(${TARGET_OSQUERY_TEST} ${TARGET_OSQUERY_TEST})
+INSTALL(TARGETS ${TARGET_OSQUERY_TEST}
+ DESTINATION ${CMAKE_INSTALL_BINDIR}
+ PERMISSIONS OWNER_READ
+ OWNER_WRITE
+ OWNER_EXECUTE
+ GROUP_READ
+ GROUP_EXECUTE
+ WORLD_READ
+ WORLD_EXECUTE)
+
## example extension with the SDK ##############################################
ADD_EXECUTABLE(example_extension examples/example_extension.cpp)
TARGET_LINK_LIBRARIES(example_extension ${TARGET_OSQUERY_LIB})
# Build the example extension module with the SDK
ADD_OSQUERY_MODULE(modexample examples/example_module.cpp)
-
ADD_OSQUERY_LIBRARY(FALSE osquery_config_plugins update.cpp
plugins/filesystem.cpp)
-ADD_OSQUERY_TEST(FALSE osquery_config_tests config_tests.cpp)
+FILE(GLOB OSQUERY_CONFIG_TESTS "tests/*.cpp")
if (Registry::external()) {
for (const auto& source : config) {
PluginRequest request = {
- {"action", "update"},
- {"source", source.first},
- {"data", source.second},
+ {"action", "update"},
+ {"source", source.first},
+ {"data", source.second},
};
// A "update" registry item within core should call the core's update
// method. The config plugin call action handling must also know to
ConfigData conf;
for (const auto& source : config) {
if (Registry::external()) {
- VLOG(1) << "Updating extension config source: " << source.first;
+ VLOG(1) << "Updating extension config with source: " << source.first;
} else {
- VLOG(1) << "Updating config source: " << source.first;
+ VLOG(1) << "Updating config with source: " << source.first;
}
getInstance().raw_[source.first] = source.second;
}
mergeConfig(source.second, conf);
}
+ // Call each parser with the optionally-empty, requested, top level keys.
+ for (const auto& plugin : Registry::all("config_parser")) {
+ auto parser = std::static_pointer_cast<ConfigParserPlugin>(plugin.second);
+ if (parser == nullptr || parser.get() == nullptr) {
+ continue;
+ }
+
+ // For each key requested by the parser, add a property tree reference.
+ std::map<std::string, ConfigTree> parser_config;
+ for (const auto& key : parser->keys()) {
+ if (conf.all_data.count(key) > 0) {
+ parser_config[key] = conf.all_data.get_child(key);
+ } else {
+ parser_config[key] = pt::ptree();
+ }
+ }
+ parser->update(parser_config);
+ }
+
getInstance().data_ = conf;
return Status(0, "OK");
}
}
inline void mergeOption(const tree_node& option, ConfigData& conf) {
- conf.options[option.first.data()] = option.second.data();
- if (conf.all_data.count("options") > 0) {
- conf.all_data.get_child("options").erase(option.first);
- }
- conf.all_data.add_child("options." + option.first, option.second);
-}
-
-inline void mergeAdditional(const tree_node& node, ConfigData& conf) {
- if (conf.all_data.count("additional_monitoring") > 0) {
- conf.all_data.get_child("additional_monitoring").erase(node.first);
- }
- conf.all_data.add_child("additional_monitoring." + node.first, node.second);
-
- // Support special merging of file paths.
- if (node.first != "file_paths") {
- return;
+ std::string key = option.first.data();
+ std::string value = option.second.data();
+
+ Flag::updateValue(key, value);
+ // There is a special case for supported Gflags-reserved switches.
+ if (key == "verbose" || key == "verbose_debug" || key == "debug") {
+ setVerboseLevel();
+ if (Flag::getValue("verbose") == "true") {
+ VLOG(1) << "Verbose logging enabled by config option";
+ }
}
- for (const auto& category : node.second) {
- for (const auto& path : category.second) {
- resolveFilePattern(path.second.data(),
- conf.files[category.first],
- REC_LIST_FOLDERS | REC_EVENT_OPT);
- }
+ conf.options[key] = value;
+ if (conf.all_data.count("options") > 0) {
+ conf.all_data.get_child("options").erase(key);
}
+ conf.all_data.add_child("options." + key, option.second);
}
-// inline void mergeScheduledQuery(const tree_node& node, ConfigData& conf) {
inline void mergeScheduledQuery(const std::string& name,
const tree_node& node,
ConfigData& conf) {
ScheduledQuery query;
query.query = node.second.get<std::string>("query", "");
query.interval = node.second.get<int>("interval", 0);
+ // This is a candidate for a catch-all iterator with a catch for boolean type.
+ query.options["snapshot"] = node.second.get<bool>("snapshot", false);
// Check if this query exists, if so, check if it was changed.
if (conf.schedule.count(name) > 0) {
conf.all_data.add_child("schedule." + name, node.second);
}
-void Config::mergeConfig(const std::string& source, ConfigData& conf) {
- std::stringstream json_data;
- json_data << source;
-
- pt::ptree tree;
- pt::read_json(json_data, tree);
-
- // Legacy query schedule vector support.
- if (tree.count("scheduledQueries") > 0) {
- LOG(INFO) << RLOG(903) << "config 'scheduledQueries' is deprecated";
- for (const auto& node : tree.get_child("scheduledQueries")) {
- auto query_name = node.second.get<std::string>("name", "");
- mergeScheduledQuery(query_name, node, conf);
+inline void mergeExtraKey(const std::string& name,
+ const tree_node& node,
+ ConfigData& conf) {
+ // Automatically merge extra list/dict top level keys.
+ for (const auto& subitem : node.second) {
+ if (node.second.count("") == 0 && conf.all_data.count(name) > 0) {
+ conf.all_data.get_child(name).erase(subitem.first);
}
+ conf.all_data.add_child(name + "." + subitem.first, subitem.second);
}
+}
- // Key/value query schedule map support.
- if (tree.count("schedule") > 0) {
- for (const auto& node : tree.get_child("schedule")) {
- mergeScheduledQuery(node.first.data(), node, conf);
- }
+inline void mergeFilePath(const std::string& name,
+ const tree_node& node,
+ ConfigData& conf) {
+ for (const auto& path : node.second) {
+ resolveFilePattern(path.second.data(),
+ conf.files[node.first],
+ REC_LIST_FOLDERS | REC_EVENT_OPT);
+ }
+ conf.all_data.add_child(name + "." + node.first, node.second);
+}
+
+void Config::mergeConfig(const std::string& source, ConfigData& conf) {
+ pt::ptree tree;
+ try {
+ std::stringstream json_data;
+ json_data << source;
+ pt::read_json(json_data, tree);
+ } catch (const pt::json_parser::json_parser_error& e) {
+ VLOG(1) << "Error parsing config JSON: " << e.what();
+ return;
}
if (tree.count("additional_monitoring") > 0) {
+ LOG(INFO) << RLOG(903) << "config 'additional_monitoring' is deprecated";
for (const auto& node : tree.get_child("additional_monitoring")) {
- mergeAdditional(node, conf);
+ tree.add_child(node.first, node.second);
}
+ tree.erase("additional_monitoring");
}
- if (tree.count("options") > 0) {
- for (const auto& option : tree.get_child("options")) {
- mergeOption(option, conf);
+ for (const auto& item : tree) {
+ // Iterate over each top-level configuration key.
+ auto key = std::string(item.first.data());
+ if (key == "scheduledQueries") {
+ LOG(INFO) << RLOG(903) << "config 'scheduledQueries' is deprecated";
+ for (const auto& node : item.second) {
+ auto query_name = node.second.get<std::string>("name", "");
+ mergeScheduledQuery(query_name, node, conf);
+ }
+ } else if (key == "schedule") {
+ for (const auto& node : item.second) {
+ mergeScheduledQuery(node.first.data(), node, conf);
+ }
+ } else if (key == "options") {
+ for (const auto& option : item.second) {
+ mergeOption(option, conf);
+ }
+ } else if (key == "file_paths") {
+ for (const auto& category : item.second) {
+ mergeFilePath(key, category, conf);
+ }
+ } else {
+ mergeExtraKey(key, item, conf);
}
}
}
+const pt::ptree& Config::getParsedData(const std::string& key) {
+ if (!Registry::exists("config_parser", key)) {
+ return getInstance().empty_data_;
+ }
+
+ const auto& item = Registry::get("config_parser", key);
+ auto parser = std::static_pointer_cast<ConfigParserPlugin>(item);
+ if (parser == nullptr || parser.get() == nullptr) {
+ return getInstance().empty_data_;
+ }
+
+ return parser->data_;
+}
+
+const ConfigPluginRef Config::getParser(const std::string& key) {
+ if (!Registry::exists("config_parser", key)) {
+ return ConfigPluginRef();
+ }
+
+ const auto& item = Registry::get("config_parser", key);
+ const auto parser = std::static_pointer_cast<ConfigParserPlugin>(item);
+ if (parser == nullptr || parser.get() == nullptr) {
+ return ConfigPluginRef();
+ }
+
+ return parser;
+}
+
Status Config::getMD5(std::string& hash_string) {
// Request an accessor to our own config, outside of an update.
ConfigDataInstance config;
std::stringstream out;
- write_json(out, config.data());
+ pt::write_json(out, config.data());
hash_string = osquery::hashFromBuffer(
HASH_TYPE_MD5, (void*)out.str().c_str(), out.str().length());
Status Config::checkConfig() { return load(); }
+void Config::recordQueryPerformance(const std::string& name,
+ size_t delay,
+ size_t size,
+ const Row& r0,
+ const Row& r1) {
+ // Grab a lock on the schedule structure and check the name.
+ ConfigDataInstance config;
+ if (config.schedule().count(name) == 0) {
+ // Unknown query schedule name.
+ return;
+ }
+
+ // Grab access to the non-const schedule item.
+ auto& query = getInstance().data_.schedule.at(name);
+ auto diff = strtol(r1.at("user_time").c_str(), nullptr, 10) -
+ strtol(r0.at("user_time").c_str(), nullptr, 10);
+ query.user_time += diff;
+ diff = strtol(r1.at("system_time").c_str(), nullptr, 10) -
+ strtol(r0.at("system_time").c_str(), nullptr, 10);
+ query.system_time += diff;
+ diff = strtol(r1.at("resident_size").c_str(), nullptr, 10) -
+ strtol(r0.at("resident_size").c_str(), nullptr, 10);
+ // Memory is stored as an average of BSS changes between query executions.
+ query.memory =
+ (query.memory * query.executions + diff) / (query.executions + 1);
+ query.wall_time += delay;
+ query.output_size += size;
+ query.executions += 1;
+}
+
Status ConfigPlugin::call(const PluginRequest& request,
PluginResponse& response) {
if (request.count("action") == 0) {
return Status(1, "Config plugin action unknown: " + request.at("action"));
}
+Status ConfigParserPlugin::setUp() {
+ for (const auto& key : keys()) {
+ data_.put(key, "");
+ }
+ return Status(0, "OK");
+}
+
int splayValue(int original, int splayPercent) {
if (splayPercent <= 0 || splayPercent > 100) {
return original;
Status FilesystemConfigPlugin::genConfig(
std::map<std::string, std::string>& config) {
- if (!fs::exists(FLAGS_config_path)) {
+ if (!fs::is_regular_file(FLAGS_config_path)) {
return Status(1, "config file does not exist");
}
std::vector<std::string> conf_files;
resolveFilePattern(FLAGS_config_path + ".d/%.conf", conf_files);
- if (conf_files.size() > 0) {
- VLOG(1) << "Discovered (" << conf_files.size() << ") additional configs";
- }
-
std::sort(conf_files.begin(), conf_files.end());
conf_files.push_back(FLAGS_config_path);
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
TEST_F(ConfigTests, test_queries_execute) {
ConfigDataInstance config;
- EXPECT_EQ(config.schedule().size(), 2);
+ EXPECT_EQ(config.schedule().size(), 3);
}
TEST_F(ConfigTests, test_watched_files) {
ConfigDataInstance config;
- EXPECT_EQ(config.files().size(), 2);
+ ASSERT_EQ(config.files().size(), 3);
+ // From the deprecated "additional_monitoring" collection.
EXPECT_EQ(config.files().at("downloads").size(), 1);
- EXPECT_EQ(config.files().at("system_binaries").size(), 2);
+
+ // From the new, recommended top-level "file_paths" collection.
+ EXPECT_EQ(config.files().at("downloads2").size(), 1);
+ EXPECT_EQ(config.files().at("system_binaries").size(), 1);
}
TEST_F(ConfigTests, test_locking) {
ConfigDataInstance config1;
ConfigDataInstance config2;
- // But a unique lock cannot be aquired.
+ // But a unique lock cannot be acquired.
boost::unique_lock<boost::shared_mutex> lock(Config::getInstance().mutex_,
boost::defer_lock);
-
ASSERT_FALSE(lock.try_lock());
}
}
}
+TEST_F(ConfigTests, test_bad_config_update) {
+ std::string bad_json = "{\"options\": {},}";
+ ASSERT_NO_THROW(Config::update({{"bad_source", bad_json}}));
+}
+
+class TestConfigParserPlugin : public ConfigParserPlugin {
+ public:
+ std::vector<std::string> keys() {
+ return {"dictionary", "dictionary2", "list"};
+ }
+
+ Status update(const std::map<std::string, ConfigTree>& config) {
+ // Set a simple boolean indicating the update callin occurred.
+ update_called = true;
+ // Copy all expected keys into the parser's data.
+ for (const auto& key : config) {
+ data_.put_child(key.first, key.second);
+ }
+
+ // Set parser-rendered additional data.
+ data_.put("dictionary3.key2", "value2");
+ return Status(0, "OK");
+ }
+
+ static bool update_called;
+
+ private:
+ FRIEND_TEST(ConfigTests, test_config_parser);
+};
+
+// An intermediate boolean to check parser updates.
+bool TestConfigParserPlugin::update_called = false;
+
+TEST_F(ConfigTests, test_config_parser) {
+ // Register a config parser plugin.
+ Registry::add<TestConfigParserPlugin>("config_parser", "test");
+ Registry::get("config_parser", "test")->setUp();
+
+ {
+ // Access the parser's data without having updated the configuration.
+ ConfigDataInstance config;
+ const auto& test_data = config.getParsedData("test");
+
+ // Expect the setUp method to have run and set blank defaults.
+ // Accessing an invalid property tree key will abort.
+ ASSERT_EQ(test_data.get_child("dictionary").count(""), 0);
+ }
+
+ // Update or load the config, expect the parser to be called.
+ Config::update(
+ {{"source1",
+ "{\"dictionary\": {\"key1\": \"value1\"}, \"list\": [\"first\"]}"}});
+ ASSERT_TRUE(TestConfigParserPlugin::update_called);
+
+ {
+ // Now access the parser's data AFTER updating the config (no longer blank)
+ ConfigDataInstance config;
+ const auto& test_data = config.getParsedData("test");
+
+ // Expect a value that existed in the configuration.
+ EXPECT_EQ(test_data.count("dictionary"), 1);
+ EXPECT_EQ(test_data.get("dictionary.key1", ""), "value1");
+ // Expect a value for every key the parser requested.
+ // Every requested key will be present, event if the key's tree is empty.
+ EXPECT_EQ(test_data.count("dictionary2"), 1);
+ // Expect the parser-created data item.
+ EXPECT_EQ(test_data.count("dictionary3"), 1);
+ EXPECT_EQ(test_data.get("dictionary3.key2", ""), "value2");
+ }
+
+ // Update from a secondary source into a dictionary.
+ // Expect that the keys in the top-level dictionary are merged.
+ Config::update({{"source2", "{\"dictionary\": {\"key3\": \"value3\"}}"}});
+ // Update from a third source into a list.
+ // Expect that the items from each source in the top-level list are merged.
+ Config::update({{"source3", "{\"list\": [\"second\"]}"}});
+
+ {
+ ConfigDataInstance config;
+ const auto& test_data = config.getParsedData("test");
+
+ EXPECT_EQ(test_data.count("dictionary"), 1);
+ EXPECT_EQ(test_data.get("dictionary.key1", ""), "value1");
+ EXPECT_EQ(test_data.get("dictionary.key3", ""), "value3");
+ EXPECT_EQ(test_data.count("list"), 1);
+ EXPECT_EQ(test_data.get_child("list").count(""), 2);
+ }
+}
+
TEST_F(ConfigTests, test_splay) {
auto val1 = splayValue(100, 10);
EXPECT_GE(val1, 90);
EXPECT_EQ(val5, 1);
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
# TODO(Sangwan): Detach from core
ADD_OSQUERY_LIBRARY(TRUE osquery_test_util test_util.cpp)
-ADD_OSQUERY_TEST(TRUE osquery_flags_tests flags_tests.cpp)
-ADD_OSQUERY_TEST(TRUE osquery_hash_tests hash_tests.cpp)
-ADD_OSQUERY_TEST(TRUE osquery_status_tests status_tests.cpp)
-ADD_OSQUERY_TEST(TRUE osquery_tables_tests tables_tests.cpp)
-ADD_OSQUERY_TEST(TRUE osquery_text_tests text_tests.cpp)
-ADD_OSQUERY_TEST(TRUE osquery_conversions_tests conversions_tests.cpp)
+FILE(GLOB OSQUERY_CORE_TESTS "tests/*.cpp")
+ADD_OSQUERY_TEST(TRUE ${OSQUERY_CORE_TESTS})
#include <boost/archive/iterators/binary_from_base64.hpp>
#include <boost/archive/iterators/base64_from_binary.hpp>
-#include <boost/archive/iterators/transform_width.hpp>
-
#include "osquery/core/conversions.h"
namespace bai = boost::archive::iterators;
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
}
Status Flag::updateValue(const std::string& name, const std::string& value) {
- GFLAGS_NAMESPACE::SetCommandLineOption(name.c_str(), value.c_str());
- return Status(0, "OK");
+ if (instance().flags_.count(name) > 0) {
+ GFLAGS_NAMESPACE::SetCommandLineOption(name.c_str(), value.c_str());
+ return Status(0, "OK");
+ } else if (instance().aliases_.count(name) > 0) {
+ // Updating a flag by an alias name.
+ auto& real_name = instance().aliases_.at(name).description;
+ GFLAGS_NAMESPACE::SetCommandLineOption(real_name.c_str(), value.c_str());
+ return Status(0, "OK");
+ }
+ return Status(1, "Flag not found");
}
std::map<std::string, FlagInfo> Flag::flags() {
std::vector<GFLAGS_NAMESPACE::CommandLineFlagInfo> info;
GFLAGS_NAMESPACE::GetAllFlags(&info);
+ // Determine max indent needed for all flag names.
size_t max = 0;
for (const auto& flag : info) {
max = (max > flag.name.size()) ? max : flag.name.size();
}
- max += 7;
+ // Additional index for flag values.
+ max += 5;
auto& aliases = instance().aliases_;
auto& details = instance().flags_;
*/
#include <syslog.h>
+#include <stdio.h>
#include <time.h>
+#include <unistd.h>
#include <boost/algorithm/string/trim.hpp>
#include <boost/filesystem.hpp>
#include <osquery/config.h>
#include <osquery/core.h>
+#include <osquery/database.h>
#include <osquery/events.h>
#include <osquery/extensions.h>
#include <osquery/flags.h>
#include "osquery/core/watcher.h"
+namespace fs = boost::filesystem;
+
namespace osquery {
#define DESCRIPTION \
"osquery %s, your OS as a high-performance relational database\n"
-#define EPILOG "\nosquery project page <http://osquery.io>.\n"
+#define EPILOG "\nosquery project page <https://osquery.io>.\n"
#define OPTIONS \
"\nosquery configuration options (set by config or CLI flags):\n\n"
#define OPTIONS_SHELL "\nosquery shell-only CLI flags:\n\n"
"been created. Please consider explicitly defining those " \
"options as a different \n" \
"path. Additionally, review the \"using osqueryd\" wiki page:\n" \
- " - https://github.com/facebook/osquery/wiki/using-osqueryd\n\n";
+ " - https://osquery.readthedocs.org/en/latest/introduction/using-osqueryd/" \
+ "\n\n";
CLI_FLAG(bool,
config_check,
CLI_FLAG(bool, daemonize, false, "Run as daemon (osqueryd only)");
#endif
-namespace fs = boost::filesystem;
-
void printUsage(const std::string& binary, int tool) {
// Parse help options before gflags. Only display osquery-related options.
fprintf(stdout, DESCRIPTION, OSQUERY_VERSION);
std::srand(time(nullptr));
// osquery implements a custom help/usage output.
- std::string first_arg = (*argc_ > 1) ? std::string((*argv_)[1]) : "";
- if ((first_arg == "--help" || first_arg == "-h" || first_arg == "-help") &&
- tool != OSQUERY_TOOL_TEST) {
- printUsage(binary_, tool_);
- ::exit(0);
+ for (int i = 1; i < *argc_; i++) {
+ auto help = std::string((*argv_)[i]);
+ if ((help == "--help" || help == "-help" || help == "--h" ||
+ help == "-h") &&
+ tool != OSQUERY_TOOL_TEST) {
+ printUsage(binary_, tool_);
+ ::exit(0);
+ }
}
// To change the default config plugin, compile osquery with
FLAGS_logger_plugin = STR(OSQUERY_DEFAULT_LOGGER_PLUGIN);
#endif
+ // Set version string from CMake build
+ GFLAGS_NAMESPACE::SetVersionString(OSQUERY_VERSION);
+
+ // Let gflags parse the non-help options/flags.
+ GFLAGS_NAMESPACE::ParseCommandLineFlags(
+ argc_, argv_, (tool == OSQUERY_TOOL_SHELL));
+
if (tool == OSQUERY_TOOL_SHELL) {
// The shell is transient, rewrite config-loaded paths.
- osquery::FLAGS_disable_logging = true;
-
+ FLAGS_disable_logging = true;
// Get the caller's home dir for temporary storage/state management.
auto homedir = osqueryHomeDirectory();
if (osquery::pathExists(homedir).ok() ||
}
}
- // Set version string from CMake build
- GFLAGS_NAMESPACE::SetVersionString(OSQUERY_VERSION);
-
- // Let gflags parse the non-help options/flags.
- GFLAGS_NAMESPACE::ParseCommandLineFlags(
- argc_, argv_, (tool == OSQUERY_TOOL_SHELL));
-
// If the caller is checking configuration, disable the watchdog/worker.
if (FLAGS_config_check) {
FLAGS_disable_watchdog = true;
// Initialize the status and results logger.
initStatusLogger(binary_);
if (tool != OSQUERY_EXTENSION) {
- VLOG(1) << "osquery initialized [version=" << OSQUERY_VERSION << "]";
+ if (isWorker()) {
+ VLOG(1) << "osquery worker initialized [watcher="
+ << getenv("OSQUERY_WORKER") << "]";
+ } else {
+ VLOG(1) << "osquery initialized [version=" << OSQUERY_VERSION << "]";
+ }
} else {
VLOG(1) << "osquery extension initialized [sdk=" << OSQUERY_SDK_VERSION
<< "]";
}
void Initializer::initDaemon() {
+ if (FLAGS_config_check) {
+ // No need to daemonize, emit log lines, or create process mutexes.
+ return;
+ }
+
#ifndef __APPLE__
// OSX uses launchd to daemonize.
if (osquery::FLAGS_daemonize) {
syslog(
LOG_NOTICE, "%s started [version=%s]", binary_.c_str(), OSQUERY_VERSION);
- // check if /var/osquery exists
- if ((Flag::isDefault("pidfile") || Flag::isDefault("db_path")) &&
+ // Check if /var/osquery exists
+ if ((Flag::isDefault("pidfile") || Flag::isDefault("database_path")) &&
!isDirectory("/var/osquery")) {
std::cerr << CONFIG_ERROR
}
void Initializer::initWatcher() {
// The watcher takes a list of paths to autoload extensions from.
- loadExtensions();
+ osquery::loadExtensions();
// Add a watcher service thread to start/watch an optional worker and set
// of optional extensions in the autoload paths.
if (Watcher::hasManagedExtensions() || !FLAGS_disable_watchdog) {
- Dispatcher::getInstance().addService(std::make_shared<WatcherRunner>(
+ Dispatcher::addService(std::make_shared<WatcherRunner>(
*argc_, *argv_, !FLAGS_disable_watchdog));
}
// the extensions and worker process.
if (!FLAGS_disable_watchdog) {
Dispatcher::joinServices();
- // Executation should never reach this point.
+ // Execution should never reach this point.
::exit(EXIT_FAILURE);
}
}
void Initializer::initWorker(const std::string& name) {
- // Set the worker's process name.
+ // Clear worker's arguments.
size_t name_size = strlen((*argv_)[0]);
for (int i = 0; i < *argc_; i++) {
if ((*argv_)[i] != nullptr) {
memset((*argv_)[i], 0, strlen((*argv_)[i]));
}
}
- strncpy((*argv_)[0], name.c_str(), name_size);
+
+ // Set the worker's process name.
+ if (name.size() <= name_size) {
+ std::copy(name.begin(), name.end(), (*argv_)[0]);
+ }
// Start a watcher watcher thread to exit the process if the watcher exits.
- Dispatcher::getInstance().addService(
- std::make_shared<WatcherWatcherRunner>(getppid()));
+ Dispatcher::addService(std::make_shared<WatcherWatcherRunner>(getppid()));
}
void Initializer::initWorkerWatcher(const std::string& name) {
bool Initializer::isWorker() { return (getenv("OSQUERY_WORKER") != nullptr); }
-void Initializer::initConfigLogger() {
+void Initializer::initActivePlugin(const std::string& type,
+ const std::string& name) {
// Use a delay, meaning the amount of milliseconds waited for extensions.
size_t delay = 0;
// The timeout is the maximum time in seconds to wait for extensions.
size_t timeout = atoi(FLAGS_extensions_timeout.c_str());
- while (!Registry::setActive("config", FLAGS_config_plugin)) {
- // If there is at least 1 autoloaded extension, it may broadcast a route
- // to the active config plugin.
- if (!Watcher::hasManagedExtensions() || delay > timeout * 1000) {
- LOG(ERROR) << "Config plugin not found: " << FLAGS_config_plugin;
- ::exit(EXIT_CATASTROPHIC);
- }
- ::usleep(kExtensionInitializeMLatency * 1000);
- delay += kExtensionInitializeMLatency;
- }
-
- // Try the same wait for a logger pluing too.
- while (!Registry::setActive("logger", FLAGS_logger_plugin)) {
+ while (!Registry::setActive(type, name)) {
if (!Watcher::hasManagedExtensions() || delay > timeout * 1000) {
- LOG(ERROR) << "Logger plugin not found: " << FLAGS_logger_plugin;
+ LOG(ERROR) << "Active " << type << " plugin not found: " << name;
::exit(EXIT_CATASTROPHIC);
}
::usleep(kExtensionInitializeMLatency * 1000);
// Bind to an extensions socket and wait for registry additions.
osquery::startExtensionManager();
- // Then set the config/logger plugins, which use a single/active plugin.
- initConfigLogger();
+ // Then set the config plugin, which uses a single/active plugin.
+ initActivePlugin("config", FLAGS_config_plugin);
// Run the setup for all lazy registries (tables, SQL).
Registry::setUp();
}
// Initialize the status and result plugin logger.
+ initActivePlugin("logger", FLAGS_logger_plugin);
initLogger(binary_);
// Start event threads.
osquery::attachEvents();
- osquery::EventFactory::delay();
+ EventFactory::delay();
}
void Initializer::shutdown() {
*
*/
+#include <ctime>
#include <sstream>
#include <sys/types.h>
}
std::string getAsciiTime() {
- std::time_t result = std::time(NULL);
- std::string time_str = std::string(std::asctime(std::localtime(&result)));
+ auto result = std::time(nullptr);
+ auto time_str = std::string(std::asctime(std::gmtime(&result)));
boost::algorithm::trim(time_str);
- return time_str;
+ return time_str + " UTC";
}
int getUnixTime() {
- std::time_t result = std::time(NULL);
+ auto result = std::time(nullptr);
return result;
}
#include <osquery/logger.h>
#include <osquery/tables.h>
+namespace pt = boost::property_tree;
+
namespace osquery {
namespace tables {
void TablePlugin::setRequestFromContext(const QueryContext& context,
PluginRequest& request) {
- boost::property_tree::ptree tree;
+ pt::ptree tree;
tree.put("limit", context.limit);
// The QueryContext contains a constraint map from column to type information
// and the list of operand/expression constraints applied to that column from
// the query given.
- boost::property_tree::ptree constraints;
+ pt::ptree constraints;
for (const auto& constraint : context.constraints) {
- boost::property_tree::ptree child;
+ pt::ptree child;
child.put("name", constraint.first);
constraint.second.serialize(child);
constraints.push_back(std::make_pair("", child));
// Write the property tree as a JSON string into the PluginRequest.
std::ostringstream output;
- boost::property_tree::write_json(output, tree, false);
+ pt::write_json(output, tree, false);
request["context"] = output.str();
}
}
// Read serialized context from PluginRequest.
- std::stringstream input;
- input << request.at("context");
- boost::property_tree::ptree tree;
- boost::property_tree::read_json(input, tree);
+ pt::ptree tree;
+ try {
+ std::stringstream input;
+ input << request.at("context");
+ pt::read_json(input, tree);
+ } catch (const pt::json_parser::json_parser_error& e) {
+ return;
+ }
// Set the context limit and deserialize each column constraint list.
context.limit = tree.get<int>("limit");
#include "osquery/core/test_util.h"
-namespace pt = boost::property_tree;
-
namespace osquery {
-const std::string kTestQuery = "SELECT * FROM test_table";
-
-const std::string kTestDataPath = "../../../tools/tests/";
-
QueryData getTestDBExpectedResults() {
QueryData d;
Row row1;
return results;
}
-osquery::ScheduledQuery getOsqueryScheduledQuery() {
+ScheduledQuery getOsqueryScheduledQuery() {
ScheduledQuery sq;
sq.query = "SELECT filename FROM fs WHERE path = '/bin' ORDER BY filename";
sq.interval = 5;
return sq;
}
-std::pair<boost::property_tree::ptree, Row> getSerializedRow() {
+std::pair<pt::ptree, Row> getSerializedRow() {
Row r;
r["foo"] = "bar";
r["meaning_of_life"] = "42";
return std::make_pair(arr, r);
}
-std::pair<boost::property_tree::ptree, QueryData> getSerializedQueryData() {
+std::pair<pt::ptree, QueryData> getSerializedQueryData() {
auto r = getSerializedRow();
QueryData q = {r.second, r.second};
pt::ptree arr;
return std::make_pair(arr, q);
}
-std::pair<boost::property_tree::ptree, DiffResults> getSerializedDiffResults() {
+std::pair<pt::ptree, DiffResults> getSerializedDiffResults() {
auto qd = getSerializedQueryData();
DiffResults diff_results;
diff_results.added = qd.second;
std::pair<std::string, DiffResults> getSerializedDiffResultsJSON() {
auto results = getSerializedDiffResults();
-
std::ostringstream ss;
pt::write_json(ss, results.first, false);
-
return std::make_pair(ss.str(), results.second);
}
-std::pair<pt::ptree, HistoricalQueryResults>
-getSerializedHistoricalQueryResults() {
- auto qd = getSerializedQueryData();
- auto dr = getSerializedDiffResults();
- HistoricalQueryResults r;
- r.mostRecentResults.first = 2;
- r.mostRecentResults.second = qd.second;
-
- pt::ptree root;
-
- pt::ptree mostRecentResults;
- mostRecentResults.add_child("2", qd.first);
- root.add_child("mostRecentResults", mostRecentResults);
-
- return std::make_pair(root, r);
-}
-
-std::pair<std::string, HistoricalQueryResults>
-getSerializedHistoricalQueryResultsJSON() {
- auto results = getSerializedHistoricalQueryResults();
-
+std::pair<std::string, QueryData> getSerializedQueryDataJSON() {
+ auto results = getSerializedQueryData();
std::ostringstream ss;
pt::write_json(ss, results.first, false);
-
return std::make_pair(ss.str(), results.second);
}
-std::pair<boost::property_tree::ptree, ScheduledQueryLogItem>
-getSerializedScheduledQueryLogItem() {
- ScheduledQueryLogItem i;
+std::pair<pt::ptree, QueryLogItem> getSerializedQueryLogItem() {
+ QueryLogItem i;
pt::ptree root;
auto dr = getSerializedDiffResults();
- i.diffResults = dr.second;
+ i.results = dr.second;
i.name = "foobar";
- i.calendarTime = "Mon Aug 25 12:10:57 2014";
- i.unixTime = 1408993857;
- i.hostIdentifier = "foobaz";
+ i.calendar_time = "Mon Aug 25 12:10:57 2014";
+ i.time = 1408993857;
+ i.identifier = "foobaz";
root.add_child("diffResults", dr.first);
root.put<std::string>("name", "foobar");
root.put<std::string>("hostIdentifier", "foobaz");
return std::make_pair(root, i);
}
-std::pair<std::string, ScheduledQueryLogItem>
-getSerializedScheduledQueryLogItemJSON() {
- auto results = getSerializedScheduledQueryLogItem();
+std::pair<std::string, QueryLogItem> getSerializedQueryLogItemJSON() {
+ auto results = getSerializedQueryLogItem();
std::ostringstream ss;
pt::write_json(ss, results.first, false);
return content;
}
+std::string getEtcProtocolsContent() {
+ std::string content;
+ readFile(kTestDataPath + "test_protocols.txt", content);
+ return content;
+}
+
QueryData getEtcHostsExpectedResults() {
Row row1;
Row row2;
Row row3;
Row row4;
+ Row row5;
+ Row row6;
row1["address"] = "127.0.0.1";
row1["hostnames"] = "localhost";
row3["hostnames"] = "localhost";
row4["address"] = "fe80::1%lo0";
row4["hostnames"] = "localhost";
- return {row1, row2, row3, row4};
+ row5["address"] = "127.0.0.1";
+ row5["hostnames"] = "example.com example";
+ row6["address"] = "127.0.0.1";
+ row6["hostnames"] = "example.net";
+ return {row1, row2, row3, row4, row5, row6};
}
::std::ostream& operator<<(::std::ostream& os, const Status& s) {
return os << "Status(" << s.getCode() << ", \"" << s.getMessage() << "\")";
}
+QueryData getEtcProtocolsExpectedResults() {
+ Row row1;
+ Row row2;
+ Row row3;
+
+ row1["name"] = "ip";
+ row1["number"] = "0";
+ row1["alias"] = "IP";
+ row1["comment"] = "internet protocol, pseudo protocol number";
+ row2["name"] = "icmp";
+ row2["number"] = "1";
+ row2["alias"] = "ICMP";
+ row2["comment"] = "internet control message protocol";
+ row3["name"] = "tcp";
+ row3["number"] = "6";
+ row3["alias"] = "TCP";
+ row3["comment"] = "transmission control protocol";
+
+ return {row1, row2, row3};
+}
+
void createMockFileStructure() {
boost::filesystem::create_directories(kFakeDirectory +
"/deep11/deep2/deep3/");
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
#include <osquery/database.h>
#include <osquery/filesystem.h>
+namespace pt = boost::property_tree;
+
namespace osquery {
-// kTestQuery is a test query that can be executed against the database
-// returned from createTestDB() to result in the dataset returned from
-// getTestDBExpectedResults()
-extern const std::string kTestQuery;
-extern const std::string kTestDataPath;
+/// Any SQL-dependent tests should use kTestQuery for a pre-populated example.
+const std::string kTestQuery = "SELECT * FROM test_table";
+
+const std::string kTestDataPath = "../../tools/tests/";
+
+/// Tests should limit intermediate input/output to a working directory.
+/// Config data, logging results, and intermediate database/caching usage.
+const std::string kTestWorkingDirectory = "/tmp/osquery-tests/";
+
+/// A fake directory tree should be used for filesystem iterator testing.
+const std::string kFakeDirectory = kTestWorkingDirectory + "fstree";
-const std::string kFakeDirectory = "/tmp/osquery-fstests-pattern";
+ScheduledQuery getOsqueryScheduledQuery();
// getTestDBExpectedResults returns the results of kTestQuery of the table that
// initially gets returned from createTestDB()
// need to be performed on the dataset to make the results be pair.second
std::vector<std::pair<std::string, QueryData> > getTestDBResultStream();
-// getOsqueryScheduledQuery returns a test scheduled query which would normally
-// be returned via the config
-ScheduledQuery getOsqueryScheduledQuery();
-
// getSerializedRow() return an std::pair where pair->first is a string which
// should serialize to pair->second. pair->second should deserialize
// to pair->first
-std::pair<boost::property_tree::ptree, Row> getSerializedRow();
+std::pair<pt::ptree, Row> getSerializedRow();
// getSerializedQueryData() return an std::pair where pair->first is a string
// which should serialize to pair->second. pair->second should
// deserialize to pair->first
-std::pair<boost::property_tree::ptree, QueryData> getSerializedQueryData();
+std::pair<pt::ptree, QueryData> getSerializedQueryData();
+std::pair<std::string, QueryData> getSerializedQueryDataJSON();
// getSerializedDiffResults() return an std::pair where pair->first is a string
// which should serialize to pair->second. pair->second should
// deserialize to pair->first
-std::pair<boost::property_tree::ptree, DiffResults> getSerializedDiffResults();
-
+std::pair<pt::ptree, DiffResults> getSerializedDiffResults();
std::pair<std::string, DiffResults> getSerializedDiffResultsJSON();
-// getSerializedHistoricalQueryResults() return an std::pair where pair->first
+// getSerializedQueryLogItem() return an std::pair where pair->first
// is a string which should serialize to pair->second. pair->second
// should deserialize to pair->first
-std::pair<boost::property_tree::ptree, HistoricalQueryResults>
-getSerializedHistoricalQueryResults();
-
-std::pair<std::string, HistoricalQueryResults>
-getSerializedHistoricalQueryResultsJSON();
-
-// getSerializedScheduledQueryLogItem() return an std::pair where pair->first
-// is a string which should serialize to pair->second. pair->second
-// should deserialize to pair->first
-std::pair<boost::property_tree::ptree, ScheduledQueryLogItem>
-getSerializedScheduledQueryLogItem();
-
-std::pair<std::string, ScheduledQueryLogItem>
-getSerializedScheduledQueryLogItemJSON();
+std::pair<pt::ptree, QueryLogItem> getSerializedQueryLogItem();
+std::pair<std::string, QueryLogItem> getSerializedQueryLogItemJSON();
// generate content for a PEM-encoded certificate
std::string getCACertificateContent();
// generate the content that would be found in an /etc/hosts file
std::string getEtcHostsContent();
+// generate the content that would be found in an /etc/protocols file
+std::string getEtcProtocolsContent();
+
// generate the expected data that getEtcHostsContent() should parse into
QueryData getEtcHostsExpectedResults();
+// generate the expected data that getEtcProtocolsContent() should parse into
+QueryData getEtcProtocolsExpectedResults();
+
// the three items that you need to test osquery::splitString
struct SplitStringTestData {
std::string test_string;
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
EXPECT_EQ(unencoded, unencoded2);
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
EXPECT_EQ(value, "test3");
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
EXPECT_EQ(digest, "88ee11f2aa7903f34b8b8785d92208b1");
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
EXPECT_EQ(s.toString(), "foobar");
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
}
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
EXPECT_EQ(split(content, ":", 1), expected);
}
}
-
-int main(int argc, char* argv[]) {
- google::InitGoogleLogging(argv[0]);
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
#include <osquery/sql.h>
#include "osquery/core/watcher.h"
+#include "osquery/dispatcher/dispatcher.h"
extern char** environ;
const std::map<WatchdogLimitType, std::vector<size_t> > kWatchdogLimits = {
// Maximum MB worker can privately allocate.
- {MEMORY_LIMIT, {50, 30, 10, 10}},
+ {MEMORY_LIMIT, {50, 30, 10, 1000}},
// Percent of user or system CPU worker can utilize for LATENCY_LIMIT
// seconds.
- {UTILIZATION_LIMIT, {90, 80, 60, 50}},
+ {UTILIZATION_LIMIT, {90, 80, 60, 1000}},
// Number of seconds the worker should run, else consider the exit fatal.
{RESPAWN_LIMIT, {20, 20, 20, 5}},
// If the worker respawns too quickly, backoff on creating additional.
// Seconds of tolerable UTILIZATION_LIMIT sustained latency.
{LATENCY_LIMIT, {12, 6, 3, 1}},
// How often to poll for performance limit violations.
- {INTERVAL, {3, 3, 3, 1}}, };
+ {INTERVAL, {3, 3, 3, 1}},
+};
const std::string kExtensionExtension = ".ext";
void WatcherRunner::stopChild(pid_t child) {
kill(child, SIGKILL);
- child = 0;
// Clean up the defunct (zombie) process.
waitpid(-1, 0, WNOHANG);
// Compare CPU utilization since last check.
BIGINT_LITERAL footprint, user_time, system_time, parent;
// IV is the check interval in seconds, and utilization is set per-second.
- auto iv = getWorkerLimit(INTERVAL);
+ auto iv = std::max(getWorkerLimit(INTERVAL), (size_t)1);
{
WatcherLocker locker;
// Check if the sustained difference exceeded the acceptable latency limit.
sustained_latency = state.sustained_latency;
+
+ // Set the memory footprint as the amount of resident bytes allocated
+ // since the process image was created (estimate).
+ // A more-meaningful check would limit this to writable regions.
+ if (state.initial_footprint == 0) {
+ state.initial_footprint = footprint;
+ }
+
+ // Set the measured/limit-applied footprint to the post-launch allocations.
+ if (footprint < state.initial_footprint) {
+ footprint = 0;
+ } else {
+ footprint = footprint - state.initial_footprint;
+ }
}
// Only make a decision about the child sanity if it is still the watcher's
// Watcher died, the worker must follow.
VLOG(1) << "osqueryd worker (" << getpid()
<< ") detected killed watcher (" << watcher_ << ")";
+ Dispatcher::removeServices();
+ Dispatcher::joinServices();
::exit(EXIT_SUCCESS);
}
interruptableSleep(getWorkerLimit(INTERVAL) * 1000);
/// A timestamp when the process/worker was last created.
size_t last_respawn_time;
+ /// The initial (or as close as possible) process image footprint.
+ size_t initial_footprint;
+
PerformanceState() {
sustained_latency = 0;
user_time = 0;
system_time = 0;
last_respawn_time = 0;
+ initial_footprint = 0;
}
};
* autoloaded extension ended. Tables may also report on the historic worker
* or extension utilizations.
*
- * Though not critical, it is perferred to remove the extension's broadcasted
+ * Though not critical, it is preferred to remove the extension's broadcasted
* routes quickly. Locking access to the extensions list between signals and
* the WatcherRunner thread allows osquery to tearDown registry changes before
* attempting to respawn an extension process.
/// Reset pid and performance counters for a worker or extension process.
static void reset(pid_t child);
- /// Return the number of autoloadable extensions.
+ /**
+ * @brief Return the state of autoloadable extensions.
+ *
+ * Some initialization decisions are made based on waiting for plugins to
+ * broadcast from potentially-loaded extensions. If no extensions are loaded
+ * and an active (selected at command line) plugin is missing, fail quickly.
+ */
static bool hasManagedExtensions();
private:
pid_t worker_;
/// Keep a list of resolved extension paths and their managed pids.
std::map<std::string, pid_t> extensions_;
- /// Path to autoload extensions from.
+ /// Paths to autoload extensions.
std::vector<std::string> extensions_paths_;
private:
/**
* @brief The watchdog thread responsible for spawning/monitoring children.
*
- * The WatcherRunner thread will spawn any autoloaded modules or optional
+ * The WatcherRunner thread will spawn any autoloaded extensions or optional
* osquery daemon worker processes. It will then poll for their performance
- * state and kill/respawn osquery child processes.
+ * state and kill/respawn osquery child processes if they violate limits.
*/
class WatcherRunner : public InternalRunnable {
public:
}
private:
+ /// Dispatcher (this service thread's) entry point.
void enter();
/// Boilerplate function to sleep for some configured latency
bool ok();
bool isChildSane(pid_t child);
private:
- /// Fork a worker process.
+ /// Fork and execute a worker process.
void createWorker();
/// Fork an extension process.
bool createExtension(const std::string& extension);
private:
/// Keep the invocation daemon's argc to iterate through argv.
int argc_;
- /// When a worker child is spawned the argv will be scrubed.
+ /// When a worker child is spawned the argv will be scrubbed.
char** argv_;
/// Spawn/monitor a worker process.
bool use_worker_;
ADD_OSQUERY_LIBRARY(TRUE osquery_database db_handle.cpp
- query.cpp
- results.cpp)
+ query.cpp
+ results.cpp)
-ADD_OSQUERY_TEST(TRUE osquery_query_tests query_tests.cpp)
-ADD_OSQUERY_TEST(TRUE osquery_db_handle_tests db_handle_tests.cpp)
-ADD_OSQUERY_TEST(TRUE osquery_results_tests results_tests.cpp)
+FILE(GLOB OSQUERY_DATABASE_TESTS "tests/*.cpp")
+ADD_OSQUERY_TEST(TRUE ${OSQUERY_DATABASE_TESTS})
#include <mutex>
#include <stdexcept>
+#include <sys/stat.h>
+
#include <rocksdb/env.h>
#include <rocksdb/options.h>
cf_name, rocksdb::ColumnFamilyOptions()));
}
+ VLOG(1) << "Opening DB handle: " << path;
auto s = rocksdb::DB::Open(options_, path, column_families_, &handles_, &db_);
if (!s.ok()) {
throw std::runtime_error(s.ToString());
}
+
+ // RocksDB may not create/append a directory with acceptable permissions.
+ if (chmod(path.c_str(), S_IRWXU) != 0) {
+ throw std::runtime_error("Cannot set permissions on RocksDB path: " + path);
+ }
}
DBHandle::~DBHandle() {
namespace osquery {
-const std::string kQueryNameNotFoundError = "query name not found in database";
-
/////////////////////////////////////////////////////////////////////////////
// Getters and setters
/////////////////////////////////////////////////////////////////////////////
// Data access methods
/////////////////////////////////////////////////////////////////////////////
-Status Query::getHistoricalQueryResults(HistoricalQueryResults& hQR) {
- return getHistoricalQueryResults(hQR, DBHandle::getInstance());
+Status Query::getPreviousQueryResults(QueryData& results) {
+ return getPreviousQueryResults(results, DBHandle::getInstance());
}
-Status Query::getHistoricalQueryResults(HistoricalQueryResults& hQR,
- std::shared_ptr<DBHandle> db) {
- if (isQueryNameInDatabase()) {
- std::string raw;
- auto get_status = db->Get(kQueries, name_, raw);
- if (get_status.ok()) {
- auto deserialize_status = deserializeHistoricalQueryResultsJSON(raw, hQR);
- if (!deserialize_status.ok()) {
- return deserialize_status;
- }
- } else {
- return get_status;
- }
- } else {
- return Status(1, kQueryNameNotFoundError);
+Status Query::getPreviousQueryResults(QueryData& results, DBHandleRef db) {
+ if (!isQueryNameInDatabase()) {
+ return Status(1, "Query name not found in database");
+ }
+
+ std::string raw;
+ auto status = db->Get(kQueries, name_, raw);
+ if (!status.ok()) {
+ return status;
+ }
+
+ status = deserializeQueryDataJSON(raw, results);
+ if (!status.ok()) {
+ return status;
}
return Status(0, "OK");
}
return getStoredQueryNames(DBHandle::getInstance());
}
-std::vector<std::string> Query::getStoredQueryNames(
- std::shared_ptr<DBHandle> db) {
+std::vector<std::string> Query::getStoredQueryNames(DBHandleRef db) {
std::vector<std::string> results;
db->Scan(kQueries, results);
return results;
return isQueryNameInDatabase(DBHandle::getInstance());
}
-bool Query::isQueryNameInDatabase(std::shared_ptr<DBHandle> db) {
+bool Query::isQueryNameInDatabase(DBHandleRef db) {
auto names = Query::getStoredQueryNames(db);
return std::find(names.begin(), names.end(), name_) != names.end();
}
-Status Query::addNewResults(const osquery::QueryData& qd, int unix_time) {
- return addNewResults(qd, unix_time, DBHandle::getInstance());
+Status Query::addNewResults(const osquery::QueryData& qd) {
+ return addNewResults(qd, DBHandle::getInstance());
}
-Status Query::addNewResults(const QueryData& qd,
- int unix_time,
- std::shared_ptr<DBHandle> db) {
+Status Query::addNewResults(const QueryData& qd, DBHandleRef db) {
DiffResults dr;
- return addNewResults(qd, dr, false, unix_time, db);
+ return addNewResults(qd, dr, false, db);
}
-osquery::Status Query::addNewResults(const osquery::QueryData& qd,
- osquery::DiffResults& dr,
- int unix_time) {
- return addNewResults(qd, dr, true, unix_time, DBHandle::getInstance());
+Status Query::addNewResults(const QueryData& qd, DiffResults& dr) {
+ return addNewResults(qd, dr, true, DBHandle::getInstance());
}
-osquery::Status Query::addNewResults(const osquery::QueryData& qd,
- osquery::DiffResults& dr,
- bool calculate_diff,
- int unix_time,
- std::shared_ptr<DBHandle> db) {
- HistoricalQueryResults hQR;
- auto hqr_status = getHistoricalQueryResults(hQR, db);
- if (!hqr_status.ok() && hqr_status.toString() != kQueryNameNotFoundError) {
- return hqr_status;
- }
-
- QueryData escaped_qd;
- // remove all non-ascii characters from the string
- escapeQueryData(qd, escaped_qd);
-
+Status Query::addNewResults(const QueryData& current_qd,
+ DiffResults& dr,
+ bool calculate_diff,
+ DBHandleRef db) {
+ // Get the rows from the last run of this query name.
+ QueryData previous_qd;
+ auto status = getPreviousQueryResults(previous_qd);
+
+ // Sanitize all non-ASCII characters from the query data values.
+ QueryData escaped_current_qd;
+ escapeQueryData(current_qd, escaped_current_qd);
+ // Calculate the differential between previous and current query results.
if (calculate_diff) {
- dr = diff(hQR.mostRecentResults.second, escaped_qd);
+ dr = diff(previous_qd, escaped_current_qd);
}
- hQR.mostRecentResults.first = unix_time;
- hQR.mostRecentResults.second = escaped_qd;
+
+ // Replace the "previous" query data with the current.
std::string json;
- auto serialize_status = serializeHistoricalQueryResultsJSON(hQR, json);
- if (!serialize_status.ok()) {
- return serialize_status;
- }
- auto put_status = db->Put(kQueries, name_, json);
- if (!put_status.ok()) {
- return put_status;
+ status = serializeQueryDataJSON(escaped_current_qd, json);
+ if (!status.ok()) {
+ return status;
}
- return Status(0, "OK");
-}
-osquery::Status Query::getCurrentResults(osquery::QueryData& qd) {
- return getCurrentResults(qd, DBHandle::getInstance());
-}
-
-Status Query::getCurrentResults(QueryData& qd, std::shared_ptr<DBHandle> db) {
- HistoricalQueryResults hQR;
- auto s = getHistoricalQueryResults(hQR, db);
- if (s.ok()) {
- qd = hQR.mostRecentResults.second;
+ status = db->Put(kQueries, name_, json);
+ if (!status.ok()) {
+ return status;
}
- return s;
+ return Status(0, "OK");
}
}
+++ /dev/null
-/*
- * Copyright (c) 2014, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
- *
- */
-
-#include <algorithm>
-#include <ctime>
-#include <deque>
-
-#include <boost/filesystem/operations.hpp>
-
-#include <gtest/gtest.h>
-
-#include <osquery/database/query.h>
-
-#include "osquery/core/test_util.h"
-
-const std::string kTestingQueryDBPath = "/tmp/rocksdb-osquery-querytests";
-
-namespace osquery {
-
-class QueryTests : public testing::Test {
- public:
- void SetUp() { db = DBHandle::getInstanceAtPath(kTestingQueryDBPath); }
-
- public:
- std::shared_ptr<DBHandle> db;
-};
-
-TEST_F(QueryTests, test_get_column_family_name) {
- auto query = getOsqueryScheduledQuery();
- auto cf = Query("foobar", query);
- EXPECT_EQ(cf.getQueryName(), "foobar");
-}
-
-TEST_F(QueryTests, test_get_query) {
- auto query = getOsqueryScheduledQuery();
- auto cf = Query("foobar", query);
- EXPECT_EQ(cf.getQuery(), query.query);
-}
-
-TEST_F(QueryTests, test_get_interval) {
- auto query = getOsqueryScheduledQuery();
- auto cf = Query("foobar", query);
- EXPECT_EQ(cf.getInterval(), query.interval);
-}
-
-TEST_F(QueryTests, test_private_members) {
- auto query = getOsqueryScheduledQuery();
- auto cf = Query("foobar", query);
- EXPECT_EQ(cf.query_, query);
-}
-
-TEST_F(QueryTests, test_add_and_get_current_results) {
- auto query = getOsqueryScheduledQuery();
- auto cf = Query("foobar", query);
- auto s = cf.addNewResults(getTestDBExpectedResults(), std::time(0), db);
- EXPECT_TRUE(s.ok());
- EXPECT_EQ(s.toString(), "OK");
- for (auto result : getTestDBResultStream()) {
- DiffResults dr;
- HistoricalQueryResults hQR;
- auto hqr_status = cf.getHistoricalQueryResults(hQR, db);
- EXPECT_TRUE(hqr_status.ok());
- EXPECT_EQ(hqr_status.toString(), "OK");
- auto s = cf.addNewResults(result.second, dr, true, std::time(0), db);
- EXPECT_TRUE(s.ok());
- DiffResults expected = diff(hQR.mostRecentResults.second, result.second);
- EXPECT_EQ(dr, expected);
- QueryData qd;
- cf.getCurrentResults(qd, db);
- EXPECT_EQ(qd, result.second);
- }
-}
-
-TEST_F(QueryTests, test_get_historical_query_results) {
- auto hQR = getSerializedHistoricalQueryResultsJSON();
- auto query = getOsqueryScheduledQuery();
- auto put_status = db->Put(kQueries, "foobar", hQR.first);
- EXPECT_TRUE(put_status.ok());
- EXPECT_EQ(put_status.toString(), "OK");
- auto cf = Query("foobar", query);
- HistoricalQueryResults from_db;
- auto query_status = cf.getHistoricalQueryResults(from_db, db);
- EXPECT_TRUE(query_status.ok());
- EXPECT_EQ(query_status.toString(), "OK");
- EXPECT_EQ(from_db, hQR.second);
-}
-
-TEST_F(QueryTests, test_query_name_not_found_in_db) {
- HistoricalQueryResults from_db;
- auto query = getOsqueryScheduledQuery();
- auto cf = Query("not_a_real_query", query);
- auto query_status = cf.getHistoricalQueryResults(from_db, db);
- EXPECT_FALSE(query_status.ok());
- EXPECT_EQ(query_status.toString(), "query name not found in database");
-}
-
-TEST_F(QueryTests, test_is_query_name_in_database) {
- auto query = getOsqueryScheduledQuery();
- auto cf = Query("foobar", query);
- auto hQR = getSerializedHistoricalQueryResultsJSON();
- auto put_status = db->Put(kQueries, "foobar", hQR.first);
- EXPECT_TRUE(put_status.ok());
- EXPECT_EQ(put_status.toString(), "OK");
- EXPECT_TRUE(cf.isQueryNameInDatabase(db));
-}
-
-TEST_F(QueryTests, test_get_stored_query_names) {
- auto query = getOsqueryScheduledQuery();
- auto cf = Query("foobar", query);
- auto hQR = getSerializedHistoricalQueryResultsJSON();
- auto put_status = db->Put(kQueries, "foobar", hQR.first);
- EXPECT_TRUE(put_status.ok());
- EXPECT_EQ(put_status.toString(), "OK");
- auto names = cf.getStoredQueryNames(db);
- auto in_vector = std::find(names.begin(), names.end(), "foobar");
- EXPECT_NE(in_vector, names.end());
-}
-
-TEST_F(QueryTests, test_get_current_results) {
- auto hQR = getSerializedHistoricalQueryResultsJSON();
- auto query = getOsqueryScheduledQuery();
- auto put_status = db->Put(kQueries, "foobar", hQR.first);
- EXPECT_TRUE(put_status.ok());
- EXPECT_EQ(put_status.toString(), "OK");
- auto cf = Query("foobar", query);
- QueryData qd;
- auto query_status = cf.getCurrentResults(qd, db);
- EXPECT_TRUE(query_status.ok());
- EXPECT_EQ(query_status.toString(), "OK");
- EXPECT_EQ(qd, hQR.second.mostRecentResults.second);
-}
-}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- int status = RUN_ALL_TESTS();
- boost::filesystem::remove_all(kTestingQueryDBPath);
- return status;
-}
#include <osquery/logger.h>
namespace pt = boost::property_tree;
-using osquery::Status;
-typedef unsigned char byte;
namespace osquery {
+typedef unsigned char byte;
+
/////////////////////////////////////////////////////////////////////////////
// Row - the representation of a row in a set of database results. Row is a
// simple map where individual column names are keys, which map to the Row's
std::string escapeNonPrintableBytes(const std::string& data) {
std::string escaped;
- char const hex_chars[16] = {'0',
- '1',
- '2',
- '3',
- '4',
- '5',
- '6',
- '7',
- '8',
- '9',
- 'A',
- 'B',
- 'C',
- 'D',
- 'E',
- 'F'};
+ // clang-format off
+ char const hex_chars[16] = {
+ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ 'A', 'B', 'C', 'D', 'E', 'F',
+ };
+ // clang-format on
for (int i = 0; i < data.length(); i++) {
if (((byte)data[i]) < 0x20 || ((byte)data[i]) >= 0x80) {
escaped += "\\x";
Status serializeRowJSON(const Row& r, std::string& json) {
pt::ptree tree;
- try {
- auto status = serializeRow(r, tree);
- if (!status.ok()) {
- return status;
- }
- std::ostringstream ss;
- pt::write_json(ss, tree, false);
- json = ss.str();
- } catch (const std::exception& e) {
- return Status(1, e.what());
+ auto status = serializeRow(r, tree);
+ if (!status.ok()) {
+ return status;
}
+
+ std::ostringstream output;
+ pt::write_json(output, tree, false);
+ json = output.str();
return Status(0, "OK");
}
Status deserializeRow(const pt::ptree& tree, Row& r) {
- try {
- for (auto& i : tree) {
- if (i.first.length() > 0) {
- r[i.first] = i.second.data();
- }
+ for (const auto& i : tree) {
+ if (i.first.length() > 0) {
+ r[i.first] = i.second.data();
}
- return Status(0, "OK");
- } catch (const std::exception& e) {
- LOG(ERROR) << e.what();
- return Status(1, e.what());
}
+ return Status(0, "OK");
}
Status deserializeRowJSON(const std::string& json, Row& r) {
pt::ptree tree;
try {
- std::stringstream j;
- j << json;
- pt::read_json(j, tree);
- } catch (const std::exception& e) {
+ std::stringstream input;
+ input << json;
+ pt::read_json(input, tree);
+ } catch (const pt::json_parser::json_parser_error& e) {
return Status(1, e.what());
}
return deserializeRow(tree, r);
/////////////////////////////////////////////////////////////////////////////
Status serializeQueryData(const QueryData& q, pt::ptree& tree) {
- try {
- for (const auto& r : q) {
- pt::ptree serialized;
- auto s = serializeRow(r, serialized);
- if (!s.ok()) {
- return s;
- }
- tree.push_back(std::make_pair("", serialized));
+ for (const auto& r : q) {
+ pt::ptree serialized;
+ auto s = serializeRow(r, serialized);
+ if (!s.ok()) {
+ return s;
}
- } catch (const std::exception& e) {
- return Status(1, e.what());
+ tree.push_back(std::make_pair("", serialized));
+ }
+ return Status(0, "OK");
+}
+
+Status serializeQueryDataJSON(const QueryData& q, std::string& json) {
+ pt::ptree tree;
+ auto status = serializeQueryData(q, tree);
+ if (!status.ok()) {
+ return status;
+ }
+
+ std::ostringstream output;
+ pt::write_json(output, tree, false);
+ json = output.str();
+ return Status(0, "OK");
+}
+
+Status deserializeQueryData(const pt::ptree& tree, QueryData& qd) {
+ for (const auto& i : tree) {
+ Row r;
+ auto status = deserializeRow(i.second, r);
+ if (!status.ok()) {
+ return status;
+ }
+ qd.push_back(r);
}
return Status(0, "OK");
}
+Status deserializeQueryDataJSON(const std::string& json, QueryData& qd) {
+ pt::ptree tree;
+ try {
+ std::stringstream input;
+ input << json;
+ pt::read_json(input, tree);
+ } catch (const pt::json_parser::json_parser_error& e) {
+ return Status(1, e.what());
+ }
+ return deserializeQueryData(tree, qd);
+}
+
/////////////////////////////////////////////////////////////////////////////
// DiffResults - the representation of two diffed QueryData result sets.
// Given and old and new QueryData, DiffResults indicates the "added" subset
/////////////////////////////////////////////////////////////////////////////
Status serializeDiffResults(const DiffResults& d, pt::ptree& tree) {
- try {
- pt::ptree added;
- auto added_status = serializeQueryData(d.added, added);
- if (!added_status.ok()) {
- return added_status;
+ pt::ptree added;
+ auto status = serializeQueryData(d.added, added);
+ if (!status.ok()) {
+ return status;
+ }
+ tree.add_child("added", added);
+
+ pt::ptree removed;
+ status = serializeQueryData(d.removed, removed);
+ if (!status.ok()) {
+ return status;
+ }
+ tree.add_child("removed", removed);
+ return Status(0, "OK");
+}
+
+Status deserializeDiffResults(const pt::ptree& tree, DiffResults& dr) {
+ if (tree.count("added") > 0) {
+ auto status = deserializeQueryData(tree.get_child("added"), dr.added);
+ if (!status.ok()) {
+ return status;
}
- tree.add_child("added", added);
+ }
- pt::ptree removed;
- auto removed_status = serializeQueryData(d.removed, removed);
- if (!removed_status.ok()) {
- return removed_status;
+ if (tree.count("removed") > 0) {
+ auto status = deserializeQueryData(tree.get_child("removed"), dr.removed);
+ if (!status.ok()) {
+ return status;
}
- tree.add_child("removed", removed);
- } catch (const std::exception& e) {
- return Status(1, e.what());
}
return Status(0, "OK");
}
Status serializeDiffResultsJSON(const DiffResults& d, std::string& json) {
- try {
- pt::ptree tree;
- auto s = serializeDiffResults(d, tree);
- if (!s.ok()) {
- return s;
- }
- std::ostringstream ss;
- pt::write_json(ss, tree, false);
- json = ss.str();
- } catch (const std::exception& e) {
- return Status(1, e.what());
+ pt::ptree tree;
+ auto status = serializeDiffResults(d, tree);
+ if (!status.ok()) {
+ return status;
}
+
+ std::ostringstream output;
+ pt::write_json(output, tree, false);
+ json = output.str();
return Status(0, "OK");
}
-DiffResults diff(const QueryData& old_, const QueryData& new_) {
+DiffResults diff(const QueryData& old, const QueryData& current) {
DiffResults r;
QueryData overlap;
- for (const auto& i : new_) {
- auto item = std::find(old_.begin(), old_.end(), i);
- if (item != old_.end()) {
+ for (const auto& i : current) {
+ auto item = std::find(old.begin(), old.end(), i);
+ if (item != old.end()) {
overlap.push_back(i);
} else {
r.added.push_back(i);
}
std::multiset<Row> overlap_set(overlap.begin(), overlap.end());
-
- std::multiset<Row> old_set(old_.begin(), old_.end());
-
+ std::multiset<Row> old_set(old.begin(), old.end());
std::set_difference(old_set.begin(),
old_set.end(),
overlap_set.begin(),
overlap_set.end(),
std::back_inserter(r.removed));
-
return r;
}
/////////////////////////////////////////////////////////////////////////////
-// HistoricalQueryResults - the representation of the historical results of
-// a particlar scheduled database query.
+// QueryLogItem - the representation of a log result occuring when a
+// scheduled query yields operating system state change.
/////////////////////////////////////////////////////////////////////////////
-Status serializeHistoricalQueryResultsJSON(const HistoricalQueryResults& r,
- std::string& json) {
- try {
- pt::ptree tree;
- auto s = serializeHistoricalQueryResults(r, tree);
- if (!s.ok()) {
- return s;
+Status serializeQueryLogItem(const QueryLogItem& i, pt::ptree& tree) {
+ pt::ptree results_tree;
+ if (i.results.added.size() > 0 || i.results.removed.size() > 0) {
+ auto status = serializeDiffResults(i.results, results_tree);
+ if (!status.ok()) {
+ return status;
}
- std::ostringstream ss;
- pt::write_json(ss, tree, false);
- json = ss.str();
- } catch (const std::exception& e) {
- return Status(1, e.what());
+ tree.add_child("diffResults", results_tree);
+ } else {
+ auto status = serializeQueryData(i.snapshot_results, results_tree);
+ if (!status.ok()) {
+ return status;
+ }
+ tree.add_child("snapshot", results_tree);
}
+
+ tree.put<std::string>("name", i.name);
+ tree.put<std::string>("hostIdentifier", i.identifier);
+ tree.put<std::string>("calendarTime", i.calendar_time);
+ tree.put<int>("unixTime", i.time);
return Status(0, "OK");
}
-Status serializeHistoricalQueryResults(const HistoricalQueryResults& r,
- pt::ptree& tree) {
- try {
- pt::ptree mostRecentResults;
-
- pt::ptree most_recent_serialized;
- auto mrr_status =
- serializeQueryData(r.mostRecentResults.second, most_recent_serialized);
- if (!mrr_status.ok()) {
- return mrr_status;
- }
- mostRecentResults.add_child(
- boost::lexical_cast<std::string>(r.mostRecentResults.first),
- most_recent_serialized);
- tree.add_child("mostRecentResults", mostRecentResults);
- } catch (const std::exception& e) {
- return Status(1, e.what());
+Status serializeQueryLogItemJSON(const QueryLogItem& i, std::string& json) {
+ pt::ptree tree;
+ auto status = serializeQueryLogItem(i, tree);
+ if (!status.ok()) {
+ return status;
}
+
+ std::ostringstream output;
+ pt::write_json(output, tree, false);
+ json = output.str();
return Status(0, "OK");
}
-Status deserializeHistoricalQueryResults(const pt::ptree& tree,
- HistoricalQueryResults& r) {
- try {
- for (const auto& v : tree.get_child("mostRecentResults")) {
- try {
- int execution = boost::lexical_cast<int>(v.first);
- r.mostRecentResults.first = execution;
- } catch (const boost::bad_lexical_cast& e) {
- return Status(1, e.what());
- }
-
- QueryData q;
- for (const auto& each : v.second) {
- Row row_;
- for (const auto& item : each.second) {
- row_[item.first] = item.second.get_value<std::string>();
- }
- q.push_back(row_);
- }
- r.mostRecentResults.second = q;
+Status deserializeQueryLogItem(const pt::ptree& tree, QueryLogItem& item) {
+ if (tree.count("diffResults") > 0) {
+ auto status =
+ deserializeDiffResults(tree.get_child("diffResults"), item.results);
+ if (!status.ok()) {
+ return status;
+ }
+ } else if (tree.count("snapshot") > 0) {
+ auto status =
+ deserializeQueryData(tree.get_child("snapshot"), item.snapshot_results);
+ if (!status.ok()) {
+ return status;
}
-
- return Status(0, "OK");
- } catch (const std::exception& e) {
- LOG(ERROR) << e.what();
- return Status(1, e.what());
}
-}
-Status deserializeHistoricalQueryResultsJSON(const std::string& json,
- HistoricalQueryResults& r) {
- pt::ptree tree;
- try {
- std::stringstream j;
- j << json;
- pt::read_json(j, tree);
- } catch (const std::exception& e) {
- return Status(1, e.what());
- }
- return deserializeHistoricalQueryResults(tree, r);
+ item.name = tree.get<std::string>("name", "");
+ item.identifier = tree.get<std::string>("hostIdentifier", "");
+ item.calendar_time = tree.get<std::string>("calendarTime", "");
+ item.time = tree.get<int>("unixTime", 0);
+ return Status(0, "OK");
}
-/////////////////////////////////////////////////////////////////////////////
-// ScheduledQueryLogItem - the representation of a log result occuring when a
-// scheduled query yields operating system state change.
-/////////////////////////////////////////////////////////////////////////////
-
-Status serializeScheduledQueryLogItem(const ScheduledQueryLogItem& i,
- boost::property_tree::ptree& tree) {
+Status deserializeQueryLogItemJSON(const std::string& json,
+ QueryLogItem& item) {
+ pt::ptree tree;
try {
- pt::ptree diffResults;
- auto diff_results_status = serializeDiffResults(i.diffResults, diffResults);
- if (!diff_results_status.ok()) {
- return diff_results_status;
- }
-
- tree.add_child("diffResults", diffResults);
- tree.put<std::string>("name", i.name);
- tree.put<std::string>("hostIdentifier", i.hostIdentifier);
- tree.put<std::string>("calendarTime", i.calendarTime);
- tree.put<int>("unixTime", i.unixTime);
- } catch (const std::exception& e) {
+ std::stringstream input;
+ input << json;
+ pt::read_json(input, tree);
+ } catch (const pt::json_parser::json_parser_error& e) {
return Status(1, e.what());
}
- return Status(0, "OK");
+ return deserializeQueryLogItem(tree, item);
}
-Status serializeEvent(const ScheduledQueryLogItem& item,
- const boost::property_tree::ptree& event,
- boost::property_tree::ptree& tree) {
+Status serializeEvent(const QueryLogItem& item,
+ const pt::ptree& event,
+ pt::ptree& tree) {
tree.put<std::string>("name", item.name);
- tree.put<std::string>("hostIdentifier", item.hostIdentifier);
- tree.put<std::string>("calendarTime", item.calendarTime);
- tree.put<int>("unixTime", item.unixTime);
+ tree.put<std::string>("hostIdentifier", item.identifier);
+ tree.put<std::string>("calendarTime", item.calendar_time);
+ tree.put<int>("unixTime", item.time);
pt::ptree columns;
for (auto& i : event) {
+ // Yield results as a "columns." map to avoid namespace collisions.
columns.put<std::string>(i.first, i.second.get_value<std::string>());
}
return Status(0, "OK");
}
-Status serializeScheduledQueryLogItemAsEvents(
- const ScheduledQueryLogItem& item, boost::property_tree::ptree& tree) {
- try {
- pt::ptree diff_results;
- auto status = serializeDiffResults(item.diffResults, diff_results);
- if (!status.ok()) {
- return status;
- }
+Status serializeQueryLogItemAsEvents(const QueryLogItem& i, pt::ptree& tree) {
+ pt::ptree diff_results;
+ auto status = serializeDiffResults(i.results, diff_results);
+ if (!status.ok()) {
+ return status;
+ }
- for (auto& i : diff_results) {
- for (auto& j : i.second) {
- pt::ptree event;
- serializeEvent(item, j.second, event);
- event.put<std::string>("action", i.first);
- tree.push_back(std::make_pair("", event));
- }
+ for (auto& action : diff_results) {
+ for (auto& row : action.second) {
+ pt::ptree event;
+ serializeEvent(i, row.second, event);
+ event.put<std::string>("action", action.first);
+ tree.push_back(std::make_pair("", event));
}
- } catch (const std::exception& e) {
- return Status(1, e.what());
}
-
return Status(0, "OK");
}
-Status serializeScheduledQueryLogItemAsEventsJSON(
- const ScheduledQueryLogItem& i, std::string& json) {
- try {
- pt::ptree tree;
- auto s = serializeScheduledQueryLogItemAsEvents(i, tree);
- if (!s.ok()) {
- return s;
- }
- std::ostringstream ss;
- for (auto& event : tree) {
- pt::write_json(ss, event.second, false);
- }
- json = ss.str();
- } catch (const std::exception& e) {
- return Status(1, e.what());
+Status serializeQueryLogItemAsEventsJSON(const QueryLogItem& i,
+ std::string& json) {
+ pt::ptree tree;
+ auto status = serializeQueryLogItemAsEvents(i, tree);
+ if (!status.ok()) {
+ return status;
}
- return Status(0, "OK");
-}
-Status serializeScheduledQueryLogItemJSON(const ScheduledQueryLogItem& i,
- std::string& json) {
- try {
- pt::ptree tree;
- auto s = serializeScheduledQueryLogItem(i, tree);
- if (!s.ok()) {
- return s;
- }
- std::ostringstream ss;
- pt::write_json(ss, tree, false);
- json = ss.str();
- } catch (const std::exception& e) {
- return Status(1, e.what());
+ std::ostringstream output;
+ for (auto& event : tree) {
+ pt::write_json(output, event.second, false);
}
+ json = output.str();
return Status(0, "OK");
}
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
DBHandle::getInstance()->getHandleForColumnFamily("foobartest");
}
+ void TearDown() { boost::filesystem::remove_all(kTestingDBHandlePath); }
+
public:
rocksdb::ColumnFamilyHandle* cfh_queries;
rocksdb::ColumnFamilyHandle* cfh_foobar;
}
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- google::InitGoogleLogging(argv[0]);
- int status = RUN_ALL_TESTS();
- boost::filesystem::remove_all(kTestingDBHandlePath);
- return status;
-}
--- /dev/null
+/*
+ * Copyright (c) 2014, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+#include <algorithm>
+#include <ctime>
+#include <deque>
+
+#include <boost/filesystem/operations.hpp>
+
+#include <gtest/gtest.h>
+
+#include <osquery/database/query.h>
+
+#include "osquery/core/test_util.h"
+
+const std::string kTestingQueryDBPath = "/tmp/rocksdb-osquery-querytests";
+
+namespace osquery {
+
+class QueryTests : public testing::Test {
+ public:
+ void SetUp() { db_ = DBHandle::getInstanceAtPath(kTestingQueryDBPath); }
+ void TearDown() { boost::filesystem::remove_all(kTestingQueryDBPath); }
+
+ public:
+ std::shared_ptr<DBHandle> db_;
+};
+
+TEST_F(QueryTests, test_get_column_family_name) {
+ auto query = getOsqueryScheduledQuery();
+ auto cf = Query("foobar", query);
+ EXPECT_EQ(cf.getQueryName(), "foobar");
+}
+
+TEST_F(QueryTests, test_get_query) {
+ auto query = getOsqueryScheduledQuery();
+ auto cf = Query("foobar", query);
+ EXPECT_EQ(cf.getQuery(), query.query);
+}
+
+TEST_F(QueryTests, test_get_interval) {
+ auto query = getOsqueryScheduledQuery();
+ auto cf = Query("foobar", query);
+ EXPECT_EQ(cf.getInterval(), query.interval);
+}
+
+TEST_F(QueryTests, test_private_members) {
+ auto query = getOsqueryScheduledQuery();
+ auto cf = Query("foobar", query);
+ EXPECT_EQ(cf.query_, query);
+}
+
+TEST_F(QueryTests, test_add_and_get_current_results) {
+ // Test adding a "current" set of results to a scheduled query instance.
+ auto query = getOsqueryScheduledQuery();
+ auto cf = Query("foobar", query);
+ auto status = cf.addNewResults(getTestDBExpectedResults(), db_);
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ(status.toString(), "OK");
+
+ // Simulate results from several schedule runs, calculate differentials.
+ for (auto result : getTestDBResultStream()) {
+ // Get the results from the previous query execution (from RocksDB).
+ QueryData previous_qd;
+ auto status = cf.getPreviousQueryResults(previous_qd, db_);
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ(status.toString(), "OK");
+
+ // Add the "current" results and output the differentials.
+ DiffResults dr;
+ auto s = cf.addNewResults(result.second, dr, true, db_);
+ EXPECT_TRUE(s.ok());
+
+ // Call the diffing utility directly.
+ DiffResults expected = diff(previous_qd, result.second);
+ EXPECT_EQ(dr, expected);
+
+ // After Query::addNewResults the previous results are now current.
+ QueryData qd;
+ cf.getPreviousQueryResults(qd, db_);
+ EXPECT_EQ(qd, result.second);
+ }
+}
+
+TEST_F(QueryTests, test_get_query_results) {
+ // Grab an expected set of query data and add it as the previous result.
+ auto encoded_qd = getSerializedQueryDataJSON();
+ auto query = getOsqueryScheduledQuery();
+ auto status = db_->Put(kQueries, "foobar", encoded_qd.first);
+ EXPECT_TRUE(status.ok());
+
+ // Use the Query retrieval API to check the now "previous" result.
+ QueryData previous_qd;
+ auto cf = Query("foobar", query);
+ status = cf.getPreviousQueryResults(previous_qd, db_);
+ EXPECT_TRUE(status.ok());
+}
+
+TEST_F(QueryTests, test_query_name_not_found_in_db) {
+ // Try to retrieve results from a query that has not executed.
+ QueryData previous_qd;
+ auto query = getOsqueryScheduledQuery();
+ auto cf = Query("not_a_real_query", query);
+ auto status = cf.getPreviousQueryResults(previous_qd, db_);
+ EXPECT_FALSE(status.ok());
+}
+
+TEST_F(QueryTests, test_is_query_name_in_database) {
+ auto query = getOsqueryScheduledQuery();
+ auto cf = Query("foobar", query);
+ auto encoded_qd = getSerializedQueryDataJSON();
+ auto status = db_->Put(kQueries, "foobar", encoded_qd.first);
+ EXPECT_TRUE(status.ok());
+ // Now test that the query name exists.
+ EXPECT_TRUE(cf.isQueryNameInDatabase(db_));
+}
+
+TEST_F(QueryTests, test_get_stored_query_names) {
+ auto query = getOsqueryScheduledQuery();
+ auto cf = Query("foobar", query);
+ auto encoded_qd = getSerializedQueryDataJSON();
+ auto status = db_->Put(kQueries, "foobar", encoded_qd.first);
+ EXPECT_TRUE(status.ok());
+
+ // Stored query names is a factory method included alongside every query.
+ // It will include the set of query names with existing "previous" results.
+ auto names = cf.getStoredQueryNames(db_);
+ auto in_vector = std::find(names.begin(), names.end(), "foobar");
+ EXPECT_NE(in_vector, names.end());
+}
+}
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
EXPECT_EQ(results.first, tree);
}
+TEST_F(ResultsTests, test_deserialize_row_json) {
+ auto results = getSerializedRow();
+ std::string input;
+ serializeRowJSON(results.second, input);
+
+ // Pull the serialized JSON back into a Row output container.
+ Row output;
+ auto s = deserializeRowJSON(input, output);
+ EXPECT_TRUE(s.ok());
+ // The output container should match the input row.
+ EXPECT_EQ(output, results.second);
+}
+
TEST_F(ResultsTests, test_serialize_query_data) {
auto results = getSerializedQueryData();
pt::ptree tree;
EXPECT_EQ(results.first, tree);
}
+TEST_F(ResultsTests, test_serialize_query_data_json) {
+ auto results = getSerializedQueryDataJSON();
+ std::string json;
+ auto s = serializeQueryDataJSON(results.second, json);
+ EXPECT_TRUE(s.ok());
+ EXPECT_EQ(s.toString(), "OK");
+ EXPECT_EQ(results.first, json);
+}
+
+TEST_F(ResultsTests, test_deserialize_query_data_json) {
+ auto results = getSerializedQueryDataJSON();
+
+ // Pull the serialized JSON back into a QueryData output container.
+ QueryData output;
+ auto s = deserializeQueryDataJSON(results.first, output);
+ EXPECT_TRUE(s.ok());
+ // The output container should match the input query data.
+ EXPECT_EQ(output, results.second);
+}
+
TEST_F(ResultsTests, test_serialize_diff_results) {
auto results = getSerializedDiffResults();
pt::ptree tree;
EXPECT_EQ(results.first, json);
}
-TEST_F(ResultsTests, test_serialize_historical_query_results) {
- auto results = getSerializedHistoricalQueryResults();
+TEST_F(ResultsTests, test_serialize_query_log_item) {
+ auto results = getSerializedQueryLogItem();
pt::ptree tree;
- auto s = serializeHistoricalQueryResults(results.second, tree);
+ auto s = serializeQueryLogItem(results.second, tree);
EXPECT_TRUE(s.ok());
EXPECT_EQ(s.toString(), "OK");
EXPECT_EQ(results.first, tree);
}
-TEST_F(ResultsTests, test_serialize_historical_query_results_json) {
- auto results = getSerializedHistoricalQueryResultsJSON();
+TEST_F(ResultsTests, test_serialize_query_log_item_json) {
+ auto results = getSerializedQueryLogItemJSON();
std::string json;
- auto s = serializeHistoricalQueryResultsJSON(results.second, json);
+ auto s = serializeQueryLogItemJSON(results.second, json);
EXPECT_TRUE(s.ok());
EXPECT_EQ(s.toString(), "OK");
EXPECT_EQ(results.first, json);
}
-TEST_F(ResultsTests, test_deserialize_historical_query_results) {
- auto results = getSerializedHistoricalQueryResults();
- HistoricalQueryResults r;
- auto s = deserializeHistoricalQueryResults(results.first, r);
- EXPECT_EQ(results.second, r);
- EXPECT_EQ(results.second.mostRecentResults, r.mostRecentResults);
- EXPECT_TRUE(s.ok());
- EXPECT_EQ(s.toString(), "OK");
-}
-
-TEST_F(ResultsTests, test_deserialize_historical_query_results_json) {
- auto results = getSerializedHistoricalQueryResultsJSON();
- HistoricalQueryResults r;
- auto s = deserializeHistoricalQueryResultsJSON(results.first, r);
- EXPECT_EQ(results.second, r);
- EXPECT_EQ(results.second.mostRecentResults, r.mostRecentResults);
- EXPECT_TRUE(s.ok());
- EXPECT_EQ(s.toString(), "OK");
-}
-
-TEST_F(ResultsTests, test_serialize_scheduled_query_log_item) {
- auto results = getSerializedScheduledQueryLogItem();
- pt::ptree tree;
- auto s = serializeScheduledQueryLogItem(results.second, tree);
- EXPECT_TRUE(s.ok());
- EXPECT_EQ(s.toString(), "OK");
- EXPECT_EQ(results.first, tree);
-}
+TEST_F(ResultsTests, test_deserialize_query_log_item_json) {
+ auto results = getSerializedQueryLogItemJSON();
-TEST_F(ResultsTests, test_serialize_scheduled_query_log_item_json) {
- auto results = getSerializedScheduledQueryLogItemJSON();
- std::string json;
- auto s = serializeScheduledQueryLogItemJSON(results.second, json);
+ // Pull the serialized JSON back into a QueryLogItem output container.
+ QueryLogItem output;
+ auto s = deserializeQueryLogItemJSON(results.first, output);
EXPECT_TRUE(s.ok());
- EXPECT_EQ(s.toString(), "OK");
- EXPECT_EQ(results.first, json);
+ // The output container should match the input query data.
+ EXPECT_EQ(output, results.second);
}
TEST_F(ResultsTests, test_unicode_to_ascii_conversion) {
EXPECT_EQ(q.size(), 2);
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- google::InitGoogleLogging(argv[0]);
- return RUN_ALL_TESTS();
-}
ADD_OSQUERY_LIBRARY(FALSE osquery_devtools printer.cpp)
-ADD_OSQUERY_TEST(FALSE osquery_printer_tests printer_tests.cpp)
+FILE(GLOB OSQUERY_DEVTOOLS_TESTS "tests/*.cpp")
+ADD_OSQUERY_TEST(FALSE ${OSQUERY_DEVTOOLS_TESTS})
#include <string>
#include <osquery/database/results.h>
+#include <osquery/flags.h>
namespace osquery {
+/// Show all tables and exit the shell.
+DECLARE_bool(L);
+/// Select all from a table an exit the shell.
+DECLARE_string(A);
+/// The shell may need to disable events for fast operations.
+DECLARE_bool(disable_events);
+
/**
* @brief Run an interactive SQL query shell.
*
#include <signal.h>
#include <stdio.h>
+#include <sys/time.h>
+#include <sys/resource.h>
#include <readline/readline.h>
#include <readline/history.h>
namespace osquery {
/// Define flags used by the shell. They are parsed by the drop-in shell.
-SHELL_FLAG(bool, bail, false, "stop after hitting an error");
-SHELL_FLAG(bool, batch, false, "force batch I/O");
-SHELL_FLAG(bool, column, false, "set output mode to 'column'");
-SHELL_FLAG(bool, csv, false, "set output mode to 'csv'");
-SHELL_FLAG(bool, json, false, "set output mode to 'json'");
-SHELL_FLAG(bool, echo, false, "print commands before execution");
-SHELL_FLAG(bool, explain, false, "Explain each query by default");
-SHELL_FLAG(bool, header, true, "turn headers on or off");
-SHELL_FLAG(bool, html, false, "set output mode to HTML");
-SHELL_FLAG(bool, interactive, false, "force interactive I/O");
-SHELL_FLAG(bool, line, false, "set output mode to 'line'");
-SHELL_FLAG(bool, list, false, "set output mode to 'list'");
-SHELL_FLAG(string,
- nullvalue,
- "",
- "set text string for NULL values. Default ''");
-SHELL_FLAG(string, separator, "|", "set output field separator. Default: '|'");
-SHELL_FLAG(bool, stats, false, "print memory stats before each finalize");
+SHELL_FLAG(bool, csv, false, "Set output mode to 'csv'");
+SHELL_FLAG(bool, json, false, "Set output mode to 'json'");
+SHELL_FLAG(bool, line, false, "Set output mode to 'line'");
+SHELL_FLAG(bool, list, false, "Set output mode to 'list'");
+SHELL_FLAG(string, nullvalue, "", "Set string for NULL values, default ''");
+SHELL_FLAG(string, separator, "|", "Set output field separator, default '|'");
+
+/// Define short-hand shell switches.
+SHELL_FLAG(bool, L, false, "List all table names");
+SHELL_FLAG(string, A, "", "Select all from a table");
}
/* Make sure isatty() has a prototype.
return t;
}
-#include <sys/time.h>
-#include <sys/resource.h>
-
/* Saved resource information for the beginning of an operation */
static struct rusage sBegin; /* CPU time at start */
static sqlite3_int64 iBegin; /* Wall-clock time at start */
static int stdin_is_interactive = 1;
/*
-** The following is the open SQLite database. We make a pointer
-** to this database a static variable so that it can be accessed
-** by the SIGINT handler to interrupt database processing.
-*/
-static sqlite3 *db = 0;
-
-/*
** True if an interrupt (Control-C) has been received.
*/
static volatile int seenInterrupt = 0;
** state and mode information.
*/
struct callback_data {
- sqlite3 *db; /* The database */
int echoOn; /* True to echo input commands */
int autoEQP; /* Run EXPLAIN QUERY PLAN prior to seach SQL statement */
- int statsOn; /* True to display memory stats before each finalize */
int cnt; /* Number of records displayed so far */
FILE *out; /* Write results here */
FILE *traceOut; /* Output for sqlite3_trace() */
#define MODE_Column 1 /* One record per line in neat columns */
#define MODE_List 2 /* One record per line with a separator */
#define MODE_Semi 3 /* Same as MODE_List but append ";" to each line */
-#define MODE_Html 4 /* Generate an XHTML table */
-#define MODE_Tcl 6 /* Generate ANSI-C or TCL quoted elements */
#define MODE_Csv 7 /* Quote strings, numbers are plain */
-#define MODE_Explain 8 /* Like MODE_Column, but do not truncate data */
#define MODE_Pretty 9 /* Pretty print the SQL results */
static const char *modeDescr[] = {
"column",
"list",
"semi",
- "html",
- "tcl",
"csv",
- "explain",
"pretty",
};
}
/*
-** Output the given string with characters that are special to
-** HTML escaped.
-*/
-static void output_html_string(FILE *out, const char *z) {
- int i;
- if (z == 0)
- z = "";
- while (*z) {
- for (i = 0; z[i] && z[i] != '<' && z[i] != '&' && z[i] != '>' &&
- z[i] != '\"' && z[i] != '\'';
- i++) {
- }
- if (i > 0) {
- fprintf(out, "%.*s", i, z);
- }
- if (z[i] == '<') {
- fprintf(out, "<");
- } else if (z[i] == '&') {
- fprintf(out, "&");
- } else if (z[i] == '>') {
- fprintf(out, ">");
- } else if (z[i] == '\"') {
- fprintf(out, """);
- } else if (z[i] == '\'') {
- fprintf(out, "'");
- } else {
- break;
- }
- z += i + 1;
- }
-}
-
-/*
** If a field contains any character identified by a 1 in the following
** array, then the string must be quoted for CSV.
*/
static void interrupt_handler(int NotUsed) {
UNUSED_PARAMETER(NotUsed);
seenInterrupt = 1;
- if (db)
- sqlite3_interrupt(db);
}
#endif
}
break;
}
- case MODE_Explain:
case MODE_Column: {
if (p->cnt++ == 0) {
for (i = 0; i < nArg; i++) {
} else {
w = 10;
}
- if (p->mode == MODE_Explain && azArg[i] && strlen30(azArg[i]) > w) {
- w = strlen30(azArg[i]);
- }
if (i == 1 && p->aiIndent && p->pStmt) {
if (p->iIndent < p->nIndent) {
fprintf(p->out, "%*.s", p->aiIndent[p->iIndent], "");
}
break;
}
- case MODE_Html: {
- if (p->cnt++ == 0 && p->showHeader) {
- fprintf(p->out, "<TR>");
- for (i = 0; i < nArg; i++) {
- fprintf(p->out, "<TH>");
- output_html_string(p->out, azCol[i]);
- fprintf(p->out, "</TH>\n");
- }
- fprintf(p->out, "</TR>\n");
- }
- if (azArg == 0)
- break;
- fprintf(p->out, "<TR>");
- for (i = 0; i < nArg; i++) {
- fprintf(p->out, "<TD>");
- output_html_string(p->out, azArg[i] ? azArg[i] : p->nullvalue);
- fprintf(p->out, "</TD>\n");
- }
- fprintf(p->out, "</TR>\n");
- break;
- }
- case MODE_Tcl: {
- if (p->cnt++ == 0 && p->showHeader) {
- for (i = 0; i < nArg; i++) {
- output_c_string(p->out, azCol[i] ? azCol[i] : "");
- if (i < nArg - 1)
- fprintf(p->out, "%s", p->separator);
- }
- fprintf(p->out, "\n");
- }
- if (azArg == 0)
- break;
- for (i = 0; i < nArg; i++) {
- output_c_string(p->out, azArg[i] ? azArg[i] : p->nullvalue);
- if (i < nArg - 1)
- fprintf(p->out, "%s", p->separator);
- }
- fprintf(p->out, "\n");
- break;
- }
case MODE_Csv: {
if (p->cnt++ == 0 && p->showHeader) {
for (i = 0; i < nArg; i++) {
}
/*
-** Display memory stats.
-*/
-static int display_stats(sqlite3 *db, /* Database to query */
- struct callback_data *pArg, /* Pointer to struct
- callback_data */
- int bReset /* True to reset the stats */
- ) {
- int iCur;
- int iHiwtr;
-
- if (pArg && pArg->out) {
-
- iHiwtr = iCur = -1;
- sqlite3_status(SQLITE_STATUS_MEMORY_USED, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out,
- "Memory Used: %d (max %d) bytes\n",
- iCur,
- iHiwtr);
- iHiwtr = iCur = -1;
- sqlite3_status(SQLITE_STATUS_MALLOC_COUNT, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out,
- "Number of Outstanding Allocations: %d (max %d)\n",
- iCur,
- iHiwtr);
- /*
- ** Not currently used by the CLI.
- ** iHiwtr = iCur = -1;
- ** sqlite3_status(SQLITE_STATUS_PAGECACHE_USED, &iCur, &iHiwtr, bReset);
- ** fprintf(pArg->out, "Number of Pcache Pages Used: %d (max %d)
- *pages\n", iCur, iHiwtr);
- */
- iHiwtr = iCur = -1;
- sqlite3_status(SQLITE_STATUS_PAGECACHE_OVERFLOW, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out,
- "Number of Pcache Overflow Bytes: %d (max %d) bytes\n",
- iCur,
- iHiwtr);
- /*
- ** Not currently used by the CLI.
- ** iHiwtr = iCur = -1;
- ** sqlite3_status(SQLITE_STATUS_SCRATCH_USED, &iCur, &iHiwtr, bReset);
- ** fprintf(pArg->out, "Number of Scratch Allocations Used: %d (max
- *%d)\n", iCur, iHiwtr);
- */
- iHiwtr = iCur = -1;
- sqlite3_status(SQLITE_STATUS_SCRATCH_OVERFLOW, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out,
- "Number of Scratch Overflow Bytes: %d (max %d) bytes\n",
- iCur,
- iHiwtr);
- iHiwtr = iCur = -1;
- sqlite3_status(SQLITE_STATUS_MALLOC_SIZE, &iCur, &iHiwtr, bReset);
- fprintf(
- pArg->out, "Largest Allocation: %d bytes\n", iHiwtr);
- iHiwtr = iCur = -1;
- sqlite3_status(SQLITE_STATUS_PAGECACHE_SIZE, &iCur, &iHiwtr, bReset);
- fprintf(
- pArg->out, "Largest Pcache Allocation: %d bytes\n", iHiwtr);
- iHiwtr = iCur = -1;
- sqlite3_status(SQLITE_STATUS_SCRATCH_SIZE, &iCur, &iHiwtr, bReset);
- fprintf(
- pArg->out, "Largest Scratch Allocation: %d bytes\n", iHiwtr);
-#ifdef YYTRACKMAXSTACKDEPTH
- iHiwtr = iCur = -1;
- sqlite3_status(SQLITE_STATUS_PARSER_STACK, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out,
- "Deepest Parser Stack: %d (max %d)\n",
- iCur,
- iHiwtr);
-#endif
- }
-
- if (pArg && pArg->out && db) {
- iHiwtr = iCur = -1;
- sqlite3_db_status(
- db, SQLITE_DBSTATUS_LOOKASIDE_USED, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out,
- "Lookaside Slots Used: %d (max %d)\n",
- iCur,
- iHiwtr);
- sqlite3_db_status(
- db, SQLITE_DBSTATUS_LOOKASIDE_HIT, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Successful lookaside attempts: %d\n", iHiwtr);
- sqlite3_db_status(
- db, SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Lookaside failures due to size: %d\n", iHiwtr);
- sqlite3_db_status(
- db, SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Lookaside failures due to OOM: %d\n", iHiwtr);
- iHiwtr = iCur = -1;
- sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Pager Heap Usage: %d bytes\n", iCur);
- iHiwtr = iCur = -1;
- sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_HIT, &iCur, &iHiwtr, 1);
- fprintf(pArg->out, "Page cache hits: %d\n", iCur);
- iHiwtr = iCur = -1;
- sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_MISS, &iCur, &iHiwtr, 1);
- fprintf(pArg->out, "Page cache misses: %d\n", iCur);
- iHiwtr = iCur = -1;
- sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_WRITE, &iCur, &iHiwtr, 1);
- fprintf(pArg->out, "Page cache writes: %d\n", iCur);
- iHiwtr = iCur = -1;
- sqlite3_db_status(db, SQLITE_DBSTATUS_SCHEMA_USED, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Schema Heap Usage: %d bytes\n", iCur);
- iHiwtr = iCur = -1;
- sqlite3_db_status(db, SQLITE_DBSTATUS_STMT_USED, &iCur, &iHiwtr, bReset);
- fprintf(pArg->out, "Statement Heap/Lookaside Usage: %d bytes\n", iCur);
- }
-
- if (pArg && pArg->out && db && pArg->pStmt) {
- iCur = sqlite3_stmt_status(
- pArg->pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, bReset);
- fprintf(pArg->out, "Fullscan Steps: %d\n", iCur);
- iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_SORT, bReset);
- fprintf(pArg->out, "Sort Operations: %d\n", iCur);
- iCur =
- sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_AUTOINDEX, bReset);
- fprintf(pArg->out, "Autoindex Inserts: %d\n", iCur);
- iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_VM_STEP, bReset);
- fprintf(pArg->out, "Virtual Machine Steps: %d\n", iCur);
- }
-
- return 0;
-}
-
-/*
-** Parameter azArray points to a zero-terminated array of strings. zStr
-** points to a single nul-terminated string. Return non-zero if zStr
-** is equal, according to strcmp(), to any of the strings in the array.
-** Otherwise, return zero.
-*/
-static int str_in_array(const char *zStr, const char **azArray) {
- int i;
- for (i = 0; azArray[i]; i++) {
- if (0 == strcmp(zStr, azArray[i]))
- return 1;
- }
- return 0;
-}
-
-/*
-** If compiled statement pSql appears to be an EXPLAIN statement, allocate
-** and populate the callback_data.aiIndent[] array with the number of
-** spaces each opcode should be indented before it is output.
-**
-** The indenting rules are:
-**
-** * For each "Next", "Prev", "VNext" or "VPrev" instruction, indent
-** all opcodes that occur between the p2 jump destination and the opcode
-** itself by 2 spaces.
-**
-** * For each "Goto", if the jump destination is earlier in the program
-** and ends on one of:
-** Yield SeekGt SeekLt RowSetRead Rewind
-** or if the P1 parameter is one instead of zero,
-** then indent all opcodes between the earlier instruction
-** and "Goto" by 2 spaces.
-*/
-static void explain_data_prepare(struct callback_data *p, sqlite3_stmt *pSql) {
- const char *zSql; /* The text of the SQL statement */
- const char *z; /* Used to check if this is an EXPLAIN */
- int *abYield = 0; /* True if op is an OP_Yield */
- int nAlloc = 0; /* Allocated size of p->aiIndent[], abYield */
- int iOp; /* Index of operation in p->aiIndent[] */
-
- const char *azNext[] = {"Next", "Prev", "VPrev", "VNext", "SorterNext", 0};
- const char *azYield[] = {
- "Yield", "SeekLt", "SeekGt", "RowSetRead", "Rewind", 0};
- const char *azGoto[] = {"Goto", 0};
-
- /* Try to figure out if this is really an EXPLAIN statement. If this
- ** cannot be verified, return early. */
- zSql = sqlite3_sql(pSql);
- if (zSql == 0)
- return;
- for (z = zSql;
- *z == ' ' || *z == '\t' || *z == '\n' || *z == '\f' || *z == '\r';
- z++)
- ;
- if (sqlite3_strnicmp(z, "explain", 7))
- return;
-
- for (iOp = 0; SQLITE_ROW == sqlite3_step(pSql); iOp++) {
- int i;
- int iAddr = sqlite3_column_int(pSql, 0);
- const char *zOp = (const char *)sqlite3_column_text(pSql, 1);
-
- /* Set p2 to the P2 field of the current opcode. Then, assuming that
- ** p2 is an instruction address, set variable p2op to the index of that
- ** instruction in the aiIndent[] array. p2 and p2op may be different if
- ** the current instruction is part of a sub-program generated by an
- ** SQL trigger or foreign key. */
- int p2 = sqlite3_column_int(pSql, 3);
- int p2op = (p2 + (iOp - iAddr));
-
- /* Grow the p->aiIndent array as required */
- if (iOp >= nAlloc) {
- nAlloc += 100;
- p->aiIndent = (int *)sqlite3_realloc(p->aiIndent, nAlloc * sizeof(int));
- abYield = (int *)sqlite3_realloc(abYield, nAlloc * sizeof(int));
- }
- abYield[iOp] = str_in_array(zOp, azYield);
- p->aiIndent[iOp] = 0;
- p->nIndent = iOp + 1;
-
- if (str_in_array(zOp, azNext)) {
- for (i = p2op; i < iOp; i++)
- p->aiIndent[i] += 2;
- }
- if (str_in_array(zOp, azGoto) && p2op < p->nIndent &&
- (abYield[p2op] || sqlite3_column_int(pSql, 2))) {
- for (i = p2op + 1; i < iOp; i++)
- p->aiIndent[i] += 2;
- }
- }
-
- p->iIndent = 0;
- sqlite3_free(abYield);
- sqlite3_reset(pSql);
-}
-
-/*
-** Free the array allocated by explain_data_prepare().
-*/
-static void explain_data_delete(struct callback_data *p) {
- sqlite3_free(p->aiIndent);
- p->aiIndent = 0;
- p->nIndent = 0;
- p->iIndent = 0;
-}
-
-/*
** Execute a statement or set of statements. Print
** any result rows/columns depending on the current mode
** set via the supplied callback.
** and callback data argument.
*/
static int shell_exec(
- sqlite3 *db, /* An open database */
const char *zSql, /* SQL to be evaluated */
int (*xCallback)(
void *, int, char **, char **, int *), /* Callback function */
struct callback_data *pArg, /* Pointer to struct callback_data */
char **pzErrMsg /* Error msg written here */
) {
- sqlite3_stmt *pStmt = NULL; /* Statement to execute. */
+ // Grab a lock on the managed DB instance.
+ auto dbc = osquery::SQLiteDBManager::get();
+ auto db = dbc.db();
+
+ sqlite3_stmt *pStmt = nullptr; /* Statement to execute. */
int rc = SQLITE_OK; /* Return Code */
int rc2;
const char *zLeftover; /* Tail of unprocessed SQL */
if (pzErrMsg) {
- *pzErrMsg = NULL;
+ *pzErrMsg = nullptr;
}
while (zSql[0] && (SQLITE_OK == rc)) {
fprintf(pArg->out, "%s\n", zStmtSql ? zStmtSql : zSql);
}
- /* Show the EXPLAIN QUERY PLAN if .eqp is on */
- if (pArg && pArg->autoEQP) {
- sqlite3_stmt *pExplain;
- char *zEQP =
- sqlite3_mprintf("EXPLAIN QUERY PLAN %s", sqlite3_sql(pStmt));
- rc = sqlite3_prepare_v2(db, zEQP, -1, &pExplain, 0);
- if (rc == SQLITE_OK) {
- while (sqlite3_step(pExplain) == SQLITE_ROW) {
- fprintf(pArg->out, "--EQP-- %d,", sqlite3_column_int(pExplain, 0));
- fprintf(pArg->out, "%d,", sqlite3_column_int(pExplain, 1));
- fprintf(pArg->out, "%d,", sqlite3_column_int(pExplain, 2));
- fprintf(pArg->out, "%s\n", sqlite3_column_text(pExplain, 3));
- }
- }
- sqlite3_finalize(pExplain);
- sqlite3_free(zEQP);
- }
-
- /* Output TESTCTRL_EXPLAIN text of requested */
- if (pArg && pArg->mode == MODE_Explain) {
- const char *zExplain = 0;
- sqlite3_test_control(SQLITE_TESTCTRL_EXPLAIN_STMT, pStmt, &zExplain);
- if (zExplain && zExplain[0]) {
- fprintf(pArg->out, "%s", zExplain);
- }
- }
-
- /* If the shell is currently in ".explain" mode, gather the extra
- ** data required to add indents to the output.*/
- if (pArg && pArg->mode == MODE_Explain) {
- explain_data_prepare(pArg, pStmt);
- }
-
/* perform the first step. this will tell us if we
** have a result set or not and how wide it is.
*/
}
}
- explain_data_delete(pArg);
-
- /* print usage stats if stats on */
- if (pArg && pArg->statsOn) {
- display_stats(db, pArg, 0);
- }
-
/* Finalize the statement just executed. If this fails, save a
** copy of the error message. Otherwise, set zSql to point to the
** next statement to execute. */
/* clear saved stmt handle */
if (pArg) {
- pArg->pStmt = NULL;
+ pArg->pStmt = nullptr;
}
}
} /* end while */
- if (pArg->mode == MODE_Pretty) {
+ if (pArg && pArg->mode == MODE_Pretty) {
if (osquery::FLAGS_json) {
osquery::jsonPrint(pArg->prettyPrint->results);
} else {
** Text of a help message
*/
static char zHelp[] =
- ".bail ON|OFF Stop after hitting an error. Default OFF\n"
- ".echo ON|OFF Turn command echo on or off\n"
- ".exit Exit this program\n"
- ".explain ?ON|OFF? Turn output mode suitable for EXPLAIN on or off.\n"
- " With no args, it turns EXPLAIN on.\n"
- ".header(s) ON|OFF Turn display of headers on or off\n"
- ".help Show this message\n"
- ".indices ?TABLE? Show names of all indices\n"
- " If TABLE specified, only show indices for "
- "tables\n"
- " matching LIKE pattern TABLE.\n"
- ".mode MODE ?TABLE? Set output mode where MODE is one of:\n"
- " csv Comma-separated values\n"
- " column Left-aligned columns. (See .width)\n"
- " html HTML <table> code\n"
- " line One value per line\n"
- " list Values delimited by .separator string\n"
- " pretty Pretty printed SQL results\n"
- " tabs Tab-separated values\n"
- " tcl TCL list elements\n"
- ".nullvalue STRING Use STRING in place of NULL values\n"
- ".print STRING... Print literal STRING\n"
- ".quit Exit this program\n"
- ".schema ?TABLE? Show the CREATE statements\n"
- " If TABLE specified, only show tables matching\n"
- " LIKE pattern TABLE.\n"
- ".separator STRING Change separator used by output mode and .import\n"
- ".show Show the current values for various settings\n"
- ".stats ON|OFF Turn stats on or off\n"
- ".tables ?TABLE? List names of tables\n"
- " If TABLE specified, only list tables matching\n"
- " LIKE pattern TABLE.\n"
- ".trace FILE|off Output each SQL statement as it is run\n"
- ".width NUM1 NUM2 ... Set column widths for \"column\" mode\n";
+ "Welcome to the osquery shell. Please explore your OS!\n"
+ "You are connected to a transient 'in-memory' virtual database.\n"
+ "\n"
+ ".all [TABLE] Select all from a table\n"
+ ".bail ON|OFF Stop after hitting an error; default OFF\n"
+ ".echo ON|OFF Turn command echo on or off\n"
+ ".exit Exit this program\n"
+ ".header(s) ON|OFF Turn display of headers on or off\n"
+ ".help Show this message\n"
+ ".indices [TABLE] Show names of all indices\n"
+ ".mode MODE Set output mode where MODE is one of:\n"
+ " csv Comma-separated values\n"
+ " column Left-aligned columns. (See .width)\n"
+ " line One value per line\n"
+ " list Values delimited by .separator string\n"
+ " pretty Pretty printed SQL results\n"
+ ".nullvalue STR Use STRING in place of NULL values\n"
+ ".print STR... Print literal STRING\n"
+ ".quit Exit this program\n"
+ ".schema [TABLE] Show the CREATE statements\n"
+ ".separator STR Change separator used by output mode and .import\n"
+ ".show Show the current values for various settings\n"
+ ".tables [TABLE] List names of tables\n"
+ ".trace FILE|off Output each SQL statement as it is run\n"
+ ".width [NUM1]+ Set column widths for \"column\" mode\n";
static char zTimerHelp[] =
- ".timer ON|OFF Turn the CPU timer measurement on or off\n";
+ ".timer ON|OFF Turn the CPU timer measurement on or off\n";
/* Forward reference */
static int process_input(struct callback_data *p, FILE *in);
}
/*
-** A routine for handling output from sqlite3_trace().
-*/
-static void sql_trace_callback(void *pArg, const char *z) {
- FILE *f = (FILE *)pArg;
- if (f)
- fprintf(f, "%s\n", z);
-}
-
-/*
** If an input line begins with "." then invoke this routine to
** process that line.
**
int rc = 0;
char *azArg[50];
+ // A meta command may act on the database, grab a lock and instance.
+ auto dbc = osquery::SQLiteDBManager::get();
+ auto db = dbc.db();
+
/* Parse the input line into tokens.
*/
while (zLine[i] && nArg < ArraySize(azArg)) {
return 0; /* no tokens, no error */
n = strlen30(azArg[0]);
c = azArg[0][0];
- if (c == 'b' && n >= 3 && strncmp(azArg[0], "bail", n) == 0 &&
+ if (c == 'a' && strncmp(azArg[0], "all", n) == 0 && nArg == 2) {
+ struct callback_data data;
+ memcpy(&data, p, sizeof(data));
+ auto query = std::string("SELECT * FROM ") + azArg[1];
+ rc = shell_exec(query.c_str(), shell_callback, &data, nullptr);
+ if (rc != SQLITE_OK) {
+ fprintf(stderr, "Error querying table: %s\n", azArg[1]);
+ }
+ } else if (c == 'b' && n >= 3 && strncmp(azArg[0], "bail", n) == 0 &&
nArg > 1 && nArg < 3) {
bail_on_error = booleanValue(azArg[1]);
} else if (c == 'e' && strncmp(azArg[0], "echo", n) == 0 && nArg > 1 &&
nArg < 3) {
p->echoOn = booleanValue(azArg[1]);
- } else if (c == 'e' && strncmp(azArg[0], "eqp", n) == 0 && nArg > 1 &&
- nArg < 3) {
- p->autoEQP = booleanValue(azArg[1]);
} else if (c == 'e' && strncmp(azArg[0], "exit", n) == 0) {
if (nArg > 1 && (rc = (int)integerValue(azArg[1])) != 0)
exit(rc);
rc = 2;
- } else if (c == 'e' && strncmp(azArg[0], "explain", n) == 0 && nArg < 3) {
- int val = nArg >= 2 ? booleanValue(azArg[1]) : 1;
- if (val == 1) {
- if (!p->explainPrev.valid) {
- p->explainPrev.valid = 1;
- p->explainPrev.mode = p->mode;
- p->explainPrev.showHeader = p->showHeader;
- memcpy(p->explainPrev.colWidth, p->colWidth, sizeof(p->colWidth));
- }
- /* We could put this code under the !p->explainValid
- ** condition so that it does not execute if we are already in
- ** explain mode. However, always executing it allows us an easy
- ** was to reset to explain mode in case the user previously
- ** did an .explain followed by a .width, .mode or .header
- ** command.
- */
- p->mode = MODE_Explain;
- p->showHeader = 1;
- memset(p->colWidth, 0, sizeof(p->colWidth));
- p->colWidth[0] = 4; /* addr */
- p->colWidth[1] = 13; /* opcode */
- p->colWidth[2] = 4; /* P1 */
- p->colWidth[3] = 4; /* P2 */
- p->colWidth[4] = 4; /* P3 */
- p->colWidth[5] = 13; /* P4 */
- p->colWidth[6] = 2; /* P5 */
- p->colWidth[7] = 13; /* Comment */
- } else if (p->explainPrev.valid) {
- p->explainPrev.valid = 0;
- p->mode = p->explainPrev.mode;
- p->showHeader = p->explainPrev.showHeader;
- memcpy(p->colWidth, p->explainPrev.colWidth, sizeof(p->colWidth));
- }
} else if (c == 'h' && (strncmp(azArg[0], "header", n) == 0 ||
strncmp(azArg[0], "headers", n) == 0) &&
nArg > 1 && nArg < 3) {
data.showHeader = 0;
data.mode = MODE_List;
if (nArg == 1) {
- rc = sqlite3_exec(p->db,
+ rc = sqlite3_exec(db,
"SELECT name FROM sqlite_master "
"WHERE type='index' AND name NOT LIKE 'sqlite_%' "
"UNION ALL "
&zErrMsg);
} else {
zShellStatic = azArg[1];
- rc = sqlite3_exec(p->db,
+ rc = sqlite3_exec(db,
"SELECT name FROM sqlite_master "
"WHERE type='index' AND tbl_name LIKE shellstatic() "
"UNION ALL "
p->mode = MODE_List;
} else if (n2 == 6 && strncmp(azArg[1], "pretty", n2) == 0) {
p->mode = MODE_Pretty;
- } else if (n2 == 4 && strncmp(azArg[1], "html", n2) == 0) {
- p->mode = MODE_Html;
- } else if (n2 == 3 && strncmp(azArg[1], "tcl", n2) == 0) {
- p->mode = MODE_Tcl;
- sqlite3_snprintf(sizeof(p->separator), p->separator, " ");
} else if (n2 == 3 && strncmp(azArg[1], "csv", n2) == 0) {
p->mode = MODE_Csv;
sqlite3_snprintf(sizeof(p->separator), p->separator, ",");
- } else if (n2 == 4 && strncmp(azArg[1], "tabs", n2) == 0) {
- p->mode = MODE_List;
- sqlite3_snprintf(sizeof(p->separator), p->separator, "\t");
} else {
fprintf(stderr,
"Error: mode should be one of: "
rc = SQLITE_OK;
} else {
zShellStatic = azArg[1];
- rc = sqlite3_exec(p->db,
+ rc = sqlite3_exec(db,
"SELECT sql FROM "
" (SELECT sql sql, type type, tbl_name tbl_name, "
"name name, rowid x"
}
} else {
rc = sqlite3_exec(
- p->db,
+ db,
"SELECT sql FROM "
" (SELECT sql sql, type type, tbl_name tbl_name, name name, rowid x"
" FROM sqlite_master UNION ALL"
} else if (c == 's' && strncmp(azArg[0], "show", n) == 0 && nArg == 1) {
int i;
fprintf(p->out, "%9.9s: %s\n", "echo", p->echoOn ? "on" : "off");
- fprintf(p->out, "%9.9s: %s\n", "eqp", p->autoEQP ? "on" : "off");
- fprintf(
- p->out, "%9.9s: %s\n", "explain", p->explainPrev.valid ? "on" : "off");
fprintf(p->out, "%9.9s: %s\n", "headers", p->showHeader ? "on" : "off");
fprintf(p->out, "%9.9s: %s\n", "mode", modeDescr[p->mode]);
fprintf(p->out, "%9.9s: ", "nullvalue");
fprintf(p->out, "%9.9s: ", "separator");
output_c_string(p->out, p->separator);
fprintf(p->out, "\n");
- fprintf(p->out, "%9.9s: %s\n", "stats", p->statsOn ? "on" : "off");
fprintf(p->out, "%9.9s: ", "width");
for (i = 0; i < (int)ArraySize(p->colWidth) && p->colWidth[i] != 0; i++) {
fprintf(p->out, "%d ", p->colWidth[i]);
}
fprintf(p->out, "\n");
- } else if (c == 's' && strncmp(azArg[0], "stats", n) == 0 && nArg > 1 &&
- nArg < 3) {
- p->statsOn = booleanValue(azArg[1]);
} else if (c == 't' && n > 1 && strncmp(azArg[0], "tables", n) == 0 &&
nArg < 3) {
sqlite3_stmt *pStmt;
int nRow, nAlloc;
char *zSql = 0;
int ii;
- rc = sqlite3_prepare_v2(p->db, "PRAGMA database_list", -1, &pStmt, 0);
+ rc = sqlite3_prepare_v2(db, "PRAGMA database_list", -1, &pStmt, 0);
if (rc)
return rc;
zSql = sqlite3_mprintf(
}
sqlite3_finalize(pStmt);
zSql = sqlite3_mprintf("%z ORDER BY 1", zSql);
- rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
+ rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
sqlite3_free(zSql);
if (rc)
return rc;
sqlite3_free(azResult);
} else if (c == 't' && n > 4 && strncmp(azArg[0], "timeout", n) == 0 &&
nArg == 2) {
- sqlite3_busy_timeout(p->db, (int)integerValue(azArg[1]));
+ sqlite3_busy_timeout(db, (int)integerValue(azArg[1]));
} else if (HAS_TIMER && c == 't' && n >= 5 &&
strncmp(azArg[0], "timer", n) == 0 && nArg == 2) {
enableTimer = booleanValue(azArg[1]);
} else if (c == 't' && strncmp(azArg[0], "trace", n) == 0 && nArg > 1) {
output_file_close(p->traceOut);
p->traceOut = output_file_open(azArg[1]);
-#if !defined(SQLITE_OMIT_TRACE) && !defined(SQLITE_OMIT_FLOATING_POINT)
- if (p->traceOut == 0) {
- sqlite3_trace(p->db, 0, 0);
- } else {
- sqlite3_trace(p->db, sql_trace_callback, p->traceOut);
- }
-#endif
} else if (c == 'v' && strncmp(azArg[0], "version", n) == 0) {
- fprintf(p->out, "osquery %s\n", TEXT(OSQUERY_VERSION).c_str());
+ fprintf(p->out, "osquery %s\n", TEXT(OSQUERY_VERSION).c_str());
fprintf(p->out,
"SQLite %s %s\n" /*extra-version-info*/,
sqlite3_libversion(),
*/
static int line_contains_semicolon(const char *z, int N) {
int i;
+ if (z == nullptr) {
+ return 0;
+ }
+
for (i = 0; i < N; i++) {
if (z[i] == ';')
return 1;
int i;
for (i = 0; zLine[i] && IsSpace(zLine[i]); i++) {
}
- assert(nAlloc > 0 && zSql != 0);
- memcpy(zSql, zLine + i, nLine + 1 - i);
+ assert(nAlloc > 0 && zSql != nullptr);
+ if (zSql != nullptr) {
+ memcpy(zSql, zLine + i, nLine + 1 - i);
+ }
startline = lineno;
nSql = nLine - i;
} else {
sqlite3_complete(zSql)) {
p->cnt = 0;
BEGIN_TIMER;
- rc = shell_exec(p->db, zSql, shell_callback, p, &zErrMsg);
+ rc = shell_exec(zSql, shell_callback, p, &zErrMsg);
END_TIMER;
if (rc || zErrMsg) {
char zPrefix[100];
fprintf(stderr, "%s %s\n", zPrefix, zErrMsg);
sqlite3_free(zErrMsg);
zErrMsg = 0;
- } else {
- fprintf(stderr, "%s %s\n", zPrefix, sqlite3_errmsg(p->db));
}
errCnt++;
}
struct callback_data data;
main_init(&data);
- // Create and hold a DB instance that the registry can attach.
- auto dbc = SQLiteDBManager::get();
- db = dbc.db();
- data.db = db;
-
- // Add some shell-specific functions to the instance.
- sqlite3_create_function(
- db, "shellstatic", 0, SQLITE_UTF8, 0, shellstaticFunc, 0, 0);
+ {
+ // Hold the manager connection instance again in callbacks.
+ auto dbc = SQLiteDBManager::get();
+ // Add some shell-specific functions to the instance.
+ sqlite3_create_function(
+ dbc.db(), "shellstatic", 0, SQLITE_UTF8, 0, shellstaticFunc, 0, 0);
+ }
Argv0 = argv[0];
stdin_is_interactive = isatty(0);
// SQLite: Make sure we have a valid signal handler early
signal(SIGINT, interrupt_handler);
- if (FLAGS_batch) {
- // SQLite: Need to check for batch mode here to so we can avoid printing
- // informational messages (like from process_sqliterc) before we do the
- // actual processing of arguments later in a second pass.
- stdin_is_interactive = 0;
- }
-
int warnInmemoryDb = 1;
data.zDbFilename = ":memory:";
data.out = stdout;
// Set modes and settings from CLI flags.
- if (FLAGS_html) {
- data.mode = MODE_Html;
- } else if (FLAGS_list) {
+ if (FLAGS_list) {
data.mode = MODE_List;
} else if (FLAGS_line) {
data.mode = MODE_Line;
- } else if (FLAGS_column) {
- data.mode = MODE_Column;
} else if (FLAGS_csv) {
- data.mode = FLAGS_csv;
+ data.mode = MODE_Csv;
memcpy(data.separator, ",", 2);
} else {
data.mode = MODE_Pretty;
}
- if (FLAGS_interactive) {
- stdin_is_interactive = 1;
- }
-
- data.statsOn = (FLAGS_stats) ? 1 : 0;
- data.autoEQP = (FLAGS_explain) ? 1 : 0;
- data.echoOn = (FLAGS_echo) ? 1 : 0;
- data.showHeader = (FLAGS_header) ? 1 : 0;
- bail_on_error = (FLAGS_bail) ? 1 : 0;
-
sqlite3_snprintf(sizeof(data.separator), data.separator, "%s",
FLAGS_separator.c_str());
sqlite3_snprintf(sizeof(data.nullvalue), data.nullvalue, "%s",
FLAGS_nullvalue.c_str());
int rc = 0;
- if (argc > 1 && argv[1] != nullptr) {
+ if (FLAGS_L == true || FLAGS_A.size() > 0) {
+ // Helper meta commands from shell switches.
+ std::string query = (FLAGS_L) ? ".tables" : ".all " + FLAGS_A;
+ char *cmd = new char[query.size() + 1];
+ memset(cmd, 0, query.size() + 1);
+ std::copy(query.begin(), query.end(), cmd);
+ rc = do_meta_command(cmd, &data);
+ } else if (argc > 1 && argv[1] != nullptr) {
// Run a command or statement from CLI
char *query = argv[1];
char *error = 0;
rc = do_meta_command(query, &data);
rc = (rc == 2) ? 0 : rc;
} else {
- rc = shell_exec(data.db, query, shell_callback, &data, &error);
+ rc = shell_exec(query, shell_callback, &data, &error);
if (error != 0) {
fprintf(stderr, "Error: %s\n", error);
return (rc != 0) ? rc : 1;
} else {
// Run commands received from standard input
if (stdin_is_interactive) {
- printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
printBold("osquery");
printf(
" - being built, with love, at Samsung(not Facebook)\n"
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
if (warnInmemoryDb) {
- printf("Connected to a ");
- printBold("transient in-memory database");
- printf(".\n");
+ printf("Using a ");
+ printBold("virtual database");
+ printf(". Need help, type '.help'\n");
}
auto history_file = osquery::osqueryHomeDirectory() + "/.history";
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
EXPECT_EQ(lengths, expected);
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- google::InitGoogleLogging(argv[0]);
- return RUN_ALL_TESTS();
-}
ADD_OSQUERY_LIBRARY(TRUE osquery_dispatcher dispatcher.cpp
scheduler.cpp)
-ADD_OSQUERY_TEST(TRUE osquery_dispatcher_tests dispatcher_tests.cpp)
+FILE(GLOB OSQUERY_DISPATCHER_TESTS "tests/*.cpp")
+ADD_OSQUERY_TEST(TRUE ${OSQUERY_DISPATCHER_TESTS})
boost::this_thread::sleep(boost::posix_time::milliseconds(milli));
}
-Dispatcher& Dispatcher::getInstance() {
- static Dispatcher d;
- return d;
-}
-
Dispatcher::Dispatcher() {
thread_manager_ = InternalThreadManager::newSimpleThreadManager(
(size_t)FLAGS_worker_threads, 0);
Status Dispatcher::add(ThriftInternalRunnableRef task) {
try {
- thread_manager_->add(task, 0, 0);
+ instance().thread_manager_->add(task, 0, 0);
} catch (std::exception& e) {
return Status(1, e.what());
}
return Status(1, "Cannot schedule a service twice");
}
+ auto& self = instance();
auto thread = std::make_shared<boost::thread>(
boost::bind(&InternalRunnable::run, &*service));
- service_threads_.push_back(thread);
- services_.push_back(std::move(service));
+ self.service_threads_.push_back(thread);
+ self.services_.push_back(std::move(service));
return Status(0, "OK");
}
-InternalThreadManagerRef Dispatcher::getThreadManager() {
- return thread_manager_;
+InternalThreadManagerRef Dispatcher::getThreadManager() const {
+ return instance().thread_manager_;
}
-void Dispatcher::join() { thread_manager_->join(); }
+void Dispatcher::join() { instance().thread_manager_->join(); }
void Dispatcher::joinServices() {
- for (auto& thread : getInstance().service_threads_) {
+ for (auto& thread : instance().service_threads_) {
thread->join();
}
}
void Dispatcher::removeServices() {
- for (const auto& service : services_) {
+ auto& self = instance();
+ for (const auto& service : self.services_) {
while (true) {
// Wait for each thread's entry point (enter) meaning the thread context
// was allocated and (run) was called by boost::thread started.
if (service->hasRun()) {
break;
}
- // We only need to check if std::terminate is call very quickly after
+ // We only need to check if std::terminate is called very quickly after
// the boost::thread is created.
::usleep(200);
}
}
- for (auto& thread : service_threads_) {
+ for (auto& thread : self.service_threads_) {
thread->interrupt();
}
// Deallocate services.
- service_threads_.clear();
- services_.clear();
+ self.service_threads_.clear();
+ self.services_.clear();
}
InternalThreadManager::STATE Dispatcher::state() const {
- return thread_manager_->state();
+ return instance().thread_manager_->state();
}
-void Dispatcher::addWorker(size_t value) { thread_manager_->addWorker(value); }
+void Dispatcher::addWorker(size_t value) {
+ instance().thread_manager_->addWorker(value);
+}
void Dispatcher::removeWorker(size_t value) {
- thread_manager_->removeWorker(value);
+ instance().thread_manager_->removeWorker(value);
}
size_t Dispatcher::idleWorkerCount() const {
- return thread_manager_->idleWorkerCount();
+ return instance().thread_manager_->idleWorkerCount();
}
size_t Dispatcher::workerCount() const {
- return thread_manager_->workerCount();
+ return instance().thread_manager_->workerCount();
}
size_t Dispatcher::pendingTaskCount() const {
- return thread_manager_->pendingTaskCount();
+ return instance().thread_manager_->pendingTaskCount();
}
size_t Dispatcher::totalTaskCount() const {
- return thread_manager_->totalTaskCount();
+ return instance().thread_manager_->totalTaskCount();
}
size_t Dispatcher::pendingTaskCountMax() const {
- return thread_manager_->pendingTaskCountMax();
+ return instance().thread_manager_->pendingTaskCountMax();
}
size_t Dispatcher::expiredTaskCount() const {
- return thread_manager_->expiredTaskCount();
+ return instance().thread_manager_->expiredTaskCount();
}
}
#include <string>
#include <vector>
+#include <boost/noncopyable.hpp>
#include <boost/thread.hpp>
#include <osquery/core.h>
* execution of asynchronous tasks across an application. Internally,
* Dispatcher is back by the Apache Thrift thread pool.
*/
-class Dispatcher {
+class Dispatcher : private boost::noncopyable {
public:
/**
- * @brief The primary way to access the Dispatcher singleton.
+ * @brief The primary way to access the Dispatcher factory facility.
*
- * osquery::Dispatcher::getInstance() provides access to the Dispatcher
- * singleton.
+ * @code{.cpp} auto dispatch = osquery::Dispatcher::instance(); @endcode
*
- * @code{.cpp} auto dispatch = osquery::Dispatcher::getInstance(); @endcode
- *
- * @return a shared pointer to an instance of osquery::Dispatch.
+ * @return The osquery::Dispatcher instance.
*/
- static Dispatcher& getInstance();
+ static Dispatcher& instance() {
+ static Dispatcher instance;
+ return instance;
+ }
/**
- * @brief add a task to the dispatcher.
+ * @brief Add a task to the dispatcher.
*
* Adding tasks to the Dispatcher's thread pool requires you to create a
* "runnable" class which publicly implements Apache Thrift's Runnable
* virtual void run() { ++*i; }
* };
*
- * auto dispatch = osquery::Dispatcher::getInstance(); int i = 5;
- * dispatch->add(std::make_shared<TestRunnable>(&i);
+ * int i = 5;
+ * Dispatcher::add(std::make_shared<TestRunnable>(&i);
* while (dispatch->totalTaskCount() > 0) {}
* assert(i == 6);
* @endcode
* @param task a C++11 std shared pointer to an instance of a class which
* publicly inherits from `apache::thrift::concurrency::Runnable`.
*
- * @return an instance of osquery::Status, indicating the success or failure
- * of the operation.
+ * @return osquery success status
*/
- Status add(ThriftInternalRunnableRef task);
+ static Status add(ThriftInternalRunnableRef task);
/// See `add`, but services are not limited to a thread poll size.
- Status addService(InternalRunnableRef service);
+ static Status addService(InternalRunnableRef service);
/**
* @brief Getter for the underlying thread manager instance.
* underlying thread manager has been determined to be necessary.
*
* @code{.cpp}
- * auto t = osquery::Dispatcher::getInstance()->getThreadManager();
+ * auto t = osquery::Dispatcher::getThreadManager();
* @endcode
*
* @return a shared pointer to the Apache Thrift `ThreadManager` instance
* which is currently being used to orchestrate multi-threaded operations.
*/
- InternalThreadManagerRef getThreadManager();
+ InternalThreadManagerRef getThreadManager() const;
/**
* @brief Joins the thread manager.
* have finished their work. At that point the ThreadManager will transition
* into the STOPPED state.
*/
- void join();
+ static void join();
/// See `join`, but applied to osquery services.
static void joinServices();
/// Destroy and stop all osquery service threads and service objects.
- void removeServices();
+ static void removeServices();
/**
* @brief Get the current state of the thread manager.
*
* @see osquery::Dispatcher::removeWorker
*/
- void addWorker(size_t value = 1);
+ static void addWorker(size_t value = 1);
/**
* @brief Remove a worker thread.
*
* @see osquery::Dispatcher::addWorker
*/
- void removeWorker(size_t value = 1);
+ static void removeWorker(size_t value = 1);
/**
* @brief Gets the current number of idle worker threads.
/**
* @brief Default constructor.
*
- * Since instances of Dispatcher should only be created via getInstance(),
+ * Since instances of Dispatcher should only be created via instance(),
* Dispatcher's constructor is private.
*/
Dispatcher();
+ Dispatcher(Dispatcher const&);
+ void operator=(Dispatcher const&);
+ virtual ~Dispatcher() {}
private:
/**
* @brief Internal shared pointer which references Thrift's thread manager
*
* All thread operations occur via Apache Thrift's ThreadManager class. This
- * private member represents a shared pointer to an instantiation os that
+ * private member represents a shared pointer to an instantiation of that
* thread manager, which can be used to accomplish various threading
* objectives.
*
"hostname",
"Field used to identify the host running osquery (hostname, uuid)");
+FLAG(bool, enable_monitor, false, "Enable the schedule monitor");
+
CLI_FLAG(uint64, schedule_timeout, 0, "Limit the schedule, 0 for no limit")
Status getHostIdentifier(std::string& ident) {
auto status = db->Scan(kConfigurations, results);
if (!status.ok()) {
- VLOG(1) << "Could not access database, using hostname as the host "
- "identifier";
+ VLOG(1) << "Could not access database; using hostname as host identifier";
ident = osquery::getHostname();
return Status(0, "OK");
}
results.end()) {
status = db->Get(kConfigurations, "hostIdentifier", ident);
if (!status.ok()) {
- VLOG(1) << "Could not access database, using hostname as the host "
- "identifier";
+ VLOG(1) << "Could not access database; using hostname as host identifier";
ident = osquery::getHostname();
}
return status;
// There was no uuid stored in the database, generate one and store it.
ident = osquery::generateHostUuid();
- VLOG(1) << "Using uuid " << ident << " to identify this host";
+ VLOG(1) << "Using uuid " << ident << " as host identifier";
return db->Put(kConfigurations, "hostIdentifier", ident);
}
-void launchQuery(const std::string& name, const ScheduledQuery& query) {
- LOG(INFO) << "Executing query: " << query.query;
- int unix_time = std::time(0);
+inline SQL monitor(const std::string& name, const ScheduledQuery& query) {
+ // Snapshot the performance and times for the worker before running.
+ auto pid = std::to_string(getpid());
+ auto r0 = SQL::selectAllFrom("processes", "pid", tables::EQUALS, pid);
+ auto t0 = time(nullptr);
auto sql = SQL(query.query);
+ // Snapshot the performance after, and compare.
+ auto t1 = time(nullptr);
+ auto r1 = SQL::selectAllFrom("processes", "pid", tables::EQUALS, pid);
+ if (r0.size() > 0 && r1.size() > 0) {
+ size_t size = 0;
+ for (const auto& row : sql.rows()) {
+ for (const auto& column : row) {
+ size += column.first.size();
+ size += column.second.size();
+ }
+ }
+ Config::recordQueryPerformance(name, t1 - t0, size, r0[0], r1[0]);
+ }
+ return sql;
+}
+
+void launchQuery(const std::string& name, const ScheduledQuery& query) {
+ // Execute the scheduled query and create a named query object.
+ VLOG(1) << "Executing query: " << query.query;
+ auto sql = (FLAGS_enable_monitor) ? monitor(name, query) : SQL(query.query);
+
if (!sql.ok()) {
LOG(ERROR) << "Error executing query (" << query.query
<< "): " << sql.getMessageString();
return;
}
+ // Fill in a host identifier fields based on configuration or availability.
+ std::string ident;
+ auto status = getHostIdentifier(ident);
+ if (!status.ok() || ident.empty()) {
+ ident = "<unknown>";
+ }
+
+ // A query log item contains an optional set of differential results or
+ // a copy of the most-recent execution alongside some query metadata.
+ QueryLogItem item;
+ item.name = name;
+ item.identifier = ident;
+ item.time = osquery::getUnixTime();
+ item.calendar_time = osquery::getAsciiTime();
+
+ if (query.options.count("snapshot") && query.options.at("snapshot")) {
+ // This is a snapshot query, emit results with a differential or state.
+ item.snapshot_results = std::move(sql.rows());
+ logSnapshotQuery(item);
+ return;
+ }
+
+ // Create a database-backed set of query results.
auto dbQuery = Query(name, query);
DiffResults diff_results;
- auto status = dbQuery.addNewResults(sql.rows(), diff_results, unix_time);
+ // Add this execution's set of results to the database-tracked named query.
+ // We can then ask for a differential from the last time this named query
+ // was executed by exact matching each row.
+ status = dbQuery.addNewResults(sql.rows(), diff_results);
if (!status.ok()) {
LOG(ERROR) << "Error adding new results to database: " << status.what();
return;
return;
}
- ScheduledQueryLogItem item;
- item.diffResults = diff_results;
- item.name = name;
-
- std::string ident;
- status = getHostIdentifier(ident);
- if (status.ok()) {
- item.hostIdentifier = ident;
- } else if (ident.empty()) {
- ident = "<unknown>";
- }
-
- item.unixTime = osquery::getUnixTime();
- item.calendarTime = osquery::getAsciiTime();
-
- VLOG(1) << "Found results for query " << name << " for host: " << ident;
- status = logScheduledQueryLogItem(item);
+ VLOG(1) << "Found results for query (" << name << ") for host: " << ident;
+ item.results = diff_results;
+ status = logQueryLogItem(item);
if (!status.ok()) {
- LOG(ERROR) << "Error logging the results of query \"" << query.query << "\""
- << ": " << status.toString();
+ LOG(ERROR) << "Error logging the results of query (" << query.query
+ << "): " << status.toString();
}
}
void SchedulerRunner::enter() {
- time_t t = time(0);
- struct tm* local = localtime(&t);
+ time_t t = std::time(nullptr);
+ struct tm* local = std::localtime(&t);
unsigned long int i = local->tm_sec;
for (; (timeout_ == 0) || (i <= timeout_); ++i) {
{
}
}
}
- // Put the thread into an interruptable sleep without a config instance.
+ // Put the thread into an interruptible sleep without a config instance.
osquery::interruptableSleep(interval_ * 1000);
}
}
}
Status startScheduler(unsigned long int timeout, size_t interval) {
- Dispatcher::getInstance().addService(
- std::make_shared<SchedulerRunner>(timeout, interval));
+ Dispatcher::addService(std::make_shared<SchedulerRunner>(timeout, interval));
return Status(0, "OK");
}
}
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
class DispatcherTests : public testing::Test {};
TEST_F(DispatcherTests, test_singleton) {
- auto& one = Dispatcher::getInstance();
- auto& two = Dispatcher::getInstance();
+ auto& one = Dispatcher::instance();
+ auto& two = Dispatcher::instance();
EXPECT_EQ(one.getThreadManager().get(), two.getThreadManager().get());
}
};
TEST_F(DispatcherTests, test_add_work) {
- auto& dispatcher = Dispatcher::getInstance();
+ auto& dispatcher = Dispatcher::instance();
int base = 5;
int repetitions = 1;
EXPECT_EQ(i, base + repetitions);
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
ADD_OSQUERY_LIBRARY(TRUE osquery_distributed distributed.cpp)
-ADD_OSQUERY_TEST(FALSE osquery_distributed_tests distributed_tests.cpp)
+FILE(GLOB OSQUERY_DISTRIBUTED_TESTS "tests/*.cpp")
+ADD_OSQUERY_TEST(FALSE ${OSQUERY_DISTRIBUTED_TESTS})
namespace osquery {
FLAG(int32,
- distributed_get_queries_retries,
+ distributed_retries,
3,
- "Times to retry retrieving distributed queries");
-
-FLAG(int32,
- distributed_write_results_retries,
- 3,
- "Times to retry writing distributed query results");
+ "Times to retry reading/writing distributed queries");
Status MockDistributedProvider::getQueriesJSON(std::string& query_json) {
query_json = queriesJSON_;
// Parse the JSON into a ptree
pt::ptree tree;
try {
- std::istringstream query_stream(query_json);
+ std::stringstream query_stream(query_json);
pt::read_json(query_stream, tree);
- }
- catch (const std::exception& e) {
+ } catch (const pt::json_parser::json_parser_error& e) {
return Status(1, std::string("Error loading query JSON: ") + e.what());
}
do {
status = provider_->getQueriesJSON(query_json);
++retries;
- } while (!status.ok() && retries <= FLAGS_distributed_get_queries_retries);
+ } while (!status.ok() && retries <= FLAGS_distributed_retries);
if (!status.ok()) {
return status;
}
do {
status = provider_->writeResultsJSON(json);
++retries;
- } while (!status.ok() && retries <= FLAGS_distributed_write_results_retries);
+ } while (!status.ok() && retries <= FLAGS_distributed_retries);
if (!status.ok()) {
return status;
}
}
TEST_F(DistributedTests, test_parse_query_json) {
- std::string request_json = R"([{"query": "foo", "id": "bar"}])";
+ std::string request_json = "[{\"query\": \"foo\", \"id\": \"bar\"}]";
std::vector<DistributedQueryRequest> requests;
Status s = DistributedQueryHandler::parseQueriesJSON(request_json, requests);
ASSERT_EQ(Status(), s);
EXPECT_EQ("foo", requests[0].query);
EXPECT_EQ("bar", requests[0].id);
- std::string bad_json = R"([{"query": "foo", "id": "bar"}, {"query": "b"}])";
+ std::string bad_json =
+ "[{\"query\": \"foo\", \"id\": \"bar\"}, {\"query\": \"b\"}]";
requests.clear();
s = DistributedQueryHandler::parseQueriesJSON(bad_json, requests);
ASSERT_FALSE(s.ok());
}
TEST_F(DistributedTests, test_handle_query) {
-// Access to the internal SQL implementation is only available in core.
+ // Access to the internal SQL implementation is only available in core.
SQL query = DistributedQueryHandler::handleQuery("SELECT hour from time");
ASSERT_TRUE(query.ok());
QueryData rows = query.rows();
TEST_F(DistributedTests, test_serialize_results_basic) {
DistributedQueryRequest r0("foo", "foo_id");
- QueryData rows0 = {{{"foo0", "foo0_val"}, {"bar0", "bar0_val"}},
- {{"foo1", "foo1_val"}, {"bar1", "bar1_val"}}, };
+ QueryData rows0 = {
+ {{"foo0", "foo0_val"}, {"bar0", "bar0_val"}},
+ {{"foo1", "foo1_val"}, {"bar1", "bar1_val"}},
+ };
MockSQL q0 = MockSQL(rows0);
pt::ptree tree;
TEST_F(DistributedTests, test_serialize_results_multiple) {
DistributedQueryRequest r0("foo", "foo_id");
- QueryData rows0 = {{{"foo0", "foo0_val"}, {"bar0", "bar0_val"}},
- {{"foo1", "foo1_val"}, {"bar1", "bar1_val"}}, };
+ QueryData rows0 = {
+ {{"foo0", "foo0_val"}, {"bar0", "bar0_val"}},
+ {{"foo1", "foo1_val"}, {"bar1", "bar1_val"}},
+ };
MockSQL q0 = MockSQL(rows0);
DistributedQueryRequest r1("bar", "bar_id");
}
TEST_F(DistributedTests, test_do_queries) {
-// Access to the internal SQL implementation is only available in core.
+ // Access to the internal SQL implementation is only available in core.
auto provider_raw = new MockDistributedProvider();
provider_raw->queriesJSON_ =
- R"([
- {"query": "SELECT hour FROM time", "id": "hour"},
- {"query": "bad", "id": "bad"},
- {"query": "SELECT minutes FROM time", "id": "minutes"}
- ])";
+ "[ \
+ {\"query\": \"SELECT hour FROM time\", \"id\": \"hour\"},\
+ {\"query\": \"bad\", \"id\": \"bad\"},\
+ {\"query\": \"SELECT minutes FROM time\", \"id\": \"minutes\"}\
+ ]";
std::unique_ptr<MockDistributedProvider>
provider(provider_raw);
DistributedQueryHandler handler(std::move(provider));
}
TEST_F(DistributedTests, test_duplicate_request) {
-// Access to the internal SQL implementation is only available in core.
+ // Access to the internal SQL implementation is only available in core.
auto provider_raw = new MockDistributedProvider();
provider_raw->queriesJSON_ =
- R"([
- {"query": "SELECT hour FROM time", "id": "hour"}
- ])";
+ "[{\"query\": \"SELECT hour FROM time\", \"id\": \"hour\"}]";
std::unique_ptr<MockDistributedProvider>
provider(provider_raw);
DistributedQueryHandler handler(std::move(provider));
EXPECT_EQ(0, tree.get<int>("results.hour.status"));
const pt::ptree& tree_rows = tree.get_child("results.hour.rows");
EXPECT_EQ(1, tree_rows.size());
+
auto row = tree_rows.begin();
EXPECT_GE(row->second.get<int>("hour"), 0);
EXPECT_LE(row->second.get<int>("hour"), 24);
EXPECT_EQ(0, tree.get_child("results").size());
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
ADD_OSQUERY_LIBRARY(FALSE osquery_events_linux linux/inotify.cpp
linux/udev.cpp)
-ADD_OSQUERY_TEST(TRUE osquery_events_tests events_tests.cpp)
-ADD_OSQUERY_TEST(TRUE osquery_events_database_tests events_database_tests.cpp)
-ADD_OSQUERY_TEST(FALSE osquery_inotify_tests linux/inotify_tests.cpp)
+FILE(GLOB OSQUERY_EVENTS_TESTS "tests/*.cpp")
+ADD_OSQUERY_TEST(TRUE ${OSQUERY_EVENTS_TESTS})
+FILE(GLOB OSQUERY_LINUX_EVENTS_TESTS "linux/tests/*.cpp")
+ADD_OSQUERY_TEST(FALSE ${OSQUERY_LINUX_EVENTS_TESTS})
#include <boost/lexical_cast.hpp>
#include <osquery/core.h>
+#include <osquery/database.h>
#include <osquery/events.h>
#include <osquery/flags.h>
#include <osquery/logger.h>
}
// Set the optional string-verion of the time for DB columns.
- ec->time_string = boost::lexical_cast<std::string>(ec->time);
+ ec->time_string = std::to_string(ec->time);
}
for (const auto& subscription : subscriptions_) {
- fireCallback(subscription, ec);
+ auto es = EventFactory::getEventSubscriber(subscription->subscriber_name);
+ if (es->state() == SUBSCRIBER_RUNNING) {
+ fireCallback(subscription, ec);
+ }
}
}
}
if (specialized_sub == nullptr || specialized_sub.get() == nullptr) {
- return Status(0, "Invalid subscriber");
+ return Status(1, "Invalid subscriber");
}
// Let the module initialize any Subscriptions.
- specialized_sub->init();
+ auto status = specialized_sub->init();
auto& ef = EventFactory::getInstance();
- ef.event_subs_[specialized_sub->name()] = specialized_sub;
- return Status(0, "OK");
+ ef.event_subs_[specialized_sub->getName()] = specialized_sub;
+
+ // Set state of subscriber.
+ if (!status.ok()) {
+ specialized_sub->state(SUBSCRIBER_FAILED);
+ return Status(1, status.getMessage());
+ } else {
+ specialized_sub->state(SUBSCRIBER_RUNNING);
+ return Status(0, "OK");
+ }
}
Status EventFactory::addSubscription(EventPublisherID& type_id,
+ EventSubscriberID& name_id,
const SubscriptionContextRef& mc,
EventCallback cb,
void* user_data) {
- auto subscription = Subscription::create(mc, cb, user_data);
+ auto subscription = Subscription::create(name_id, mc, cb, user_data);
return EventFactory::addSubscription(type_id, subscription);
}
Status EventFactory::addSubscription(EventPublisherID& type_id,
const SubscriptionRef& subscription) {
- EventPublisherRef publisher;
- try {
- publisher = getInstance().getEventPublisher(type_id);
- } catch (std::out_of_range& e) {
- return Status(1, "No event type found");
+ EventPublisherRef publisher = getInstance().getEventPublisher(type_id);
+ if (publisher == nullptr) {
+ return Status(1, "Unknown event publisher");
}
// The event factory is responsible for configuring the event types.
EventPublisherRef EventFactory::getEventPublisher(EventPublisherID& type_id) {
if (getInstance().event_pubs_.count(type_id) == 0) {
LOG(ERROR) << "Requested unknown event publisher: " + type_id;
+ return nullptr;
}
return getInstance().event_pubs_.at(type_id);
}
EventSubscriberRef EventFactory::getEventSubscriber(
EventSubscriberID& name_id) {
- if (getInstance().event_subs_.count(name_id) == 0) {
+ if (!exists(name_id)) {
LOG(ERROR) << "Requested unknown event subscriber: " + name_id;
+ return nullptr;
}
return getInstance().event_subs_.at(name_id);
}
+bool EventFactory::exists(EventSubscriberID& name_id) {
+ return (getInstance().event_subs_.count(name_id) > 0);
+}
+
Status EventFactory::deregisterEventPublisher(const EventPublisherRef& pub) {
return EventFactory::deregisterEventPublisher(pub->type());
}
const auto& subscribers = Registry::all("event_subscriber");
for (const auto& subscriber : subscribers) {
- EventFactory::registerEventSubscriber(subscriber.second);
+ auto status = EventFactory::registerEventSubscriber(subscriber.second);
+ if (!status.ok()) {
+ LOG(ERROR) << "Error registering subscriber: " << status.getMessage();
+ }
}
}
}
#include <gtest/gtest.h>
+#include <osquery/database.h>
#include <osquery/events.h>
#include <osquery/tables.h>
// The most basic event publisher uses useless Subscription/Event.
class BasicEventPublisher
: public EventPublisher<SubscriptionContext, EventContext> {};
+
class AnotherBasicEventPublisher
: public EventPublisher<SubscriptionContext, EventContext> {};
// Make sure a subscription cannot be added for a non-existent event type.
// Note: It normally would not make sense to create a blank subscription.
- auto subscription = Subscription::create();
+ auto subscription = Subscription::create("FakeSubscriber");
auto status = EventFactory::addSubscription("FakePublisher", subscription);
EXPECT_FALSE(status.ok());
auto pub = std::make_shared<BasicEventPublisher>();
EventFactory::registerEventPublisher(pub);
- auto subscription = Subscription::create();
+ auto subscription = Subscription::create("subscriber");
status = EventFactory::addSubscription("publisher", subscription);
status = EventFactory::addSubscription("publisher", subscription);
sc->smallest = -1;
// Step 3, add the subscription to the event type
- status = EventFactory::addSubscription("TestPublisher", sc);
+ status = EventFactory::addSubscription("TestPublisher", "TestSubscriber", sc);
EXPECT_TRUE(status.ok());
EXPECT_EQ(pub->numSubscriptions(), 1);
}
class FakeEventSubscriber : public EventSubscriber<FakeEventPublisher> {
- DECLARE_SUBSCRIBER("FakeSubscriber");
-
public:
bool bellHathTolled;
bool contextBellHathTolled;
bool shouldFireBethHathTolled;
FakeEventSubscriber() {
+ setName("FakeSubscriber");
bellHathTolled = false;
contextBellHathTolled = false;
shouldFireBethHathTolled = false;
TEST_F(EventsTests, test_event_sub) {
auto sub = std::make_shared<FakeEventSubscriber>();
EXPECT_EQ(sub->type(), "FakePublisher");
- EXPECT_EQ(sub->name(), "FakeSubscriber");
+ EXPECT_EQ(sub->getName(), "FakeSubscriber");
}
TEST_F(EventsTests, test_event_sub_subscribe) {
auto pub = std::make_shared<BasicEventPublisher>();
status = EventFactory::registerEventPublisher(pub);
- auto subscription = Subscription::create();
+ auto sub = std::make_shared<FakeEventSubscriber>();
+ auto subscription = Subscription::create("FakeSubscriber");
subscription->callback = TestTheeCallback;
status = EventFactory::addSubscription("publisher", subscription);
pub->fire(ec, 0);
EXPECT_EQ(kBellHathTolled, 1);
- auto second_subscription = Subscription::create();
+ auto second_subscription = Subscription::create("FakeSubscriber");
status = EventFactory::addSubscription("publisher", second_subscription);
// Now there are two subscriptions (one sans callback).
EXPECT_EQ(kBellHathTolled, 4);
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- int status = RUN_ALL_TESTS();
- boost::filesystem::remove_all(osquery::kTestingEventsDBPath);
- return status;
-}
#include <linux/limits.h>
-#include <osquery/events.h>
#include <osquery/filesystem.h>
#include <osquery/logger.h>
inotify_handle_ = ::inotify_init();
// If this does not work throw an exception.
if (inotify_handle_ == -1) {
- return Status(1, "Could not init inotify.");
+ return Status(1, "Could not init inotify");
}
return Status(0, "OK");
}
return Status(1, "Overflow");
}
last_restart_ = getUnixTime();
- VLOG(1) << "Got an overflow, trying to restart...";
+ VLOG(1) << "inotify was overflown, attempting to restart handle";
for(const auto& desc : descriptors_){
removeMonitor(desc, 1);
}
if (!isPathMonitored(path)) {
int watch = ::inotify_add_watch(getHandle(), path.c_str(), IN_ALL_EVENTS);
if (watch == -1) {
- LOG(ERROR) << "Could not add inotfy watch on: " << path;
+ LOG(ERROR) << "Could not add inotify watch on: " << path;
return false;
}
return false;
}
- std::string path = descriptor_paths_[watch];
+ auto path = descriptor_paths_[watch];
return removeMonitor(path, force);
}
#include <sys/stat.h>
#include <osquery/events.h>
-#include <osquery/status.h>
namespace osquery {
std::string path;
/// A string action representing the event action `inotify` bit.
std::string action;
+ /// A no-op event transaction id.
+ uint32_t transaction_id;
+
+ INotifyEventContext() : event(nullptr), transaction_id(0) {}
};
typedef std::shared_ptr<INotifyEventContext> INotifyEventContextRef;
Status run();
- INotifyEventPublisher() : EventPublisher() { inotify_handle_ = -1; }
+ INotifyEventPublisher()
+ : EventPublisher(), inotify_handle_(-1), last_restart_(-1) {}
/// Check if the application-global `inotify` handle is alive.
bool isHandleOpen() { return inotify_handle_ > 0; }
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
#include <boost/filesystem/path.hpp>
#include <boost/thread.hpp>
-#define GTEST_HAS_TR1_TUPLE 0
-
#include <gtest/gtest.h>
#include <osquery/events.h>
#include <osquery/tables.h>
#include "osquery/events/linux/inotify.h"
+#include "osquery/core/test_util.h"
namespace osquery {
-const std::string kRealTestPath = "/tmp/osquery-inotify-trigger";
-const std::string kRealTestDir = "/tmp/osquery-inotify-triggers";
-const std::string kRealTestDirPath = "/tmp/osquery-inotify-triggers/1";
-const std::string kRealTestSubDir = "/tmp/osquery-inotify-triggers/2";
-const std::string kRealTestSubDirPath = "/tmp/osquery-inotify-triggers/2/1";
+const std::string kRealTestPath = kTestWorkingDirectory + "inotify-trigger";
+const std::string kRealTestDir = kTestWorkingDirectory + "inotify-triggers";
+const std::string kRealTestDirPath = kRealTestDir + "/1";
+const std::string kRealTestSubDir = kRealTestDir + "/2";
+const std::string kRealTestSubDirPath = kRealTestSubDir + "/1";
int kMaxEventLatency = 3000;
mc->path = path;
mc->mask = mask;
- EventFactory::addSubscription("inotify", mc, ec);
+ EventFactory::addSubscription("inotify", "TestSubscriber", mc, ec);
}
bool WaitForEvents(int max, int num_events = 0) {
auto mc = std::make_shared<INotifySubscriptionContext>();
mc->path = "/this/path/is/fake";
- auto subscription = Subscription::create(mc);
+ auto subscription = Subscription::create("TestSubscriber", mc);
auto status = EventFactory::addSubscription("inotify", subscription);
EXPECT_TRUE(status.ok());
EventFactory::deregisterEventPublisher("inotify");
auto mc = std::make_shared<INotifySubscriptionContext>();
mc->path = "/";
- auto subscription = Subscription::create(mc);
+ auto subscription = Subscription::create("TestSubscriber", mc);
auto status = EventFactory::addSubscription("inotify", subscription);
EXPECT_TRUE(status.ok());
EventFactory::deregisterEventPublisher("inotify");
}
-TEST_F(INotifyTests, test_inotify_run) {
- // Assume event type is registered.
- event_pub_ = std::make_shared<INotifyEventPublisher>();
- auto status = EventFactory::registerEventPublisher(event_pub_);
- EXPECT_TRUE(status.ok());
-
- // Create a temporary file to watch, open writeable
- FILE* fd = fopen(kRealTestPath.c_str(), "w");
-
- // Create a subscriptioning context
- auto mc = std::make_shared<INotifySubscriptionContext>();
- mc->path = kRealTestPath;
- status = EventFactory::addSubscription("inotify", Subscription::create(mc));
- EXPECT_TRUE(status.ok());
-
- // Create an event loop thread (similar to main)
- boost::thread temp_thread(EventFactory::run, "inotify");
- EXPECT_TRUE(event_pub_->numEvents() == 0);
-
- // Cause an inotify event by writing to the watched path.
- TriggerEvent(kRealTestPath);
-
-// TBD: Prevous opened fd cannot caught event.
-// fputs("inotify", fd);
-// fclose(fd);
-
- // Wait for the thread's run loop to select.
- WaitForEvents(kMaxEventLatency);
- EXPECT_TRUE(event_pub_->numEvents() > 0);
- EventFactory::end();
- temp_thread.join();
-}
-
class TestINotifyEventSubscriber
: public EventSubscriber<INotifyEventPublisher> {
- DECLARE_SUBSCRIBER("TestINotifyEventSubscriber");
-
public:
- void init() { callback_count_ = 0; }
+ TestINotifyEventSubscriber() : callback_count_(0) {
+ setName("TestINotifyEventSubscriber");
+ }
+
+ Status init() {
+ callback_count_ = 0;
+ return Status(0, "OK");
+ }
+
Status SimpleCallback(const INotifyEventContextRef& ec,
const void* user_data) {
callback_count_ += 1;
std::vector<std::string> actions_;
};
+TEST_F(INotifyTests, test_inotify_run) {
+ // Assume event type is registered.
+ event_pub_ = std::make_shared<INotifyEventPublisher>();
+ auto status = EventFactory::registerEventPublisher(event_pub_);
+ EXPECT_TRUE(status.ok());
+
+ // Create a temporary file to watch, open writeable
+ FILE* fd = fopen(kRealTestPath.c_str(), "w");
+
+ // Create a subscriber.
+ auto sub = std::make_shared<TestINotifyEventSubscriber>();
+ EventFactory::registerEventSubscriber(sub);
+
+ // Create a subscriptioning context
+ auto mc = std::make_shared<INotifySubscriptionContext>();
+ mc->path = kRealTestPath;
+ status = EventFactory::addSubscription(
+ "inotify", Subscription::create("TestINotifyEventSubscriber", mc));
+ EXPECT_TRUE(status.ok());
+
+ // Create an event loop thread (similar to main)
+ boost::thread temp_thread(EventFactory::run, "inotify");
+ EXPECT_TRUE(event_pub_->numEvents() == 0);
+
+ // Cause an inotify event by writing to the watched path.
+ fputs("inotify", fd);
+ fclose(fd);
+
+ // Wait for the thread's run loop to select.
+ WaitForEvents(kMaxEventLatency);
+ EXPECT_TRUE(event_pub_->numEvents() > 0);
+ EventFactory::end();
+ temp_thread.join();
+}
+
TEST_F(INotifyTests, test_inotify_fire_event) {
// Assume event type is registered.
StartEventLoop();
sub->subscribe(&TestINotifyEventSubscriber::Callback, sc, nullptr);
TriggerEvent(kRealTestPath);
- sub->WaitForEvents(kMaxEventLatency, 1);
+ sub->WaitForEvents(kMaxEventLatency, 4);
// Make sure the inotify action was expected.
- EXPECT_TRUE(sub->actions().size() > 0);
+ EXPECT_EQ(sub->actions().size(), 4);
EXPECT_EQ(sub->actions()[0], "UPDATED");
-// EXPECT_EQ(sub->actions()[1], "OPENED");
-// EXPECT_EQ(sub->actions()[2], "UPDATED");
-// EXPECT_EQ(sub->actions()[3], "UPDATED");
+ EXPECT_EQ(sub->actions()[1], "OPENED");
+ EXPECT_EQ(sub->actions()[2], "UPDATED");
+ EXPECT_EQ(sub->actions()[3], "UPDATED");
StopEventLoop();
}
StopEventLoop();
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
#include <boost/algorithm/string.hpp>
#include <boost/filesystem/operations.hpp>
-#define GTEST_HAS_TR1_TUPLE 0
-
#include <gtest/gtest.h>
+#include <osquery/database.h>
#include <osquery/events.h>
#include <osquery/tables.h>
// Setup a testing DB instance
DBHandle::getInstanceAtPath(kTestingEventsDBPath);
}
+
+ void TearDown() {
+ // Todo: each test set should operate on a clear working directory.
+ boost::filesystem::remove_all(osquery::kTestingEventsDBPath);
+ }
};
class FakeEventPublisher
};
class FakeEventSubscriber : public EventSubscriber<FakeEventPublisher> {
- DECLARE_SUBSCRIBER("FakeSubscriber");
-
public:
+ FakeEventSubscriber() { setName("FakeSubscriber"); }
/// Add a fake event at time t
Status testAdd(int t) {
Row r;
TEST_F(EventsDatabaseTests, test_event_module_id) {
auto sub = std::make_shared<FakeEventSubscriber>();
sub->doNotExpire();
-
+
// Not normally available outside of EventSubscriber->Add().
auto event_id1 = sub->getEventID();
EXPECT_EQ(event_id1, "1");
EXPECT_EQ(event_id2, "2");
}
-
TEST_F(EventsDatabaseTests, test_event_add) {
auto sub = std::make_shared<FakeEventSubscriber>();
auto status = sub->testAdd(1);
EXPECT_EQ(records.size(), 1); // 11
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- int status = RUN_ALL_TESTS();
- boost::filesystem::remove_all(osquery::kTestingEventsDBPath);
- return status;
-}
--- /dev/null
+/*
+ * Copyright (c) 2014, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+#include <typeinfo>
+
+#include <boost/filesystem/operations.hpp>
+
+#include <gtest/gtest.h>
+
+#include <osquery/database.h>
+#include <osquery/events.h>
+#include <osquery/tables.h>
+
+namespace osquery {
+
+const std::string kTestingEventsDBPath = "/tmp/rocksdb-osquery-testevents";
+
+class EventsTests : public ::testing::Test {
+ public:
+ void SetUp() {
+ // Setup a testing DB instance
+ DBHandle::getInstanceAtPath(kTestingEventsDBPath);
+ }
+
+ void TearDown() {
+ EventFactory::end();
+ boost::filesystem::remove_all(osquery::kTestingEventsDBPath);
+ }
+};
+
+// The most basic event publisher uses useless Subscription/Event.
+class BasicEventPublisher
+ : public EventPublisher<SubscriptionContext, EventContext> {};
+
+class AnotherBasicEventPublisher
+ : public EventPublisher<SubscriptionContext, EventContext> {};
+
+// Create some semi-useless subscription and event structures.
+struct FakeSubscriptionContext : SubscriptionContext {
+ int require_this_value;
+};
+struct FakeEventContext : EventContext {
+ int required_value;
+};
+
+// Typdef the shared_ptr accessors.
+typedef std::shared_ptr<FakeSubscriptionContext> FakeSubscriptionContextRef;
+typedef std::shared_ptr<FakeEventContext> FakeEventContextRef;
+
+// Now a publisher with a type.
+class FakeEventPublisher
+ : public EventPublisher<FakeSubscriptionContext, FakeEventContext> {
+ DECLARE_PUBLISHER("FakePublisher");
+};
+
+class AnotherFakeEventPublisher
+ : public EventPublisher<FakeSubscriptionContext, FakeEventContext> {
+ DECLARE_PUBLISHER("AnotherFakePublisher");
+};
+
+TEST_F(EventsTests, test_event_pub) {
+ auto pub = std::make_shared<FakeEventPublisher>();
+ EXPECT_EQ(pub->type(), "FakePublisher");
+
+ // Test type names.
+ auto pub_sub = pub->createSubscriptionContext();
+ EXPECT_EQ(typeid(FakeSubscriptionContext), typeid(*pub_sub));
+}
+
+TEST_F(EventsTests, test_register_event_pub) {
+ auto basic_pub = std::make_shared<BasicEventPublisher>();
+ auto status = EventFactory::registerEventPublisher(basic_pub);
+
+ // This class is the SAME, there was no type override.
+ auto another_basic_pub = std::make_shared<AnotherBasicEventPublisher>();
+ status = EventFactory::registerEventPublisher(another_basic_pub);
+ EXPECT_FALSE(status.ok());
+
+ // This class is different but also uses different types!
+ auto fake_pub = std::make_shared<FakeEventPublisher>();
+ status = EventFactory::registerEventPublisher(fake_pub);
+ EXPECT_TRUE(status.ok());
+
+ // May also register the event_pub instance
+ auto another_fake_pub = std::make_shared<AnotherFakeEventPublisher>();
+ status = EventFactory::registerEventPublisher(another_fake_pub);
+ EXPECT_TRUE(status.ok());
+}
+
+TEST_F(EventsTests, test_event_pub_types) {
+ auto pub = std::make_shared<FakeEventPublisher>();
+ EXPECT_EQ(pub->type(), "FakePublisher");
+
+ EventFactory::registerEventPublisher(pub);
+ auto pub2 = EventFactory::getEventPublisher("FakePublisher");
+ EXPECT_EQ(pub->type(), pub2->type());
+}
+
+TEST_F(EventsTests, test_create_event_pub) {
+ auto pub = std::make_shared<BasicEventPublisher>();
+ auto status = EventFactory::registerEventPublisher(pub);
+ EXPECT_TRUE(status.ok());
+
+ // Make sure only the first event type was recorded.
+ EXPECT_EQ(EventFactory::numEventPublishers(), 1);
+}
+
+class UniqueEventPublisher
+ : public EventPublisher<FakeSubscriptionContext, FakeEventContext> {
+ DECLARE_PUBLISHER("unique");
+};
+
+TEST_F(EventsTests, test_create_using_registry) {
+ // The events API uses attachEvents to move registry event publishers and
+ // subscribers into the events factory.
+ EXPECT_EQ(EventFactory::numEventPublishers(), 0);
+ attachEvents();
+
+ // Store the number of default event publishers (in core).
+ int default_publisher_count = EventFactory::numEventPublishers();
+
+ // Now add another registry item, but do not yet attach it.
+ auto UniqueEventPublisherRegistryItem =
+ Registry::add<UniqueEventPublisher>("event_publisher", "unique");
+ EXPECT_EQ(EventFactory::numEventPublishers(), default_publisher_count);
+
+ // Now attach and make sure it was added.
+ attachEvents();
+ EXPECT_EQ(EventFactory::numEventPublishers(), default_publisher_count + 1);
+}
+
+TEST_F(EventsTests, test_create_subscription) {
+ auto pub = std::make_shared<BasicEventPublisher>();
+ EventFactory::registerEventPublisher(pub);
+
+ // Make sure a subscription cannot be added for a non-existent event type.
+ // Note: It normally would not make sense to create a blank subscription.
+ auto subscription = Subscription::create("FakeSubscriber");
+ auto status = EventFactory::addSubscription("FakePublisher", subscription);
+ EXPECT_FALSE(status.ok());
+
+ // In this case we can still add a blank subscription to an existing event
+ // type.
+ status = EventFactory::addSubscription("publisher", subscription);
+ EXPECT_TRUE(status.ok());
+
+ // Make sure the subscription is added.
+ EXPECT_EQ(EventFactory::numSubscriptions("publisher"), 1);
+}
+
+TEST_F(EventsTests, test_multiple_subscriptions) {
+ Status status;
+
+ auto pub = std::make_shared<BasicEventPublisher>();
+ EventFactory::registerEventPublisher(pub);
+
+ auto subscription = Subscription::create("subscriber");
+ status = EventFactory::addSubscription("publisher", subscription);
+ status = EventFactory::addSubscription("publisher", subscription);
+
+ EXPECT_EQ(EventFactory::numSubscriptions("publisher"), 2);
+}
+
+struct TestSubscriptionContext : public SubscriptionContext {
+ int smallest;
+};
+
+class TestEventPublisher
+ : public EventPublisher<TestSubscriptionContext, EventContext> {
+ DECLARE_PUBLISHER("TestPublisher");
+
+ public:
+ Status setUp() {
+ smallest_ever_ += 1;
+ return Status(0, "OK");
+ }
+
+ void configure() {
+ int smallest_subscription = smallest_ever_;
+
+ configure_run = true;
+ for (const auto& subscription : subscriptions_) {
+ auto subscription_context = getSubscriptionContext(subscription->context);
+ if (smallest_subscription > subscription_context->smallest) {
+ smallest_subscription = subscription_context->smallest;
+ }
+ }
+
+ smallest_ever_ = smallest_subscription;
+ }
+
+ void tearDown() { smallest_ever_ += 1; }
+
+ TestEventPublisher() : EventPublisher() {
+ smallest_ever_ = 0;
+ configure_run = false;
+ }
+
+ // Custom methods do not make sense, but for testing it exists.
+ int getTestValue() { return smallest_ever_; }
+
+ public:
+ bool configure_run;
+
+ private:
+ int smallest_ever_;
+};
+
+TEST_F(EventsTests, test_create_custom_event_pub) {
+ auto basic_pub = std::make_shared<BasicEventPublisher>();
+ EventFactory::registerEventPublisher(basic_pub);
+ auto pub = std::make_shared<TestEventPublisher>();
+ auto status = EventFactory::registerEventPublisher(pub);
+
+ // These event types have unique event type IDs
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ(EventFactory::numEventPublishers(), 2);
+
+ // Make sure the setUp function was called.
+ EXPECT_EQ(pub->getTestValue(), 1);
+}
+
+TEST_F(EventsTests, test_custom_subscription) {
+ // Step 1, register event type
+ auto pub = std::make_shared<TestEventPublisher>();
+ auto status = EventFactory::registerEventPublisher(pub);
+
+ // Step 2, create and configure a subscription context
+ auto sc = std::make_shared<TestSubscriptionContext>();
+ sc->smallest = -1;
+
+ // Step 3, add the subscription to the event type
+ status = EventFactory::addSubscription("TestPublisher", "TestSubscriber", sc);
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ(pub->numSubscriptions(), 1);
+
+ // The event type must run configure for each added subscription.
+ EXPECT_TRUE(pub->configure_run);
+ EXPECT_EQ(pub->getTestValue(), -1);
+}
+
+TEST_F(EventsTests, test_tear_down) {
+ auto pub = std::make_shared<TestEventPublisher>();
+ auto status = EventFactory::registerEventPublisher(pub);
+
+ // Make sure set up incremented the test value.
+ EXPECT_EQ(pub->getTestValue(), 1);
+
+ status = EventFactory::deregisterEventPublisher("TestPublisher");
+ EXPECT_TRUE(status.ok());
+
+ // Make sure tear down inremented the test value.
+ EXPECT_EQ(pub->getTestValue(), 2);
+
+ // Once more, now deregistering all event types.
+ status = EventFactory::registerEventPublisher(pub);
+ EXPECT_EQ(pub->getTestValue(), 3);
+ EventFactory::end();
+ EXPECT_EQ(pub->getTestValue(), 4);
+
+ // Make sure the factory state represented.
+ EXPECT_EQ(EventFactory::numEventPublishers(), 0);
+}
+
+static int kBellHathTolled = 0;
+
+Status TestTheeCallback(EventContextRef context, const void* user_data) {
+ kBellHathTolled += 1;
+ return Status(0, "OK");
+}
+
+class FakeEventSubscriber : public EventSubscriber<FakeEventPublisher> {
+ public:
+ bool bellHathTolled;
+ bool contextBellHathTolled;
+ bool shouldFireBethHathTolled;
+
+ FakeEventSubscriber() {
+ setName("FakeSubscriber");
+ bellHathTolled = false;
+ contextBellHathTolled = false;
+ shouldFireBethHathTolled = false;
+ }
+
+ Status Callback(const EventContextRef& ec, const void* user_data) {
+ // We don't care about the subscription or the event contexts.
+ bellHathTolled = true;
+ return Status(0, "OK");
+ }
+
+ Status SpecialCallback(const FakeEventContextRef& ec, const void* user_data) {
+ // Now we care that the event context is corrected passed.
+ if (ec->required_value == 42) {
+ contextBellHathTolled = true;
+ }
+ return Status(0, "OK");
+ }
+
+ void lateInit() {
+ auto sub_ctx = createSubscriptionContext();
+ subscribe(&FakeEventSubscriber::Callback, sub_ctx, nullptr);
+ }
+
+ void laterInit() {
+ auto sub_ctx = createSubscriptionContext();
+ sub_ctx->require_this_value = 42;
+ subscribe(&FakeEventSubscriber::SpecialCallback, sub_ctx, nullptr);
+ }
+};
+
+TEST_F(EventsTests, test_event_sub) {
+ auto sub = std::make_shared<FakeEventSubscriber>();
+ EXPECT_EQ(sub->type(), "FakePublisher");
+ EXPECT_EQ(sub->getName(), "FakeSubscriber");
+}
+
+TEST_F(EventsTests, test_event_sub_subscribe) {
+ auto pub = std::make_shared<FakeEventPublisher>();
+ EventFactory::registerEventPublisher(pub);
+
+ auto sub = std::make_shared<FakeEventSubscriber>();
+ EventFactory::registerEventSubscriber(sub);
+
+ // Don't overload the normal `init` Subscription member.
+ sub->lateInit();
+ EXPECT_EQ(pub->numSubscriptions(), 1);
+
+ auto ec = pub->createEventContext();
+ pub->fire(ec, 0);
+
+ EXPECT_TRUE(sub->bellHathTolled);
+}
+
+TEST_F(EventsTests, test_event_sub_context) {
+ auto pub = std::make_shared<FakeEventPublisher>();
+ EventFactory::registerEventPublisher(pub);
+
+ auto sub = std::make_shared<FakeEventSubscriber>();
+ EventFactory::registerEventSubscriber(sub);
+
+ sub->laterInit();
+ auto ec = pub->createEventContext();
+ ec->required_value = 42;
+ pub->fire(ec, 0);
+
+ EXPECT_TRUE(sub->contextBellHathTolled);
+}
+
+TEST_F(EventsTests, test_fire_event) {
+ Status status;
+
+ auto pub = std::make_shared<BasicEventPublisher>();
+ status = EventFactory::registerEventPublisher(pub);
+
+ auto sub = std::make_shared<FakeEventSubscriber>();
+ auto subscription = Subscription::create("FakeSubscriber");
+ subscription->callback = TestTheeCallback;
+ status = EventFactory::addSubscription("publisher", subscription);
+
+ // The event context creation would normally happen in the event type.
+ auto ec = pub->createEventContext();
+ pub->fire(ec, 0);
+ EXPECT_EQ(kBellHathTolled, 1);
+
+ auto second_subscription = Subscription::create("FakeSubscriber");
+ status = EventFactory::addSubscription("publisher", second_subscription);
+
+ // Now there are two subscriptions (one sans callback).
+ pub->fire(ec, 0);
+ EXPECT_EQ(kBellHathTolled, 2);
+
+ // Now both subscriptions have callbacks.
+ second_subscription->callback = TestTheeCallback;
+ pub->fire(ec, 0);
+ EXPECT_EQ(kBellHathTolled, 4);
+}
+}
class ExampleConfigPlugin : public ConfigPlugin {
public:
+ Status setUp() {
+ LOG(WARNING) << "ExampleConfigPlugin setting up.";
+ return Status(0, "OK");
+ }
+
Status genConfig(std::map<std::string, std::string>& config) {
config["data"] = "{\"options\": [], \"scheduledQueries\": []}";
return Status(0, "OK");
extensions.cpp
interface.cpp)
-ADD_OSQUERY_TEST(TRUE osquery_extensions_test extensions_tests.cpp)
+FILE(GLOB OSQUERY_EXTENSIONS_TESTS "tests/*.cpp")
+ADD_OSQUERY_TEST(TRUE ${OSQUERY_EXTENSIONS_TESTS})
"/etc/osquery/modules.load",
"Optional path to a list of autoloaded registry modules")
-/// Alias the extensions_socket (used by core) to an alternate name reserved
-/// for extension binaries
+/**
+ * @brief Alias the extensions_socket (used by core) to a simple 'socket'.
+ *
+ * Extension binaries will more commonly set the path to an extension manager
+ * socket. Alias the long switch name to 'socket' for an easier UX.
+ *
+ * We include timeout and interval, where the 'extensions_' prefix is removed
+ * in the alias since we are already within the context of an extension.
+ */
EXTENSION_FLAG_ALIAS(socket, extensions_socket);
-
-/// An extension manager may not be immediately available.
EXTENSION_FLAG_ALIAS(timeout, extensions_timeout);
EXTENSION_FLAG_ALIAS(interval, extensions_interval);
+
void ExtensionWatcher::enter() {
// Watch the manager, if the socket is removed then the extension will die.
while (true) {
Registry::setUp();
// Start the extension's Thrift server
- Dispatcher::getInstance().addService(
+ Dispatcher::addService(
std::make_shared<ExtensionRunner>(manager_path, ext_status.uuid));
VLOG(1) << "Extension (" << name << ", " << ext_status.uuid << ", " << version
<< ", " << sdk_version << ") registered";
}
// Start a extension manager watcher, if the manager dies, so should we.
- Dispatcher::getInstance().addService(
+ Dispatcher::addService(
std::make_shared<ExtensionWatcher>(manager_path, interval, fatal));
return Status(0, "OK");
}
auto latency = atoi(FLAGS_extensions_interval.c_str()) * 1000;
// Start a extension manager watcher, if the manager dies, so should we.
- Dispatcher::getInstance().addService(
+ Dispatcher::addService(
std::make_shared<ExtensionManagerWatcher>(manager_path, latency));
// Start the extension manager thread.
- Dispatcher::getInstance().addService(
+ Dispatcher::addService(
std::make_shared<ExtensionManagerRunner>(manager_path));
return Status(0, "OK");
}
plugin_request[request_item.first] = request_item.second;
}
- auto status = Registry::call(registry, local_item, request, response);
+ auto status = Registry::call(registry, local_item, plugin_request, response);
_return.status.code = status.getCode();
_return.status.message = status.getMessage();
_return.status.uuid = uuid_;
#error "Required -DOSQUERY_THRIFT=/path/to/thrift/gen-cpp"
#endif
+namespace osquery {
+namespace extensions {
+
using namespace apache::thrift;
using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
using namespace apache::thrift::server;
using namespace apache::thrift::concurrency;
-namespace osquery {
-namespace extensions {
-
/**
* @brief The Thrift API server used by an osquery Extension process.
*
/**
* @brief Request an Extension removal and removal of Registry routes.
*
- * When an Extension process is gracefull killed it should deregister.
- * Other priviledged tools may choose to deregister an Extension by
+ * When an Extension process is graceful killed it should deregister.
+ * Other privileged tools may choose to deregister an Extension by
* the transient Extension's Route UUID, obtained using
* ExtensionManagerHandler::extensions.
*
/// removed.
void refresh();
- /// Maintain a map of extension UUID to metadata for tracking deregistrations.
+ /// Maintain a map of extension UUID to metadata for tracking deregistration.
InternalExtensionList extensions_;
};
}
class ExtensionRunner : public InternalRunnable {
public:
virtual ~ExtensionRunner();
- ExtensionRunner(const std::string& manager_path, RouteUUID uuid) {
+ ExtensionRunner(const std::string& manager_path, RouteUUID uuid)
+ : uuid_(uuid) {
path_ = getExtensionSocket(uuid, manager_path);
- uuid_ = uuid;
}
public:
class ExtensionManagerRunner : public InternalRunnable {
public:
virtual ~ExtensionManagerRunner();
- explicit ExtensionManagerRunner(const std::string& manager_path) {
- path_ = manager_path;
- }
+ explicit ExtensionManagerRunner(const std::string& manager_path)
+ : path_(manager_path) {}
public:
void enter();
class EXInternal {
public:
explicit EXInternal(const std::string& path)
- : socket_(new TSocket(path)),
- transport_(new TBufferedTransport(socket_)),
- protocol_(new TBinaryProtocol(transport_)) {}
+ : socket_(new extensions::TSocket(path)),
+ transport_(new extensions::TBufferedTransport(socket_)),
+ protocol_(new extensions::TBinaryProtocol(transport_)) {}
virtual ~EXInternal() { transport_->close(); }
protected:
- OSQUERY_THRIFT_POINTER::shared_ptr<TSocket> socket_;
- OSQUERY_THRIFT_POINTER::shared_ptr<TTransport> transport_;
- OSQUERY_THRIFT_POINTER::shared_ptr<TProtocol> protocol_;
+ OSQUERY_THRIFT_POINTER::shared_ptr<extensions::TSocket> socket_;
+ OSQUERY_THRIFT_POINTER::shared_ptr<extensions::TTransport> transport_;
+ OSQUERY_THRIFT_POINTER::shared_ptr<extensions::TProtocol> protocol_;
};
/// Internal accessor for a client to an extension (from an extension manager).
const int kDelayUS = 200;
const int kTimeoutUS = 10000;
-const std::string kTestManagerSocket = "/tmp/osquery-em.socket";
+const std::string kTestManagerSocket = kTestWorkingDirectory + "test.em";
class ExtensionsTest : public testing::Test {
protected:
}
void TearDown() {
- Dispatcher::getInstance().removeServices();
+ Dispatcher::removeServices();
Dispatcher::joinServices();
remove(kTestManagerSocket);
}
// Now allow duplicates (for testing, since EM/E are the same).
Registry::allowDuplicates(true);
status = startExtension(kTestManagerSocket, "test", "0.1", "0.0.0", "0.0.1");
- // This will be false since we are registering duplicate items
+ // This will not be false since we are allowing deplicate items.
+ // Otherwise, starting an extension and extensionManager would fatal.
ASSERT_TRUE(status.ok());
// The `startExtension` internal call (exposed for testing) returns the
RouteUUID uuid;
try {
uuid = (RouteUUID)stoi(status.getMessage(), nullptr, 0);
- }
- catch (const std::exception& e) {
+ } catch (const std::exception& e) {
EXPECT_TRUE(false);
return;
}
tearDownMockFileStructure();
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
-ADD_OSQUERY_LIBRARY(TRUE osquery_filesystem filesystem.cpp)
+ADD_OSQUERY_LIBRARY(TRUE osquery_filesystem filesystem.cpp
+ globbing.cpp)
ADD_OSQUERY_LIBRARY(TRUE osquery_filesystem_linux linux/proc.cpp
linux/mem.cpp)
-ADD_OSQUERY_TEST(TRUE osquery_filesystem_tests filesystem_tests.cpp)
+FILE(GLOB OSQUERY_FILESYSTEM_TESTS "tests/*.cpp")
+ADD_OSQUERY_TEST(TRUE ${OSQUERY_FILESYSTEM_TESTS})
#include <pwd.h>
#include <sys/stat.h>
-#include <boost/algorithm/string/join.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/filesystem/operations.hpp>
-#include <boost/filesystem/path.hpp>
-#include <boost/property_tree/ptree.hpp>
-#include <boost/property_tree/xml_parser.hpp>
+#include <boost/property_tree/json_parser.hpp>
#include <osquery/core.h>
#include <osquery/filesystem.h>
// A tri-state determination of presence
try {
- if (!boost::filesystem::exists(path)) {
+ if (!fs::exists(path)) {
return Status(1, "0");
}
} catch (const fs::filesystem_error& e) {
return Status(0, "OK");
}
-/**
- * @brief Drill down recursively and list all sub files
- *
- * This functions purpose is to take a path with no wildcards
- * and it will recursively go through all files and and return
- * them in the results vector.
- *
- * @param fs_path The entire resolved path
- * @param results The vector where results will be returned
- * @param rec_depth How many recursions deep the current execution is at
- *
- * @return An instance of osquery::Status indicating the success of failure of
- * the operation
- */
-Status doubleStarTraversal(const fs::path& fs_path,
- std::vector<std::string>& results,
- ReturnSetting setting,
- unsigned int rec_depth) {
- if (rec_depth >= kMaxDirectoryTraversalDepth) {
- return Status(2, fs_path.string().c_str());
- }
- // List files first
- if (setting & REC_LIST_FILES) {
- Status stat = listFilesInDirectory(fs_path, results);
- if (!stat.ok()) {
- return Status(0, "OK");
- }
- }
- std::vector<std::string> folders;
- Status stat = listDirectoriesInDirectory(fs_path, folders);
- if (!stat.ok()) {
- return Status(0, "OK");
- }
- if (setting & REC_LIST_FOLDERS) {
- results.push_back(fs_path.string());
- }
- for (const auto& folder : folders) {
- if (fs::is_symlink(folder)) {
- continue;
- }
-
- stat = doubleStarTraversal(folder, results, setting, rec_depth + 1);
- if (!stat.ok() && stat.getCode() == 2) {
- return stat;
- }
- }
- return Status(0, "OK");
-}
-
-/**
- * @brief Resolve the last component of a file path
- *
- * This function exists because unlike the other parts of of a file
- * path, which should only resolve to folder, a wildcard at the end
- * means to list all files in that directory, as does just listing
- * folder. Also, a double means to drill down recursively into that
- * that folder and list all sub file.
- *
- * @param fs_path The entire resolved path (except last component)
- * @param results The vector where results will be returned
- * @param components A path, split by forward slashes
- * @param rec_depth How many recursions deep the current execution is at
- *
- * @return An instance of osquery::Status indicating the success of failure of
- * the operation
- */
-Status resolveLastPathComponent(const fs::path& fs_path,
- std::vector<std::string>& results,
- ReturnSetting setting,
- const std::vector<std::string>& components,
- unsigned int rec_depth) {
-
- // Is the last component a double star?
- if (components[components.size() - 1] == kWildcardCharacterRecursive) {
- if (setting & REC_EVENT_OPT) {
- results.push_back(fs_path.parent_path().string());
- return Status(0, "OK");
- } else {
- Status stat = doubleStarTraversal(
- fs_path.parent_path(), results, setting, rec_depth);
- return stat;
- }
- }
-
- try {
- // Is the path a file
- if (setting == REC_LIST_FILES && fs::is_regular_file(fs_path)) {
- results.push_back(fs_path.string());
- return Status(0, "OK");
- }
- } catch (const fs::filesystem_error& e) {
- return Status(0, "OK");
- }
-
- std::vector<std::string> files;
- std::vector<std::string> folders;
- Status stat_file = listFilesInDirectory(fs_path.parent_path(), files);
- Status stat_fold = listDirectoriesInDirectory(fs_path.parent_path(), folders);
-
- // Is the last component a wildcard?
- if (components[components.size() - 1] == kWildcardCharacter) {
-
- if (setting & REC_EVENT_OPT) {
- results.push_back(fs_path.parent_path().string());
- return Status(0, "OK");
- }
- if (setting & REC_LIST_FOLDERS) {
- results.push_back(fs_path.parent_path().string());
- for (const auto& fold : folders) {
- results.push_back(fold);
- }
- }
- if (setting & REC_LIST_FILES) {
- for (const auto& file : files) {
- results.push_back(file);
- }
- }
- return Status(0, "OK");
- }
-
- std::string processed_path =
- "/" +
- boost::algorithm::join(
- std::vector<std::string>(components.begin(), components.end() - 1),
- "/");
-
- // Is this a (.*)% type file match
- if (components[components.size() - 1].find(kWildcardCharacter, 1) !=
- std::string::npos &&
- components[components.size() - 1][0] != kWildcardCharacter[0]) {
-
- std::string prefix =
- processed_path + "/" +
- components[components.size() - 1].substr(
- 0, components[components.size() - 1].find(kWildcardCharacter, 1));
- if (setting & REC_LIST_FOLDERS) {
- for (const auto& fold : folders) {
- if (fold.find(prefix, 0) != 0) {
- continue;
- }
- results.push_back(fold);
- }
- }
- if (setting & REC_LIST_FILES || setting & REC_EVENT_OPT) {
- for (const auto& file : files) {
- if (file.find(prefix, 0) != 0) {
- continue;
- }
- results.push_back(file);
- }
- }
- // Should be a return here?
- return Status(0, "OK");
- }
-
- // Is this a %(.*) type file match
- if (components[components.size() - 1][0] == kWildcardCharacter[0]) {
- std::string suffix = components[components.size() - 1].substr(1);
- if (setting & REC_LIST_FOLDERS) {
- for (const auto& fold : folders) {
- std::string file_name =
- boost::filesystem::path(fold).filename().string();
- size_t pos = file_name.find(suffix);
- if (pos != std::string::npos &&
- pos + suffix.length() == file_name.length()) {
- results.push_back(fold);
- }
- }
- }
- if (setting & REC_LIST_FILES || setting & REC_EVENT_OPT) {
- for (const auto& file : files) {
- boost::filesystem::path p(file);
- std::string file_name = p.filename().string();
- size_t pos = file_name.find(suffix);
- if (pos != std::string::npos &&
- pos + suffix.length() == file_name.length()) {
- results.push_back(file);
- }
- }
- }
- return Status(0, "OK");
- }
-
- // Back out if this path doesn't exist due to invalid path
- if (!(pathExists(fs_path).ok())) {
- return Status(0, "OK");
- }
-
- // Is the path a directory
- if (fs::is_directory(fs_path)) {
- results.push_back(fs_path.string());
- return Status(0, "OK");
- }
-
- return Status(1, "UNKNOWN FILE TYPE");
-}
-
-/**
- * @brief List all files in a directory recursively
- *
- * This is an overloaded version of the exported `resolveFilePattern`. This
- * version is used internally to facilitate the tracking of the recursion
- * depth.
- *
- * @param results The vector where results will be returned
- * @param components A path, split by forward slashes
- * @param processed_index What index of components has been resolved so far
- * @param rec_depth How many recursions deep the current execution is at
- *
- * @return An instance of osquery::Status indicating the success of failure of
- * the operation
- */
-Status resolveFilePattern(std::vector<std::string> components,
- std::vector<std::string>& results,
- ReturnSetting setting = REC_LIST_FILES,
- unsigned int processed_index = 0,
- unsigned int rec_depth = 0) {
-
- // Stop recursing here if we've reached out max depth
- if (rec_depth >= kMaxDirectoryTraversalDepth) {
- return Status(2, "MAX_DEPTH");
- }
-
- // Handle all parts of the path except last because then we want to get files,
- // not directories
- for (auto i = processed_index; i < components.size() - 1; i++) {
-
- // If we encounter a full recursion, that is invalid because it is not
- // the last component. So return.
- if (components[i] == kWildcardCharacterRecursive) {
- return Status(1, kWildcardCharacterRecursive + " NOT LAST COMPONENT");
- }
-
- // Create a vector to hold all the folders in the current folder
- // Build the path we're at out of components
- std::vector<std::string> folders;
-
- std::string processed_path =
- "/" +
- boost::algorithm::join(std::vector<std::string>(components.begin(),
- components.begin() + i),
- "/");
- Status stat = listDirectoriesInDirectory(processed_path, folders);
- // If we couldn't list the directories it's probably because
- // the path is invalid (or we don't have permission). Return
- // here because this branch is no good. This is not an error
- if (!stat.ok()) {
- return Status(0, "OK");
- }
- // If we just have a wildcard character then we will recurse though
- // all folders we find
- if (components[i] == kWildcardCharacter) {
- for (const auto& dir : folders) {
- boost::filesystem::path p(dir);
- components[i] = p.filename().string();
- Status stat = resolveFilePattern(
- components, results, setting, i + 1, rec_depth + 1);
- if (!stat.ok() && stat.getCode() == 2) {
- return stat;
- }
- }
- // Our subcalls that handle processing are now complete, return
- return Status(0, "OK");
-
- // The case of (.*)%
- } else if (components[i].find(kWildcardCharacter, 1) != std::string::npos &&
- components[i][0] != kWildcardCharacter[0]) {
- std::string prefix =
- processed_path + "/" +
- components[i].substr(0, components[i].find(kWildcardCharacter, 1));
- for (const auto& dir : folders) {
- if (dir.find(prefix, 0) != 0) {
- continue;
- }
- boost::filesystem::path p(dir);
- components[i] = p.filename().string();
- Status stat = resolveFilePattern(
- components, results, setting, i + 1, rec_depth + 1);
- if (!stat.ok() && stat.getCode() == 2) {
- return stat;
- }
- }
- return Status(0, "OK");
- // The case of %(.*)
- } else if (components[i][0] == kWildcardCharacter[0]) {
- std::string suffix = components[i].substr(1);
- for (const auto& dir : folders) {
- boost::filesystem::path p(dir);
- std::string folder_name = p.filename().string();
- size_t pos = folder_name.find(suffix);
- if (pos != std::string::npos &&
- pos + suffix.length() == folder_name.length()) {
- components[i] = p.filename().string();
- Status stat = resolveFilePattern(
- components, results, setting, i + 1, rec_depth + 1);
- if (!stat.ok() && stat.getCode() == 2) {
- return stat;
- }
- }
- }
- return Status(0, "OK");
- } else {
- }
- }
-
- // At this point, all of our call paths have been resolved, so know we want to
- // list the files at this point or do our ** traversal
- return resolveLastPathComponent("/" + boost::algorithm::join(components, "/"),
- results,
- setting,
- components,
- rec_depth);
-}
-
-Status resolveFilePattern(const fs::path& fs_path,
- std::vector<std::string>& results) {
- if (fs_path.string()[0] != '/') {
- return resolveFilePattern(
- split(fs::current_path().string() + "/" + fs_path.string(), "/"),
- results);
- }
- return resolveFilePattern(split(fs_path.string(), "/"), results);
-}
-
-Status resolveFilePattern(const fs::path& fs_path,
- std::vector<std::string>& results,
- ReturnSetting setting) {
- if (fs_path.string()[0] != '/') {
- return resolveFilePattern(
- split(fs::current_path().string() + "/" + fs_path.string(), "/"),
- results,
- setting);
- }
- return resolveFilePattern(split(fs_path.string(), "/"), results, setting);
-}
-
Status getDirectory(const fs::path& path, fs::path& dirpath) {
if (!isDirectory(path).ok()) {
dirpath = fs::path(path).parent_path().string();
return false;
} else if (file_stat.st_uid == getuid() || file_stat.st_uid == 0) {
// Otherwise, require matching or root file ownership.
- if (executable && !file_stat.st_mode & S_IXUSR) {
+ if (executable && !(file_stat.st_mode & S_IXUSR)) {
// Require executable, implies by the owner.
return false;
}
bits += rwx[(mode >> 0) & 7];
return bits;
}
+
+Status parseJSON(const fs::path& path, pt::ptree& tree) {
+ std::string json_data;
+ if (!readFile(path, json_data).ok()) {
+ return Status(1, "Could not read JSON from file");
+ }
+
+ return parseJSONContent(json_data, tree);
+}
+
+Status parseJSONContent(const std::string& content, pt::ptree& tree) {
+ // Read the extensions data into a JSON blob, then property tree.
+ try {
+ std::stringstream json_stream;
+ json_stream << content;
+ pt::read_json(json_stream, tree);
+ } catch (const pt::json_parser::json_parser_error& e) {
+ return Status(1, "Could not parse JSON from file");
+ }
+ return Status(0, "OK");
+}
}
--- /dev/null
+/*
+ * Copyright (c) 2014, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+#include <boost/algorithm/string/join.hpp>
+#include <boost/filesystem/operations.hpp>
+
+#include <osquery/core.h>
+#include <osquery/filesystem.h>
+
+namespace fs = boost::filesystem;
+
+namespace osquery {
+
+/**
+ * @brief Drill down recursively and list all sub files
+ *
+ * This functions purpose is to take a path with no wildcards
+ * and it will recursively go through all files and and return
+ * them in the results vector.
+ *
+ * @param fs_path The entire resolved path
+ * @param results The vector where results will be returned
+ * @param rec_depth How many recursions deep the current execution is at
+ *
+ * @return An instance of osquery::Status indicating the success of failure of
+ * the operation
+ */
+Status doubleStarTraversal(const fs::path& fs_path,
+ std::vector<std::string>& results,
+ ReturnSetting setting,
+ unsigned int rec_depth) {
+ if (rec_depth >= kMaxDirectoryTraversalDepth) {
+ return Status(2, fs_path.string().c_str());
+ }
+ // List files first
+ if (setting & REC_LIST_FILES) {
+ Status stat = listFilesInDirectory(fs_path, results);
+ if (!stat.ok()) {
+ return Status(0, "OK");
+ }
+ }
+ std::vector<std::string> folders;
+ Status stat = listDirectoriesInDirectory(fs_path, folders);
+ if (!stat.ok()) {
+ return Status(0, "OK");
+ }
+ if (setting & REC_LIST_FOLDERS) {
+ results.push_back(fs_path.string());
+ }
+ for (const auto& folder : folders) {
+ if (fs::is_symlink(folder)) {
+ continue;
+ }
+
+ stat = doubleStarTraversal(folder, results, setting, rec_depth + 1);
+ if (!stat.ok() && stat.getCode() == 2) {
+ return stat;
+ }
+ }
+ return Status(0, "OK");
+}
+
+/**
+ * @brief Resolve the last component of a file path
+ *
+ * This function exists because unlike the other parts of of a file
+ * path, which should only resolve to folder, a wildcard at the end
+ * means to list all files in that directory, as does just listing
+ * folder. Also, a double means to drill down recursively into that
+ * that folder and list all sub file.
+ *
+ * @param fs_path The entire resolved path (except last component)
+ * @param results The vector where results will be returned
+ * @param components A path, split by forward slashes
+ * @param rec_depth How many recursions deep the current execution is at
+ *
+ * @return An instance of osquery::Status indicating the success of failure of
+ * the operation
+ */
+Status resolveLastPathComponent(const fs::path& fs_path,
+ std::vector<std::string>& results,
+ ReturnSetting setting,
+ const std::vector<std::string>& components,
+ unsigned int rec_depth) {
+
+ // Is the last component a double star?
+ if (components[components.size() - 1] == kWildcardCharacterRecursive) {
+ if (setting & REC_EVENT_OPT) {
+ results.push_back(fs_path.parent_path().string());
+ return Status(0, "OK");
+ } else {
+ Status stat = doubleStarTraversal(
+ fs_path.parent_path(), results, setting, rec_depth);
+ return stat;
+ }
+ }
+
+ try {
+ // Is the path a file
+ if ((setting & (REC_EVENT_OPT | REC_LIST_FILES)) > 0 &&
+ fs::is_regular_file(fs_path)) {
+ results.push_back(fs_path.string());
+ return Status(0, "OK");
+ }
+ } catch (const fs::filesystem_error& e) {
+ return Status(0, "OK");
+ }
+
+ std::vector<std::string> files;
+ std::vector<std::string> folders;
+ Status stat_file = listFilesInDirectory(fs_path.parent_path(), files);
+ Status stat_fold = listDirectoriesInDirectory(fs_path.parent_path(), folders);
+
+ // Is the last component a wildcard?
+ if (components[components.size() - 1] == kWildcardCharacter) {
+
+ if (setting & REC_EVENT_OPT) {
+ results.push_back(fs_path.parent_path().string());
+ return Status(0, "OK");
+ }
+ if (setting & REC_LIST_FOLDERS) {
+ results.push_back(fs_path.parent_path().string());
+ for (const auto& fold : folders) {
+ results.push_back(fold);
+ }
+ }
+ if (setting & REC_LIST_FILES) {
+ for (const auto& file : files) {
+ results.push_back(file);
+ }
+ }
+ return Status(0, "OK");
+ }
+
+ std::string processed_path =
+ "/" +
+ boost::algorithm::join(
+ std::vector<std::string>(components.begin(), components.end() - 1),
+ "/");
+
+ // Is this a (.*)% type file match
+ if (components[components.size() - 1].find(kWildcardCharacter, 1) !=
+ std::string::npos &&
+ components[components.size() - 1][0] != kWildcardCharacter[0]) {
+
+ std::string prefix =
+ processed_path + "/" +
+ components[components.size() - 1].substr(
+ 0, components[components.size() - 1].find(kWildcardCharacter, 1));
+ if (setting & REC_LIST_FOLDERS) {
+ for (const auto& fold : folders) {
+ if (fold.find(prefix, 0) != 0) {
+ continue;
+ }
+ results.push_back(fold);
+ }
+ }
+ if (setting & REC_LIST_FILES || setting & REC_EVENT_OPT) {
+ for (const auto& file : files) {
+ if (file.find(prefix, 0) != 0) {
+ continue;
+ }
+ results.push_back(file);
+ }
+ }
+ // Should be a return here?
+ return Status(0, "OK");
+ }
+
+ // Is this a %(.*) type file match
+ if (components[components.size() - 1][0] == kWildcardCharacter[0]) {
+ std::string suffix = components[components.size() - 1].substr(1);
+ if (setting & REC_LIST_FOLDERS) {
+ for (const auto& fold : folders) {
+ std::string file_name =
+ boost::filesystem::path(fold).filename().string();
+ size_t pos = file_name.find(suffix);
+ if (pos != std::string::npos &&
+ pos + suffix.length() == file_name.length()) {
+ results.push_back(fold);
+ }
+ }
+ }
+ if (setting & REC_LIST_FILES || setting & REC_EVENT_OPT) {
+ for (const auto& file : files) {
+ boost::filesystem::path p(file);
+ std::string file_name = p.filename().string();
+ size_t pos = file_name.find(suffix);
+ if (pos != std::string::npos &&
+ pos + suffix.length() == file_name.length()) {
+ results.push_back(file);
+ }
+ }
+ }
+ return Status(0, "OK");
+ }
+
+ // Back out if this path doesn't exist due to invalid path
+ if (!(pathExists(fs_path).ok())) {
+ return Status(0, "OK");
+ }
+
+ // Is the path a directory
+ if (fs::is_directory(fs_path)) {
+ results.push_back(fs_path.string());
+ return Status(0, "OK");
+ }
+
+ return Status(1, "UNKNOWN FILE TYPE");
+}
+
+/**
+ * @brief List all files in a directory recursively
+ *
+ * This is an overloaded version of the exported `resolveFilePattern`. This
+ * version is used internally to facilitate the tracking of the recursion
+ * depth.
+ *
+ * @param results The vector where results will be returned
+ * @param components A path, split by forward slashes
+ * @param processed_index What index of components has been resolved so far
+ * @param rec_depth How many recursions deep the current execution is at
+ *
+ * @return An instance of osquery::Status indicating the success of failure of
+ * the operation
+ */
+Status resolveFilePattern(std::vector<std::string> components,
+ std::vector<std::string>& results,
+ ReturnSetting setting = REC_LIST_FILES,
+ unsigned int processed_index = 0,
+ unsigned int rec_depth = 0) {
+
+ // Stop recursing here if we've reached out max depth
+ if (rec_depth >= kMaxDirectoryTraversalDepth) {
+ return Status(2, "MAX_DEPTH");
+ }
+
+ // Handle all parts of the path except last because then we want to get files,
+ // not directories
+ for (auto i = processed_index; i < components.size() - 1; i++) {
+
+ // If we encounter a full recursion, that is invalid because it is not
+ // the last component. So return.
+ if (components[i] == kWildcardCharacterRecursive) {
+ return Status(1, kWildcardCharacterRecursive + " NOT LAST COMPONENT");
+ }
+
+ // Create a vector to hold all the folders in the current folder
+ // Build the path we're at out of components
+ std::vector<std::string> folders;
+
+ std::string processed_path =
+ "/" +
+ boost::algorithm::join(std::vector<std::string>(components.begin(),
+ components.begin() + i),
+ "/");
+ Status stat = listDirectoriesInDirectory(processed_path, folders);
+ // If we couldn't list the directories it's probably because
+ // the path is invalid (or we don't have permission). Return
+ // here because this branch is no good. This is not an error
+ if (!stat.ok()) {
+ return Status(0, "OK");
+ }
+ // If we just have a wildcard character then we will recurse though
+ // all folders we find
+ if (components[i] == kWildcardCharacter) {
+ for (const auto& dir : folders) {
+ boost::filesystem::path p(dir);
+ components[i] = p.filename().string();
+ Status stat = resolveFilePattern(
+ components, results, setting, i + 1, rec_depth + 1);
+ if (!stat.ok() && stat.getCode() == 2) {
+ return stat;
+ }
+ }
+ // Our subcalls that handle processing are now complete, return
+ return Status(0, "OK");
+
+ // The case of (.*)%
+ } else if (components[i].find(kWildcardCharacter, 1) != std::string::npos &&
+ components[i][0] != kWildcardCharacter[0]) {
+ std::string prefix =
+ processed_path + "/" +
+ components[i].substr(0, components[i].find(kWildcardCharacter, 1));
+ for (const auto& dir : folders) {
+ if (dir.find(prefix, 0) != 0) {
+ continue;
+ }
+ boost::filesystem::path p(dir);
+ components[i] = p.filename().string();
+ Status stat = resolveFilePattern(
+ components, results, setting, i + 1, rec_depth + 1);
+ if (!stat.ok() && stat.getCode() == 2) {
+ return stat;
+ }
+ }
+ return Status(0, "OK");
+ // The case of %(.*)
+ } else if (components[i][0] == kWildcardCharacter[0]) {
+ std::string suffix = components[i].substr(1);
+ for (const auto& dir : folders) {
+ boost::filesystem::path p(dir);
+ std::string folder_name = p.filename().string();
+ size_t pos = folder_name.find(suffix);
+ if (pos != std::string::npos &&
+ pos + suffix.length() == folder_name.length()) {
+ components[i] = p.filename().string();
+ Status stat = resolveFilePattern(
+ components, results, setting, i + 1, rec_depth + 1);
+ if (!stat.ok() && stat.getCode() == 2) {
+ return stat;
+ }
+ }
+ }
+ return Status(0, "OK");
+ } else {
+ }
+ }
+
+ // At this point, all of our call paths have been resolved, so know we want to
+ // list the files at this point or do our ** traversal
+ return resolveLastPathComponent("/" + boost::algorithm::join(components, "/"),
+ results,
+ setting,
+ components,
+ rec_depth);
+}
+
+Status resolveFilePattern(const fs::path& fs_path,
+ std::vector<std::string>& results) {
+ if (fs_path.string()[0] != '/') {
+ return resolveFilePattern(
+ split(fs::current_path().string() + "/" + fs_path.string(), "/"),
+ results);
+ }
+ return resolveFilePattern(split(fs_path.string(), "/"), results);
+}
+
+Status resolveFilePattern(const fs::path& fs_path,
+ std::vector<std::string>& results,
+ ReturnSetting setting) {
+ if (fs_path.string()[0] != '/') {
+ return resolveFilePattern(
+ split(fs::current_path().string() + "/" + fs_path.string(), "/"),
+ results,
+ setting);
+ }
+ return resolveFilePattern(split(fs_path.string(), "/"), results, setting);
+}
+}
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
-
#include <fstream>
#include <stdio.h>
};
TEST_F(FilesystemTests, test_plugin) {
- std::ofstream test_file("/tmp/osquery-fstests-file");
+ std::ofstream test_file(kTestWorkingDirectory + "fstests-file");
test_file.write("test123\n", sizeof("test123"));
test_file.close();
std::string content;
- auto s = readFile("/tmp/osquery-fstests-file", content);
+ auto s = readFile(kTestWorkingDirectory + "fstests-file", content);
EXPECT_TRUE(s.ok());
EXPECT_EQ(s.toString(), "OK");
EXPECT_EQ(content, "test123\n");
- remove("/tmp/osquery-fstests-file");
+ remove(kTestWorkingDirectory + "fstests-file");
}
TEST_F(FilesystemTests, test_list_files_in_directory_not_found) {
EXPECT_TRUE(status.ok());
EXPECT_EQ(files.size(), 3);
EXPECT_EQ(files.size(), files_flag.size());
- EXPECT_NE(
- std::find(files.begin(), files.end(), kFakeDirectory + "/roto.txt"),
- files.end());
+ EXPECT_NE(std::find(files.begin(), files.end(), kFakeDirectory + "/roto.txt"),
+ files.end());
}
TEST_F(FilesystemTests, test_wildcard_dual) {
std::vector<std::string> files;
auto status = resolveFilePattern(kFakeDirectory + "/%/%", files);
EXPECT_TRUE(status.ok());
- EXPECT_NE(std::find(files.begin(), files.end(),
+ EXPECT_NE(std::find(files.begin(),
+ files.end(),
kFakeDirectory + "/deep1/level1.txt"),
files.end());
}
std::vector<std::string> files;
auto status = resolveFilePattern(kFakeDirectory + "/%%", files);
EXPECT_TRUE(status.ok());
- EXPECT_NE(std::find(files.begin(), files.end(),
+ EXPECT_NE(std::find(files.begin(),
+ files.end(),
kFakeDirectory + "/deep1/deep2/level2.txt"),
files.end());
}
std::vector<std::string> files;
auto status = resolveFilePattern(kFakeDirectory + "/%11/%sh", files);
EXPECT_TRUE(status.ok());
- EXPECT_NE(std::find(files.begin(), files.end(),
+ EXPECT_NE(std::find(files.begin(),
+ files.end(),
kFakeDirectory + "/deep11/not_bash"),
files.end());
}
std::vector<std::string> files;
auto status = resolveFilePattern(kFakeDirectory + "/%p11/%/%%", files);
EXPECT_TRUE(status.ok());
- EXPECT_NE(std::find(files.begin(), files.end(),
+ EXPECT_NE(std::find(files.begin(),
+ files.end(),
kFakeDirectory + "/deep11/deep2/deep3/level3.txt"),
files.end());
}
std::vector<std::string> files;
auto status = resolveFilePattern(kFakeDirectory + "/deep1%/%", files);
EXPECT_TRUE(status.ok());
- EXPECT_NE(std::find(files.begin(), files.end(),
+ EXPECT_NE(std::find(files.begin(),
+ files.end(),
kFakeDirectory + "/deep1/level1.txt"),
files.end());
- EXPECT_NE(std::find(files.begin(), files.end(),
+ EXPECT_NE(std::find(files.begin(),
+ files.end(),
kFakeDirectory + "/deep11/level1.txt"),
files.end());
}
}
TEST_F(FilesystemTests, test_double_wild_event_opt) {
std::vector<std::string> all;
- auto status = resolveFilePattern(kFakeDirectory + "/%%", all,
- REC_LIST_FOLDERS | REC_EVENT_OPT);
+ auto status = resolveFilePattern(
+ kFakeDirectory + "/%%", all, REC_LIST_FOLDERS | REC_EVENT_OPT);
EXPECT_TRUE(status.ok());
EXPECT_EQ(all.size(), 1);
EXPECT_NE(std::find(all.begin(), all.end(), kFakeDirectory), all.end());
TEST_F(FilesystemTests, test_letter_wild_opt) {
std::vector<std::string> all;
- auto status = resolveFilePattern(kFakeDirectory + "/d%", all,
- REC_LIST_FOLDERS | REC_EVENT_OPT);
+ auto status = resolveFilePattern(
+ kFakeDirectory + "/d%", all, REC_LIST_FOLDERS | REC_EVENT_OPT);
EXPECT_TRUE(status.ok());
EXPECT_EQ(all.size(), 3);
EXPECT_NE(std::find(all.begin(), all.end(), kFakeDirectory + "/deep1"),
TEST_F(FilesystemTests, test_dotdot) {
std::vector<std::string> all;
- auto status = resolveFilePattern(kFakeDirectory + "/deep11/deep2/../../%",
- all, REC_LIST_FILES);
+ auto status = resolveFilePattern(
+ kFakeDirectory + "/deep11/deep2/../../%", all, REC_LIST_FILES);
EXPECT_TRUE(status.ok());
EXPECT_EQ(all.size(), 3);
- EXPECT_NE(std::find(all.begin(), all.end(),
+ EXPECT_NE(std::find(all.begin(),
+ all.end(),
kFakeDirectory + "/deep11/deep2/../../door.txt"),
all.end());
}
TEST_F(FilesystemTests, test_dotdot_relative) {
std::vector<std::string> all;
- auto status =
- resolveFilePattern("../../../tools/tests/%", all, REC_LIST_ALL);
+ auto status = resolveFilePattern(kTestDataPath + "%", all, REC_LIST_ALL);
EXPECT_TRUE(status.ok());
bool found = false;
EXPECT_TRUE(found);
}
+TEST_F(FilesystemTests, test_no_wild) {
+ std::vector<std::string> all;
+ auto status =
+ resolveFilePattern(kFakeDirectory + "/roto.txt", all, REC_LIST_FILES);
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ(all.size(), 1);
+ EXPECT_NE(std::find(all.begin(), all.end(), kFakeDirectory + "/roto.txt"),
+ all.end());
+}
+
TEST_F(FilesystemTests, test_safe_permissions) {
// For testing we can request a different directory path.
EXPECT_TRUE(safePermissions("/", kFakeDirectory + "/door.txt"));
EXPECT_TRUE(safePermissions("/", "/dev/zero"));
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- google::InitGoogleLogging(argv[0]);
- return RUN_ALL_TESTS();
-}
ADD_OSQUERY_LIBRARY(TRUE osquery_logger logger.cpp)
ADD_OSQUERY_LIBRARY(FALSE osquery_logger_plugins plugins/filesystem.cpp)
-ADD_OSQUERY_TEST(FALSE osquery_logger_tests logger_tests.cpp)
+FILE(GLOB OSQUERY_LOGGER_TESTS "tests/*.cpp")
+ADD_OSQUERY_TEST(FALSE ${OSQUERY_LOGGER_TESTS})
}
// Read the plugin request string into a JSON tree and enumerate.
- std::stringstream input;
- input << request.at("log");
pt::ptree tree;
- pt::read_json(input, tree);
+ try {
+ std::stringstream input;
+ input << request.at("log");
+ pt::read_json(input, tree);
+ } catch (const pt::json_parser::json_parser_error& e) {
+ return;
+ }
for (const auto& item : tree.get_child("")) {
log.push_back({
}
}
-void initStatusLogger(const std::string& name) {
- FLAGS_alsologtostderr = true;
- FLAGS_logbufsecs = 0; // flush the log buffer immediately
- FLAGS_stop_logging_if_full_disk = true;
- FLAGS_max_log_size = 10; // max size for individual log file is 10MB
- FLAGS_logtostderr = true;
-
- if (FLAGS_verbose) {
+void setVerboseLevel() {
+ if (Flag::getValue("verbose") == "true") {
// Turn verbosity up to 1.
// Do log DEBUG, INFO, WARNING, ERROR to their log files.
// Do log the above and verbose=1 to stderr.
+ FLAGS_minloglevel = 0; // WARNING
+ FLAGS_stderrthreshold = 0;
FLAGS_v = 1;
} else {
// Do NOT log INFO, WARNING, ERROR to stderr.
FLAGS_minloglevel = 2; // ERROR
}
}
+}
+void initStatusLogger(const std::string& name) {
+ FLAGS_alsologtostderr = false;
+ FLAGS_logbufsecs = 0; // flush the log buffer immediately
+ FLAGS_stop_logging_if_full_disk = true;
+ FLAGS_max_log_size = 10; // max size for individual log file is 10MB
+ FLAGS_logtostderr = true;
+
+ setVerboseLevel();
// Start the logging, and announce the daemon is starting.
google::InitGoogleLogging(name.c_str());
return;
}
- // Start the custom status logging facilities, which may instruct glog as is
+ // Start the custom status logging facilities, which may instruct Glog as is
// the case with filesystem logging.
PluginRequest request = {{"init", name}};
serializeIntermediateLog(intermediate_logs, request);
auto status = Registry::call("logger", request);
if (status.ok() || forward_all) {
- // When init returns success we reenabled the log sink in forwarding
+ // When `init` returns success we re-enabled the log sink in forwarding
// mode. Now, Glog status logs are buffered and sent to logStatus.
BufferedLogSink::forward(true);
BufferedLogSink::enable();
Status LoggerPlugin::call(const PluginRequest& request,
PluginResponse& response) {
+ QueryLogItem item;
std::vector<StatusLogLine> intermediate_logs;
if (request.count("string") > 0) {
return this->logString(request.at("string"));
+ } else if (request.count("snapshot") > 0) {
+ return this->logSnapshot(request.at("snapshot"));
+ } else if (request.count("health") > 0) {
+ return this->logHealth(request.at("health"));
} else if (request.count("init") > 0) {
deserializeIntermediateLog(request, intermediate_logs);
return this->init(request.at("init"), intermediate_logs);
}
}
-Status logString(const std::string& s) {
- return logString(s, Registry::getActive("logger"));
+Status logString(const std::string& message, const std::string& category) {
+ return logString(message, category, Registry::getActive("logger"));
}
-Status logString(const std::string& s, const std::string& receiver) {
+Status logString(const std::string& message,
+ const std::string& category,
+ const std::string& receiver) {
if (!Registry::exists("logger", receiver)) {
LOG(ERROR) << "Logger receiver " << receiver << " not found";
return Status(1, "Logger receiver not found");
}
- auto status = Registry::call("logger", receiver, {{"string", s}});
+ auto status = Registry::call(
+ "logger", receiver, {{"string", message}, {"category", category}});
return Status(0, "OK");
}
-Status logScheduledQueryLogItem(const osquery::ScheduledQueryLogItem& results) {
- return logScheduledQueryLogItem(results, Registry::getActive("logger"));
+Status logQueryLogItem(const QueryLogItem& results) {
+ return logQueryLogItem(results, Registry::getActive("logger"));
}
-Status logScheduledQueryLogItem(const osquery::ScheduledQueryLogItem& results,
- const std::string& receiver) {
+Status logQueryLogItem(const QueryLogItem& results,
+ const std::string& receiver) {
std::string json;
Status status;
if (FLAGS_log_result_events) {
- status = serializeScheduledQueryLogItemAsEventsJSON(results, json);
+ status = serializeQueryLogItemAsEventsJSON(results, json);
} else {
- status = serializeScheduledQueryLogItemJSON(results, json);
+ status = serializeQueryLogItemJSON(results, json);
}
if (!status.ok()) {
return status;
}
- return logString(json, receiver);
+ return logString(json, "event", receiver);
+}
+
+Status logSnapshotQuery(const QueryLogItem& item) {
+ std::string json;
+ if (!serializeQueryLogItemJSON(item, json)) {
+ return Status(1, "Could not serialize snapshot");
+ }
+ return Registry::call("logger", {{"snapshot", json}});
+}
+
+Status logHealthStatus(const QueryLogItem& item) {
+ std::string json;
+ if (!serializeQueryLogItemJSON(item, json)) {
+ return Status(1, "Could not serialize health");
+ }
+ return Registry::call("logger", {{"health", json}});
}
}
#include <osquery/flags.h>
#include <osquery/logger.h>
+namespace pt = boost::property_tree;
namespace fs = boost::filesystem;
namespace osquery {
FLAG_ALIAS(std::string, osquery_log_dir, logger_path);
const std::string kFilesystemLoggerFilename = "osqueryd.results.log";
+const std::string kFilesystemLoggerSnapshots = "osqueryd.snapshots.log";
+const std::string kFilesystemLoggerHealth = "osqueryd.health.log";
std::mutex filesystemLoggerPluginMutex;
public:
Status setUp();
Status logString(const std::string& s);
+ Status logStringToFile(const std::string& s, const std::string& filename);
+ Status logSnapshot(const std::string& s);
+ Status logHealth(const std::string& s);
Status init(const std::string& name, const std::vector<StatusLogLine>& log);
Status logStatus(const std::vector<StatusLogLine>& log);
REGISTER(FilesystemLoggerPlugin, "logger", "filesystem");
Status FilesystemLoggerPlugin::setUp() {
- log_path_ = fs::path(FLAGS_logger_path) / kFilesystemLoggerFilename;
+ log_path_ = fs::path(FLAGS_logger_path);
return Status(0, "OK");
}
Status FilesystemLoggerPlugin::logString(const std::string& s) {
+ return logStringToFile(s, kFilesystemLoggerFilename);
+}
+
+Status FilesystemLoggerPlugin::logStringToFile(const std::string& s,
+ const std::string& filename) {
std::lock_guard<std::mutex> lock(filesystemLoggerPluginMutex);
try {
// The results log may contain sensitive information if run as root.
- auto status = writeTextFile(log_path_.string(), s, 0640, true);
+ auto status = writeTextFile((log_path_ / filename).string(), s, 0640, true);
if (!status.ok()) {
return status;
}
Status FilesystemLoggerPlugin::logStatus(
const std::vector<StatusLogLine>& log) {
for (const auto& item : log) {
- // Emit this intermediate log to the glog filesystem logger.
+ // Emit this intermediate log to the Glog filesystem logger.
google::LogMessage(item.filename.c_str(),
item.line,
(google::LogSeverity)item.severity).stream()
return Status(0, "OK");
}
+Status FilesystemLoggerPlugin::logSnapshot(const std::string& s) {
+ // Send the snapshot data to a separate filename.
+ return logStringToFile(s, kFilesystemLoggerSnapshots);
+}
+
+Status FilesystemLoggerPlugin::logHealth(const std::string& s) {
+ return logStringToFile(s, kFilesystemLoggerHealth);
+}
+
Status FilesystemLoggerPlugin::init(const std::string& name,
const std::vector<StatusLogLine>& log) {
- // Stop the internal glog facilities.
+ // Stop the internal Glog facilities.
google::ShutdownGoogleLogging();
// The log dir is used for status logging and the filesystem results logs.
- if (isWritable(FLAGS_logger_path).ok()) {
- FLAGS_log_dir = FLAGS_logger_path;
+ if (isWritable(log_path_.string()).ok()) {
+ FLAGS_log_dir = log_path_.string();
FLAGS_logtostderr = false;
} else {
// If we cannot write logs to the filesystem, fallback to stderr.
FLAGS_logtostderr = true;
}
- // Restart the glog facilities using the name init was provided.
+ // Restart the Glog facilities using the name `init` was provided.
google::InitGoogleLogging(name.c_str());
- // We may violate glog global object assumptions. So set names manually.
- auto basename = (log_path_.parent_path() / name).string();
+ // We may violate Glog global object assumptions. So set names manually.
+ auto basename = (log_path_ / name).string();
google::SetLogDestination(google::INFO, (basename + ".INFO.").c_str());
google::SetLogDestination(google::WARNING, (basename + ".WARNING.").c_str());
google::SetLogDestination(google::ERROR, (basename + ".ERROR.").c_str());
// Store settings for logging to stderr.
bool log_to_stderr = FLAGS_logtostderr;
bool also_log_to_stderr = FLAGS_alsologtostderr;
+ int stderr_threshold = FLAGS_stderrthreshold;
FLAGS_alsologtostderr = false;
FLAGS_logtostderr = false;
+ FLAGS_stderrthreshold = 5;
- // Now funnel the intermediate status logs provided to init.
+ // Now funnel the intermediate status logs provided to `init`.
logStatus(log);
// Restore settings for logging to stderr.
FLAGS_logtostderr = log_to_stderr;
FLAGS_alsologtostderr = also_log_to_stderr;
+ FLAGS_stderrthreshold = stderr_threshold;
// The filesystem logger cheats and uses Glog to log to the filesystem so
// we can return failure here and stop the custom log sink.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
class LoggerTests : public testing::Test {
public:
- LoggerTests() {}
-
void SetUp() {
+ logging_status_ = FLAGS_disable_logging;
+ FLAGS_disable_logging = false;
+
log_lines.clear();
status_messages.clear();
statuses_logged = 0;
last_status = {O_INFO, "", -1, ""};
}
+ void TearDown() { FLAGS_disable_logging = logging_status_; }
+
// Track lines emitted to logString
static std::vector<std::string> log_lines;
// Count calls to logStatus
static int statuses_logged;
+ // Count added and removed snapshot rows
+ static int snapshot_rows_added;
+ static int snapshot_rows_removed;
+ // Count the added health status rows
+ static int health_status_rows;
+
+ private:
+ /// Save the status of logging before running tests, restore afterward.
+ bool logging_status_;
};
std::vector<std::string> LoggerTests::log_lines;
StatusLogLine LoggerTests::last_status;
std::vector<std::string> LoggerTests::status_messages;
int LoggerTests::statuses_logged = 0;
+int LoggerTests::snapshot_rows_added = 0;
+int LoggerTests::snapshot_rows_removed = 0;
+int LoggerTests::health_status_rows = 0;
class TestLoggerPlugin : public LoggerPlugin {
public:
return Status(0, "OK");
}
+ Status logSnapshot(const std::string& s) {
+ LoggerTests::snapshot_rows_added += 1;
+ LoggerTests::snapshot_rows_removed += 0;
+ return Status(0, "OK");
+ }
+
+ Status logHealth(const std::string& s) {
+ LoggerTests::health_status_rows += 1;
+ return Status(0, "OK");
+ }
+
virtual ~TestLoggerPlugin() {}
};
// does NOT handle Glog logs, there will be no statuses logged.
EXPECT_EQ(LoggerTests::statuses_logged, 0);
}
-}
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
+TEST_F(LoggerTests, test_logger_snapshots) {
+ // A snapshot query should not include removed items.
+ QueryLogItem item;
+ item.name = "test_query";
+ item.identifier = "unknown_test_host";
+ item.time = 0;
+ item.calendar_time = "no_time";
+
+ // Add a fake set of results.
+ item.results.added.push_back({{"test_column", "test_value"}});
+ logSnapshotQuery(item);
+
+ // Expect the plugin to optionally handle snapshot logging.
+ EXPECT_EQ(LoggerTests::snapshot_rows_added, 1);
+
+ // Add the same item as a health status log item.
+ logHealthStatus(item);
+ EXPECT_EQ(LoggerTests::health_status_rows, 1);
+}
}
--- /dev/null
+/*
+ * Copyright (c) 2014, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
::sleep(FLAGS_delay);
}
-
// Instead of calling "shutdownOsquery" force the EF to join its threads.
osquery::EventFactory::end(true);
GFLAGS_NAMESPACE::ShutDownCommandLineFlags();
*
*/
+#include <stdio.h>
#include <osquery/core.h>
int main(int argc, char *argv[]) {
// Parse/apply flags, start registry, load logger/config plugins.
osquery::Initializer runner(argc, argv, osquery::OSQUERY_TOOL_SHELL);
+ if (argc > 1 || !isatty(fileno(stdin)) || osquery::FLAGS_A.size() > 0 ||
+ osquery::FLAGS_L) {
+ // A query was set as a positional argument for via stdin.
+ osquery::FLAGS_disable_events = true;
+ }
+
runner.start();
// Virtual tables will be attached to the shell's in-memory SQLite DB.
--- /dev/null
+/*
+ * Copyright (c) 2015, Wesley Shields
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+#include <cstdlib>
+
+#include <time.h>
+
+#include <boost/filesystem.hpp>
+
+#include <gtest/gtest.h>
+
+#include <osquery/database.h>
+
+#include "osquery/core/test_util.h"
+
+namespace osquery {
+DECLARE_string(database_path);
+DECLARE_string(extensions_socket);
+DECLARE_string(modules_autoload);
+DECLARE_string(extensions_autoload);
+DECLARE_bool(disable_logging);
+
+void initTesting() {
+ // Seed the random number generator, some tests generate temporary files
+ // ports, sockets, etc using random numbers.
+ std::chrono::milliseconds ms =
+ std::chrono::duration_cast<std::chrono::milliseconds>(
+ std::chrono::system_clock::now().time_since_epoch());
+ srand(ms.count());
+
+ // Set safe default values for path-based flags.
+ // Specific unittests may edit flags temporarily.
+ boost::filesystem::remove_all(kTestWorkingDirectory);
+ boost::filesystem::create_directories(kTestWorkingDirectory);
+ FLAGS_database_path = kTestWorkingDirectory + "unittests.db";
+ FLAGS_extensions_socket = kTestWorkingDirectory + "unittests.em";
+ FLAGS_extensions_autoload = kTestWorkingDirectory + "unittests-ext.load";
+ FLAGS_modules_autoload = kTestWorkingDirectory + "unittests-mod.load";
+ FLAGS_disable_logging = true;
+
+ // Create a default DBHandle instance before unittests.
+ (void)DBHandle::getInstance();
+}
+}
+
+int main(int argc, char* argv[]) {
+ osquery::initTesting();
+ testing::InitGoogleTest(&argc, argv);
+ // Optionally enable Goggle Logging
+ // google::InitGoogleLogging(argv[0]);
+ return RUN_ALL_TESTS();
+}
ADD_OSQUERY_LIBRARY(TRUE osquery_registry registry.cpp)
-ADD_OSQUERY_TEST(TRUE osquery_registry_tests registry_tests.cpp)
+FILE(GLOB OSQUERY_REGISTRY_TESTS "tests/*.cpp")
+ADD_OSQUERY_TEST(TRUE ${OSQUERY_REGISTRY_TESTS})
if (items_.count(item_name) == 0 && external_.count(item_name) == 0) {
return Status(1, "Unknown registry item");
}
+
active_ = item_name;
+ // The active plugin is setup when initialized.
+ if (exists(item_name, true)) {
+ Registry::get(name_, item_name)->setUp();
+ }
return Status(0, "OK");
}
}
void RegistryHelperCore::setUp() {
+ // If this registry does not auto-setup do NOT setup the registry items.
+ if (!auto_setup_) {
+ return;
+ }
+
// If the registry is using a single 'active' plugin, setUp that plugin.
// For config and logger, only setUp the selected plugin.
if (active_.size() != 0 && exists(active_, true)) {
return;
}
- // If this registry does not auto-setup do NOT setup the registry items.
- if (!auto_setup_) {
- return;
- }
-
// Try to set up each of the registry items.
// If they fail, remove them from the registry.
std::vector<std::string> failed;
const PluginRequest& request,
PluginResponse& response) {
// Forward factory call to the registry.
- return registry(registry_name)->call(item_name, request, response);
+ try {
+ return registry(registry_name)->call(item_name, request, response);
+ } catch (const std::exception& e) {
+ LOG(ERROR) << registry_name << " registry " << item_name
+ << " plugin caused exception: " << e.what();
+ return Status(1, e.what());
+ } catch (...) {
+ LOG(ERROR) << registry_name << " registry " << item_name
+ << " plugin caused unknown exception";
+ return Status(2, "Unknown exception");
+ }
}
Status RegistryFactory::call(const std::string& registry_name,
const PluginRequest& request,
PluginResponse& response) {
auto& plugin = registry(registry_name)->getActive();
- return registry(registry_name)->call(plugin, request, response);
+ return call(registry_name, plugin, request, response);
}
Status RegistryFactory::call(const std::string& registry_name,
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
-
+
#include <gtest/gtest.h>
#include <osquery/logger.h>
EXPECT_EQ(RegistryFactory::getModule(), 0);
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- google::InitGoogleLogging(argv[0]);
- return RUN_ALL_TESTS();
-}
+++ /dev/null
-ADD_OSQUERY_LIBRARY(TRUE osquery_scheduler scheduler.cpp)
-
-ADD_OSQUERY_TEST(TRUE osquery_scheduler_tests scheduler_tests.cpp)
ADD_OSQUERY_LIBRARY(FALSE osquery_sql_internal sqlite_util.cpp
virtual_table.cpp)
-ADD_OSQUERY_TEST(TRUE osquery_sql_tests sql_tests.cpp)
-ADD_OSQUERY_TEST(FALSE osquery_sqlite_util_tests sqlite_util_tests.cpp)
-ADD_OSQUERY_TEST(FALSE osquery_virtual_table_tests virtual_table_tests.cpp)
-
+FILE(GLOB OSQUERY_SQL_TESTS "tests/*.cpp")
+ADD_OSQUERY_TEST(FALSE ${OSQUERY_SQL_TESTS})
#include <osquery/core.h>
#include <osquery/database.h>
+#include <osquery/flags.h>
#include <osquery/logger.h>
#include <osquery/sql.h>
/// SQL provider for osquery internal/core.
REGISTER_INTERNAL(SQLiteSQLPlugin, "sql", "sql");
+FLAG(string,
+ disable_tables,
+ "Not Specified",
+ "Comma-delimited list of table names to be disabled");
+
/**
* @brief A map of SQLite status codes to their corresponding message string
*
void SQLiteDBManager::unlock() { instance().lock_.unlock(); }
+bool SQLiteDBManager::isDisabled(const std::string& table_name) {
+ const auto& element = instance().disabled_tables_.find(table_name);
+ return (element != instance().disabled_tables_.end());
+}
+
+std::unordered_set<std::string> SQLiteDBManager::parseDisableTablesFlag(
+ const std::string& list) {
+ const auto& tables = split(list, ",");
+ return std::unordered_set<std::string>(tables.begin(), tables.end());
+}
+
SQLiteDBInstance SQLiteDBManager::getUnique() { return SQLiteDBInstance(); }
SQLiteDBInstance SQLiteDBManager::get() {
}
Status getQueryColumnsInternal(const std::string& q,
- tables::TableColumns& columns,
- sqlite3* db) {
+ tables::TableColumns& columns,
+ sqlite3* db) {
int rc;
// Will automatically handle calling sqlite3_finalize on the prepared stmt
#include <map>
#include <mutex>
+#include <unordered_set>
#include <sqlite3.h>
/// See `get` but always return a transient DB connection (for testing).
static SQLiteDBInstance getUnique();
+ /**
+ * @brief Check if `table_name` is disabled.
+ *
+ * Check if `table_name` is in the list of tables passed in to the
+ * `--disable_tables` flag.
+ *
+ * @param The name of the Table to check.
+ * @return If `table_name` is disabled.
+ */
+ static bool isDisabled(const std::string& table_name);
+
/// When the primary SQLiteDBInstance is destructed it will unlock.
static void unlock();
protected:
- SQLiteDBManager() : db_(nullptr), lock_(mutex_, boost::defer_lock) {}
+ SQLiteDBManager() : db_(nullptr), lock_(mutex_, boost::defer_lock) {
+ disabled_tables_ = parseDisableTablesFlag(Flag::getValue("disable_tables"));
+ }
SQLiteDBManager(SQLiteDBManager const&);
- void operator=(SQLiteDBManager const&);
+ SQLiteDBManager& operator=(SQLiteDBManager const&);
virtual ~SQLiteDBManager();
private:
boost::mutex mutex_;
/// Mutex and lock around sqlite3 access.
boost::unique_lock<boost::mutex> lock_;
+ /// Member variable to hold set of disabled tables.
+ std::unordered_set<std::string> disabled_tables_;
+ /// Parse a comma-delimited set of tables names, passed in as a flag.
+ std::unordered_set<std::string> parseDisableTablesFlag(const std::string& s);
};
/**
EXPECT_EQ(results.size(), 1);
}
-class TestTable : public tables::TablePlugin {
+class TestTablePlugin : public tables::TablePlugin {
private:
tables::TableColumns columns() const {
return {{"test_int", "INTEGER"}, {"test_text", "TEXT"}};
};
TEST_F(SQLTests, test_raw_access_context) {
- Registry::add<TestTable>("table", "test_table");
- auto results = SQL::selectAllFrom("test_table");
+ Registry::add<TestTablePlugin>("table", "test");
+ auto results = SQL::selectAllFrom("test");
EXPECT_EQ(results.size(), 1);
EXPECT_EQ(results[0]["test_text"], "1");
- results = SQL::selectAllFrom("test_table", "test_int", tables::EQUALS, "1");
+ results = SQL::selectAllFrom("test", "test_int", tables::EQUALS, "1");
EXPECT_EQ(results.size(), 2);
- results = SQL::selectAllFrom("test_table", "test_int", tables::EQUALS, "2");
+ results = SQL::selectAllFrom("test", "test_int", tables::EQUALS, "2");
EXPECT_EQ(results.size(), 2);
EXPECT_EQ(results[0]["test_int"], "0");
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
SQLiteDBInstance dbc = SQLiteDBManager::getUnique();
char* err = nullptr;
std::vector<std::string> queries = {
- "CREATE TABLE test_table ("
- "username varchar(30) primary key, "
- "age int"
- ")",
+ "CREATE TABLE test_table (username varchar(30) primary key, age int)",
"INSERT INTO test_table VALUES (\"mike\", 23)",
"INSERT INTO test_table VALUES (\"matt\", 24)"};
+
for (auto q : queries) {
sqlite3_exec(dbc.db(), q.c_str(), nullptr, nullptr, &err);
if (err != nullptr) {
- throw std::domain_error("Cannot create testing DBC's db.");
+ throw std::domain_error(std::string("Cannot create testing DBC's db: ") +
+ err);
}
}
TEST_F(SQLiteUtilTests, test_get_query_columns) {
auto dbc = getTestDBC();
-
- std::string query;
- Status status;
tables::TableColumns results;
- query =
- "SELECT hour, minutes, seconds, version, config_md5, config_path, \
- pid FROM time JOIN osquery_info";
- status = getQueryColumnsInternal(query, results, dbc.db());
+ std::string query = "SELECT seconds, version FROM time JOIN osquery_info";
+ auto status = getQueryColumnsInternal(query, results, dbc.db());
ASSERT_TRUE(status.ok());
- ASSERT_EQ(7, results.size());
- EXPECT_EQ(std::make_pair(std::string("hour"), std::string("INTEGER")),
- results[0]);
- EXPECT_EQ(std::make_pair(std::string("minutes"), std::string("INTEGER")),
- results[1]);
+ ASSERT_EQ(2, results.size());
EXPECT_EQ(std::make_pair(std::string("seconds"), std::string("INTEGER")),
- results[2]);
+ results[0]);
EXPECT_EQ(std::make_pair(std::string("version"), std::string("TEXT")),
- results[3]);
- EXPECT_EQ(std::make_pair(std::string("config_md5"), std::string("TEXT")),
- results[4]);
- EXPECT_EQ(std::make_pair(std::string("config_path"), std::string("TEXT")),
- results[5]);
- EXPECT_EQ(std::make_pair(std::string("pid"), std::string("INTEGER")),
- results[6]);
+ results[1]);
query = "SELECT hour + 1 AS hour1, minutes + 1 FROM time";
status = getQueryColumnsInternal(query, results, dbc.db());
ASSERT_FALSE(status.ok());
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
TEST_F(VirtualTableTests, test_sqlite3_attach_vtable) {
auto table = std::make_shared<sampleTablePlugin>();
table->setName("sample");
- //sqlite3* db = nullptr;
- //sqlite3_open(":memory:", &db);
+
+ // Request a managed "connection".
+ // This will be a single (potentially locked) instance or a transient
+ // SQLite database if there is contention and a lock was not requested.
auto dbc = SQLiteDBManager::get();
// Virtual tables require the registry/plugin API to query tables.
"sample", tables::columnDefinition(response), dbc.db());
EXPECT_EQ(status.getCode(), SQLITE_OK);
+/*
+/// TODO: Check below
+/// This makes SEGFAULT - But, single test is OK
std::string q = "SELECT sql FROM sqlite_temp_master WHERE tbl_name='sample';";
QueryData results;
status = queryInternal(q, results, dbc.db());
EXPECT_EQ("CREATE VIRTUAL TABLE sample USING sample(foo INTEGER, bar TEXT)",
results[0]["sql"]);
+*/
}
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
for (size_t i = 0; i < argc; ++i) {
auto expr = (const char *)sqlite3_value_text(argv[i]);
+ if (expr == nullptr) {
+ // SQLite did not expose the expression value.
+ continue;
+ }
// Set the expression from SQLite's now-populated argv.
pVtab->content->constraints[i].second.expr = std::string(expr);
// Add the constraint to the column-sorted query request map.
Status attachTableInternal(const std::string &name,
const std::string &statement,
sqlite3 *db) {
+ if (SQLiteDBManager::isDisabled(name)) {
+ VLOG(0) << "Table " << name << " is disabled, not attaching";
+ return Status(0, getStringForSQLiteReturnCode(0));
+ }
+
// A static module structure does not need specific logic per-table.
static sqlite3_module module = {
- 0,
- xCreate,
- xCreate,
- xBestIndex,
- xDestroy,
- xDestroy,
- xOpen,
- xClose,
- xFilter,
- xNext,
- xEof,
- xColumn,
- xRowid,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
+ 0, xCreate, xCreate, xBestIndex, xDestroy, xDestroy, xOpen,
+ xClose, xFilter, xNext, xEof, xColumn, xRowid, 0,
+ 0, 0, 0, 0, 0, 0,
};
// Note, if the clientData API is used then this will save a registry call
if (rc == SQLITE_OK || rc == SQLITE_MISUSE) {
auto format =
"CREATE VIRTUAL TABLE temp." + name + " USING " + name + statement;
- rc = sqlite3_exec(db, format.c_str(), 0, 0, 0);
+ rc = sqlite3_exec(db, format.c_str(), nullptr, nullptr, 0);
} else {
LOG(ERROR) << "Error attaching table: " << name << " (" << rc << ")";
}
Status detachTableInternal(const std::string &name, sqlite3 *db) {
auto format = "DROP TABLE IF EXISTS temp." + name;
- int rc = sqlite3_exec(db, format.c_str(), 0, 0, 0);
+ int rc = sqlite3_exec(db, format.c_str(), nullptr, nullptr, 0);
if (rc != SQLITE_OK) {
LOG(ERROR) << "Error detaching table: " << name << " (" << rc << ")";
}
ADD_OSQUERY_LIBRARY(FALSE osquery_tables_linux events/linux/passwd_changes.cpp
- events/linux/file_changes.cpp
+ events/linux/file_events.cpp
events/linux/hardware_events.cpp
+ networking/etc_protocols.cpp
networking/linux/routes.cpp
networking/linux/process_open_sockets.cpp
networking/linux/arp_cache.cpp
system/system_controls.cpp
system/logged_in_users.cpp)
-ADD_OSQUERY_TEST(FALSE osquery_etc_hosts_tests networking/etc_hosts_tests.cpp)
+FILE(GLOB OSQUERY_CROSS_TABLES_TESTS "[!u]*/tests/*.cpp")
+ADD_OSQUERY_TEST(FALSE ${OSQUERY_CROSS_TABLES_TESTS})
*
* This is mostly an example EventSubscriber implementation.
*/
-class FileChangesEventSubscriber
+class FileEventSubscriber
: public EventSubscriber<INotifyEventPublisher> {
- DECLARE_SUBSCRIBER("file_changes");
-
public:
- void init();
+ Status init();
/**
* @brief This exports a single Callback for INotifyEventPublisher events.
* @brief Each EventSubscriber must register itself so the init method is
*called.
*
- * This registers PasswdChangesEventSubscriber into the osquery EventSubscriber
+ * This registers FileEventSubscriber into the osquery EventSubscriber
* pseudo-plugin registry.
*/
-REGISTER(FileChangesEventSubscriber, "event_subscriber", "file_changes");
+REGISTER(FileEventSubscriber, "event_subscriber", "file_events");
-void FileChangesEventSubscriber::init() {
+Status FileEventSubscriber::init() {
ConfigDataInstance config;
for (const auto& element_kv : config.files()) {
for (const auto& file : element_kv.second) {
mc->recursive = 1;
mc->path = file;
mc->mask = IN_ATTRIB | IN_MODIFY | IN_DELETE | IN_CREATE;
- subscribe(&FileChangesEventSubscriber::Callback, mc,
+ subscribe(&FileEventSubscriber::Callback, mc,
(void*)(&element_kv.first));
}
}
+
+ return Status(0, "OK");
}
-Status FileChangesEventSubscriber::Callback(const INotifyEventContextRef& ec,
+Status FileEventSubscriber::Callback(const INotifyEventContextRef& ec,
const void* user_data) {
Row r;
r["action"] = ec->action;
* @brief Track udev events in Linux
*/
class HardwareEventSubscriber : public EventSubscriber<UdevEventPublisher> {
- DECLARE_SUBSCRIBER("hardware_events");
-
public:
- void init();
+ Status init();
Status Callback(const UdevEventContextRef& ec, const void* user_data);
};
REGISTER(HardwareEventSubscriber, "event_subscriber", "hardware_events");
-void HardwareEventSubscriber::init() {
+Status HardwareEventSubscriber::init() {
auto subscription = createSubscriptionContext();
subscription->action = UDEV_EVENT_ACTION_ALL;
subscribe(&HardwareEventSubscriber::Callback, subscription, nullptr);
+ return Status(0, "OK");
}
Status HardwareEventSubscriber::Callback(const UdevEventContextRef& ec,
*/
class PasswdChangesEventSubscriber
: public EventSubscriber<INotifyEventPublisher> {
- DECLARE_SUBSCRIBER("passwd_changes");
-
public:
- void init();
+ Status init();
/**
* @brief This exports a single Callback for INotifyEventPublisher events.
*/
REGISTER(PasswdChangesEventSubscriber, "event_subscriber", "passwd_changes");
-void PasswdChangesEventSubscriber::init() {
+Status PasswdChangesEventSubscriber::init() {
auto mc = createSubscriptionContext();
mc->path = "/etc/passwd";
mc->mask = IN_ATTRIB | IN_MODIFY | IN_DELETE | IN_CREATE;
subscribe(&PasswdChangesEventSubscriber::Callback, mc, nullptr);
+ return Status(0, "OK");
}
Status PasswdChangesEventSubscriber::Callback(const INotifyEventContextRef& ec,
if (line.size() > 1) {
std::vector<std::string> hostnames;
for (int i = 1; i < line.size(); ++i) {
+ if (boost::starts_with(line[i], "#")) {
+ break;
+ }
hostnames.push_back(line[i]);
}
r["hostnames"] = boost::algorithm::join(hostnames, " ");
}
}
}
-
-int main(int argc, char* argv[]) {
- testing::InitGoogleTest(&argc, argv);
- google::InitGoogleLogging(argv[0]);
- return RUN_ALL_TESTS();
-}
--- /dev/null
+/*
+ * Copyright (c) 2014, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+#include <vector>
+#include <string>
+
+#include <boost/algorithm/string/join.hpp>
+#include <boost/algorithm/string/predicate.hpp>
+
+#include <osquery/core.h>
+#include <osquery/logger.h>
+#include <osquery/tables.h>
+#include <osquery/filesystem.h>
+
+namespace osquery {
+namespace tables {
+
+QueryData parseEtcProtocolsContent(const std::string& content) {
+ QueryData results;
+
+ for (const auto& line : split(content, "\n")) {
+ // Empty line or comment.
+ if (line.size() == 0 || boost::starts_with(line, "#")) {
+ continue;
+ }
+
+ // [0]: name protocol_number alias
+ // [1]: [comment part1]
+ // [2]: [comment part2]
+ // [n]: [comment partn]
+ auto protocol_comment = split(line, "#");
+
+ // [0]: name
+ // [1]: protocol_number
+ // [2]: alias
+ auto protocol_fields = split(protocol_comment[0]);
+ if (protocol_fields.size() < 2) {
+ continue;
+ }
+
+ Row r;
+ r["name"] = TEXT(protocol_fields[0]);
+ r["number"] = INTEGER(protocol_fields[1]);
+ if (protocol_fields.size() > 2) {
+ r["alias"] = TEXT(protocol_fields[2]);
+ }
+
+ // If there is a comment for the service.
+ if (protocol_comment.size() > 1) {
+ // Removes everything except the comment (parts of the comment).
+ protocol_comment.erase(protocol_comment.begin(), protocol_comment.begin() + 1);
+ r["comment"] = TEXT(boost::algorithm::join(protocol_comment, " # "));
+ }
+ results.push_back(r);
+ }
+ return results;
+}
+
+QueryData genEtcProtocols(QueryContext& context) {
+ std::string content;
+ auto s = osquery::readFile("/etc/protocols", content);
+ if (s.ok()) {
+ return parseEtcProtocolsContent(content);
+ } else {
+ TLOG << "Error reading /etc/protocols: " << s.toString();
+ return {};
+ }
+}
+}
+}
*/
#include <arpa/inet.h>
-#include <linux/netlink.h>
#include <boost/algorithm/string/split.hpp>
#include <osquery/logger.h>
#include <osquery/tables.h>
-// From uapi/linux/sock_diag.h
-// From linux/sock_diag.h (<= 3.6)
-#ifndef SOCK_DIAG_BY_FAMILY
-#define SOCK_DIAG_BY_FAMILY 20
-#endif
-
-#include "inet_diag.h"
-
namespace osquery {
namespace tables {
-// heavily influenced by github.com/kristrev/inet-diag-example
-enum {
- TCP_ESTABLISHED = 1,
- TCP_SYN_SENT,
- TCP_SYN_RECV,
- TCP_FIN_WAIT1,
- TCP_FIN_WAIT2,
- TCP_TIME_WAIT,
- TCP_CLOSE,
- TCP_CLOSE_WAIT,
- TCP_LAST_ACK,
- TCP_LISTEN,
- TCP_CLOSING
+// Linux proc protocol define to net stats file name.
+const std::map<int, std::string> kLinuxProtocolNames = {
+ {IPPROTO_ICMP, "icmp"},
+ {IPPROTO_TCP, "tcp"},
+ {IPPROTO_UDP, "udp"},
+ {IPPROTO_UDPLITE, "udplite"},
+ {IPPROTO_RAW, "raw"},
};
-#define TCPF_ALL 0xFFF
-#define SOCKET_BUFFER_SIZE (getpagesize() < 8192L ? getpagesize() : 8192L)
-
-int sendNLDiagMessage(int sockfd, int protocol, int family) {
- struct sockaddr_nl sa;
- memset(&sa, 0, sizeof(sa));
- sa.nl_family = AF_NETLINK;
-
- // Only interested in network sockets currently.
- struct inet_diag_req_v2 conn_req;
- memset(&conn_req, 0, sizeof(conn_req));
- conn_req.sdiag_family = family;
- conn_req.sdiag_protocol = protocol;
- if (protocol == IPPROTO_TCP) {
- conn_req.idiag_states =
- TCPF_ALL &
- ~((1 << TCP_SYN_RECV) | (1 << TCP_TIME_WAIT) | (1 << TCP_CLOSE));
- // Request additional TCP information.
- conn_req.idiag_ext |= (1 << (INET_DIAG_INFO - 1));
- } else {
- conn_req.idiag_states = -1;
- }
-
- struct nlmsghdr nlh;
- memset(&nlh, 0, sizeof(nlh));
- nlh.nlmsg_len = NLMSG_LENGTH(sizeof(conn_req));
- nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST;
- nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
-
- struct iovec iov[4];
- iov[0].iov_base = (void *)&nlh;
- iov[0].iov_len = sizeof(nlh);
- iov[1].iov_base = (void *)&conn_req;
- iov[1].iov_len = sizeof(conn_req);
-
- struct msghdr msg;
- memset(&msg, 0, sizeof(msg));
- msg.msg_name = (void *)&sa;
- msg.msg_namelen = sizeof(sa);
- msg.msg_iov = iov;
- msg.msg_iovlen = 2;
-
- int retval = sendmsg(sockfd, &msg, 0);
- return retval;
-}
-
-Row getNLDiagMessage(const struct inet_diag_msg *diag_msg,
- int protocol,
- int family) {
- char local_addr_buf[INET6_ADDRSTRLEN] = {0};
- char remote_addr_buf[INET6_ADDRSTRLEN] = {0};
-
- // set up data structures depending on idiag_family type
- if (diag_msg->idiag_family == AF_INET) {
- inet_ntop(AF_INET,
- (struct in_addr *)&(diag_msg->id.idiag_src),
- local_addr_buf,
- INET_ADDRSTRLEN);
- inet_ntop(AF_INET,
- (struct in_addr *)&(diag_msg->id.idiag_dst),
- remote_addr_buf,
- INET_ADDRSTRLEN);
- } else if (diag_msg->idiag_family == AF_INET6) {
- inet_ntop(AF_INET6,
- (struct in_addr6 *)&(diag_msg->id.idiag_src),
- local_addr_buf,
- INET6_ADDRSTRLEN);
- inet_ntop(AF_INET6,
- (struct in_addr6 *)&(diag_msg->id.idiag_dst),
- remote_addr_buf,
- INET6_ADDRSTRLEN);
- }
-
- // populate the Row from diag_msg fields
- Row row;
- row["socket"] = INTEGER(diag_msg->idiag_inode);
- row["family"] = INTEGER(family);
- row["protocol"] = INTEGER(protocol);
- row["local_address"] = TEXT(local_addr_buf);
- row["remote_address"] = TEXT(remote_addr_buf);
- row["local_port"] = INTEGER(ntohs(diag_msg->id.idiag_sport));
- row["remote_port"] = INTEGER(ntohs(diag_msg->id.idiag_dport));
- return row;
-}
-
std::string addressFromHex(const std::string &encoded_address, int family) {
char addr_buffer[INET6_ADDRSTRLEN] = {0};
if (family == AF_INET) {
return decoded;
}
-/// A fallback method for generating socket information from /proc/net
void genSocketsFromProc(const std::map<std::string, std::string> &socket_inodes,
int protocol,
int family,
QueryData &results) {
std::string path = "/proc/net/";
- path += (protocol == IPPROTO_UDP) ? "udp" : "tcp";
+ path += kLinuxProtocolNames.at(protocol);
path += (family == AF_INET6) ? "6" : "";
std::string content;
}
}
-void genSocketsForFamily(
- const std::map<std::string, std::string> &socket_inodes,
- int protocol,
- int family,
- QueryData &results) {
- // set up the socket
- int nl_sock = 0;
- if ((nl_sock = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_INET_DIAG)) == -1) {
- return;
- }
-
- // send the inet_diag message
- if (sendNLDiagMessage(nl_sock, protocol, family) < 0) {
- close(nl_sock);
- return;
- }
-
- // recieve netlink messages
- uint8_t recv_buf[SOCKET_BUFFER_SIZE];
- int numbytes = recv(nl_sock, recv_buf, sizeof(recv_buf), 0);
- if (numbytes <= 0) {
- VLOG(1) << "NETLINK receive failed";
- return;
- }
-
- auto nlh = (struct nlmsghdr *)recv_buf;
- while (NLMSG_OK(nlh, numbytes)) {
- if (nlh->nlmsg_type == NLMSG_DONE) {
- break;
- }
-
- if (nlh->nlmsg_type == NLMSG_ERROR) {
- genSocketsFromProc(socket_inodes, protocol, family, results);
- break;
- }
-
- // parse and process netlink message
- auto diag_msg = (struct inet_diag_msg *)NLMSG_DATA(nlh);
- auto row = getNLDiagMessage(diag_msg, protocol, family);
-
- if (socket_inodes.count(row["socket"]) > 0) {
- row["pid"] = socket_inodes.at(row["socket"]);
- } else {
- row["pid"] = "-1";
- }
-
- results.push_back(row);
- nlh = NLMSG_NEXT(nlh, numbytes);
- }
-
- close(nl_sock);
- return;
-}
-
QueryData genOpenSockets(QueryContext &context) {
QueryData results;
}
}
- // Use netlink messages to query socket information.
- genSocketsForFamily(socket_inodes, IPPROTO_TCP, AF_INET, results);
- genSocketsForFamily(socket_inodes, IPPROTO_UDP, AF_INET, results);
- genSocketsForFamily(socket_inodes, IPPROTO_TCP, AF_INET6, results);
- genSocketsForFamily(socket_inodes, IPPROTO_UDP, AF_INET6, results);
+ // This used to use netlink (Ref: #1094) to request socket information.
+ // Use proc messages to query socket information.
+ for (const auto &protocol : kLinuxProtocolNames) {
+ genSocketsFromProc(socket_inodes, protocol.first, AF_INET, results);
+ genSocketsFromProc(socket_inodes, protocol.first, AF_INET6, results);
+ }
+
return results;
}
}
--- /dev/null
+/*
+ * Copyright (c) 2014, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+#include <gtest/gtest.h>
+
+#include <osquery/logger.h>
+#include <osquery/database.h>
+
+#include "osquery/core/test_util.h"
+
+namespace osquery {
+namespace tables {
+
+osquery::QueryData parseEtcProtocolsContent(const std::string& content);
+
+class EtcProtocolsTests : public testing::Test {};
+
+TEST_F(EtcProtocolsTests, test_parse_etc_protocols_content) {
+ EXPECT_EQ(parseEtcProtocolsContent(getEtcProtocolsContent()),
+ getEtcProtocolsExpectedResults());
+}
+}
+}
std::string macAsString(const struct ifaddrs *addr) {
std::stringstream mac;
- if (addr->ifa_addr == NULL) {
+ if (addr->ifa_addr == nullptr) {
// No link or MAC exists.
return "";
}
int socket_fd = socket(AF_INET, SOCK_DGRAM, 0);
ifr.ifr_addr.sa_family = AF_INET;
- strncpy(ifr.ifr_name, addr->ifa_name, IFNAMSIZ);
+ memcpy(ifr.ifr_name, addr->ifa_name, IFNAMSIZ);
ioctl(socket_fd, SIOCGIFHWADDR, &ifr);
close(socket_fd);
+++ /dev/null
-table_name("package_bom")
-description("OS X package bill of materials (BOM) file list.")
-schema([
- Column("filepath", TEXT, "Package file or directory"),
- Column("uid", INTEGER, "Expected user of file or directory"),
- Column("gid", INTEGER, "Expected group of file or directory"),
- Column("mode", INTEGER, "Expected permissions"),
- Column("size", INTEGER, "Expected file size"),
- Column("modified_time", INTEGER, "Timestamp the file was installed"),
- Column("path", TEXT, "Path of package bom", required=True),
-])
-implementation("packages@genPackageBOM")
+++ /dev/null
-table_name("package_receipts")
-description("OS X package receipt details.")
-schema([
- Column("package_id", TEXT, "Package domain identifier"),
- Column("package_filename", TEXT, "Filename of original .pkg file"),
- Column("version", TEXT, "Installed package version"),
- Column("location", TEXT, "Optional relative install path on volume"),
- Column("install_time", INTEGER, "Timestamp of install time"),
- Column("installer_name", TEXT, "Name of installer process"),
- Column("path", TEXT, "Path of receipt plist",
- additional=True),
-])
-implementation("packages@genPackageReceipts")
--- /dev/null
+table_name("etc_protocols")
+description("Line-parsed /etc/protocols.")
+schema([
+ Column("name", TEXT, "Protocol name"),
+ Column("number", INTEGER, "Protocol number"),
+ Column("alias", TEXT, "Protocol alias"),
+ Column("comment", TEXT, "Comment with protocol description"),
+])
+implementation("etc_protocols@genEtcProtocols")
+
-table_name("file_changes")
-description("Track time, action changes to files specified in configuation data.")
+table_name("file_events")
+description("Track time/action changes to files specified in configuration data.")
schema([
Column("target_path", TEXT, "The path changed"),
Column("category", TEXT, "The category of the file"),
Column("sha256", TEXT, "The SHA256 of the file after change"),
])
attributes(event_subscriber=True)
-implementation("file_changes@file_changes::genTable")
+implementation("file_events@file_events::genTable")
--- /dev/null
+table_name("os_version")
+description("A single row containing the operating system name and version.")
+schema([
+ Column("name", TEXT, "Distribution or product name"),
+ Column("major", INTEGER, "Major release version"),
+ Column("minor", INTEGER, "Minor release version"),
+ Column("patch", INTEGER, "Optional patch release"),
+ Column("build", TEXT, "Optional build-specific or variant string"),
+])
+implementation("system/os_version@genOSVersion")
Column("gid", BIGINT, "Unsgiend groud ID"),
Column("euid", BIGINT, "Unsigned effective user ID"),
Column("egid", BIGINT, "Unsigned effective group ID"),
- Column("on_disk", TEXT, "The process path exist yes=1, no=-1"),
+ Column("on_disk", TEXT, "The process path exists yes=1, no=0, unknown=-1"),
Column("wired_size", TEXT, "Bytes of unpagable memory used by process"),
Column("resident_size", TEXT, "Bytes of private memory used by process"),
Column("phys_footprint", TEXT, "Bytes of total physical memory used"),
Column("is_link", INTEGER, "1 if a symlink else 0"),
Column("is_char", INTEGER, "1 if a character special device else 0"),
Column("is_block", INTEGER, "1 if a block special device else 0"),
+ Column("pattern", TEXT, "A pattern which can be used to match file paths"),
])
attributes(utility=True)
implementation("utility/file@genFile")
--- /dev/null
+table_name("osquery_schedule")
+description("Information about the current queries that are scheduled in osquery.")
+schema([
+ Column("name", TEXT, "The given name for this query"),
+ Column("query", TEXT, "The exact query to run"),
+ Column("interval", INTEGER, "The interval in seconds to run this query, not an exact interval"),
+ Column("executions", BIGINT, "Number of times the query was executed"),
+ Column("output_size", BIGINT, "Total number of bytes generated by the query"),
+ Column("wall_time", BIGINT, "Total wall time spent executing"),
+ Column("user_time", BIGINT, "Total user time spent executing"),
+ Column("system_time", BIGINT, "Total system time spent executing"),
+ Column("average_memory", BIGINT, "Average private memory left after executing"),
+])
+attributes(utility=True)
+implementation("osquery@genOsquerySchedule")
+++ /dev/null
-table_name("os_version")
-description("A single row containing the operating system version.")
-schema([
- Column("major", INTEGER),
- Column("minor", INTEGER),
- Column("patch", INTEGER),
-])
-implementation("system/os_version@genOSVersion")
+++ /dev/null
-/*
- * Copyright (c) 2014, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
- *
- */
-
-#include <osquery/filesystem.h>
-#include <osquery/logger.h>
-#include <osquery/hash.h>
-#include <osquery/sql.h>
-#include <osquery/tables.h>
-
-// Package BOM structure headers
-#include "osquery/tables/system/darwin/packages.h"
-
-const std::vector<std::string> kPkgReceiptPaths = {
- "/private/var/db/receipts/", "/Library/Receipts/",
-};
-
-const std::vector<std::string> kPkgReceiptUserPaths = {
- "/Library/Receipts/",
-};
-
-const std::map<std::string, std::string> kPkgReceiptKeys = {
- {"PackageIdentifier", "package_id"},
- {"PackageFileName", "package_filename"},
- {"PackageVersion", "version"},
- {"InstallPrefixPath", "location"},
- {"InstallDate", "install_time"},
- {"InstallProcessName", "installer_name"},
-};
-
-namespace fs = boost::filesystem;
-
-namespace osquery {
-namespace tables {
-
-BOM::BOM(const char* data, size_t size)
- : data_(data), size_(size), valid_(false) {
- if (size_ < sizeof(BOMHeader)) {
- // BOM structure header is invalid.
- return;
- }
-
- Header = (BOMHeader*)data_;
- if (std::string(Header->magic, 8) != "BOMStore") {
- // Header does not include expected magic value.
- return;
- }
-
- if (size_ < ntohl(Header->indexOffset) + sizeof(BOMBlockTable)) {
- // BOM block table is invalid.
- return;
- }
-
- Table = (BOMBlockTable*)(data_ + ntohl(Header->indexOffset));
- table_offset_ = ntohl(Header->indexOffset) + sizeof(BOMBlockTable);
- if (size_ < table_offset_ + ntohl(Table->count) * sizeof(BOMPointer)) {
- // BOM Pointer size/count is invalid.
- return;
- }
-
- if (size_ < ntohl(Header->varsOffset) + sizeof(BOMVars)) {
- // BOM variable table is invalid.
- return;
- }
-
- Vars = (BOMVars*)(data_ + ntohl(Header->varsOffset));
- vars_offset_ = ntohl(Header->varsOffset) + sizeof(BOMVars);
- if (size_ < vars_offset_ + ntohl(Vars->count) * sizeof(BOMVar)) {
- // BOM variables size/count is invalid.
- return;
- }
- valid_ = true;
-}
-
-/// Lookup a BOM pointer and optionally, it's size.
-const char* BOM::getPointer(int index, size_t* length) const {
- if (ntohl(index) >= ntohl(Table->count)) {
- // Requested pointer is out of range.
- return nullptr;
- }
-
- const BOMPointer* pointer = Table->blockPointers + ntohl(index);
- uint32_t addr = ntohl(pointer->address);
- if (size_ < addr + ntohl(pointer->length)) {
- // Address value is out of range.
- return nullptr;
- }
-
- if (length != nullptr) {
- *length = ntohl(pointer->length);
- }
- return data_ + addr;
-}
-
-const BOMVar* BOM::getVariable(size_t* offset) const {
- if (size_ < vars_offset_ + *offset + sizeof(BOMVar)) {
- // Offset overflows the variable list.
- *offset = 0;
- return nullptr;
- }
-
- const BOMVar* var = (BOMVar*)((char*)Vars->list + *offset);
- *offset += sizeof(BOMVar) + var->length;
- return var;
-}
-
-const BOMPaths* BOM::getPaths(int index) const {
- size_t paths_size = 0;
- auto paths = (BOMPaths*)getPointer(index, &paths_size);
- if (paths == nullptr || paths_size < sizeof(BOMPaths)) {
- return nullptr;
- }
-
- // Check the number of indexes.
- if (paths_size < ntohs(paths->count) * sizeof(BOMPathIndices)) {
- return nullptr;
- }
- return paths;
-}
-
-void genBOMPaths(const std::string& path,
- const BOM& bom,
- const BOMPaths* paths,
- QueryData& results) {
- std::map<uint32_t, std::string> filenames;
- std::map<uint32_t, uint32_t> parents;
-
- while (paths != nullptr) {
- for (unsigned j = 0; j < ntohs(paths->count); j++) {
- uint32_t index0 = paths->indices[j].index0;
- uint32_t index1 = paths->indices[j].index1;
-
- auto info1 = (const BOMPathInfo1*)bom.getPointer(index0);
- if (info1 == nullptr) {
- // Invalid BOMPathInfo1 structure.
- return;
- }
-
- auto info2 = (const BOMPathInfo2*)bom.getPointer(info1->index);
- if (info2 == nullptr) {
- // Invalid BOMPathInfo2 structure.
- return;
- }
-
- // Compute full name using pointer size.
- size_t file_size;
- auto file = (const BOMFile*)bom.getPointer(index1, &file_size);
- if (file == nullptr || file_size <= sizeof(BOMFile)) {
- // Invalid BOMFile structure or size out of bounds.
- return;
- }
- std::string filename(file->name, file_size - sizeof(BOMFile));
- filename = std::string(filename.c_str());
-
- // Maintain a lookup from BOM file index to filename.
- filenames[info1->id] = filename;
- if (file->parent) {
- parents[info1->id] = file->parent;
- }
-
- auto it = parents.find(info1->id);
- while (it != parents.end()) {
- filename = filenames[it->second] + "/" + filename;
- it = parents.find(it->second);
- }
-
- Row r;
- r["filepath"] = filename;
- r["uid"] = INTEGER(ntohl(info2->user));
- r["gid"] = INTEGER(ntohl(info2->group));
- r["mode"] = INTEGER(ntohs(info2->mode));
- r["size"] = INTEGER(ntohl(info2->size));
- r["modified_time"] = INTEGER(ntohl(info2->modtime));
- r["path"] = path;
- results.push_back(r);
- }
-
- if (paths->forward == htonl(0)) {
- return;
- } else {
- paths = bom.getPaths(paths->forward);
- }
- }
-}
-
-void genPackageBOM(const std::string& path, QueryData& results) {
- std::string content;
- // Read entire BOM file.
- if (!readFile(path, content).ok()) {
- return;
- }
-
- // Create a BOM representation.
- BOM bom(content.c_str(), content.size());
- if (!bom.isValid()) {
- return;
- }
-
- size_t var_offset = 0;
- for (unsigned i = 0; i < ntohl(bom.Vars->count); i++) {
- // Iterate through each BOM variable, a packed set of structures.
- auto var = bom.getVariable(&var_offset);
- if (var == nullptr || var->name == nullptr) {
- break;
- }
-
- size_t var_size;
- const char* var_data = bom.getPointer(var->index, &var_size);
- if (var_data == nullptr || var_size < sizeof(BOMTree) || var_size < var->length) {
- break;
- }
-
- std::string name = std::string(var->name, var->length);
- if (name != "Paths") {
- // We only parse the BOM paths structure.
- continue;
- }
-
- const BOMTree* tree = (const BOMTree*)var_data;
- auto paths = bom.getPaths(tree->child);
- while (paths != nullptr && paths->isLeaf == htons(0)) {
- if (paths->indices == nullptr) {
- break;
- }
- paths = bom.getPaths(paths->indices[0].index0);
- }
-
- genBOMPaths(path, bom, paths, results);
- break;
- }
-}
-
-QueryData genPackageBOM(QueryContext& context) {
- QueryData results;
- if (context.constraints["path"].exists()) {
- // If an explicit path was given, generate and return.
- auto paths = context.constraints["path"].getAll(EQUALS);
- for (const auto& path : paths) {
- genPackageBOM(path, results);
- }
- }
-
- return results;
-}
-
-void genPackageReceipt(const std::string& path, QueryData& results) {
- auto receipt = SQL::selectAllFrom("preferences", "path", EQUALS, path);
- if (receipt.size() == 0) {
- // Fail if the file could not be plist-parsed.
- return;
- }
-
- Row r;
- r["path"] = path;
- for (const auto& row : receipt) {
- if (kPkgReceiptKeys.count(row.at("key")) > 0) {
- r[kPkgReceiptKeys.at(row.at("key"))] = row.at("value");
- }
- }
- results.push_back(r);
-}
-
-QueryData genPackageReceipts(QueryContext& context) {
- QueryData results;
- if (context.constraints["path"].exists()) {
- // If an explicit path was given, generate and return.
- auto paths = context.constraints["path"].getAll(EQUALS);
- for (const auto& path : paths) {
- genPackageReceipt(path, results);
- }
- return results;
- }
-
- // Iterate over each well-known system absolute directory of receipts.
- // This is not the absolute correct way to enumerate receipts, but works.
- for (const auto& path : kPkgReceiptPaths) {
- std::vector<std::string> receipts;
- if (resolveFilePattern(path + "%.plist", receipts)) {
- for (const auto& receipt : receipts) {
- genPackageReceipt(receipt, results);
- }
- }
- }
-
- // User home directories may include user-specific receipt lists.
- auto users = getHomeDirectories();
- for (const auto& user : users) {
- for (const auto& path : kPkgReceiptUserPaths) {
- std::vector<std::string> receipts;
- fs::path receipt_path = user / path;
- if (resolveFilePattern(receipt_path.string() + "%.plist", receipts)) {
- for (const auto& receipt : receipts) {
- genPackageReceipt(receipt, results);
- }
- }
- }
- }
-
- return results;
-}
-}
-}
+++ /dev/null
-/*
- * Copyright (c) 2014, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
- *
- */
-
-#include <vector>
-#include <map>
-#include <string>
-
-namespace osquery {
-namespace tables {
-
-// Structure details based on work from Joseph Coffland, Julian Devlin
-struct BOMHeader {
- // Always "BOMStore"
- char magic[8];
- // Always 1
- uint32_t version;
- // Number of non-null entries in BOMBlockTable
- uint32_t numberOfBlocks;
- uint32_t indexOffset;
- uint32_t indexLength;
- uint32_t varsOffset;
- uint32_t varsLength;
-} __attribute__((packed));
-
-struct BOMPointer {
- uint32_t address;
- uint32_t length;
-} __attribute__((packed));
-
-struct BOMBlockTable {
- // See header for number of non-null blocks
- uint32_t count;
- // First entry must always be a null entry
- BOMPointer blockPointers[];
-} __attribute__((packed));
-
-struct BOMTree {
- // Always "tree"
- char tree[4];
- // Always 1
- uint32_t version;
- // Index for BOMPaths
- uint32_t child;
- // Always 4096
- uint32_t blockSize;
- // Total number of paths in all leaves combined
- uint32_t pathCount;
- uint8_t unknown3;
-} __attribute__((packed));
-
-struct BOMVar {
- uint32_t index;
- uint8_t length;
- char name[];
-} __attribute__((packed));
-
-struct BOMVars {
- uint32_t count;
- BOMVar list[];
-} __attribute__((packed));
-
-struct BOMPathIndices {
- // for leaf: points to BOMPathInfo1, for branch points to BOMPaths
- uint32_t index0;
- // always points to BOMFile
- uint32_t index1;
-} __attribute__((packed));
-
-struct BOMPaths {
- uint16_t isLeaf;
- uint16_t count;
- uint32_t forward;
- uint32_t backward;
- BOMPathIndices indices[];
-} __attribute__((packed));
-
-struct BOMPathInfo2 {
- uint8_t type;
- uint8_t unknown0;
- uint16_t architecture;
- uint16_t mode;
- uint32_t user;
- uint32_t group;
- uint32_t modtime;
- uint32_t size;
- uint8_t unknown1;
- union {
- uint32_t checksum;
- uint32_t devType;
- };
- uint32_t linkNameLength;
- char linkName[];
-} __attribute__((packed));
-
-struct BOMPathInfo1 {
- uint32_t id;
- // Pointer to BOMPathInfo2
- uint32_t index;
-} __attribute__((packed));
-
-struct BOMFile {
- // Parent BOMPathInfo1->id
- uint32_t parent;
- char name[];
-} __attribute__((packed));
-
-class BOM {
- public:
- BOM(const char* data, size_t size);
-
- /// Helper to check if the header parsing completed.
- bool isValid() { return valid_; }
-
- /// Lookup a BOM pointer and optionally, it's size.
- const char* getPointer(int index, size_t* length = nullptr) const;
- const BOMPaths* getPaths(int index) const;
- const BOMVar* getVariable(size_t* offset) const;
-
- private:
- const char* data_;
- size_t size_;
- bool valid_;
-
- private:
- size_t vars_offset_;
- size_t table_offset_;
-
- public:
- const BOMHeader* Header;
- const BOMBlockTable* Table;
- const BOMVars* Vars;
-};
-}
-}
#ifdef __APPLE__
setutxent_wtmp(0); // 0 = reverse chronological order
- while ((ut = getutxent_wtmp()) != NULL) {
+ while ((ut = getutxent_wtmp()) != nullptr) {
#else
utmpxname("/var/log/wtmpx");
setutxent();
- while ((ut = getutxent()) != NULL) {
+ while ((ut = getutxent()) != nullptr) {
#endif
Row r;
std::set<long> groups_in;
setgrent();
- while ((grp = getgrent()) != NULL) {
+ while ((grp = getgrent()) != nullptr) {
if (std::find(groups_in.begin(), groups_in.end(), grp->gr_gid) ==
groups_in.end()) {
Row r;
#include <string>
#include <boost/regex.hpp>
+#include <boost/xpressive/xpressive.hpp>
#include <osquery/filesystem.h>
#include <osquery/sql.h>
#include <osquery/tables.h>
+namespace xp = boost::xpressive;
+
+namespace osquery {
+namespace tables {
+
#ifdef CENTOS
const std::string kLinuxOSRelease = "/etc/redhat-release";
-#define kLinuxOSRegex "CentOS release ([0-9]+).([0-9]+)"
+const std::string kLinuxOSRegex =
+ "(?P<name>\\w+) .* "
+ "(?P<major>[0-9]+).(?P<minor>[0-9]+)[\\.]{0,1}(?P<patch>[0-9]+)";
#else
const std::string kLinuxOSRelease = "/etc/os-release";
-#define kLinuxOSRegex "VERSION=\"([0-9]+)\\.([0-9]+)[\\.]{0,1}([0-9]+)?"
+const std::string kLinuxOSRegex =
+ "VERSION=\"(?P<major>[0-9]+)\\.(?P<minor>[0-9]+)[\\.]{0,1}(?P<patch>[0-9]+)"
+ "?.*, (?P<name>[\\w ]*)\"$";
#endif
-namespace osquery {
-namespace tables {
-
QueryData genOSVersion(QueryContext& context) {
std::string content;
if (!readFile(kLinuxOSRelease, content).ok()) {
return {};
}
- std::vector<std::string> version = {"0", "0", "0"};
- boost::regex rx(kLinuxOSRegex);
- boost::smatch matches;
+ Row r;
+ auto rx = xp::sregex::compile(kLinuxOSRegex);
+ xp::smatch matches;
for (const auto& line : osquery::split(content, "\n")) {
- if (boost::regex_search(line, matches, rx)) {
- // Push the matches in reverse order.
- version[0] = matches[1];
- version[1] = matches[2];
- if (matches.size() == 4) {
- // Patch is optional for Ubuntu and not used for CentOS.
- version[2] = matches[3];
- }
+ if (xp::regex_search(line, matches, rx)) {
+ r["major"] = INTEGER(matches["major"]);
+ r["minor"] = INTEGER(matches["minor"]);
+ r["patch"] =
+ (matches["patch"].length() > 0) ? INTEGER(matches["patch"]) : "0";
+ r["name"] = matches["name"];
break;
}
}
- Row r;
- if (version.size() == 3) {
- r["major"] = INTEGER(version[0]);
- r["minor"] = INTEGER(version[1]);
- r["patch"] = INTEGER(version[2]);
- }
+ // No build name.
+ r["build"] = "";
return {r};
}
}
unsigned long shm_swp;
unsigned long swap_attempts;
unsigned long swap_successes;
-};
+} __attribute__((unused));
QueryData genSharedMemory(QueryContext &context) {
QueryData results;
}
for (const auto& sub : subsystems) {
- if (subsystem.size() != 0 && fs::path(sub).filename().string() != subsystem) {
+ if (subsystem.size() != 0 &&
+ fs::path(sub).filename().string() != subsystem) {
// Request is limiting subsystem.
continue;
}
struct passwd *pwd = nullptr;
std::set<long> users_in;
- while ((pwd = getpwent()) != NULL) {
+ while ((pwd = getpwent()) != nullptr) {
if (std::find(users_in.begin(), users_in.end(), pwd->pw_uid) ==
users_in.end()) {
Row r;
QueryData results;
struct utmpx *entry = nullptr;
- while ((entry = getutxent()) != NULL) {
+ while ((entry = getutxent()) != nullptr) {
if (entry->ut_pid == 1) {
continue;
}
QueryData genShellHistory(QueryContext& context) {
QueryData results;
- std::string sql_str;
QueryData users;
if (!getuid()) {
return result;
}
-void genControlInfoFromOIDString(const std::string& oid_string, QueryData& results,
- const std::map<std::string, std::string>& config) {
+void genControlInfoFromOIDString(
+ const std::string& oid_string,
+ QueryData& results,
+ const std::map<std::string, std::string>& config) {
int request[CTL_DEBUG_MAXID + 2] = {0};
auto tokens = osquery::split(oid_string, ".");
if (tokens.size() > CTL_DEBUG_MAXID) {
QueryData generate(QueryContext& request) {
{% if class_name != "" %}\
- auto subscriber = EventFactory::getEventSubscriber("{{class_name}}");
- return subscriber->{{function}}(request);
+ if (EventFactory::exists("{{class_name}}")) {
+ auto subscriber = EventFactory::getEventSubscriber("{{class_name}}");
+ return subscriber->{{function}}(request);
+ } else {
+ return {};
+ }
{% else %}\
return osquery::tables::{{function}}(request);
{% endif %}\
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
void genFileInfo(const std::string& path,
const std::string& filename,
const std::string& dir,
+ const std::string& pattern,
QueryData& results) {
// Must provide the path, filename, directory separate from boost path->string
// helpers to match any explicit (query-parsed) predicate constraints.
r["is_char"] = (S_ISCHR(file_stat.st_mode)) ? "1" : "0";
r["is_block"] = (S_ISBLK(file_stat.st_mode)) ? "1" : "0";
+ // pattern
+ r["pattern"] = pattern;
+
results.push_back(r);
}
genFileInfo(path_string,
path.filename().string(),
path.parent_path().string(),
+ "",
results);
}
genFileInfo(begin->path().string(),
begin->path().filename().string(),
directory_string,
+ "",
results);
}
} catch (const fs::filesystem_error& e) {
}
}
+ // Now loop through constraints using the pattern column constraint.
+ auto patterns = context.constraints["pattern"].getAll(EQUALS);
+ if (patterns.size() != 1) {
+ return results;
+ }
+
+ for (const auto& pattern : patterns) {
+ std::vector<std::string> expanded_patterns;
+ auto status = resolveFilePattern(pattern, expanded_patterns);
+ if (!status.ok()) {
+ VLOG(1) << "Could not expand pattern properly: " << status.toString();
+ return results;
+ }
+
+ for (const auto& resolved : expanded_patterns) {
+ if (!isReadable(resolved)) {
+ continue;
+ }
+ fs::path path = resolved;
+ genFileInfo(resolved,
+ path.filename().string(),
+ path.parent_path().string(),
+ pattern,
+ results);
+
+ }
+ }
+
return results;
}
}
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
auto flags = Flag::flags();
for (const auto& flag : flags) {
- genFlag(flag.first, flag.second, results);
+ if (flag.first.size() > 2) {
+ // Skip single-character flags.
+ genFlag(flag.first, flag.second, results);
+ }
}
return results;
QueryData results;
Row r;
- r["version"] = TEXT(OSQUERY_VERSION);
r["pid"] = INTEGER(getpid());
+ r["version"] = TEXT(OSQUERY_VERSION);
std::string hash_string;
auto s = Config::getMD5(hash_string);
r["config_path"] = Flag::getValue("config_path");
r["extensions"] =
(pingExtension(FLAGS_extensions_socket).ok()) ? "active" : "inactive";
+
+ r["build_platform"] = STR(OSQUERY_BUILD_PLATFORM);
+ r["build_distro"] = STR(OSQUERY_BUILD_DISTRO);
+
results.push_back(r);
return results;
}
+
+QueryData genOsquerySchedule(QueryContext& context) {
+ QueryData results;
+
+ ConfigDataInstance config;
+ for (const auto& query : config.schedule()) {
+ Row r;
+ r["name"] = TEXT(query.first);
+ r["query"] = TEXT(query.second.query);
+ r["interval"] = INTEGER(query.second.interval);
+
+ // Report optional performance information.
+ r["executions"] = BIGINT(query.second.executions);
+ r["output_size"] = BIGINT(query.second.output_size);
+ r["wall_time"] = BIGINT(query.second.wall_time);
+ r["user_time"] = BIGINT(query.second.user_time);
+ r["system_time"] = BIGINT(query.second.system_time);
+ r["average_memory"] = BIGINT(query.second.memory);
+ results.push_back(r);
+ }
+
+ return results;
+}
}
}
Name: osquery
-Version: 1.4.4
+Version: 1.4.5
Release: 0
License: Apache-2.0 and GPLv2
Summary: A SQL powered operating system instrumentation, monitoring framework.
%files test
%manifest %{name}.manifest
-%{_bindir}/osquery_hash_tests
-%{_bindir}/osquery_status_tests
-%{_bindir}/osquery_db_handle_tests
-%{_bindir}/osquery_results_tests
-%{_bindir}/osquery_config_tests
-%{_bindir}/osquery_filesystem_tests
-%{_bindir}/osquery_query_tests
-%{_bindir}/osquery_sql_tests
-%{_bindir}/osquery_sqlite_util_tests
-%{_bindir}/osquery_scheduler_tests
-%{_bindir}/osquery_tables_tests
-%{_bindir}/osquery_virtual_table_tests
-%{_bindir}/osquery_test_util_tests
-%{_bindir}/osquery_text_tests
-%{_bindir}/osquery_logger_tests
-%{_bindir}/osquery_conversions_tests
-%{_bindir}/osquery_dispatcher_tests
-%{_bindir}/osquery_distributed_tests
-%{_bindir}/osquery_events_tests
-%{_bindir}/osquery_events_database_tests
-%{_bindir}/osquery_inotify_tests
-%{_bindir}/osquery_etc_hosts_tests
-%{_bindir}/osquery_printer_tests
-%{_bindir}/osquery_extensions_test
-%{_bindir}/osquery_registry_tests
+%{_bindir}/osquery-test
def setup_templates(path):
- tables_path = os.path.dirname(os.path.dirname(os.path.dirname(path)))
+ tables_path = os.path.dirname(os.path.dirname(path))
templates_path = os.path.join(tables_path, "templates")
if not os.path.exists(templates_path):
- print ("Cannot read templates path: %s" % (templates_path))
- exit(1)
- for template in os.listdir(os.path.join(tables_path, "templates")):
+ templates_path = os.path.join(os.path.dirname(tables_path), "templates")
+ if not os.path.exists(templates_path):
+ print ("Cannot read templates path: %s" % (templates_path))
+ exit(1)
+ for template in os.listdir(templates_path):
template_name = template.split(".", 1)[0]
with open(os.path.join(templates_path, template), "rb") as fh:
TEMPLATES[template_name] = fh.read().replace("\\\n", "")
attributes=self.attributes,
)
+ if self.table_name == "" or self.function == "":
+ print (lightred("Invalid table spec: %s" % (path)))
+ exit(1)
+
# Check for reserved column names
for column in self.columns():
if column.name in RESERVED:
// memory. Currently rocksdb must use disk-based storage.
//"use_in_memory_database": "false",
+ // Comma-delimited list of table names to be disabled.
+ // This allows osquery to be launched without certain tables.
+ //"disable_tables": "foo_bar,time",
+
// Enable debug or verbose debug output when logging.
"debug": "false",
"verbose_debug": "false",
--- /dev/null
+# This function and source blacklist is applied to LLVM's sanitize frameworks.
+# Please restrict entries to known-problems in third-party libraries.
+
+# ASIO 0-lookups
+fun:*get_io_service*
+src:*asio/impl/*
{
+ // Deprecated query schedule
"scheduledQueries": [
{
"name": "time",
"interval": 1
}
],
+
+ // New, recommended query schedule
+ "schedule": {
+ "time2": {"query": "select * from time;", "interval": 1}
+ },
+
+ // Deprecated collection for file monitoring
"additional_monitoring" : {
"file_paths": {
"downloads": [
"/tmp/osquery-fstests-pattern/%%"
- ],
- "system_binaries": [
- "/tmp/osquery-fstests-pattern/%",
- "/tmp/osquery-fstests-pattern/deep11/%"
]
}
+ },
+
+ // New, recommended file monitoring (top-level)
+ "file_paths": {
+ "downloads2": [
+ "/tmp/osquery-fstests-pattern/%%"
+ ],
+ "system_binaries": [
+ "/tmp/osquery-fstests-pattern/%",
+ "/tmp/osquery-fstests-pattern/deep11/%"
+ ]
}
}
255.255.255.255 broadcasthost
::1 localhost
fe80::1%lo0 localhost
+127.0.0.1 example.com example
+127.0.0.1 example.net # This is a comment
--- /dev/null
+#
+# Internet protocols
+#
+# $FreeBSD: src/etc/protocols,v 1.14 2000/09/24 11:20:27 asmodai Exp $
+# from: @(#)protocols 5.1 (Berkeley) 4/17/89
+#
+# See also http://www.isi.edu/in-notes/iana/assignments/protocol-numbers
+#
+ip 0 IP # internet protocol, pseudo protocol number
+#hopopt 0 HOPOPT # hop-by-hop options for ipv6
+icmp 1 ICMP # internet control message protocol
+tcp 6 TCP # transmission control protocol