- run: cd /usr/src/rocksdb && sudo make static_lib && sudo make install
# built-in table dependencies
- run: sudo apt-get install -qq python-pip && sudo pip install Jinja2
- - run: sudo apt-get install -qq libprocps-dev libsystemd-dev libudev-dev
+ - run: sudo apt-get install -qq libprocps-dev libsystemd-dev libudev-dev iptables-dev
# Tizen osquery build & test
- run: cd /home/circleci/project && sudo make
pip install Jinja2
# table dependencies
-RUN apt-get install -qq libprocps-dev libsystemd-dev libudev-dev
+RUN apt-get install -qq libprocps-dev libsystemd-dev libudev-dev iptables-dev
#ADD_DEFINITIONS("-pedantic-errors")
# TODO(sangwan.kwon): Get version from packing spec.
-SET(OSQUERY_BUILD_VERSION "1.4.5")
+SET(OSQUERY_BUILD_VERSION "1.4.7")
# Set various platform/platform-version/build version/etc defines.
ADD_DEFINITIONS(-DOSQUERY_BUILD_VERSION=${OSQUERY_BUILD_VERSION}
+ -DOSQUERY_BUILD_PLATFORM=${OSQUERY_BUILD_VERSION}
-DOSQUERY_BUILD_SDK_VERSION=${OSQUERY_BUILD_VERSION})
INCLUDE_DIRECTORIES("${CMAKE_SOURCE_DIR}")
#include <boost/property_tree/json_parser.hpp>
#include <boost/thread/shared_mutex.hpp>
-#include <osquery/database/results.h>
+#include <osquery/database.h>
#include <osquery/flags.h>
#include <osquery/registry.h>
#include <osquery/status.h>
DECLARE_string(config_plugin);
/**
+ * @brief The osquery config is updated via named sources containing JSON.
+ *
+ * A ConfigSourceMap is a named mapping from source (the key) to a JSON blob.
+ * This map is generated by a ConfigPlugin and provided to the Config via an
+ * update call. ConfigPlugin%s may update the Config asynchronously.
+ *
+ * The osquery Config instance will perform source merging by amalgamating
+ * the JSON literal types (lists and maps) for well known top-level keys.
+ * The merging will happen in lexicographical order based on source name.
+ */
+typedef std::map<std::string, std::string> ConfigSourceMap;
+
+/**
* @brief A native representation of osquery configuration data.
*
* When you use osquery::Config::getInstance(), you are getting a singleton
* @param config A map of domain or namespace to config data.
* @return If the config changes were applied.
*/
- static Status update(const std::map<std::string, std::string>& config);
+ static Status update(const ConfigSourceMap& config);
/**
 * @brief Calculate the hash of the osquery config
static Status getMD5(std::string& hashString);
/**
+ * @brief Adds a new query to the scheduled queries.
+ *
+ */
+ static void addScheduledQuery(const std::string& name,
+ const std::string& query,
+ int interval);
+
+ /**
+ * @brief Checks if a query exists in the query schedule.
+ *
+ */
+ static bool checkScheduledQuery(const std::string& query);
+
+ /**
+ * @brief Checks if the query name exists in the query schedule.
+ *
+ */
+ static bool checkScheduledQueryName(const std::string& query_name);
+
+ /**
* @brief Check to ensure that the config is accessible and properly
* formatted
*
* Since instances of Config should only be created via getInstance(),
* Config's constructor is private
*/
- Config() {}
+ Config() : force_merge_success_(false) {}
~Config(){}
Config(Config const&);
void operator=(Config const&);
static Status genConfig();
/// Merge a retrieved config source JSON into a working ConfigData.
- static void mergeConfig(const std::string& source, ConfigData& conf);
+ static Status mergeConfig(const std::string& source, ConfigData& conf);
public:
/**
/// The reader/writer config data mutex.
boost::shared_mutex mutex_;
+ /// Enforce merge success.
+ bool force_merge_success_;
+
private:
static const pt::ptree& getParsedData(const std::string& parser);
static const ConfigPluginRef getParser(const std::string& parser);
 * ConfigPlugin::genConfig should be implemented by subclasses of
* ConfigPlugin which needs to retrieve config data in a custom way.
*
- * @return a pair such that pair.first is an osquery::Status instance which
- * indicates the success or failure of config retrieval. If pair.first
- * indicates that config retrieval was successful, then the config data
- * should be returned in pair.second.
+ * @param config The output ConfigSourceMap, a map of source names to JSON.
+ * @return A failure status will prevent the source map from merging.
*/
- virtual Status genConfig(std::map<std::string, std::string>& config) = 0;
+ virtual Status genConfig(ConfigSourceMap& config) = 0;
Status call(const PluginRequest& request, PluginResponse& response);
};
/// Helper merged and parsed property tree.
typedef pt::ptree ConfigTree;
+/// Helper for a map of requested keys to their merged and parsed property tree.
+typedef std::map<std::string, ConfigTree> ConfigTreeMap;
+
/**
* @brief A pluggable configuration parser.
*
* @param config A JSON-parsed property tree map.
* @return Failure if the parser should no longer receive updates.
*/
- virtual Status update(const std::map<std::string, ConfigTree>& config) = 0;
+ virtual Status update(const ConfigTreeMap& config) = 0;
protected:
/// Allow the config parser to keep some global state.
*/
extern const std::string kVersion;
extern const std::string kSDKVersion;
+extern const std::string kSDKPlatform;
-/// Use a macro for the version literal, set the kVersion symbol in the library.
-#define OSQUERY_VERSION STR(OSQUERY_BUILD_VERSION)
+/// Use a macro for the sdk/platform literal, symbols available in lib.cpp.
#define OSQUERY_SDK_VERSION STR(OSQUERY_BUILD_SDK_VERSION)
+#define OSQUERY_PLATFORM STR(OSQUERY_BUILD_PLATFORM)
/**
* @brief A helpful tool type to report when logging, print help, or debugging.
#pragma once
-#include <osquery/database/db_handle.h>
-#include <osquery/database/query.h>
-#include <osquery/database/results.h>
+#include <map>
+#include <string>
+#include <vector>
+
+#include <boost/property_tree/ptree.hpp>
+
+#include <osquery/registry.h>
+#include <osquery/status.h>
+
+namespace pt = boost::property_tree;
+
+namespace osquery {
+
+/**
+ * @brief A backing storage domain name, used for key/value based storage.
+ *
+ * There are certain "cached" variables such as a node-unique UUID or negotiated
+ * 'node_key' following enrollment. If a value or setting must persist between
+ * osqueryi or osqueryd runs it should be stored using the kPersistentSetting%s
+ * domain.
+ */
+extern const std::string kPersistentSettings;
+
+/// The "domain" where the results of scheduled queries are stored.
+extern const std::string kQueries;
+
+/// The "domain" where event results are stored, queued for querytime retrieval.
+extern const std::string kEvents;
+
+/**
+ * @brief The "domain" where buffered log results are stored.
+ *
+ * Logger plugins may shuttle logs to a remote endpoint or API call
+ * asynchronously. The backing store can be used to buffer results and status
+ * logs until the logger plugin-specific thread decides to flush.
+ */
+extern const std::string kLogs;
+
+/////////////////////////////////////////////////////////////////////////////
+// Row
+/////////////////////////////////////////////////////////////////////////////
+
+/**
+ * @brief A variant type for the SQLite type affinities.
+ */
+typedef std::string RowData;
+
+/**
+ * @brief A single row from a database query
+ *
+ * Row is a simple map where individual column names are keys, which map to
+ * the Row's respective value
+ */
+typedef std::map<std::string, RowData> Row;
+
+/**
+ * @brief Serialize a Row into a property tree
+ *
+ * @param r the Row to serialize
+ * @param tree the output property tree
+ *
+ * @return Status indicating the success or failure of the operation
+ */
+Status serializeRow(const Row& r, pt::ptree& tree);
+
+/**
+ * @brief Serialize a Row object into a JSON string
+ *
+ * @param r the Row to serialize
+ * @param json the output JSON string
+ *
+ * @return Status indicating the success or failure of the operation
+ */
+Status serializeRowJSON(const Row& r, std::string& json);
+
+/**
+ * @brief Deserialize a Row object from a property tree
+ *
+ * @param tree the input property tree
+ * @param r the output Row structure
+ *
+ * @return Status indicating the success or failure of the operation
+ */
+Status deserializeRow(const pt::ptree& tree, Row& r);
+
+/**
+ * @brief Deserialize a Row object from a JSON string
+ *
+ * @param json the input JSON string
+ * @param r the output Row structure
+ *
+ * @return Status indicating the success or failure of the operation
+ */
+Status deserializeRowJSON(const std::string& json, Row& r);
+
+/////////////////////////////////////////////////////////////////////////////
+// QueryData
+/////////////////////////////////////////////////////////////////////////////
+
+/**
+ * @brief The result set returned from a osquery SQL query
+ *
+ * QueryData is the canonical way to represent the results of SQL queries in
+ * osquery. It's just a vector of Row's.
+ */
+typedef std::vector<Row> QueryData;
+
+/**
+ * @brief Serialize a QueryData object into a property tree
+ *
+ * @param q the QueryData to serialize
+ * @param tree the output property tree
+ *
+ * @return Status indicating the success or failure of the operation
+ */
+Status serializeQueryData(const QueryData& q, pt::ptree& tree);
+
+/**
+ * @brief Serialize a QueryData object into a JSON string
+ *
+ * @param q the QueryData to serialize
+ * @param json the output JSON string
+ *
+ * @return Status indicating the success or failure of the operation
+ */
+Status serializeQueryDataJSON(const QueryData& q, std::string& json);
+
+/// Inverse of serializeQueryData, convert property tree to QueryData.
+Status deserializeQueryData(const pt::ptree& tree, QueryData& qd);
+
+/// Inverse of serializeQueryDataJSON, convert a JSON string to QueryData.
+Status deserializeQueryDataJSON(const std::string& json, QueryData& qd);
+
+/////////////////////////////////////////////////////////////////////////////
+// DiffResults
+/////////////////////////////////////////////////////////////////////////////
+
+/**
+ * @brief Data structure representing the difference between the results of
+ * two queries
+ *
+ * The representation of two diffed QueryData result sets. Given and old and
+ * new QueryData, DiffResults indicates the "added" subset of rows and the
+ * "removed" subset of rows.
+ */
+struct DiffResults {
+ /// vector of added rows
+ QueryData added;
+
+ /// vector of removed rows
+ QueryData removed;
+
+ /// equals operator
+ bool operator==(const DiffResults& comp) const {
+ return (comp.added == added) && (comp.removed == removed);
+ }
+
+ /// not equals operator
+ bool operator!=(const DiffResults& comp) const { return !(*this == comp); }
+};
+
+/**
+ * @brief Serialize a DiffResults object into a property tree
+ *
+ * @param d the DiffResults to serialize
+ * @param tree the output property tree
+ *
+ * @return Status indicating the success or failure of the operation
+ */
+Status serializeDiffResults(const DiffResults& d, pt::ptree& tree);
+
+/**
+ * @brief Serialize a DiffResults object into a JSON string
+ *
+ * @param d the DiffResults to serialize
+ * @param json the output JSON string
+ *
+ * @return an instance of osquery::Status, indicating the success or failure
+ * of the operation
+ */
+Status serializeDiffResultsJSON(const DiffResults& d, std::string& json);
+
+/**
+ * @brief Diff two QueryData objects and create a DiffResults object
+ *
+ * @param old_ the "old" set of results
+ * @param new_ the "new" set of results
+ *
+ * @return a DiffResults object which indicates the change from old_ to new_
+ *
+ * @see DiffResults
+ */
+DiffResults diff(const QueryData& old_, const QueryData& new_);
+
+/**
+ * @brief Add a Row to a QueryData if the Row hasn't appeared in the QueryData
+ * already
+ *
+ * Note that this function will iterate through the QueryData list until a
+ * given Row is found (or not found). This shouldn't be that significant of an
+ * overhead for most use-cases, but it's worth keeping in mind before you use
+ * this in its current state.
+ *
+ * @param q the QueryData list to append to
+ * @param r the Row to add to q
+ *
+ * @return true if the Row was added to the QueryData, false if it was not
+ */
+bool addUniqueRowToQueryData(QueryData& q, const Row& r);
+
+/**
+ * @brief Construct a new QueryData from an existing one, replacing all
+ * non-ASCII characters with their \u encoding.
+ *
+ * This function is intended as a workaround for
+ * https://svn.boost.org/trac/boost/ticket/8883,
+ * and will allow rows containing data with non-ASCII characters to be stored in
+ * the database and parsed back into a property tree.
+ *
+ * @param oldData the old QueryData to copy
+ * @param newData the new escaped QueryData object
+ */
+void escapeQueryData(const QueryData& oldData, QueryData& newData);
+
+/**
+ * @brief represents the relevant parameters of a scheduled query.
+ *
+ * Within the context of osqueryd, a scheduled query may have many relevant
+ * attributes. Those attributes are represented in this data structure.
+ */
+struct ScheduledQuery {
+ /// The SQL query.
+ std::string query;
+
+  /// How often the query should be executed, in seconds.
+ size_t interval;
+
+  /// A temporary splayed interval.
+ size_t splayed_interval;
+
+ /// Number of executions.
+ size_t executions;
+
+ /// Total wall time taken
+ unsigned long long int wall_time;
+
+ /// Total user time (cycles)
+ unsigned long long int user_time;
+
+ /// Total system time (cycles)
+ unsigned long long int system_time;
+
+ /// Average memory differentials. This should be near 0.
+ unsigned long long int memory;
+
+ /// Total characters, bytes, generated by query.
+ unsigned long long int output_size;
+
+ /// Set of query options.
+ std::map<std::string, bool> options;
+
+ ScheduledQuery()
+ : interval(0),
+ splayed_interval(0),
+ executions(0),
+ wall_time(0),
+ user_time(0),
+ system_time(0),
+ memory(0),
+ output_size(0) {}
+
+ /// equals operator
+ bool operator==(const ScheduledQuery& comp) const {
+ return (comp.query == query) && (comp.interval == interval);
+ }
+
+ /// not equals operator
+ bool operator!=(const ScheduledQuery& comp) const { return !(*this == comp); }
+};
+
+/////////////////////////////////////////////////////////////////////////////
+// QueryLogItem
+/////////////////////////////////////////////////////////////////////////////
+
+/**
+ * @brief Query results from a schedule, snapshot, or ad-hoc execution.
+ *
+ * When a scheduled query yields new results, we need to log that information
+ * to our upstream logging receiver. A QueryLogItem contains metadata and
+ * results in potentially-differential form for a logger.
+ */
+struct QueryLogItem {
+ /// Differential results from the query.
+ DiffResults results;
+
+ /// Optional snapshot results, no differential applied.
+ QueryData snapshot_results;
+
+ /// The name of the scheduled query.
+ std::string name;
+
+ /// The identifier (hostname, or uuid) of the host.
+ std::string identifier;
+
+ /// The time that the query was executed, seconds as UNIX time.
+ int time;
+
+ /// The time that the query was executed, an ASCII string.
+ std::string calendar_time;
+
+ /// equals operator
+ bool operator==(const QueryLogItem& comp) const {
+ return (comp.results == results) && (comp.name == name);
+ }
+
+ /// not equals operator
+ bool operator!=(const QueryLogItem& comp) const { return !(*this == comp); }
+};
+
+/**
+ * @brief Serialize a QueryLogItem object into a property tree
+ *
+ * @param item the QueryLogItem to serialize
+ * @param tree the output property tree
+ *
+ * @return Status indicating the success or failure of the operation
+ */
+Status serializeQueryLogItem(const QueryLogItem& item, pt::ptree& tree);
+
+/**
+ * @brief Serialize a QueryLogItem object into a JSON string
+ *
+ * @param item the QueryLogItem to serialize
+ * @param json the output JSON string
+ *
+ * @return Status indicating the success or failure of the operation
+ */
+Status serializeQueryLogItemJSON(const QueryLogItem& item, std::string& json);
+
+/// Inverse of serializeQueryLogItem, convert property tree to QueryLogItem.
+Status deserializeQueryLogItem(const pt::ptree& tree, QueryLogItem& item);
+
+/// Inverse of serializeQueryLogItem, convert a JSON string to QueryLogItem.
+Status deserializeQueryLogItemJSON(const std::string& json, QueryLogItem& item);
+
+/**
+ * @brief Serialize a QueryLogItem object into a property tree
+ * of events, a list of actions.
+ *
+ * @param item the QueryLogItem to serialize
+ * @param tree the output property tree
+ *
+ * @return Status indicating the success or failure of the operation
+ */
+Status serializeQueryLogItemAsEvents(const QueryLogItem& item, pt::ptree& tree);
+
+/**
+ * @brief Serialize a QueryLogItem object into a JSON string of events,
+ * a list of actions.
+ *
+ * @param i the QueryLogItem to serialize
+ * @param json the output JSON string
+ *
+ * @return Status indicating the success or failure of the operation
+ */
+Status serializeQueryLogItemAsEventsJSON(const QueryLogItem& i,
+ std::string& json);
+
+/**
+ * @brief An osquery backing storage (database) type that persists executions.
+ *
+ * The osquery tools need a high-performance storage and indexing mechanism for
+ * storing intermediate results from EventPublisher%s, persisting one-time
+ * generated values, and performing non-memory backed differentials.
+ *
+ * Practically, osquery is built around RocksDB's performance guarantees and
+ * all of the internal APIs expect RocksDB's indexing and read performance.
+ * However, access to this representation of a backing-store is still abstracted
+ * to removing RocksDB as a dependency for the osquery SDK.
+ */
+class DatabasePlugin : public Plugin {
+ protected:
+ /**
+ * @brief Perform a domain and key lookup from the backing store.
+ *
+ * Database value access indexing is abstracted into domains and keys.
+ * Both are string values but exist separately for simple indexing without
+ * API-enforcing tokenization. In some cases we do add a component-specific
+   * tokenization to keys.
+ *
+ * @param domain A string value representing abstract storage indexing.
+ * @param key A string value representing the lookup/retrieval key.
+ * @param value The output parameter, left empty if the key does not exist.
+ * @return Failure if the data could not be accessed. It is up to the plugin
+ * to determine if a missing key means a non-success status.
+ */
+ virtual Status get(const std::string& domain,
+ const std::string& key,
+ std::string& value) const = 0;
+
+ /**
+ * @brief Store a string-represented value using a domain and key index.
+ *
+ * See DatabasePlugin::get for discussion around domain and key use.
+ *
+ * @param domain A string value representing abstract storage indexing.
+ * @param key A string value representing the lookup/retrieval key.
+ * @param value A string value representing the data.
+ * @return Failure if the data could not be stored. It is up to the plugin
+ * to determine if a conflict/overwrite should return different status text.
+ */
+ virtual Status put(const std::string& domain,
+ const std::string& key,
+ const std::string& value) = 0;
+
+ /// Data removal method.
+ virtual Status remove(const std::string& domain, const std::string& k) = 0;
+
+ /// Key/index lookup method.
+ virtual Status scan(const std::string& domain,
+ std::vector<std::string>& results) const {
+ return Status(0, "Not used");
+ }
+
+ public:
+ Status call(const PluginRequest& request, PluginResponse& response);
+};
+
+/**
+ * @brief Lookup a value from the active osquery DatabasePlugin storage.
+ *
+ * See DatabasePlugin::get for discussion around domain and key use.
+ * Extensions, components, plugins, and core code should use getDatabaseValue
+ * as a wrapper around the current tool's choice of a backing storage plugin.
+ *
+ * @param domain A string value representing abstract storage indexing.
+ * @param key A string value representing the lookup/retrieval key.
+ * @param value The output parameter, left empty if the key does not exist.
+ * @return Storage operation status.
+ */
+Status getDatabaseValue(const std::string& domain,
+ const std::string& key,
+ std::string& value);
+
+/**
+ * @brief Set or put a value into the active osquery DatabasePlugin storage.
+ *
+ * See DatabasePlugin::get for discussion around domain and key use.
+ * Extensions, components, plugins, and core code should use setDatabaseValue
+ * as a wrapper around the current tool's choice of a backing storage plugin.
+ *
+ * @param domain A string value representing abstract storage indexing.
+ * @param key A string value representing the lookup/retrieval key.
+ * @param value A string value representing the data.
+ * @return Storage operation status.
+ */
+Status setDatabaseValue(const std::string& domain,
+ const std::string& key,
+ const std::string& value);
+
+/// Remove a domain/key identified value from backing-store.
+Status deleteDatabaseValue(const std::string& domain, const std::string& key);
+
+/// Get a list of keys for a given domain.
+Status scanDatabaseKeys(const std::string& domain,
+ std::vector<std::string>& keys);
+
+/// Generate a specific-use registry for database access abstraction.
+CREATE_REGISTRY(DatabasePlugin, "database");
+}
*
* @return Status indicating the success or failure of the operation
*/
-Status serializeRow(const Row& r, boost::property_tree::ptree& tree);
+Status serializeRow(const Row& r, pt::ptree& tree);
/**
* @brief Serialize a Row object into a JSON string
*
* @return Status indicating the success or failure of the operation
*/
-Status deserializeRow(const boost::property_tree::ptree& tree, Row& r);
+Status deserializeRow(const pt::ptree& tree, Row& r);
/**
* @brief Deserialize a Row object from a JSON string
*
* @return The query-time table data, retrieved from a backing store.
*/
- virtual QueryData genTable(tables::QueryContext& context)
- __attribute__((used)) {
+ virtual QueryData genTable(QueryContext& context) __attribute__((used)) {
return get(0, 0);
}
DECLARE_string(extensions_socket);
DECLARE_string(extensions_autoload);
DECLARE_string(extensions_timeout);
+DECLARE_bool(disable_extensions);
/// A millisecond interval applied to extension initialization.
extern const int kExtensionInitializeMLatency;
Status queryExternal(const std::string& query, QueryData& results);
/// External (extensions) SQL implementation of the osquery getQueryColumns API.
-Status getQueryColumnsExternal(const std::string& q,
- tables::TableColumns& columns);
+Status getQueryColumnsExternal(const std::string& q, TableColumns& columns);
/// External (extensions) SQL implementation plugin provider for "sql" registry.
class ExternalSQLPlugin : SQLPlugin {
return queryExternal(q, results);
}
- Status getQueryColumns(const std::string& q,
- tables::TableColumns& columns) const {
+ Status getQueryColumns(const std::string& q, TableColumns& columns) const {
return getQueryColumnsExternal(q, columns);
}
};
/**
* @brief Parse a property list on disk into a property tree.
*
- * @param path the path of the propery list which you'd like to read
- * @param tree a non-const reference to a Boost property tree, which will be
- * populated with the results of the property list
+ * @param path the input path to a property list
+ * @param tree the output reference to a Boost property tree
*
* @return an instance of Status, indicating the success or failure
* of the operation.
/**
* @brief Parse property list content into a property tree.
*
- * @param content a string reference to the content of a plist
- * @param tree a non-const reference to a Boost property tree, which will be
- * populated with the results of the property list
+ * @param content the input string-content of a property list
+ * @param tree the output reference to a Boost property tree
*
* @return an instance of Status, indicating the success or failure
* of the operation.
#include <glog/logging.h>
-#include <osquery/database/results.h>
+#include <osquery/database.h>
#include <osquery/flags.h>
#include <osquery/registry.h>
/// The call-in prototype for Registry modules.
typedef void (*ModuleInitalizer)(void);
+template <class PluginItem>
+class PluginFactory {};
+
class Plugin : private boost::noncopyable {
public:
Plugin() : name_("unnamed") {}
#include <string>
#include <vector>
-#include <osquery/database/results.h>
+#include <osquery/database.h>
#include <osquery/flags.h>
#include <osquery/tables.h>
*/
static QueryData selectAllFrom(const std::string& table,
const std::string& column,
- tables::ConstraintOperator op,
+ ConstraintOperator op,
const std::string& expr);
protected:
/// Use the SQL implementation to parse a query string and return details
/// (name, type) about the columns.
virtual Status getQueryColumns(const std::string& q,
- tables::TableColumns& columns) const = 0;
+ TableColumns& columns) const = 0;
/**
* @brief Attach a table at runtime.
*
* @return status indicating success or failure of the operation
*/
-Status getQueryColumns(const std::string& q, tables::TableColumns& columns);
+Status getQueryColumns(const std::string& q, TableColumns& columns);
/*
* @brief A mocked subclass of SQL useful for testing
#include <osquery/registry.h>
#include <osquery/core.h>
-#include <osquery/database/results.h>
+#include <osquery/database.h>
#include <osquery/status.h>
/// Allow Tables to use "tracked" deprecated OS APIs.
} while (0)
namespace osquery {
-namespace tables {
/**
* @brief The SQLite type affinities are available as macros
#define BIGINT(x) boost::lexical_cast<std::string>(x)
/// See the affinity type documentation for TEXT.
#define UNSIGNED_BIGINT(x) boost::lexical_cast<std::string>(x)
+/// See the affinity type documentation for TEXT.
+#define DOUBLE(x) boost::lexical_cast<std::string>(x)
/**
* @brief The SQLite type affinities as represented as implementation literals.
#define BIGINT_LITERAL long long int
/// See the literal type documentation for TEXT_LITERAL.
#define UNSIGNED_BIGINT_LITERAL unsigned long long int
+/// See the literal type documentation for TEXT_LITERAL.
+#define DOUBLE_LITERAL double
/// Cast an SQLite affinity type to the literal type.
#define AS_LITERAL(literal, value) boost::lexical_cast<literal>(value)
* If the query contains a join or where clause with a constraint operator and
* expression the table generator may limit the data appropriately.
*/
-enum ConstraintOperator {
+enum ConstraintOperator : unsigned char {
EQUALS = 2,
GREATER_THAN = 4,
LESS_THAN_OR_EQUALS = 8,
GREATER_THAN_OR_EQUALS = 32
};
+/// Type for flags for what constraint operators are admissible.
+typedef unsigned char ConstraintOperatorFlag;
+/// Flag for any operator type.
+#define ANY_OP 0xFFU
+
/**
* @brief A Constraint is an operator and expression.
*
}
/**
- * @brief Check and return if there are any constraints on this column.
+ * @brief Check and return if there are constraints on this column.
*
* A ConstraintList is used in a ConstraintMap with a column name as the
* map index. Tables that act on optional constraints should check if any
- * constraint was provided.
+ * constraint was provided. The ops parameter serves to specify which
+ * operators we want to check existence for.
*
+   * @param ops (Optional: default ANY_OP) The operator types to look for.
* @return true if any constraint exists.
*/
- bool exists() const { return (constraints_.size() > 0); }
+ bool exists(const ConstraintOperatorFlag ops = ANY_OP) const {
+ if (ops == ANY_OP) {
+ return (constraints_.size() > 0);
+ } else {
+ for (const struct Constraint &c : constraints_) {
+ if (c.op & ops) {
+ return true;
+ }
+ }
+ return false;
+ }
+ }
/**
* @brief Check if a constraint exist AND matches the type expression.
CREATE_LAZY_REGISTRY(TablePlugin, "table");
}
-}
readline
# built-in table deps
systemd
- udev)
+ udev
+ ip4tc)
SET(${TARGET_OSQUERY_LIB}_SRCS "")
SET(${TARGET_OSQUERY_LIB_ADDITIONAL}_SRCS "")
SET(${TARGET_OSQUERY_TEST}_SRCS "")
SET(OSQUERY_CODEGEN_PATH "${CMAKE_SOURCE_DIR}/tools/codegen")
-SET(OSQUERY_TABLES_PATH "${CMAKE_SOURCE_DIR}/osquery/tables")
+SET(OSQUERY_TABLES_PATH "${CMAKE_SOURCE_DIR}")
SET(OSQUERY_GENERATED_PATH "${CMAKE_BINARY_DIR}/generated")
ADD_DEFINITIONS("-DOSQUERY_BUILD_VERSION=${OSQUERY_BUILD_VERSION}")
ADD_SUBDIRECTORY(property)
## Table generation #############################################################
-FILE(GLOB TABLE_FILES "tables/specs/*.table")
-FILE(GLOB TABLE_FILES_LINUX "tables/specs/linux/*.table")
-FILE(GLOB TABLE_FILES_UTILITY "tables/specs/utility/*.table")
+FILE(GLOB TABLE_FILES "${CMAKE_SOURCE_DIR}/specs/*.table")
+FILE(GLOB TABLE_FILES_LINUX "${CMAKE_SOURCE_DIR}/specs/linux/*.table")
+FILE(GLOB TABLE_FILES_UTILITY "${CMAKE_SOURCE_DIR}/specs/utility/*.table")
LIST(APPEND TABLE_FILES ${TABLE_FILES_LINUX})
LIST(APPEND TABLE_FILES ${TABLE_FILES_UTILITY})
SET(GENERATED_TABLES "")
-FILE(GLOB TABLE_FILES_TEMPLATES "tables/templates/*.in")
+FILE(GLOB TABLE_FILES_TEMPLATES "${CMAKE_SOURCE_DIR}/tools/codegen/templates/*.in")
SET(GENERATION_DEPENDENCIES "${OSQUERY_CODEGEN_PATH}/gentable.py"
"${OSQUERY_CODEGEN_PATH}/amalgamate.py"
"${OSQUERY_TABLES_PATH}/specs/blacklist")
ADD_CUSTOM_COMMAND(
OUTPUT ${AMALGAMATION_FILE_GEN}
COMMAND
- python "${OSQUERY_CODEGEN_PATH}/amalgamate.py" "${OSQUERY_TABLES_PATH}" "${OSQUERY_GENERATED_PATH}"
+ python "${OSQUERY_CODEGEN_PATH}/amalgamate.py" "${OSQUERY_CODEGEN_PATH}" "${OSQUERY_GENERATED_PATH}"
DEPENDS
${GENERATED_TABLES}
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}")
## Library generation ###########################################################
# TODO(sangwan.kwon): Change amalgation files to additional
-ADD_LIBRARY(osquery_generated_tables OBJECT "${AMALGAMATION_FILE_GEN}")
-ADD_LIBRARY(osquery_static STATIC $<TARGET_OBJECTS:osquery_generated_tables>
- $<TARGET_OBJECTS:osquery_sqlite>
- ${${TARGET_OSQUERY_LIB}_SRCS}
- ${${TARGET_OSQUERY_LIB_ADDITIONAL}_SRCS})
-SET_TARGET_PROPERTIES(osquery_static PROPERTIES OUTPUT_NAME osquery_static)
-
# static_lib should include every object file in the archive in the link
-ADD_LIBRARY(${TARGET_OSQUERY_LIB} STATIC main/lib.cpp)
-TARGET_OSQUERY_LINK_WHOLE(${TARGET_OSQUERY_LIB} osquery_static)
+# ref: TARGET_OSQUERY_LINK_WHOLE
+ADD_LIBRARY(osquery_generated_tables OBJECT "${AMALGAMATION_FILE_GEN}")
+ADD_LIBRARY(${TARGET_OSQUERY_LIB}
+ STATIC main/lib.cpp
+ $<TARGET_OBJECTS:osquery_generated_tables>
+ $<TARGET_OBJECTS:osquery_sqlite>
+ ${${TARGET_OSQUERY_LIB}_SRCS}
+ ${${TARGET_OSQUERY_LIB_ADDITIONAL}_SRCS})
TARGET_LINK_LIBRARIES(${TARGET_OSQUERY_LIB} ${${TARGET_OSQUERY_LIB}_DEP})
+SET_TARGET_PROPERTIES(${TARGET_OSQUERY_LIB} PROPERTIES OUTPUT_NAME ${TARGET_OSQUERY_LIB})
#INSTALL(TARGETS ${TARGET_OSQUERY_LIB}
# DESTINATION ${CMAKE_INSTALL_LIBDIR})
## osqueryi generation ##########################################################
ADD_EXECUTABLE(${TARGET_OSQUERY_SHELL} devtools/shell.cpp main/shell.cpp)
-TARGET_LINK_LIBRARIES(${TARGET_OSQUERY_SHELL} ${TARGET_OSQUERY_LIB})
+TARGET_OSQUERY_LINK_WHOLE(${TARGET_OSQUERY_SHELL} ${TARGET_OSQUERY_LIB})
INSTALL(TARGETS ${TARGET_OSQUERY_SHELL}
DESTINATION ${CMAKE_INSTALL_BINDIR}
PERMISSIONS OWNER_READ
## osqueryd generation ##########################################################
ADD_EXECUTABLE(${TARGET_OSQUERY_DAEMON} main/daemon.cpp)
-TARGET_LINK_LIBRARIES(${TARGET_OSQUERY_DAEMON} ${TARGET_OSQUERY_LIB})
+TARGET_OSQUERY_LINK_WHOLE(${TARGET_OSQUERY_DAEMON} ${TARGET_OSQUERY_LIB})
INSTALL(TARGETS ${TARGET_OSQUERY_DAEMON}
DESTINATION ${CMAKE_INSTALL_BINDIR}
PERMISSIONS OWNER_READ
## osquery-test generation ##########################################################
ADD_EXECUTABLE(${TARGET_OSQUERY_TEST} ${${TARGET_OSQUERY_TEST}_SRCS} main/tests.cpp)
-
-TARGET_LINK_LIBRARIES(${TARGET_OSQUERY_TEST} ${TARGET_OSQUERY_LIB})
+TARGET_OSQUERY_LINK_WHOLE(${TARGET_OSQUERY_TEST} ${TARGET_OSQUERY_LIB})
TARGET_LINK_LIBRARIES(${TARGET_OSQUERY_TEST} gtest)
SET_TARGET_PROPERTIES(${TARGET_OSQUERY_TEST}
- PROPERTIES COMPILE_FLAGS "-DGTEST_HAS_TR1_TUPLE=0")
+ PROPERTIES COMPILE_FLAGS "-DGTEST_HAS_TR1_TUPLE=0")
ADD_TEST(${TARGET_OSQUERY_TEST} ${TARGET_OSQUERY_TEST})
INSTALL(TARGETS ${TARGET_OSQUERY_TEST}
DESTINATION ${CMAKE_INSTALL_BINDIR}
## example extension with the SDK ##############################################
ADD_EXECUTABLE(example_extension examples/example_extension.cpp)
-TARGET_LINK_LIBRARIES(example_extension ${TARGET_OSQUERY_LIB})
+TARGET_OSQUERY_LINK_WHOLE(example_extension ${TARGET_OSQUERY_LIB})
SET_TARGET_PROPERTIES(example_extension PROPERTIES OUTPUT_NAME example_extension.ext)
# Build the example extension module with the SDK
ADD_OSQUERY_LIBRARY(TRUE osquery_config config.cpp)
ADD_OSQUERY_LIBRARY(FALSE osquery_config_plugins update.cpp
- plugins/filesystem.cpp)
+ plugins/filesystem.cpp
+ parsers/query_packs.cpp)
FILE(GLOB OSQUERY_CONFIG_TESTS "tests/*.cpp")
ADD_OSQUERY_TEST(TRUE ${OSQUERY_CONFIG_TESTS})
*
*/
+#include <chrono>
#include <mutex>
#include <random>
#include <sstream>
#include <osquery/filesystem.h>
#include <osquery/logger.h>
#include <osquery/registry.h>
+#include <osquery/tables.h>
namespace pt = boost::property_tree;
+namespace osquery {
+
typedef pt::ptree::value_type tree_node;
typedef std::map<std::string, std::vector<std::string> > EventFileMap_t;
-
-namespace osquery {
+typedef std::chrono::high_resolution_clock chrono_clock;
CLI_FLAG(string, config_plugin, "filesystem", "Config plugin name");
// Now merge all sources together.
for (const auto& source : getInstance().raw_) {
- mergeConfig(source.second, conf);
+ auto status = mergeConfig(source.second, conf);
+ if (getInstance().force_merge_success_ && !status.ok()) {
+ return Status(1, status.what());
+ }
}
// Call each parser with the optionally-empty, requested, top level keys.
+ getInstance().data_ = conf;
for (const auto& plugin : Registry::all("config_parser")) {
auto parser = std::static_pointer_cast<ConfigParserPlugin>(plugin.second);
if (parser == nullptr || parser.get() == nullptr) {
parser->update(parser_config);
}
- getInstance().data_ = conf;
return Status(0, "OK");
}
conf.all_data.add_child("options." + key, option.second);
}
-inline void mergeScheduledQuery(const std::string& name,
- const tree_node& node,
- ConfigData& conf) {
+inline void additionalScheduledQuery(const std::string& name,
+ const tree_node& node,
+ ConfigData& conf) {
// Read tree/JSON into a query structure.
ScheduledQuery query;
query.query = node.second.get<std::string>("query", "");
query.interval = node.second.get<int>("interval", 0);
+ if (query.interval == 0) {
+ VLOG(1) << "Setting invalid interval=0 to 86400 for query: " << name;
+ query.interval = 86400;
+ }
+
// This is a candidate for a catch-all iterator with a catch for boolean type.
query.options["snapshot"] = node.second.get<bool>("snapshot", false);
+ query.options["removed"] = node.second.get<bool>("removed", true);
// Check if this query exists, if so, check if it was changed.
if (conf.schedule.count(name) > 0) {
splayValue(query.interval, FLAGS_schedule_splay_percent);
// Update the schedule map and replace the all_data node record.
conf.schedule[name] = query;
+}
+
+inline void mergeScheduledQuery(const std::string& name,
+ const tree_node& node,
+ ConfigData& conf) {
+ // Add the new query to the configuration.
+ additionalScheduledQuery(name, node, conf);
+ // Replace the all_data node record.
if (conf.all_data.count("schedule") > 0) {
conf.all_data.get_child("schedule").erase(name);
}
if (node.second.count("") == 0 && conf.all_data.count(name) > 0) {
conf.all_data.get_child(name).erase(subitem.first);
}
- conf.all_data.add_child(name + "." + subitem.first, subitem.second);
+
+ if (subitem.first.size() == 0) {
+ if (conf.all_data.count(name) == 0) {
+ conf.all_data.add_child(name, subitem.second);
+ }
+ conf.all_data.get_child(name).push_back(subitem);
+ } else {
+ conf.all_data.add_child(name + "." + subitem.first, subitem.second);
+ }
}
}
conf.all_data.add_child(name + "." + node.first, node.second);
}
-void Config::mergeConfig(const std::string& source, ConfigData& conf) {
+Status Config::mergeConfig(const std::string& source, ConfigData& conf) {
pt::ptree tree;
try {
std::stringstream json_data;
json_data << source;
pt::read_json(json_data, tree);
} catch (const pt::json_parser::json_parser_error& e) {
- VLOG(1) << "Error parsing config JSON: " << e.what();
- return;
+ LOG(WARNING) << "Error parsing config JSON: " << e.what();
+ return Status(1, e.what());
}
if (tree.count("additional_monitoring") > 0) {
mergeExtraKey(key, item, conf);
}
}
+
+ return Status(0, "OK");
}
const pt::ptree& Config::getParsedData(const std::string& key) {
return Status(0, "OK");
}
-Status Config::checkConfig() { return load(); }
+void Config::addScheduledQuery(const std::string& name,
+ const std::string& query,
+ const int interval) {
+ // Create structure to add to the schedule.
+ tree_node node;
+ node.second.put("query", query);
+ node.second.put("interval", interval);
+
+ // Call to the inline function.
+ additionalScheduledQuery(name, node, getInstance().data_);
+}
+
+Status Config::checkConfig() {
+ getInstance().force_merge_success_ = true;
+ return load();
+}
+
+bool Config::checkScheduledQuery(const std::string& query) {
+ for (const auto& scheduled_query : getInstance().data_.schedule) {
+ if (scheduled_query.second.query == query) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool Config::checkScheduledQueryName(const std::string& query_name) {
+ return (getInstance().data_.schedule.count(query_name) == 0) ? false : true;
+}
void Config::recordQueryPerformance(const std::string& name,
size_t delay,
// Grab access to the non-const schedule item.
auto& query = getInstance().data_.schedule.at(name);
- auto diff = strtol(r1.at("user_time").c_str(), nullptr, 10) -
- strtol(r0.at("user_time").c_str(), nullptr, 10);
- query.user_time += diff;
- diff = strtol(r1.at("system_time").c_str(), nullptr, 10) -
- strtol(r0.at("system_time").c_str(), nullptr, 10);
- query.system_time += diff;
- diff = strtol(r1.at("resident_size").c_str(), nullptr, 10) -
- strtol(r0.at("resident_size").c_str(), nullptr, 10);
- // Memory is stored as an average of BSS changes between query executions.
- query.memory =
- (query.memory * query.executions + diff) / (query.executions + 1);
+ auto diff = AS_LITERAL(BIGINT_LITERAL, r1.at("user_time")) -
+ AS_LITERAL(BIGINT_LITERAL, r0.at("user_time"));
+ if (diff > 0) {
+ query.user_time += diff;
+ }
+
+ diff = AS_LITERAL(BIGINT_LITERAL, r1.at("system_time")) -
+ AS_LITERAL(BIGINT_LITERAL, r0.at("system_time"));
+ if (diff > 0) {
+ query.system_time += diff;
+ }
+
+ diff = AS_LITERAL(BIGINT_LITERAL, r1.at("resident_size")) -
+ AS_LITERAL(BIGINT_LITERAL, r0.at("resident_size"));
+ if (diff > 0) {
+ // Memory is stored as an average of RSS changes between query executions.
+ query.memory = (query.memory * query.executions) + diff;
+ query.memory = (query.memory / (query.executions + 1));
+ }
+
query.wall_time += delay;
query.output_size += size;
query.executions += 1;
}
std::default_random_engine generator;
+ generator.seed(chrono_clock::now().time_since_epoch().count());
std::uniform_int_distribution<int> distribution(min_value, max_value);
return distribution(generator);
}
--- /dev/null
+/*
+ * Copyright (c) 2015, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+#include <map>
+#include <string>
+
+#include <osquery/config.h>
+#include <osquery/core.h>
+#include <osquery/filesystem.h>
+#include <osquery/logger.h>
+
+namespace pt = boost::property_tree;
+
+namespace osquery {
+
+/**
+ * @brief A simple ConfigParserPlugin for a "packs" dictionary key.
+ *
+ */
+class QueryPackConfigParserPlugin : public ConfigParserPlugin {
+ public:
+ /// Request "packs" top level key.
+ std::vector<std::string> keys() { return {"packs"}; }
+
+ private:
+ /// Store the signatures and file_paths and compile the rules.
+ Status update(const ConfigTreeMap& config);
+};
+
+// Function to check if the pack is valid for this version of osquery.
+// If the osquery version is greater than or equal to the pack's required version, it is good to go.
+bool versionChecker(const std::string& pack, const std::string& version) {
+ auto required_version = split(pack, ".");
+ auto build_version = split(version, ".");
+
+ size_t index = 0;
+ for (const auto& chunk : build_version) {
+ if (required_version.size() <= index) {
+ return true;
+ }
+ try {
+ if (std::stoi(chunk) < std::stoi(required_version[index])) {
+ return false;
+ }
+ } catch (const std::invalid_argument& e) {
+ if (chunk.compare(required_version[index]) < 0) {
+ return false;
+ }
+ }
+ index++;
+ }
+ return true;
+}
+
+// Perform a substring search for the actual platform within the required.
+bool platformChecker(const std::string& required, const std::string& platform) {
+ // Match if platform is 'ubuntu12' and required is 'ubuntu'.
+ // Do not match if platform is 'ubuntu12' and required is 'ubuntu14'.
+#ifdef __linux__
+ if (required.find("linux") != std::string::npos) {
+ return true;
+ }
+#endif
+ if (required.find("any") != std::string::npos ||
+ required.find("all") != std::string::npos) {
+ return true;
+ }
+ return (required.find(platform) != std::string::npos);
+}
+
+Status parsePack(const std::string& name, const pt::ptree& data) {
+ if (data.count("queries") == 0) {
+ return Status(0, "Pack contains no queries");
+ }
+
+ // Check the pack-global minimum SDK version and platform.
+ auto version = data.get("version", "");
+ if (version.size() > 0 && !versionChecker(version, kSDKVersion)) {
+ return Status(0, "Minimum SDK version not met");
+ }
+
+ auto platform = data.get("platform", "");
+ if (platform.size() > 0 && !platformChecker(platform, kSDKPlatform)) {
+ return Status(0, "Platform version mismatch");
+ }
+
+ // For each query in the pack's queries, check their version/platform.
+ for (const auto& query : data.get_child("queries")) {
+ auto query_string = query.second.get("query", "");
+ if (Config::checkScheduledQuery(query_string)) {
+ VLOG(1) << "Query pack " << name
+ << " contains a duplicated query: " << query.first;
+ continue;
+ }
+
+ // Check the specific query's required version.
+ version = query.second.get("version", "");
+ if (version.size() > 0 && !versionChecker(version, kSDKVersion)) {
+ continue;
+ }
+
+ // Check the specific query's required platform.
+ platform = query.second.get("platform", "");
+ if (platform.size() > 0 && !platformChecker(platform, kSDKPlatform)) {
+ continue;
+ }
+
+ // Hope there is a supplied/non-0 query interval to apply this query pack
+ // query to the osquery schedule.
+ auto query_interval = query.second.get("interval", 0);
+ if (query_interval > 0) {
+ auto query_name = "pack_" + name + "_" + query.first;
+ Config::addScheduledQuery(query_name, query_string, query_interval);
+ }
+ }
+
+ return Status(0, "OK");
+}
+
+Status QueryPackConfigParserPlugin::update(const ConfigTreeMap& config) {
+ // Iterate through all the packs to get the configuration.
+ for (auto const& pack : config.at("packs")) {
+ auto pack_name = std::string(pack.first.data());
+ auto pack_path = std::string(pack.second.data());
+
+ // Read each pack configuration in JSON
+ pt::ptree pack_data;
+ auto status = osquery::parseJSON(pack_path, pack_data);
+ if (!status.ok()) {
+ LOG(WARNING) << "Error parsing Query Pack " << pack_name << ": "
+ << status.getMessage();
+ continue;
+ }
+
+ // Parse the pack, meaning compare version/platform requirements and
+ // check the sanity of each query in the pack's queries.
+ status = parsePack(pack_name, pack_data);
+ if (!status.ok()) {
+ return status;
+ }
+
+ // Save the queries list for table-based introspection.
+ data_.put_child(pack_name, pack_data);
+ // Record the pack path.
+ data_.put(pack_name + ".path", pack_path);
+ }
+
+ return Status(0, "OK");
+}
+
+/// Call the simple Query Packs ConfigParserPlugin "packs".
+REGISTER_INTERNAL(QueryPackConfigParserPlugin, "config_parser", "packs");
+}
--- /dev/null
+/*
+ * Copyright (c) 2014, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+#include <gtest/gtest.h>
+
+#include <osquery/logger.h>
+
+#include "osquery/core/test_util.h"
+
+namespace pt = boost::property_tree;
+
+namespace osquery {
+
+// Test the pack version checker.
+bool versionChecker(const std::string& pack, const std::string& version);
+// Test the pack platform checker.
+bool platformChecker(const std::string& required, const std::string& platform);
+
+pt::ptree getQueryPacksContent() {
+ pt::ptree pack_tree;
+ auto pack_path = kTestDataPath + "test_pack.conf";
+ auto status = osquery::parseJSON(pack_path, pack_tree);
+ return pack_tree.get_child("queries");
+}
+
+std::map<std::string, pt::ptree> getQueryPacksExpectedResults() {
+ std::map<std::string, pt::ptree> result;
+ pt::ptree aux_data;
+
+ std::string query = "select * from launchd";
+ aux_data.put("query", query);
+ int interval = 414141;
+ aux_data.put("interval", interval);
+ std::string platform = "whatever";
+ aux_data.put("platform", platform);
+ std::string version = "1.0.0";
+ aux_data.put("version", version);
+ std::string description = "Very descriptive description";
+ aux_data.put("description", description);
+ std::string value = "Value overflow";
+ aux_data.put("value", value);
+
+ result.insert(std::pair<std::string, pt::ptree>("launchd", aux_data));
+
+ return result;
+}
+
+class QueryPacksConfigTests : public testing::Test {};
+
+TEST_F(QueryPacksConfigTests, version_comparisons) {
+ EXPECT_TRUE(versionChecker("1.0.0", "1.0.0"));
+ EXPECT_TRUE(versionChecker("1.0.0", "1.2.0"));
+ EXPECT_TRUE(versionChecker("1.0", "1.2.0"));
+ EXPECT_TRUE(versionChecker("1.0", "1.0.2"));
+ EXPECT_TRUE(versionChecker("1.0.0", "1.0.2-r1"));
+ EXPECT_FALSE(versionChecker("1.2", "1.0.2"));
+ EXPECT_TRUE(versionChecker("1.0.0-r1", "1.0.0"));
+}
+
+TEST_F(QueryPacksConfigTests, platform_comparisons) {
+#ifdef __linux__
+ // If the platform is linux and the required platform is linux, match
+ EXPECT_TRUE(platformChecker("linux", "ubuntu"));
+ EXPECT_TRUE(platformChecker("linux", "who_knows_what"));
+#endif
+ EXPECT_TRUE(platformChecker("linux,darwin", "darwin"));
+ EXPECT_TRUE(platformChecker("darwin", "darwin"));
+ EXPECT_FALSE(platformChecker("darwin", "linux"));
+
+ EXPECT_TRUE(platformChecker(" darwin", "darwin"));
+ // There are no logical operators, just matching.
+ EXPECT_TRUE(platformChecker("!darwin", "darwin"));
+
+ EXPECT_TRUE(platformChecker("all", "darwin"));
+ EXPECT_TRUE(platformChecker("any", "darwin"));
+}
+
+TEST_F(QueryPacksConfigTests, test_query_packs_configuration) {
+ auto data = getQueryPacksContent();
+ auto expected = getQueryPacksExpectedResults();
+ auto& real_ld = data.get_child("launchd");
+ auto& expect_ld = expected["launchd"];
+
+ EXPECT_EQ(expect_ld.get("query", ""), real_ld.get("query", ""));
+ EXPECT_EQ(expect_ld.get("interval", 0), real_ld.get("interval", 0));
+ EXPECT_EQ(expect_ld.get("platform", ""), real_ld.get("platform", ""));
+ EXPECT_EQ(expect_ld.get("version", ""), real_ld.get("version", ""));
+ EXPECT_EQ(expect_ld.get("description", ""), real_ld.get("description", ""));
+ EXPECT_EQ(expect_ld.get("value", ""), real_ld.get("value", ""));
+}
+}
CLI_FLAG(string,
config_path,
"/var/osquery/osquery.conf",
- "(filesystem) config plugin path to JSON config file");
+ "Path to JSON config file");
class FilesystemConfigPlugin : public ConfigPlugin {
public:
void Flag::printFlags(bool shell, bool external, bool cli) {
std::vector<GFLAGS_NAMESPACE::CommandLineFlagInfo> info;
GFLAGS_NAMESPACE::GetAllFlags(&info);
+ auto& details = instance().flags_;
// Determine max indent needed for all flag names.
size_t max = 0;
- for (const auto& flag : info) {
- max = (max > flag.name.size()) ? max : flag.name.size();
+ for (const auto& flag : details) {
+ max = (max > flag.first.size()) ? max : flag.first.size();
}
// Additional index for flag values.
- max += 5;
+ max += 6;
auto& aliases = instance().aliases_;
- auto& details = instance().flags_;
for (const auto& flag : info) {
if (details.count(flag.name) > 0) {
const auto& detail = details.at(flag.name);
fprintf(stdout, " --%s", flag.name.c_str());
- size_t pad = max;
+ int pad = max;
if (flag.type != "bool") {
fprintf(stdout, " VALUE");
pad -= 6;
}
+ pad -= flag.name.size();
- fprintf(stdout, "%s", std::string(pad - flag.name.size(), ' ').c_str());
- fprintf(stdout, "%s\n", getDescription(flag.name).c_str());
+ if (pad > 0 && pad < 80) {
+ // Never pad more than 80 characters.
+ fprintf(stdout, "%s", std::string(pad, ' ').c_str());
+ }
+ fprintf(stdout, " %s\n", getDescription(flag.name).c_str());
}
}
}
*
*/
+#include <chrono>
+#include <random>
+
#include <syslog.h>
#include <stdio.h>
#include <time.h>
#include <osquery/config.h>
#include <osquery/core.h>
-#include <osquery/database.h>
#include <osquery/events.h>
#include <osquery/extensions.h>
#include <osquery/flags.h>
#include <osquery/registry.h>
#include "osquery/core/watcher.h"
+#include "osquery/database/db_handle.h"
+
+#ifdef __linux__
+#include <sys/resource.h>
+#include <sys/syscall.h>
+
+/*
+ * These are the io priority groups as implemented by CFQ. RT is the realtime
+ * class, it always gets premium service. BE is the best-effort scheduling
+ * class, the default for any process. IDLE is the idle scheduling class, it
+ * is only served when no one else is using the disk.
+ */
+enum {
+ IOPRIO_CLASS_NONE,
+ IOPRIO_CLASS_RT,
+ IOPRIO_CLASS_BE,
+ IOPRIO_CLASS_IDLE,
+};
+
+/*
+ * 8 best effort priority levels are supported
+ */
+#define IOPRIO_BE_NR (8)
+
+enum {
+ IOPRIO_WHO_PROCESS = 1,
+ IOPRIO_WHO_PGRP,
+ IOPRIO_WHO_USER,
+};
+#endif
namespace fs = boost::filesystem;
" - https://osquery.readthedocs.org/en/latest/introduction/using-osqueryd/" \
"\n\n";
+typedef std::chrono::high_resolution_clock chrono_clock;
+
CLI_FLAG(bool,
config_check,
false,
void printUsage(const std::string& binary, int tool) {
// Parse help options before gflags. Only display osquery-related options.
- fprintf(stdout, DESCRIPTION, OSQUERY_VERSION);
+ fprintf(stdout, DESCRIPTION, kVersion.c_str());
if (tool == OSQUERY_TOOL_SHELL) {
// The shell allows a caller to run a single SQL statement and exit.
fprintf(stdout, USAGE, binary.c_str(), "[SQL STATEMENT]");
argv_(&argv),
tool_(tool),
binary_(fs::path(std::string(argv[0])).filename().string()) {
- std::srand(time(nullptr));
+ std::srand(chrono_clock::now().time_since_epoch().count());
// osquery implements a custom help/usage output.
for (int i = 1; i < *argc_; i++) {
#endif
// Set version string from CMake build
- GFLAGS_NAMESPACE::SetVersionString(OSQUERY_VERSION);
+ GFLAGS_NAMESPACE::SetVersionString(kVersion.c_str());
// Let gflags parse the non-help options/flags.
GFLAGS_NAMESPACE::ParseCommandLineFlags(
auto homedir = osqueryHomeDirectory();
if (osquery::pathExists(homedir).ok() ||
boost::filesystem::create_directory(homedir)) {
- osquery::FLAGS_database_path = homedir + "/shell.db";
- osquery::FLAGS_extensions_socket = homedir + "/shell.em";
+ // Only apply user/shell-specific paths if not overridden by CLI flag.
+ if (Flag::isDefault("database_path")) {
+ osquery::FLAGS_database_path = homedir + "/shell.db";
+ }
+ if (Flag::isDefault("extensions_socket")) {
+ osquery::FLAGS_extensions_socket = homedir + "/shell.em";
+ }
}
}
VLOG(1) << "osquery worker initialized [watcher="
<< getenv("OSQUERY_WORKER") << "]";
} else {
- VLOG(1) << "osquery initialized [version=" << OSQUERY_VERSION << "]";
+ VLOG(1) << "osquery initialized [version=" << kVersion << "]";
}
} else {
- VLOG(1) << "osquery extension initialized [sdk=" << OSQUERY_SDK_VERSION
- << "]";
+ VLOG(1) << "osquery extension initialized [sdk=" << kSDKVersion << "]";
}
}
// Print the version to SYSLOG.
syslog(
- LOG_NOTICE, "%s started [version=%s]", binary_.c_str(), OSQUERY_VERSION);
+ LOG_NOTICE, "%s started [version=%s]", binary_.c_str(), kVersion.c_str());
// Check if /var/osquery exists
if ((Flag::isDefault("pidfile") || Flag::isDefault("database_path")) &&
LOG(ERROR) << binary_ << " initialize failed: " << pid_status.toString();
::exit(EXIT_FAILURE);
}
+
+ // Nice ourselves if using a watchdog and the level is not too permissive.
+ if (!FLAGS_disable_watchdog &&
+ FLAGS_watchdog_level >= WATCHDOG_LEVEL_DEFAULT &&
+ FLAGS_watchdog_level != WATCHDOG_LEVEL_DEBUG) {
+ // Set CPU scheduling IO limits.
+ setpriority(PRIO_PGRP, 0, 10);
+#ifdef __linux__
+ // Using: ioprio_set(IOPRIO_WHO_PGRP, 0, IOPRIO_CLASS_IDLE);
+ syscall(SYS_ioprio_set, IOPRIO_WHO_PGRP, 0, IOPRIO_CLASS_IDLE);
+#elif defined(__APPLE__) || defined(__FreeBSD__)
+ setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE);
+#endif
+ }
}
void Initializer::initWatcher() {
void Initializer::initWorker(const std::string& name) {
// Clear worker's arguments.
size_t name_size = strlen((*argv_)[0]);
+ auto original_name = std::string((*argv_)[0]);
for (int i = 0; i < *argc_; i++) {
if ((*argv_)[i] != nullptr) {
- memset((*argv_)[i], 0, strlen((*argv_)[i]));
+ memset((*argv_)[i], ' ', strlen((*argv_)[i]));
}
}
// Set the worker's process name.
- if (name.size() <= name_size) {
+ if (name.size() < name_size) {
std::copy(name.begin(), name.end(), (*argv_)[0]);
+ (*argv_)[0][name.size()] = '\0';
+ } else {
+ std::copy(original_name.begin(), original_name.end(), (*argv_)[0]);
+ (*argv_)[0][original_name.size()] = '\0';
}
// Start a watcher watcher thread to exit the process if the watcher exits.
// Load registry/extension modules before extensions.
osquery::loadModules();
+ // Pre-extension manager initialization options checking.
+ if (FLAGS_config_check && !Watcher::hasManagedExtensions()) {
+ FLAGS_disable_extensions = true;
+ }
+
+ // Check the backing store by allocating and exiting on error.
+ if (!DBHandle::checkDB()) {
+ LOG(ERROR) << binary_ << " initialize failed: Could not open RocksDB";
+ if (isWorker()) {
+ ::exit(EXIT_CATASTROPHIC);
+ } else {
+ ::exit(EXIT_FAILURE);
+ }
+ }
+
// Bind to an extensions socket and wait for registry additions.
osquery::startExtensionManager();
// Load the osquery config using the default/active config plugin.
Config::load();
- // Check the backing store by allocating and exiting on error.
- if (!DBHandle::checkDB()) {
- LOG(ERROR) << binary_ << " initialize failed: Could not create DB handle";
- if (isWorker()) {
- ::exit(EXIT_CATASTROPHIC);
- } else {
- ::exit(EXIT_FAILURE);
- }
- }
-
// Initialize the status and result plugin logger.
initActivePlugin("logger", FLAGS_logger_plugin);
initLogger(binary_);
#include <boost/uuid/uuid_io.hpp>
#include <osquery/core.h>
-#include <osquery/database/db_handle.h>
#include <osquery/filesystem.h>
#include <osquery/logger.h>
#include <osquery/sql.h>
"Force osqueryd to kill previously-running daemons");
std::string getHostname() {
- char hostname[256]; // Linux max should be 64.
- memset(hostname, 0, sizeof(hostname));
+ char hostname[256] = {0}; // Linux max should be 64.
gethostname(hostname, sizeof(hostname) - 1);
std::string hostname_string = std::string(hostname);
boost::algorithm::trim(hostname_string);
namespace pt = boost::property_tree;
namespace osquery {
-namespace tables {
Status TablePlugin::addExternal(const std::string& name,
const PluginResponse& response) {
}
std::string TablePlugin::columnDefinition() const {
- return tables::columnDefinition(columns());
+ return osquery::columnDefinition(columns());
}
PluginResponse TablePlugin::routeInfo() const {
affinity = tree.get<std::string>("affinity");
}
}
-}
namespace osquery {
+/// Most tests will use binary or disk-backed content for parsing tests.
+std::string kTestDataPath = "../../tools/tests/";
+
QueryData getTestDBExpectedResults() {
QueryData d;
Row row1;
/// Any SQL-dependent tests should use kTestQuery for a pre-populated example.
const std::string kTestQuery = "SELECT * FROM test_table";
-const std::string kTestDataPath = "../../tools/tests/";
+extern std::string kTestDataPath;
/// Tests should limit intermediate input/output to a working directory.
/// Config data, logging results, and intermediate database/caching usage.
#include <osquery/tables.h>
namespace osquery {
-namespace tables {
class TablesTests : public testing::Test {};
struct ConstraintList cl;
// An empty constraint list has expectations.
EXPECT_FALSE(cl.exists());
+ EXPECT_FALSE(cl.exists(GREATER_THAN));
EXPECT_TRUE(cl.notExistsOrMatches("some"));
auto constraint = Constraint(EQUALS);
constraint.expr = "some";
cl.add(constraint);
+ // Test existence checks based on flags.
EXPECT_TRUE(cl.exists());
+ EXPECT_TRUE(cl.exists(EQUALS));
+ EXPECT_TRUE(cl.exists(EQUALS | LESS_THAN));
+ EXPECT_FALSE(cl.exists(LESS_THAN));
+
EXPECT_TRUE(cl.notExistsOrMatches("some"));
EXPECT_TRUE(cl.matches("some"));
EXPECT_FALSE(cl.notExistsOrMatches("not_some"));
EXPECT_TRUE(cm["path"].existsAndMatches("some"));
}
}
-}
#include <cstring>
+#include <math.h>
#include <sys/wait.h>
#include <signal.h>
const std::map<WatchdogLimitType, std::vector<size_t> > kWatchdogLimits = {
// Maximum MB worker can privately allocate.
- {MEMORY_LIMIT, {50, 30, 10, 1000}},
+ {MEMORY_LIMIT, {80, 50, 30, 1000}},
// Percent of user or system CPU worker can utilize for LATENCY_LIMIT
// seconds.
{UTILIZATION_LIMIT, {90, 80, 60, 1000}},
return (Watcher::getWorker() >= 0 || Watcher::hasManagedExtensions());
}
-void WatcherRunner::enter() {
+void WatcherRunner::start() {
// Set worker performance counters to an initial state.
Watcher::resetWorkerCounters(0);
signal(SIGCHLD, childHandler);
}
bool WatcherRunner::isChildSane(pid_t child) {
- auto rows =
- SQL::selectAllFrom("processes", "pid", tables::EQUALS, INTEGER(child));
+ auto rows = SQL::selectAllFrom("processes", "pid", EQUALS, INTEGER(child));
if (rows.size() == 0) {
// Could not find worker process?
return false;
// Get the performance state for the worker or extension.
size_t sustained_latency = 0;
// Compare CPU utilization since last check.
- BIGINT_LITERAL footprint, user_time, system_time, parent;
+ BIGINT_LITERAL footprint = 0, user_time = 0, system_time = 0, parent = 0;
// IV is the check interval in seconds, and utilization is set per-second.
auto iv = std::max(getWorkerLimit(INTERVAL), (size_t)1);
{
WatcherLocker locker;
- auto state = Watcher::getState(child);
+ auto& state = Watcher::getState(child);
try {
parent = AS_LITERAL(BIGINT_LITERAL, rows[0].at("parent"));
user_time = AS_LITERAL(BIGINT_LITERAL, rows[0].at("user_time")) / iv;
state.sustained_latency = 0;
}
- // Check the different of CPU time used since last check.
- if (state.user_time + getWorkerLimit(UTILIZATION_LIMIT) < user_time ||
- state.system_time + getWorkerLimit(UTILIZATION_LIMIT) < system_time) {
+ // Check the difference of CPU time used since last check.
+ if (user_time - state.user_time > getWorkerLimit(UTILIZATION_LIMIT) ||
+ system_time - state.system_time > getWorkerLimit(UTILIZATION_LIMIT)) {
state.sustained_latency++;
} else {
state.sustained_latency = 0;
LOG(WARNING) << "osqueryd worker system performance limits exceeded";
return false;
}
-
// Check if the private memory exceeds a memory limit.
if (footprint > 0 && footprint > getWorkerLimit(MEMORY_LIMIT) * 1024 * 1024) {
LOG(WARNING) << "osqueryd worker memory limits exceeded: " << footprint;
if (Watcher::getState(Watcher::getWorker()).last_respawn_time >
getUnixTime() - getWorkerLimit(RESPAWN_LIMIT)) {
LOG(WARNING) << "osqueryd worker respawning too quickly";
+ Watcher::workerRestarted();
interruptableSleep(getWorkerLimit(RESPAWN_DELAY) * 1000);
+ // Exponential back off for quickly-respawning clients.
+ interruptableSleep(pow(2, Watcher::workerRestartCount()) * 1000);
}
}
// Get the path of the current process.
- auto qd = SQL::selectAllFrom("processes", "pid", tables::EQUALS,
- INTEGER(getpid()));
+ auto qd = SQL::selectAllFrom("processes", "pid", EQUALS, INTEGER(getpid()));
if (qd.size() != 1 || qd[0].count("path") == 0 || qd[0]["path"].size() == 0) {
LOG(ERROR) << "osquery watcher cannot determine process path";
::exit(EXIT_FAILURE);
setenv("OSQUERY_EXTENSIONS", "true", 1);
}
+ // Get the complete path of the osquery process binary.
+ auto exec_path = fs::system_complete(fs::path(qd[0]["path"]));
+ if (!safePermissions(
+ exec_path.parent_path().string(), exec_path.string(), true)) {
+ // osqueryd binary has become unsafe.
+ LOG(ERROR) << "osqueryd has unsafe permissions: " << exec_path.string();
+ ::exit(EXIT_FAILURE);
+ }
+
auto worker_pid = fork();
if (worker_pid < 0) {
// Unrecoverable error, cannot create a worker process.
} else if (worker_pid == 0) {
// This is the new worker process, no watching needed.
setenv("OSQUERY_WORKER", std::to_string(getpid()).c_str(), 1);
- // Get the complete path of the osquery process binary.
- auto exec_path = fs::system_complete(fs::path(qd[0]["path"]));
execve(exec_path.string().c_str(), argv_, environ);
// Code should never reach this point.
LOG(ERROR) << "osqueryd could not start worker process";
return true;
}
-void WatcherWatcherRunner::enter() {
+void WatcherWatcherRunner::start() {
while (true) {
if (getppid() != watcher_) {
// Watcher died, the worker must follow.
VLOG(1) << "osqueryd worker (" << getpid()
<< ") detected killed watcher (" << watcher_ << ")";
- Dispatcher::removeServices();
- Dispatcher::joinServices();
+ Dispatcher::stopServices();
+ // The watcher watcher is a thread. Do not join services after removing.
::exit(EXIT_SUCCESS);
}
interruptableSleep(getWorkerLimit(INTERVAL) * 1000);
#include "osquery/dispatcher/dispatcher.h"
+/// Define a special debug/testing watchdog level.
+#define WATCHDOG_LEVEL_DEBUG 3
+/// Define the default watchdog level, level below are considered permissive.
+#define WATCHDOG_LEVEL_DEFAULT 1
+
namespace osquery {
DECLARE_bool(disable_watchdog);
+DECLARE_int32(watchdog_level);
+
+class WatcherRunner;
/**
* @brief Categories of process performance limitations.
*
* Performance limits are applied by a watcher thread on autoloaded extensions
- * and optional a daemon worker process. The performance types are identified
+ * and an optional daemon worker process. The performance types are identified
* here, and organized into levels. Such that a caller may enforce rigor or
* relax the performance expectations of a osquery daemon.
*/
/// Reset pid and performance counters for a worker or extension process.
static void reset(pid_t child);
+ /// Count the number of worker restarts.
+ static size_t workerRestartCount() { return instance().worker_restarts_; }
+
/**
* @brief Return the state of autoloadable extensions.
*
private:
/// Do not request the lock until extensions are used.
- Watcher() : worker_(-1), lock_(mutex_, boost::defer_lock) {}
+ Watcher()
+ : worker_(-1), worker_restarts_(0), lock_(mutex_, boost::defer_lock) {}
Watcher(Watcher const&);
void operator=(Watcher const&);
virtual ~Watcher() {}
private:
+ /// Inform the watcher that the worker restarted without cause.
+ static void workerRestarted() { instance().worker_restarts_++; }
+
+ private:
/// Performance state for the worker process.
PerformanceState state_;
/// Performance states for each autoloadable extension binary.
private:
/// Keep the single worker process/thread ID for inspection.
pid_t worker_;
+ /// Number of worker restarts NOT induced by a watchdog process.
+ size_t worker_restarts_;
/// Keep a list of resolved extension paths and their managed pids.
std::map<std::string, pid_t> extensions_;
/// Paths to autoload extensions.
boost::mutex mutex_;
/// Mutex and lock around extensions access.
boost::unique_lock<boost::mutex> lock_;
+
+ private:
+ friend class WatcherRunner;
};
/**
private:
/// Dispatcher (this service thread's) entry point.
- void enter();
+ void start();
/// Boilerplate function to sleep for some configured latency
bool ok();
/// Begin the worker-watcher process.
class WatcherWatcherRunner : public InternalRunnable {
public:
explicit WatcherWatcherRunner(pid_t watcher) : watcher_(watcher) {}
- void enter();
+
+ /// Runnable thread's entry point.
+ void start();
private:
+ /// Parent, or watchdog, process ID.
pid_t watcher_;
};
-ADD_OSQUERY_LIBRARY(TRUE osquery_database db_handle.cpp
- query.cpp
- results.cpp)
+ADD_OSQUERY_LIBRARY(TRUE osquery_database database.cpp)
+
+
+ADD_OSQUERY_LIBRARY(TRUE osquery_database_internal db_handle.cpp
+ query.cpp)
FILE(GLOB OSQUERY_DATABASE_TESTS "tests/*.cpp")
ADD_OSQUERY_TEST(TRUE ${OSQUERY_DATABASE_TESTS})
#include <boost/lexical_cast.hpp>
#include <boost/property_tree/json_parser.hpp>
-#include <osquery/database/results.h>
+#include <osquery/database.h>
#include <osquery/logger.h>
namespace pt = boost::property_tree;
q.push_back(r);
return true;
}
+
+Status DatabasePlugin::call(const PluginRequest& request,
+ PluginResponse& response) {
+ if (request.count("action") == 0) {
+ return Status(1, "Database plugin must include a request action");
+ }
+
+ // Get a domain/key, which are used for most database plugin actions.
+ auto domain = (request.count("domain") > 0) ? request.at("domain") : "";
+ auto key = (request.count("key") > 0) ? request.at("key") : "";
+
+ // Switch over the possible database plugin actions.
+ if (request.at("action") == "get") {
+ std::string value;
+ auto status = this->get(domain, key, value);
+ response.push_back({{"v", value}});
+ return status;
+ } else if (request.at("action") == "put") {
+ if (request.count("value") == 0) {
+ return Status(1, "Database plugin put action requires a value");
+ }
+ return this->put(domain, key, request.at("value"));
+ } else if (request.at("action") == "remove") {
+ return this->remove(domain, key);
+ } else if (request.at("action") == "scan") {
+ std::vector<std::string> keys;
+ auto status = this->scan(domain, keys);
+ for (const auto& key : keys) {
+ response.push_back({{"k", key}});
+ }
+ return status;
+ }
+
+ return Status(1, "Unknown database plugin action");
+}
+
+Status getDatabaseValue(const std::string& domain,
+ const std::string& key,
+ std::string& value) {
+ PluginRequest request = {{"action", "get"}, {"domain", domain}, {"key", key}};
+ PluginResponse response;
+ auto status = Registry::call("database", "rocks", request, response);
+ if (!status.ok()) {
+ VLOG(1) << "Cannot get database " << domain << "/" << key << ": "
+ << status.getMessage();
+ return status;
+ }
+
+ // Set value from the internally-known "v" key.
+ if (response.size() > 0 && response[0].count("v") > 0) {
+ value = response[0].at("v");
+ }
+ return status;
+}
+
+Status setDatabaseValue(const std::string& domain,
+ const std::string& key,
+ const std::string& value) {
+ PluginRequest request = {
+ {"action", "put"}, {"domain", domain}, {"key", key}, {"value", value}};
+ return Registry::call("database", "rocks", request);
+}
+
+Status deleteDatabaseValue(const std::string& domain, const std::string& key) {
+ PluginRequest request = {
+ {"action", "remove"}, {"domain", domain}, {"key", key}};
+ return Registry::call("database", "rocks", request);
+}
+
+Status scanDatabaseKeys(const std::string& domain,
+ std::vector<std::string>& keys) {
+ PluginRequest request = {{"action", "scan"}, {"domain", domain}};
+ PluginResponse response;
+ auto status = Registry::call("database", "rocks", request, response);
+
+ for (const auto& item : response) {
+ if (item.count("k") > 0) {
+ keys.push_back(item.at("k"));
+ }
+ }
+ return status;
+}
}
#include <rocksdb/env.h>
#include <rocksdb/options.h>
-#include <osquery/database/db_handle.h>
+#include <osquery/database.h>
#include <osquery/filesystem.h>
#include <osquery/logger.h>
#include <osquery/status.h>
+#include "osquery/database/db_handle.h"
+
namespace osquery {
+class RocksDatabasePlugin : public DatabasePlugin {
+ public:
+ /// Data retrieval method.
+ Status get(const std::string& domain,
+ const std::string& key,
+ std::string& value) const;
+
+ /// Data storage method.
+ Status put(const std::string& domain,
+ const std::string& key,
+ const std::string& value);
+
+ /// Data removal method.
+ Status remove(const std::string& domain, const std::string& k);
+
+ /// Key/index lookup method.
+ Status scan(const std::string& domain,
+ std::vector<std::string>& results) const;
+};
+
+/// Backing-storage provider for osquery internal/core.
+REGISTER_INTERNAL(RocksDatabasePlugin, "database", "rocks");
+
/////////////////////////////////////////////////////////////////////////////
// Constants
/////////////////////////////////////////////////////////////////////////////
-const std::string kConfigurations = "configurations";
+const std::string kPersistentSettings = "configurations";
const std::string kQueries = "queries";
const std::string kEvents = "events";
+const std::string kLogs = "logs";
-const std::vector<std::string> kDomains = {kConfigurations, kQueries, kEvents};
-
-FLAG(string,
- database_path,
- "/var/osquery/osquery.db",
- "If using a disk-based backing store, specify a path");
+/**
+ * @brief A const vector of column families in RocksDB
+ *
+ * RocksDB has a concept of "column families" which are kind of like tables
+ * in other databases. kDomains is populated with a list of all column
+ * families. If a string exists in kDomains, it's a column family in the
+ * database.
+ */
+const std::vector<std::string> kDomains = {
+ kPersistentSettings, kQueries, kEvents, kLogs
+};
+
+CLI_FLAG(string,
+ database_path,
+ "/var/osquery/osquery.db",
+ "If using a disk-based backing store, specify a path");
FLAG_ALIAS(std::string, db_path, database_path);
-FLAG(bool, database_in_memory, false, "Keep osquery backing-store in memory");
+CLI_FLAG(bool,
+ database_in_memory,
+ false,
+ "Keep osquery backing-store in memory");
FLAG_ALIAS(bool, use_in_memory_database, database_in_memory);
/////////////////////////////////////////////////////////////////////////////
DBHandle::DBHandle(const std::string& path, bool in_memory) {
options_.create_if_missing = true;
options_.create_missing_column_families = true;
+ options_.info_log_level = rocksdb::WARN_LEVEL;
+ options_.log_file_time_to_roll = 0;
+ options_.keep_log_file_num = 10;
+ options_.max_log_file_size = 1024 * 1024 * 1;
if (in_memory) {
// Remove when MemEnv is included in librocksdb
cf_name, rocksdb::ColumnFamilyOptions()));
}
- VLOG(1) << "Opening DB handle: " << path;
+ VLOG(1) << "Opening RocksDB handle: " << path;
auto s = rocksdb::DB::Open(options_, path, column_families_, &handles_, &db_);
if (!s.ok()) {
throw std::runtime_error(s.ToString());
/////////////////////////////////////////////////////////////////////////////
// getInstance methods
/////////////////////////////////////////////////////////////////////////////
-std::shared_ptr<DBHandle> DBHandle::getInstance() {
+
+DBHandleRef DBHandle::getInstance() {
return getInstance(FLAGS_database_path, FLAGS_database_in_memory);
}
return true;
}
-std::shared_ptr<DBHandle> DBHandle::getInstanceInMemory() {
+DBHandleRef DBHandle::getInstanceInMemory() {
return getInstance("", true);
}
-std::shared_ptr<DBHandle> DBHandle::getInstanceAtPath(const std::string& path) {
+DBHandleRef DBHandle::getInstanceAtPath(const std::string& path) {
return getInstance(path, false);
}
-std::shared_ptr<DBHandle> DBHandle::getInstance(const std::string& path,
- bool in_memory) {
- static std::shared_ptr<DBHandle> db_handle =
- std::shared_ptr<DBHandle>(new DBHandle(path, in_memory));
+DBHandleRef DBHandle::getInstance(const std::string& path, bool in_memory) {
+ static DBHandleRef db_handle = DBHandleRef(new DBHandle(path, in_memory));
return db_handle;
}
// Data manipulation methods
/////////////////////////////////////////////////////////////////////////////
-osquery::Status DBHandle::Get(const std::string& domain,
- const std::string& key,
- std::string& value) {
+Status DBHandle::Get(const std::string& domain,
+ const std::string& key,
+ std::string& value) {
auto cfh = getHandleForColumnFamily(domain);
if (cfh == nullptr) {
return Status(1, "Could not get column family for " + domain);
return Status(s.code(), s.ToString());
}
-osquery::Status DBHandle::Put(const std::string& domain,
- const std::string& key,
- const std::string& value) {
+Status DBHandle::Put(const std::string& domain,
+ const std::string& key,
+ const std::string& value) {
auto cfh = getHandleForColumnFamily(domain);
if (cfh == nullptr) {
return Status(1, "Could not get column family for " + domain);
return Status(s.code(), s.ToString());
}
-osquery::Status DBHandle::Delete(const std::string& domain,
- const std::string& key) {
+Status DBHandle::Delete(const std::string& domain, const std::string& key) {
auto cfh = getHandleForColumnFamily(domain);
if (cfh == nullptr) {
return Status(1, "Could not get column family for " + domain);
return Status(s.code(), s.ToString());
}
-osquery::Status DBHandle::Scan(const std::string& domain,
- std::vector<std::string>& results) {
+Status DBHandle::Scan(const std::string& domain,
+ std::vector<std::string>& results) {
auto cfh = getHandleForColumnFamily(domain);
if (cfh == nullptr) {
return Status(1, "Could not get column family for " + domain);
delete it;
return Status(0, "OK");
}
+
+Status RocksDatabasePlugin::get(const std::string& domain,
+ const std::string& key,
+ std::string& value) const {
+ return DBHandle::getInstance()->Get(domain, key, value);
+}
+
+Status RocksDatabasePlugin::put(const std::string& domain,
+ const std::string& key,
+ const std::string& value) {
+ return DBHandle::getInstance()->Put(domain, key, value);
+}
+
+Status RocksDatabasePlugin::remove(const std::string& domain,
+ const std::string& key) {
+ return DBHandle::getInstance()->Delete(domain, key);
+}
+
+Status RocksDatabasePlugin::scan(const std::string& domain,
+ std::vector<std::string>& results) const {
+ return DBHandle::getInstance()->Scan(domain, results);
+}
}
#include <rocksdb/db.h>
+#include <boost/noncopyable.hpp>
+
+#include <osquery/core.h>
#include <osquery/flags.h>
-#include <osquery/status.h>
namespace osquery {
DECLARE_string(database_path);
-/////////////////////////////////////////////////////////////////////////////
-// Constants
-/////////////////////////////////////////////////////////////////////////////
-
-/// The default path of the RocksDB database on disk
-extern const std::string kDBPath;
-
-/**
- * @brief A const vector of column families in RocksDB
- *
- * RocksDB has a concept of "column families" which are kind of like tables
- * in other databases. kDomainds is populated with a list of all column
- * families. If a string exists in kDomains, it's a column family in the
- * database.
- */
-extern const std::vector<std::string> kDomains;
-
-/// The "domain" where the results of scheduled queries are stored
-extern const std::string kQueries;
-
-/// The "domain" where certain global configurations are stored
-extern const std::string kConfigurations;
-
-/// The "domain" where event results are stored, queued for querytime
-extern const std::string kEvents;
-
-/////////////////////////////////////////////////////////////////////////////
-// DBHandle RAII singleton
-/////////////////////////////////////////////////////////////////////////////
-
class DBHandle;
typedef std::shared_ptr<DBHandle> DBHandleRef;
*/
class DBHandle {
public:
- /**
- * @brief Destructor which takes care of deallocating all previously
- * allocated resources
- */
+ /// Removes every column family handle and the single DB handle/lock.
~DBHandle();
/**
- * @brief The primary way to access the DBHandle singleton
+ * @brief The primary way to access the DBHandle singleton.
*
* DBHandle::getInstance() provides access to the DBHandle singleton.
*
*/
static bool checkDB();
- /**
- * @brief Helper method which can be used to get a raw pointer to the
- * underlying RocksDB database handle
- *
- * You probably shouldn't use this. DBHandle::getDB() should only be used
- * when you're positive that it's the right thing to use.
- *
- * @return a pointer to the underlying RocksDB database handle
- */
- rocksdb::DB* getDB();
-
+ private:
/////////////////////////////////////////////////////////////////////////////
// Data access methods
/////////////////////////////////////////////////////////////////////////////
*/
rocksdb::ColumnFamilyHandle* getHandleForColumnFamily(const std::string& cf);
+ /**
+ * @brief Helper method which can be used to get a raw pointer to the
+ * underlying RocksDB database handle
+ *
+ * You probably shouldn't use this. DBHandle::getDB() should only be used
+ * when you're positive that it's the right thing to use.
+ *
+ * @return a pointer to the underlying RocksDB database handle
+ */
+ rocksdb::DB* getDB();
+
private:
/////////////////////////////////////////////////////////////////////////////
// Private members
rocksdb::Options options_;
private:
+ friend class RocksDatabasePlugin;
+ friend class Query;
+ friend class EventSubscriberPlugin;
+
/////////////////////////////////////////////////////////////////////////////
// Unit tests which can access private members
/////////////////////////////////////////////////////////////////////////////
friend class DBHandleTests;
+ FRIEND_TEST(DBHandleTests, test_get);
+ FRIEND_TEST(DBHandleTests, test_put);
+ FRIEND_TEST(DBHandleTests, test_delete);
+ FRIEND_TEST(DBHandleTests, test_scan);
+ friend class QueryTests;
+ FRIEND_TEST(QueryTests, test_get_query_results);
+ FRIEND_TEST(QueryTests, test_is_query_name_in_database);
+ FRIEND_TEST(QueryTests, test_get_stored_query_names);
friend class EventsTests;
friend class EventsDatabaseTests;
- friend class QueryTests;
};
}
#include <algorithm>
-#include <osquery/database/query.h>
+#include "osquery/database/query.h"
namespace osquery {
#include <vector>
#include <osquery/status.h>
-#include <osquery/database/db_handle.h>
-#include <osquery/database/results.h>
+#include <osquery/database.h>
+
+#include "osquery/database/db_handle.h"
namespace osquery {
#include <gtest/gtest.h>
-#include <osquery/database/db_handle.h>
#include <osquery/logger.h>
#include <osquery/tables.h>
+#include "osquery/database/db_handle.h"
+
const std::string kTestingDBHandlePath = "/tmp/rocksdb-osquery-dbhandletests";
namespace osquery {
#include <gtest/gtest.h>
-#include <osquery/database/query.h>
-
#include "osquery/core/test_util.h"
+#include "osquery/database/query.h"
const std::string kTestingQueryDBPath = "/tmp/rocksdb-osquery-querytests";
#include <gtest/gtest.h>
-#include <osquery/database/results.h>
+#include <osquery/database.h>
#include <osquery/logger.h>
#include "osquery/core/test_util.h"
#include <string>
-#include <osquery/database/results.h>
+#include <osquery/database.h>
#include <osquery/flags.h>
namespace osquery {
#include <boost/algorithm/string/predicate.hpp>
-#include <osquery/database/results.h>
+#include <osquery/database.h>
#include <osquery/filesystem.h>
#include <osquery/flags.h>
SHELL_FLAG(string, A, "", "Select all from a table");
}
+/*
+** Text of a help message
+*/
+static char zHelp[] =
+ "Welcome to the osquery shell. Please explore your OS!\n"
+ "You are connected to a transient 'in-memory' virtual database.\n"
+ "\n"
+ ".all [TABLE] Select all from a table\n"
+ ".bail ON|OFF Stop after hitting an error; default OFF\n"
+ ".echo ON|OFF Turn command echo on or off\n"
+ ".exit Exit this program\n"
+ ".header(s) ON|OFF Turn display of headers on or off\n"
+ ".help Show this message\n"
+ ".mode MODE Set output mode where MODE is one of:\n"
+ " csv Comma-separated values\n"
+ " column Left-aligned columns. (See .width)\n"
+ " line One value per line\n"
+ " list Values delimited by .separator string\n"
+ " pretty Pretty printed SQL results\n"
+ ".nullvalue STR Use STRING in place of NULL values\n"
+ ".print STR... Print literal STRING\n"
+ ".quit Exit this program\n"
+ ".schema [TABLE] Show the CREATE statements\n"
+ ".separator STR Change separator used by output mode and .import\n"
+ ".show Show the current values for various settings\n"
+ ".tables [TABLE] List names of tables\n"
+ ".trace FILE|off Output each SQL statement as it is run\n"
+ ".width [NUM1]+ Set column widths for \"column\" mode\n";
+
+static char zTimerHelp[] =
+ ".timer ON|OFF Turn the CPU timer measurement on or off\n";
+
+/*
+** These are the allowed modes.
+*/
+#define MODE_Line 0 /* One column per line. Blank line between records */
+#define MODE_Column 1 /* One record per line in neat columns */
+#define MODE_List 2 /* One record per line with a separator */
+#define MODE_Semi 3 /* Same as MODE_List but append ";" to each line */
+#define MODE_Csv 4 /* Quote strings, numbers are plain */
+#define MODE_Pretty 5 /* Pretty print the SQL results */
+
+static const char *modeDescr[] = {
+ "line", "column", "list", "semi", "csv", "pretty",
+};
+
/* Make sure isatty() has a prototype.
*/
extern int isatty(int);
};
/*
-** These are the allowed modes.
-*/
-#define MODE_Line 0 /* One column per line. Blank line between records */
-#define MODE_Column 1 /* One record per line in neat columns */
-#define MODE_List 2 /* One record per line with a separator */
-#define MODE_Semi 3 /* Same as MODE_List but append ";" to each line */
-#define MODE_Csv 7 /* Quote strings, numbers are plain */
-#define MODE_Pretty 9 /* Pretty print the SQL results */
-
-static const char *modeDescr[] = {
- "line",
- "column",
- "list",
- "semi",
- "csv",
- "pretty",
-};
-
-/*
** Number of elements in an array
*/
#define ArraySize(X) (int)(sizeof(X) / sizeof(X[0]))
}
/*
-** This is the callback routine that the SQLite library
-** invokes for each row of a query result.
-*/
-static int callback(void *pArg, int nArg, char **azArg, char **azCol) {
- /* since we don't have type info, call the shell_callback with a NULL value */
- return shell_callback(pArg, nArg, azArg, azCol, NULL);
-}
-
-/*
** Set the destination table field of the callback_data structure to
** the name of the table given. Escape any quote characters in the
** table name.
return rc;
}
-
-/*
-** Text of a help message
-*/
-static char zHelp[] =
- "Welcome to the osquery shell. Please explore your OS!\n"
- "You are connected to a transient 'in-memory' virtual database.\n"
- "\n"
- ".all [TABLE] Select all from a table\n"
- ".bail ON|OFF Stop after hitting an error; default OFF\n"
- ".echo ON|OFF Turn command echo on or off\n"
- ".exit Exit this program\n"
- ".header(s) ON|OFF Turn display of headers on or off\n"
- ".help Show this message\n"
- ".indices [TABLE] Show names of all indices\n"
- ".mode MODE Set output mode where MODE is one of:\n"
- " csv Comma-separated values\n"
- " column Left-aligned columns. (See .width)\n"
- " line One value per line\n"
- " list Values delimited by .separator string\n"
- " pretty Pretty printed SQL results\n"
- ".nullvalue STR Use STRING in place of NULL values\n"
- ".print STR... Print literal STRING\n"
- ".quit Exit this program\n"
- ".schema [TABLE] Show the CREATE statements\n"
- ".separator STR Change separator used by output mode and .import\n"
- ".show Show the current values for various settings\n"
- ".tables [TABLE] List names of tables\n"
- ".trace FILE|off Output each SQL statement as it is run\n"
- ".width [NUM1]+ Set column widths for \"column\" mode\n";
-
-static char zTimerHelp[] =
- ".timer ON|OFF Turn the CPU timer measurement on or off\n";
-
/* Forward reference */
static int process_input(struct callback_data *p, FILE *in);
return f;
}
+inline void meta_tables(int nArg, char **azArg) {
+ auto tables = osquery::Registry::names("table");
+ std::sort(tables.begin(), tables.end());
+ for (const auto &table_name : tables) {
+ if (nArg == 1 || table_name.find(azArg[1]) == 0) {
+ printf(" => %s\n", table_name.c_str());
+ }
+ }
+}
+
+inline void meta_schema(int nArg, char **azArg) {
+ for (const auto &table_name : osquery::Registry::names("table")) {
+ if (nArg > 1 && table_name.find(azArg[1]) != 0) {
+ continue;
+ }
+
+ osquery::PluginRequest request = {{"action", "columns"}};
+ osquery::PluginResponse response;
+
+ osquery::Registry::call("table", table_name, request, response);
+ std::vector<std::string> columns;
+ for (const auto &column : response) {
+ columns.push_back(column.at("name") + " " + column.at("type"));
+ }
+
+ printf("CREATE TABLE %s(%s);\n",
+ table_name.c_str(),
+ osquery::join(columns, ", ").c_str());
+ }
+}
+
/*
** If an input line begins with "." then invoke this routine to
** process that line.
if (HAS_TIMER) {
fprintf(stderr, "%s", zTimerHelp);
}
- } else if (c == 'i' && strncmp(azArg[0], "indices", n) == 0 && nArg < 3) {
- struct callback_data data;
- char *zErrMsg = 0;
- memcpy(&data, p, sizeof(data));
- data.showHeader = 0;
- data.mode = MODE_List;
- if (nArg == 1) {
- rc = sqlite3_exec(db,
- "SELECT name FROM sqlite_master "
- "WHERE type='index' AND name NOT LIKE 'sqlite_%' "
- "UNION ALL "
- "SELECT name FROM sqlite_temp_master "
- "WHERE type='index' "
- "ORDER BY 1",
- callback,
- &data,
- &zErrMsg);
- } else {
- zShellStatic = azArg[1];
- rc = sqlite3_exec(db,
- "SELECT name FROM sqlite_master "
- "WHERE type='index' AND tbl_name LIKE shellstatic() "
- "UNION ALL "
- "SELECT name FROM sqlite_temp_master "
- "WHERE type='index' AND tbl_name LIKE shellstatic() "
- "ORDER BY 1",
- callback,
- &data,
- &zErrMsg);
- zShellStatic = 0;
- }
- if (zErrMsg) {
- fprintf(stderr, "Error: %s\n", zErrMsg);
- sqlite3_free(zErrMsg);
- rc = 1;
- } else if (rc != SQLITE_OK) {
- fprintf(stderr, "Error: querying sqlite_master and sqlite_temp_master\n");
- rc = 1;
- }
} else if (c == 'l' && strncmp(azArg[0], "log", n) == 0 && nArg >= 2) {
const char *zFile = azArg[1];
output_file_close(p->pLog);
} else if (c == 'q' && strncmp(azArg[0], "quit", n) == 0 && nArg == 1) {
rc = 2;
} else if (c == 's' && strncmp(azArg[0], "schema", n) == 0 && nArg < 3) {
- struct callback_data data;
- char *zErrMsg = 0;
- memcpy(&data, p, sizeof(data));
- data.showHeader = 0;
- data.mode = MODE_Semi;
- if (nArg > 1) {
- int i;
- for (i = 0; azArg[1][i]; i++)
- azArg[1][i] = ToLower(azArg[1][i]);
- if (strcmp(azArg[1], "sqlite_master") == 0) {
- char *new_argv[2], *new_colv[2];
- new_argv[0] = (char*)"CREATE TABLE sqlite_master (\n"
- " type text,\n"
- " name text,\n"
- " tbl_name text,\n"
- " rootpage integer,\n"
- " sql text\n"
- ")";
- new_argv[1] = 0;
- new_colv[0] = (char *)"sql";
- new_colv[1] = 0;
- callback(&data, 1, new_argv, new_colv);
- rc = SQLITE_OK;
- } else if (strcmp(azArg[1], "sqlite_temp_master") == 0) {
- char *new_argv[2], *new_colv[2];
- new_argv[0] = (char*)"CREATE TEMP TABLE sqlite_temp_master (\n"
- " type text,\n"
- " name text,\n"
- " tbl_name text,\n"
- " rootpage integer,\n"
- " sql text\n"
- ")";
- new_argv[1] = 0;
- new_colv[0] = (char *)"sql";
- new_colv[1] = 0;
- callback(&data, 1, new_argv, new_colv);
- rc = SQLITE_OK;
- } else {
- zShellStatic = azArg[1];
- rc = sqlite3_exec(db,
- "SELECT sql FROM "
- " (SELECT sql sql, type type, tbl_name tbl_name, "
- "name name, rowid x"
- " FROM sqlite_master UNION ALL"
- " SELECT sql, type, tbl_name, name, rowid FROM "
- "sqlite_temp_master) "
- "WHERE lower(tbl_name) LIKE shellstatic()"
- " AND type!='meta' AND sql NOTNULL "
- "ORDER BY rowid",
- callback,
- &data,
- &zErrMsg);
- zShellStatic = 0;
- }
- } else {
- rc = sqlite3_exec(
- db,
- "SELECT sql FROM "
- " (SELECT sql sql, type type, tbl_name tbl_name, name name, rowid x"
- " FROM sqlite_master UNION ALL"
- " SELECT sql, type, tbl_name, name, rowid FROM sqlite_temp_master) "
- "WHERE type!='meta' AND sql NOTNULL AND name NOT LIKE 'sqlite_%'"
- "ORDER BY rowid",
- callback,
- &data,
- &zErrMsg);
- }
- if (zErrMsg) {
- fprintf(stderr, "Error: %s\n", zErrMsg);
- sqlite3_free(zErrMsg);
- rc = 1;
- } else if (rc != SQLITE_OK) {
- fprintf(stderr, "Error: querying schema information\n");
- rc = 1;
- } else {
- rc = 0;
- }
+ meta_schema(nArg, azArg);
} else if (c == 's' && strncmp(azArg[0], "separator", n) == 0 && nArg == 2) {
sqlite3_snprintf(sizeof(p->separator),
p->separator,
fprintf(p->out, "\n");
} else if (c == 't' && n > 1 && strncmp(azArg[0], "tables", n) == 0 &&
nArg < 3) {
- sqlite3_stmt *pStmt;
- char **azResult;
- int nRow, nAlloc;
- char *zSql = 0;
- int ii;
- rc = sqlite3_prepare_v2(db, "PRAGMA database_list", -1, &pStmt, 0);
- if (rc)
- return rc;
- zSql = sqlite3_mprintf(
- "SELECT name FROM sqlite_master"
- " WHERE type IN ('table','view')"
- " AND name NOT LIKE 'sqlite_%%'"
- " AND name LIKE ?1");
- while (sqlite3_step(pStmt) == SQLITE_ROW) {
- const char *zDbName = (const char *)sqlite3_column_text(pStmt, 1);
- if (zDbName == 0 || strcmp(zDbName, "main") == 0)
- continue;
- if (strcmp(zDbName, "temp") == 0) {
- zSql = sqlite3_mprintf(
- "%z UNION ALL "
- "SELECT 'temp.' || name FROM sqlite_temp_master"
- " WHERE type IN ('table','view')"
- " AND name NOT LIKE 'sqlite_%%'"
- " AND name LIKE ?1",
- zSql);
- } else {
- zSql = sqlite3_mprintf(
- "%z UNION ALL "
- "SELECT '%q.' || name FROM \"%w\".sqlite_master"
- " WHERE type IN ('table','view')"
- " AND name NOT LIKE 'sqlite_%%'"
- " AND name LIKE ?1",
- zSql,
- zDbName,
- zDbName);
- }
- }
- sqlite3_finalize(pStmt);
- zSql = sqlite3_mprintf("%z ORDER BY 1", zSql);
- rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
- sqlite3_free(zSql);
- if (rc)
- return rc;
- nRow = nAlloc = 0;
- azResult = 0;
- if (nArg > 1) {
- sqlite3_bind_text(pStmt, 1, azArg[1], -1, SQLITE_TRANSIENT);
- } else {
- sqlite3_bind_text(pStmt, 1, "%", -1, SQLITE_STATIC);
- }
- while (sqlite3_step(pStmt) == SQLITE_ROW) {
- if (nRow >= nAlloc) {
- char **azNew;
- int n = nAlloc * 2 + 10;
- azNew = (char **)sqlite3_realloc(azResult, sizeof(azResult[0]) * n);
- if (azNew == 0) {
- fprintf(stderr, "Error: out of memory\n");
- break;
- }
- nAlloc = n;
- azResult = azNew;
- }
- azResult[nRow] = sqlite3_mprintf("%s", sqlite3_column_text(pStmt, 0));
- if (azResult[nRow])
- nRow++;
- }
- sqlite3_finalize(pStmt);
- if (nRow > 0) {
- int len, maxlen = 0;
- int i, j;
- int nPrintCol, nPrintRow;
- for (i = 0; i < nRow; i++) {
- len = strlen30(azResult[i]);
- if (len > maxlen)
- maxlen = len;
- }
- nPrintCol = 80 / (maxlen + 2);
- if (nPrintCol < 1)
- nPrintCol = 1;
- nPrintRow = (nRow + nPrintCol - 1) / nPrintCol;
- std::vector<std::string> tables;
- for (i = 0; i < nPrintRow; i++) {
- for (j = i; j < nRow; j += nPrintRow) {
- std::string tablename = std::string(azResult[j] ? azResult[j] : "");
- if (boost::starts_with(tablename, "temp.")) {
- tablename.erase(0, 5);
- }
- tables.push_back(tablename);
- }
- }
- std::sort(tables.begin(), tables.end());
- for (const auto &table : tables) {
- std::cout << " => " << table << "\n";
- }
- }
- for (ii = 0; ii < nRow; ii++)
- sqlite3_free(azResult[ii]);
- sqlite3_free(azResult);
+ meta_tables(nArg, azArg);
} else if (c == 't' && n > 4 && strncmp(azArg[0], "timeout", n) == 0 &&
nArg == 2) {
sqlite3_busy_timeout(db, (int)integerValue(azArg[1]));
output_file_close(p->traceOut);
p->traceOut = output_file_open(azArg[1]);
} else if (c == 'v' && strncmp(azArg[0], "version", n) == 0) {
- fprintf(p->out, "osquery %s\n", TEXT(OSQUERY_VERSION).c_str());
- fprintf(p->out,
- "SQLite %s %s\n" /*extra-version-info*/,
- sqlite3_libversion(),
- sqlite3_sourceid());
+ fprintf(p->out, "osquery %s\n", osquery::kVersion.c_str());
+ fprintf(p->out, "using SQLite %s\n", sqlite3_libversion());
} else if (c == 'w' && strncmp(azArg[0], "width", n) == 0 && nArg > 1) {
int j;
assert(nArg <= ArraySize(azArg));
Dispatcher::Dispatcher() {
thread_manager_ = InternalThreadManager::newSimpleThreadManager(
(size_t)FLAGS_worker_threads, 0);
- auto threadFactory = ThriftThreadFactory(new PosixThreadFactory());
- thread_manager_->threadFactory(threadFactory);
+ auto thread_factory = ThriftThreadFactory(new PosixThreadFactory());
+ thread_manager_->threadFactory(thread_factory);
thread_manager_->start();
}
+Dispatcher::~Dispatcher() { join(); }
+
Status Dispatcher::add(ThriftInternalRunnableRef task) {
+ auto& self = instance();
try {
+ if (self.state() != InternalThreadManager::STARTED) {
+ self.thread_manager_->start();
+ }
instance().thread_manager_->add(task, 0, 0);
} catch (std::exception& e) {
return Status(1, e.what());
return instance().thread_manager_;
}
-void Dispatcher::join() { instance().thread_manager_->join(); }
+void Dispatcher::join() {
+ if (instance().thread_manager_ != nullptr) {
+ instance().thread_manager_->stop();
+ instance().thread_manager_->join();
+ }
+}
void Dispatcher::joinServices() {
for (auto& thread : instance().service_threads_) {
}
}
-void Dispatcher::removeServices() {
+void Dispatcher::stopServices() {
auto& self = instance();
for (const auto& service : self.services_) {
while (true) {
- // Wait for each thread's entry point (enter) meaning the thread context
+ // Wait for each thread's entry point (start) meaning the thread context
// was allocated and (run) was called by boost::thread started.
if (service->hasRun()) {
break;
// the boost::thread is created.
::usleep(200);
}
+ service->stop();
}
for (auto& thread : self.service_threads_) {
thread->interrupt();
}
-
- // Deallocate services.
- self.service_threads_.clear();
- self.services_.clear();
}
InternalThreadManager::STATE Dispatcher::state() const {
namespace osquery {
+using namespace apache::thrift::concurrency;
+
+/// Create easier to reference typedefs for Thrift layer implementations.
+#define SHARED_PTR_IMPL OSQUERY_THRIFT_POINTER::shared_ptr
typedef apache::thrift::concurrency::ThreadManager InternalThreadManager;
-typedef OSQUERY_THRIFT_POINTER::shared_ptr<InternalThreadManager> InternalThreadManagerRef;
+typedef SHARED_PTR_IMPL<InternalThreadManager> InternalThreadManagerRef;
/**
* @brief Default number of threads in the thread pool.
*/
extern const int kDefaultThreadPoolSize;
-class InternalRunnable : public apache::thrift::concurrency::Runnable {
+class InternalRunnable : public Runnable {
public:
virtual ~InternalRunnable() {}
InternalRunnable() : run_(false) {}
/// The boost::thread entrypoint.
void run() {
run_ = true;
- enter();
+ start();
}
/// Check if the thread's entrypoint (run) executed, meaning thread context
/// was allocated.
bool hasRun() { return run_; }
+ /// The runnable may also tear down services before the thread context
+ /// is removed.
+ virtual void stop() {}
+
protected:
/// Require the runnable thread define an entrypoint.
- virtual void enter() = 0;
+ virtual void start() = 0;
private:
bool run_;
typedef std::shared_ptr<InternalRunnable> InternalRunnableRef;
typedef std::shared_ptr<boost::thread> InternalThreadRef;
/// A thrift internal runnable with variable pointer wrapping.
-typedef OSQUERY_THRIFT_POINTER::shared_ptr<InternalRunnable> ThriftInternalRunnableRef;
-typedef OSQUERY_THRIFT_POINTER::shared_ptr<
- apache::thrift::concurrency::PosixThreadFactory> ThriftThreadFactory;
+typedef SHARED_PTR_IMPL<InternalRunnable> ThriftInternalRunnableRef;
+typedef SHARED_PTR_IMPL<PosixThreadFactory> ThriftThreadFactory;
/**
* @brief Singleton for queueing asynchronous tasks to be executed in parallel
static void joinServices();
/// Destroy and stop all osquery service threads and service objects.
- static void removeServices();
+ static void stopServices();
/**
* @brief Get the current state of the thread manager.
Dispatcher();
Dispatcher(Dispatcher const&);
void operator=(Dispatcher const&);
- virtual ~Dispatcher() {}
+ virtual ~Dispatcher();
private:
/**
* @see getThreadManager
*/
InternalThreadManagerRef thread_manager_;
+
/// The set of shared osquery service threads.
std::vector<InternalThreadRef> service_threads_;
- /// THe set of shared osquery services.
+
+ /// The set of shared osquery services.
std::vector<InternalRunnableRef> services_;
+
+ private:
+ friend class ExtensionsTest;
};
/// Allow a dispatched thread to wait while processing or to prevent thrashing.
#include <osquery/logger.h>
#include <osquery/sql.h>
+#include "osquery/database/query.h"
#include "osquery/dispatcher/scheduler.h"
namespace osquery {
FLAG(bool, enable_monitor, false, "Enable the schedule monitor");
-CLI_FLAG(uint64, schedule_timeout, 0, "Limit the schedule, 0 for no limit")
+FLAG(uint64, schedule_timeout, 0, "Limit the schedule, 0 for no limit")
Status getHostIdentifier(std::string& ident) {
- std::shared_ptr<DBHandle> db;
- try {
- db = DBHandle::getInstance();
- } catch (const std::runtime_error& e) {
- return Status(1, e.what());
- }
-
if (FLAGS_host_identifier != "uuid") {
// use the hostname as the default machine identifier
ident = osquery::getHostname();
return Status(0, "OK");
}
- std::vector<std::string> results;
- auto status = db->Scan(kConfigurations, results);
-
+ // Lookup the host identifier (UUID) previously generated and stored.
+ auto status = getDatabaseValue(kPersistentSettings, "hostIdentifier", ident);
if (!status.ok()) {
+ // The lookup failed, there is a problem accessing the database.
VLOG(1) << "Could not access database; using hostname as host identifier";
ident = osquery::getHostname();
return Status(0, "OK");
}
- if (std::find(results.begin(), results.end(), "hostIdentifier") !=
- results.end()) {
- status = db->Get(kConfigurations, "hostIdentifier", ident);
- if (!status.ok()) {
- VLOG(1) << "Could not access database; using hostname as host identifier";
- ident = osquery::getHostname();
- }
- return status;
+ if (ident.size() == 0) {
+ // There was no uuid stored in the database, generate one and store it.
+ ident = osquery::generateHostUuid();
+ VLOG(1) << "Using uuid " << ident << " as host identifier";
+ return setDatabaseValue(kPersistentSettings, "hostIdentifier", ident);
}
-
- // There was no uuid stored in the database, generate one and store it.
- ident = osquery::generateHostUuid();
- VLOG(1) << "Using uuid " << ident << " as host identifier";
- return db->Put(kConfigurations, "hostIdentifier", ident);
+ return status;
}
inline SQL monitor(const std::string& name, const ScheduledQuery& query) {
// Snapshot the performance and times for the worker before running.
auto pid = std::to_string(getpid());
- auto r0 = SQL::selectAllFrom("processes", "pid", tables::EQUALS, pid);
+ auto r0 = SQL::selectAllFrom("processes", "pid", EQUALS, pid);
auto t0 = time(nullptr);
auto sql = SQL(query.query);
// Snapshot the performance after, and compare.
auto t1 = time(nullptr);
- auto r1 = SQL::selectAllFrom("processes", "pid", tables::EQUALS, pid);
+ auto r1 = SQL::selectAllFrom("processes", "pid", EQUALS, pid);
if (r0.size() > 0 && r1.size() > 0) {
size_t size = 0;
for (const auto& row : sql.rows()) {
VLOG(1) << "Found results for query (" << name << ") for host: " << ident;
item.results = diff_results;
+ if (query.options.count("removed") && !query.options.at("removed")) {
+ item.results.removed.clear();
+ }
+
status = logQueryLogItem(item);
if (!status.ok()) {
LOG(ERROR) << "Error logging the results of query (" << query.query
}
}
-void SchedulerRunner::enter() {
+void SchedulerRunner::start() {
time_t t = std::time(nullptr);
struct tm* local = std::localtime(&t);
unsigned long int i = local->tm_sec;
public:
/// The Dispatcher thread entry point.
- void enter();
+ void start();
protected:
/// The UNIX domain socket path for the ExtensionManager.
public:
int* i;
explicit TestRunnable(int* i) : i(i) {}
- virtual void enter() { ++*i; }
+ virtual void start() { ++*i; }
};
TEST_F(DispatcherTests, test_add_work) {
#include <boost/property_tree/json_parser.hpp>
#include <osquery/core.h>
-#include <osquery/database.h>
#include <osquery/logger.h>
#include "osquery/distributed/distributed.h"
#include <boost/property_tree/ptree.hpp>
-#include <osquery/database/results.h>
#include <osquery/sql.h>
namespace osquery {
#include <boost/lexical_cast.hpp>
#include <osquery/core.h>
-#include <osquery/database.h>
#include <osquery/events.h>
#include <osquery/flags.h>
#include <osquery/logger.h>
#include "osquery/core/conversions.h"
+#include "osquery/database/db_handle.h"
namespace osquery {
+++ /dev/null
-/*
- * Copyright (c) 2014, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
- *
- */
-
-#include <typeinfo>
-
-#include <boost/filesystem/operations.hpp>
-
-#include <gtest/gtest.h>
-
-#include <osquery/database.h>
-#include <osquery/events.h>
-#include <osquery/tables.h>
-
-namespace osquery {
-
-const std::string kTestingEventsDBPath = "/tmp/rocksdb-osquery-testevents";
-
-class EventsTests : public ::testing::Test {
- public:
- void SetUp() {
- // Setup a testing DB instance
- DBHandle::getInstanceAtPath(kTestingEventsDBPath);
- }
-
- void TearDown() { EventFactory::end(); }
-};
-
-// The most basic event publisher uses useless Subscription/Event.
-class BasicEventPublisher
- : public EventPublisher<SubscriptionContext, EventContext> {};
-
-class AnotherBasicEventPublisher
- : public EventPublisher<SubscriptionContext, EventContext> {};
-
-// Create some semi-useless subscription and event structures.
-struct FakeSubscriptionContext : SubscriptionContext {
- int require_this_value;
-};
-struct FakeEventContext : EventContext {
- int required_value;
-};
-
-// Typdef the shared_ptr accessors.
-typedef std::shared_ptr<FakeSubscriptionContext> FakeSubscriptionContextRef;
-typedef std::shared_ptr<FakeEventContext> FakeEventContextRef;
-
-// Now a publisher with a type.
-class FakeEventPublisher
- : public EventPublisher<FakeSubscriptionContext, FakeEventContext> {
- DECLARE_PUBLISHER("FakePublisher");
-};
-
-class AnotherFakeEventPublisher
- : public EventPublisher<FakeSubscriptionContext, FakeEventContext> {
- DECLARE_PUBLISHER("AnotherFakePublisher");
-};
-
-TEST_F(EventsTests, test_event_pub) {
- auto pub = std::make_shared<FakeEventPublisher>();
- EXPECT_EQ(pub->type(), "FakePublisher");
-
- // Test type names.
- auto pub_sub = pub->createSubscriptionContext();
- EXPECT_EQ(typeid(FakeSubscriptionContext), typeid(*pub_sub));
-}
-
-TEST_F(EventsTests, test_register_event_pub) {
- auto basic_pub = std::make_shared<BasicEventPublisher>();
- auto status = EventFactory::registerEventPublisher(basic_pub);
-
- // This class is the SAME, there was no type override.
- auto another_basic_pub = std::make_shared<AnotherBasicEventPublisher>();
- status = EventFactory::registerEventPublisher(another_basic_pub);
- EXPECT_FALSE(status.ok());
-
- // This class is different but also uses different types!
- auto fake_pub = std::make_shared<FakeEventPublisher>();
- status = EventFactory::registerEventPublisher(fake_pub);
- EXPECT_TRUE(status.ok());
-
- // May also register the event_pub instance
- auto another_fake_pub = std::make_shared<AnotherFakeEventPublisher>();
- status = EventFactory::registerEventPublisher(another_fake_pub);
- EXPECT_TRUE(status.ok());
-}
-
-TEST_F(EventsTests, test_event_pub_types) {
- auto pub = std::make_shared<FakeEventPublisher>();
- EXPECT_EQ(pub->type(), "FakePublisher");
-
- EventFactory::registerEventPublisher(pub);
- auto pub2 = EventFactory::getEventPublisher("FakePublisher");
- EXPECT_EQ(pub->type(), pub2->type());
-}
-
-TEST_F(EventsTests, test_create_event_pub) {
- auto pub = std::make_shared<BasicEventPublisher>();
- auto status = EventFactory::registerEventPublisher(pub);
- EXPECT_TRUE(status.ok());
-
- // Make sure only the first event type was recorded.
- EXPECT_EQ(EventFactory::numEventPublishers(), 1);
-}
-
-class UniqueEventPublisher
- : public EventPublisher<FakeSubscriptionContext, FakeEventContext> {
- DECLARE_PUBLISHER("unique");
-};
-
-TEST_F(EventsTests, test_create_using_registry) {
- // The events API uses attachEvents to move registry event publishers and
- // subscribers into the events factory.
- EXPECT_EQ(EventFactory::numEventPublishers(), 0);
- attachEvents();
-
- // Store the number of default event publishers (in core).
- int default_publisher_count = EventFactory::numEventPublishers();
-
- // Now add another registry item, but do not yet attach it.
- auto UniqueEventPublisherRegistryItem =
- Registry::add<UniqueEventPublisher>("event_publisher", "unique");
- EXPECT_EQ(EventFactory::numEventPublishers(), default_publisher_count);
-
- // Now attach and make sure it was added.
- attachEvents();
- EXPECT_EQ(EventFactory::numEventPublishers(), default_publisher_count + 1);
-}
-
-TEST_F(EventsTests, test_create_subscription) {
- auto pub = std::make_shared<BasicEventPublisher>();
- EventFactory::registerEventPublisher(pub);
-
- // Make sure a subscription cannot be added for a non-existent event type.
- // Note: It normally would not make sense to create a blank subscription.
- auto subscription = Subscription::create("FakeSubscriber");
- auto status = EventFactory::addSubscription("FakePublisher", subscription);
- EXPECT_FALSE(status.ok());
-
- // In this case we can still add a blank subscription to an existing event
- // type.
- status = EventFactory::addSubscription("publisher", subscription);
- EXPECT_TRUE(status.ok());
-
- // Make sure the subscription is added.
- EXPECT_EQ(EventFactory::numSubscriptions("publisher"), 1);
-}
-
-TEST_F(EventsTests, test_multiple_subscriptions) {
- Status status;
-
- auto pub = std::make_shared<BasicEventPublisher>();
- EventFactory::registerEventPublisher(pub);
-
- auto subscription = Subscription::create("subscriber");
- status = EventFactory::addSubscription("publisher", subscription);
- status = EventFactory::addSubscription("publisher", subscription);
-
- EXPECT_EQ(EventFactory::numSubscriptions("publisher"), 2);
-}
-
-struct TestSubscriptionContext : public SubscriptionContext {
- int smallest;
-};
-
-class TestEventPublisher
- : public EventPublisher<TestSubscriptionContext, EventContext> {
- DECLARE_PUBLISHER("TestPublisher");
-
- public:
- Status setUp() {
- smallest_ever_ += 1;
- return Status(0, "OK");
- }
-
- void configure() {
- int smallest_subscription = smallest_ever_;
-
- configure_run = true;
- for (const auto& subscription : subscriptions_) {
- auto subscription_context = getSubscriptionContext(subscription->context);
- if (smallest_subscription > subscription_context->smallest) {
- smallest_subscription = subscription_context->smallest;
- }
- }
-
- smallest_ever_ = smallest_subscription;
- }
-
- void tearDown() { smallest_ever_ += 1; }
-
- TestEventPublisher() : EventPublisher() {
- smallest_ever_ = 0;
- configure_run = false;
- }
-
- // Custom methods do not make sense, but for testing it exists.
- int getTestValue() { return smallest_ever_; }
-
- public:
- bool configure_run;
-
- private:
- int smallest_ever_;
-};
-
-TEST_F(EventsTests, test_create_custom_event_pub) {
- auto basic_pub = std::make_shared<BasicEventPublisher>();
- EventFactory::registerEventPublisher(basic_pub);
- auto pub = std::make_shared<TestEventPublisher>();
- auto status = EventFactory::registerEventPublisher(pub);
-
- // These event types have unique event type IDs
- EXPECT_TRUE(status.ok());
- EXPECT_EQ(EventFactory::numEventPublishers(), 2);
-
- // Make sure the setUp function was called.
- EXPECT_EQ(pub->getTestValue(), 1);
-}
-
-TEST_F(EventsTests, test_custom_subscription) {
- // Step 1, register event type
- auto pub = std::make_shared<TestEventPublisher>();
- auto status = EventFactory::registerEventPublisher(pub);
-
- // Step 2, create and configure a subscription context
- auto sc = std::make_shared<TestSubscriptionContext>();
- sc->smallest = -1;
-
- // Step 3, add the subscription to the event type
- status = EventFactory::addSubscription("TestPublisher", "TestSubscriber", sc);
- EXPECT_TRUE(status.ok());
- EXPECT_EQ(pub->numSubscriptions(), 1);
-
- // The event type must run configure for each added subscription.
- EXPECT_TRUE(pub->configure_run);
- EXPECT_EQ(pub->getTestValue(), -1);
-}
-
-TEST_F(EventsTests, test_tear_down) {
- auto pub = std::make_shared<TestEventPublisher>();
- auto status = EventFactory::registerEventPublisher(pub);
-
- // Make sure set up incremented the test value.
- EXPECT_EQ(pub->getTestValue(), 1);
-
- status = EventFactory::deregisterEventPublisher("TestPublisher");
- EXPECT_TRUE(status.ok());
-
- // Make sure tear down inremented the test value.
- EXPECT_EQ(pub->getTestValue(), 2);
-
- // Once more, now deregistering all event types.
- status = EventFactory::registerEventPublisher(pub);
- EXPECT_EQ(pub->getTestValue(), 3);
- EventFactory::end();
- EXPECT_EQ(pub->getTestValue(), 4);
-
- // Make sure the factory state represented.
- EXPECT_EQ(EventFactory::numEventPublishers(), 0);
-}
-
-static int kBellHathTolled = 0;
-
-Status TestTheeCallback(EventContextRef context, const void* user_data) {
- kBellHathTolled += 1;
- return Status(0, "OK");
-}
-
-class FakeEventSubscriber : public EventSubscriber<FakeEventPublisher> {
- public:
- bool bellHathTolled;
- bool contextBellHathTolled;
- bool shouldFireBethHathTolled;
-
- FakeEventSubscriber() {
- setName("FakeSubscriber");
- bellHathTolled = false;
- contextBellHathTolled = false;
- shouldFireBethHathTolled = false;
- }
-
- Status Callback(const EventContextRef& ec, const void* user_data) {
- // We don't care about the subscription or the event contexts.
- bellHathTolled = true;
- return Status(0, "OK");
- }
-
- Status SpecialCallback(const FakeEventContextRef& ec, const void* user_data) {
- // Now we care that the event context is corrected passed.
- if (ec->required_value == 42) {
- contextBellHathTolled = true;
- }
- return Status(0, "OK");
- }
-
- void lateInit() {
- auto sub_ctx = createSubscriptionContext();
- subscribe(&FakeEventSubscriber::Callback, sub_ctx, nullptr);
- }
-
- void laterInit() {
- auto sub_ctx = createSubscriptionContext();
- sub_ctx->require_this_value = 42;
- subscribe(&FakeEventSubscriber::SpecialCallback, sub_ctx, nullptr);
- }
-};
-
-TEST_F(EventsTests, test_event_sub) {
- auto sub = std::make_shared<FakeEventSubscriber>();
- EXPECT_EQ(sub->type(), "FakePublisher");
- EXPECT_EQ(sub->getName(), "FakeSubscriber");
-}
-
-TEST_F(EventsTests, test_event_sub_subscribe) {
- auto pub = std::make_shared<FakeEventPublisher>();
- EventFactory::registerEventPublisher(pub);
-
- auto sub = std::make_shared<FakeEventSubscriber>();
- EventFactory::registerEventSubscriber(sub);
-
- // Don't overload the normal `init` Subscription member.
- sub->lateInit();
- EXPECT_EQ(pub->numSubscriptions(), 1);
-
- auto ec = pub->createEventContext();
- pub->fire(ec, 0);
-
- EXPECT_TRUE(sub->bellHathTolled);
-}
-
-TEST_F(EventsTests, test_event_sub_context) {
- auto pub = std::make_shared<FakeEventPublisher>();
- EventFactory::registerEventPublisher(pub);
-
- auto sub = std::make_shared<FakeEventSubscriber>();
- EventFactory::registerEventSubscriber(sub);
-
- sub->laterInit();
- auto ec = pub->createEventContext();
- ec->required_value = 42;
- pub->fire(ec, 0);
-
- EXPECT_TRUE(sub->contextBellHathTolled);
-}
-
-TEST_F(EventsTests, test_fire_event) {
- Status status;
-
- auto pub = std::make_shared<BasicEventPublisher>();
- status = EventFactory::registerEventPublisher(pub);
-
- auto sub = std::make_shared<FakeEventSubscriber>();
- auto subscription = Subscription::create("FakeSubscriber");
- subscription->callback = TestTheeCallback;
- status = EventFactory::addSubscription("publisher", subscription);
-
- // The event context creation would normally happen in the event type.
- auto ec = pub->createEventContext();
- pub->fire(ec, 0);
- EXPECT_EQ(kBellHathTolled, 1);
-
- auto second_subscription = Subscription::create("FakeSubscriber");
- status = EventFactory::addSubscription("publisher", second_subscription);
-
- // Now there are two subscriptions (one sans callback).
- pub->fire(ec, 0);
- EXPECT_EQ(kBellHathTolled, 2);
-
- // Now both subscriptions have callbacks.
- second_subscription->callback = TestTheeCallback;
- pub->fire(ec, 0);
- EXPECT_EQ(kBellHathTolled, 4);
-}
-}
#include <gtest/gtest.h>
-#include <osquery/database.h>
#include <osquery/events.h>
#include <osquery/tables.h>
+#include "osquery/database/db_handle.h"
+
namespace osquery {
const std::string kTestingEventsDBPath = "/tmp/rocksdb-osquery-testevents";
#include <gtest/gtest.h>
-#include <osquery/database.h>
#include <osquery/events.h>
#include <osquery/tables.h>
+#include "osquery/database/db_handle.h"
+
namespace osquery {
const std::string kTestingEventsDBPath = "/tmp/rocksdb-osquery-testevents";
}
};
-class ExampleTable : public tables::TablePlugin {
+class ExampleTable : public TablePlugin {
private:
- tables::TableColumns columns() const {
+ TableColumns columns() const {
return {{"example_text", "TEXT"}, {"example_integer", "INTEGER"}};
}
- QueryData generate(tables::QueryContext& request) {
+ QueryData generate(QueryContext& request) {
QueryData results;
Row r;
using namespace osquery;
-class ExampleTable : public tables::TablePlugin {
+class ExampleTable : public TablePlugin {
private:
- tables::TableColumns columns() const {
+ TableColumns columns() const {
return {{"example_text", "TEXT"}, {"example_integer", "INTEGER"}};
}
- QueryData generate(tables::QueryContext& request) {
+ QueryData generate(QueryContext& request) {
QueryData results;
Row r;
EXTENSION_FLAG_ALIAS(timeout, extensions_timeout);
EXTENSION_FLAG_ALIAS(interval, extensions_interval);
-void ExtensionWatcher::enter() {
+void ExtensionWatcher::start() {
// Watch the manager, if the socket is removed then the extension will die.
while (true) {
watch();
void loadModules() {
auto status = loadModules(FLAGS_modules_autoload);
if (!status.ok()) {
- VLOG(1) << "Modules autoload contains invalid paths: "
- << FLAGS_modules_autoload;
+ VLOG(1) << "Could not autoload modules: " << status.what();
}
}
}
return Status(0, "OK");
}
- return Status(1, "Cannot read extensions autoload file");
+ return Status(1, "Failed reading: " + loadfile);
}
Status loadModuleFile(const std::string& path) {
// Return an aggregate failure if any load fails (invalid search path).
return status;
}
- return Status(1, "Cannot read modules autoload file");
+ return Status(1, "Failed reading: " + loadfile);
}
Status extensionPathActive(const std::string& path, bool use_timeout = false) {
Status getQueryColumnsExternal(const std::string& manager_path,
const std::string& query,
- tables::TableColumns& columns) {
+ TableColumns& columns) {
// Make sure the extension path exists, and is writable.
auto status = extensionPathActive(manager_path);
if (!status.ok()) {
}
Status getQueryColumnsExternal(const std::string& query,
- tables::TableColumns& columns) {
+ TableColumns& columns) {
return getQueryColumnsExternal(FLAGS_extensions_socket, query, columns);
}
}
// Add the extension manager to the list called (core).
- extensions[0] = {"core", OSQUERY_VERSION, "0.0.0", OSQUERY_SDK_VERSION};
+ extensions[0] = {"core", kVersion, "0.0.0", kSDKVersion};
// Convert from Thrift-internal list type to RouteUUID/ExtenionInfo type.
for (const auto& ext : ext_list) {
void ExtensionManagerHandler::getQueryColumns(ExtensionResponse& _return,
const std::string& sql) {
- tables::TableColumns columns;
+ TableColumns columns;
auto status = osquery::getQueryColumns(sql, columns);
_return.status.code = status.getCode();
_return.status.message = status.getMessage();
}
}
-ExtensionRunner::~ExtensionRunner() { remove(path_); }
+ExtensionRunnerCore::~ExtensionRunnerCore() { remove(path_); }
-void ExtensionRunner::enter() {
- // Set the socket information for the extension manager.
- auto socket_path = path_;
+void ExtensionRunnerCore::stop() {
+ if (server_ != nullptr) {
+ server_->stop();
+ }
+}
- // Create the thrift instances.
- OSQUERY_THRIFT_POINTER::shared_ptr<ExtensionHandler> handler(
- new ExtensionHandler(uuid_));
- OSQUERY_THRIFT_POINTER::shared_ptr<TProcessor> processor(
- new ExtensionProcessor(handler));
- OSQUERY_THRIFT_POINTER::shared_ptr<TServerTransport> serverTransport(
- new TServerSocket(socket_path));
- OSQUERY_THRIFT_POINTER::shared_ptr<TTransportFactory> transportFactory(
- new TBufferedTransportFactory());
- OSQUERY_THRIFT_POINTER::shared_ptr<TProtocolFactory> protocolFactory(
- new TBinaryProtocolFactory());
-
- OSQUERY_THRIFT_POINTER::shared_ptr<ThreadManager> threadManager =
- ThreadManager::newSimpleThreadManager(FLAGS_worker_threads);
- OSQUERY_THRIFT_POINTER::shared_ptr<PosixThreadFactory> threadFactory =
- OSQUERY_THRIFT_POINTER::shared_ptr<PosixThreadFactory>(
- new PosixThreadFactory());
- threadManager->threadFactory(threadFactory);
- threadManager->start();
+void ExtensionRunnerCore::startServer(TProcessorRef processor) {
+ auto transport = TServerTransportRef(new TServerSocket(path_));
+ auto transport_fac = TTransportFactoryRef(new TBufferedTransportFactory());
+ auto protocol_fac = TProtocolFactoryRef(new TBinaryProtocolFactory());
+
+ auto thread_manager_ =
+ ThreadManager::newSimpleThreadManager((size_t)FLAGS_worker_threads, 0);
+ auto thread_fac = ThriftThreadFactory(new PosixThreadFactory());
+ thread_manager_->threadFactory(thread_fac);
+ thread_manager_->start();
// Start the Thrift server's run loop.
+ server_ = TThreadPoolServerRef(new TThreadPoolServer(
+ processor, transport, transport_fac, protocol_fac, thread_manager_));
+ server_->serve();
+}
+
+void ExtensionRunner::start() {
+ // Create the thrift instances.
+ auto handler = ExtensionHandlerRef(new ExtensionHandler(uuid_));
+ auto processor = TProcessorRef(new ExtensionProcessor(handler));
+
+ VLOG(1) << "Extension service starting: " << path_;
try {
- VLOG(1) << "Extension service starting: " << socket_path;
- TThreadPoolServer server(processor,
- serverTransport,
- transportFactory,
- protocolFactory,
- threadManager);
- server.serve();
+ startServer(processor);
} catch (const std::exception& e) {
- LOG(ERROR) << "Cannot start extension handler: " << socket_path << " ("
+ LOG(ERROR) << "Cannot start extension handler: " << path_ << " ("
<< e.what() << ")";
- return;
}
}
-ExtensionManagerRunner::~ExtensionManagerRunner() {
- // Remove the socket path.
- remove(path_);
-}
-
-void ExtensionManagerRunner::enter() {
- // Set the socket information for the extension manager.
- auto socket_path = path_;
-
+void ExtensionManagerRunner::start() {
// Create the thrift instances.
- OSQUERY_THRIFT_POINTER::shared_ptr<ExtensionManagerHandler> handler(
- new ExtensionManagerHandler());
- OSQUERY_THRIFT_POINTER::shared_ptr<TProcessor> processor(
- new ExtensionManagerProcessor(handler));
- OSQUERY_THRIFT_POINTER::shared_ptr<TServerTransport> serverTransport(
- new TServerSocket(socket_path));
- OSQUERY_THRIFT_POINTER::shared_ptr<TTransportFactory> transportFactory(
- new TBufferedTransportFactory());
- OSQUERY_THRIFT_POINTER::shared_ptr<TProtocolFactory> protocolFactory(
- new TBinaryProtocolFactory());
-
- OSQUERY_THRIFT_POINTER::shared_ptr<ThreadManager> threadManager =
- ThreadManager::newSimpleThreadManager(FLAGS_worker_threads);
- OSQUERY_THRIFT_POINTER::shared_ptr<PosixThreadFactory> threadFactory =
- OSQUERY_THRIFT_POINTER::shared_ptr<PosixThreadFactory>(
- new PosixThreadFactory());
- threadManager->threadFactory(threadFactory);
- threadManager->start();
+ auto handler = ExtensionManagerHandlerRef(new ExtensionManagerHandler());
+ auto processor = TProcessorRef(new ExtensionManagerProcessor(handler));
- // Start the Thrift server's run loop.
+ VLOG(1) << "Extension manager service starting: " << path_;
try {
- VLOG(1) << "Extension manager service starting: " << socket_path;
- TThreadPoolServer server(processor,
- serverTransport,
- transportFactory,
- protocolFactory,
- threadManager);
- server.serve();
+ startServer(processor);
} catch (const std::exception& e) {
LOG(WARNING) << "Extensions disabled: cannot start extension manager ("
- << socket_path << ") (" << e.what() << ")";
+ << path_ << ") (" << e.what() << ")";
}
}
}
#endif
namespace osquery {
-namespace extensions {
using namespace apache::thrift;
using namespace apache::thrift::protocol;
using namespace apache::thrift::server;
using namespace apache::thrift::concurrency;
+/// Create easier to reference typedefs for Thrift layer implementations.
+#define SHARED_PTR_IMPL OSQUERY_THRIFT_POINTER::shared_ptr
+typedef SHARED_PTR_IMPL<TSocket> TSocketRef;
+typedef SHARED_PTR_IMPL<TTransport> TTransportRef;
+typedef SHARED_PTR_IMPL<TProtocol> TProtocolRef;
+
+typedef SHARED_PTR_IMPL<TProcessor> TProcessorRef;
+typedef SHARED_PTR_IMPL<TServerTransport> TServerTransportRef;
+typedef SHARED_PTR_IMPL<TTransportFactory> TTransportFactoryRef;
+typedef SHARED_PTR_IMPL<TProtocolFactory> TProtocolFactoryRef;
+typedef SHARED_PTR_IMPL<PosixThreadFactory> PosixThreadFactoryRef;
+typedef std::shared_ptr<TThreadPoolServer> TThreadPoolServerRef;
+
+namespace extensions {
+
/**
* @brief The Thrift API server used by an osquery Extension process.
*
const ExtensionPluginRequest& request);
protected:
+ /// Transient UUID assigned to the extension after registering.
RouteUUID uuid_;
};
private:
/// Check if an extension exists by the name it registered.
bool exists(const std::string& name);
+
/// Introspect into the registry, checking if any extension routes have been
/// removed.
void refresh();
/// Maintain a map of extension UUID to metadata for tracking deregistration.
InternalExtensionList extensions_;
};
+
+typedef SHARED_PTR_IMPL<ExtensionHandler> ExtensionHandlerRef;
+typedef SHARED_PTR_IMPL<ExtensionManagerHandler> ExtensionManagerHandlerRef;
}
/// A Dispatcher service thread that watches an ExtensionManagerHandler.
public:
/// The Dispatcher thread entry point.
- void enter();
+ void start();
+
/// Perform health checks.
virtual void watch();
protected:
/// The UNIX domain socket path for the ExtensionManager.
std::string path_;
+
/// The internal in milliseconds to ping the ExtensionManager.
size_t interval_;
+
/// If the ExtensionManager socket is closed, should the extension exit.
bool fatal_;
};
ExtensionManagerWatcher(const std::string& path, size_t interval)
: ExtensionWatcher(path, interval, false) {}
+ /// Start a specialized health check for an ExtensionManager.
void watch();
};
-/// A Dispatcher service thread that starts ExtensionHandler.
-class ExtensionRunner : public InternalRunnable {
+class ExtensionRunnerCore : public InternalRunnable {
+ public:
+ virtual ~ExtensionRunnerCore();
+ ExtensionRunnerCore(const std::string& path)
+ : path_(path), server_(nullptr) {}
+
+ public:
+ /// Given a handler transport and protocol start a thrift threaded server.
+ void startServer(TProcessorRef processor);
+
+ // The Dispatcher thread service stop point.
+ void stop();
+
+ protected:
+ /// The UNIX domain socket used for requests from the ExtensionManager.
+ std::string path_;
+
+ /// Server instance, will be stopped if thread service is removed.
+ TThreadPoolServerRef server_;
+};
+
+/**
+ * @brief A Dispatcher service thread that starts ExtensionHandler.
+ *
+ * This runner will start a Thrift Extension server, call serve, and wait
+ * until the extension exits or the ExtensionManager (core) terminates or
+ * deregisters the extension.
+ *
+ */
+class ExtensionRunner : public ExtensionRunnerCore {
public:
- virtual ~ExtensionRunner();
ExtensionRunner(const std::string& manager_path, RouteUUID uuid)
- : uuid_(uuid) {
+ : ExtensionRunnerCore(""), uuid_(uuid) {
path_ = getExtensionSocket(uuid, manager_path);
}
public:
- /// The Dispatcher thread entry point.
- void enter();
+ void start();
/// Access the UUID provided by the ExtensionManager.
RouteUUID getUUID() { return uuid_; }
private:
- /// The UNIX domain socket used for requests from the ExtensionManager.
- std::string path_;
/// The unique and transient Extension UUID assigned by the ExtensionManager.
RouteUUID uuid_;
};
-/// A Dispatcher service thread that starts ExtensionManagerHandler.
-class ExtensionManagerRunner : public InternalRunnable {
+/**
+ * @brief A Dispatcher service thread that starts ExtensionManagerHandler.
+ *
+ * This runner will start a Thrift ExtensionManager server, call serve, and wait
+ * for extensions to register, or for thrift API calls.
+ *
+ */
+class ExtensionManagerRunner : public ExtensionRunnerCore {
public:
- virtual ~ExtensionManagerRunner();
explicit ExtensionManagerRunner(const std::string& manager_path)
- : path_(manager_path) {}
+ : ExtensionRunnerCore(manager_path) {}
public:
- void enter();
-
- private:
- std::string path_;
+ void start();
};
/// Internal accessor for extension clients.
class EXInternal {
public:
explicit EXInternal(const std::string& path)
- : socket_(new extensions::TSocket(path)),
- transport_(new extensions::TBufferedTransport(socket_)),
- protocol_(new extensions::TBinaryProtocol(transport_)) {}
+ : socket_(new TSocket(path)),
+ transport_(new TBufferedTransport(socket_)),
+ protocol_(new TBinaryProtocol(transport_)) {}
virtual ~EXInternal() { transport_->close(); }
protected:
- OSQUERY_THRIFT_POINTER::shared_ptr<extensions::TSocket> socket_;
- OSQUERY_THRIFT_POINTER::shared_ptr<extensions::TTransport> transport_;
- OSQUERY_THRIFT_POINTER::shared_ptr<extensions::TProtocol> protocol_;
+ TSocketRef socket_;
+ TTransportRef transport_;
+ TProtocolRef protocol_;
};
/// Internal accessor for a client to an extension (from an extension manager).
#include <osquery/extensions.h>
#include <osquery/filesystem.h>
-#include <osquery/database.h>
#include "osquery/core/test_util.h"
#include "osquery/extensions/interface.h"
class ExtensionsTest : public testing::Test {
protected:
void SetUp() {
- remove(kTestManagerSocket);
- if (pathExists(kTestManagerSocket).ok()) {
- throw std::domain_error("Cannot test sockets: " + kTestManagerSocket);
+ socket_path = kTestManagerSocket + std::to_string(rand());
+ remove(socket_path);
+ if (pathExists(socket_path).ok()) {
+ throw std::domain_error("Cannot test sockets: " + socket_path);
}
}
void TearDown() {
- Dispatcher::removeServices();
+ Dispatcher::stopServices();
Dispatcher::joinServices();
- remove(kTestManagerSocket);
+ remove(socket_path);
}
bool ping(int attempts = 3) {
ExtensionStatus status;
for (int i = 0; i < attempts; ++i) {
try {
- EXManagerClient client(kTestManagerSocket);
+ EXManagerClient client(socket_path);
client.get()->ping(status);
return (status.code == ExtensionCode::EXT_SUCCESS);
} catch (const std::exception& e) {
ExtensionResponse response;
for (int i = 0; i < attempts; ++i) {
try {
- EXManagerClient client(kTestManagerSocket);
+ EXManagerClient client(socket_path);
client.get()->query(response, sql);
} catch (const std::exception& e) {
::usleep(kDelayUS);
ExtensionList registeredExtensions(int attempts = 3) {
ExtensionList extensions;
for (int i = 0; i < attempts; ++i) {
- if (getExtensions(kTestManagerSocket, extensions).ok()) {
+ if (getExtensions(socket_path, extensions).ok()) {
break;
}
}
}
return false;
}
+
+ public:
+ std::string socket_path;
};
TEST_F(ExtensionsTest, test_manager_runnable) {
// Start a testing extension manager.
- auto status = startExtensionManager(kTestManagerSocket);
+ auto status = startExtensionManager(socket_path);
EXPECT_TRUE(status.ok());
// Call success if the Unix socket was created.
- EXPECT_TRUE(socketExists(kTestManagerSocket));
+ EXPECT_TRUE(socketExists(socket_path));
}
TEST_F(ExtensionsTest, test_extension_runnable) {
- auto status = startExtensionManager(kTestManagerSocket);
+ auto status = startExtensionManager(socket_path);
EXPECT_TRUE(status.ok());
// Wait for the extension manager to start.
- EXPECT_TRUE(socketExists(kTestManagerSocket));
+ EXPECT_TRUE(socketExists(socket_path));
// Test the extension manager API 'ping' call.
EXPECT_TRUE(ping());
}
TEST_F(ExtensionsTest, test_extension_start) {
- auto status = startExtensionManager(kTestManagerSocket);
+ auto status = startExtensionManager(socket_path);
EXPECT_TRUE(status.ok());
- EXPECT_TRUE(socketExists(kTestManagerSocket));
+ EXPECT_TRUE(socketExists(socket_path));
// Now allow duplicates (for testing, since EM/E are the same).
Registry::allowDuplicates(true);
- status = startExtension(kTestManagerSocket, "test", "0.1", "0.0.0", "0.0.1");
+ status = startExtension(socket_path, "test", "0.1", "0.0.0", "0.0.1");
// This will not be false since we are allowing deplicate items.
// Otherwise, starting an extension and extensionManager would fatal.
ASSERT_TRUE(status.ok());
RouteUUID uuid = (RouteUUID)stoi(status.getMessage(), nullptr, 0);
// We can test-wait for the extensions's socket to open.
- EXPECT_TRUE(socketExists(kTestManagerSocket + "." + std::to_string(uuid)));
+ EXPECT_TRUE(socketExists(socket_path + "." + std::to_string(uuid)));
// Then clean up the registry modifications.
Registry::removeBroadcast(uuid);
CREATE_REGISTRY(ExtensionPlugin, "extension_test");
TEST_F(ExtensionsTest, test_extension_broadcast) {
- auto status = startExtensionManager(kTestManagerSocket);
+ auto status = startExtensionManager(socket_path);
EXPECT_TRUE(status.ok());
- EXPECT_TRUE(socketExists(kTestManagerSocket));
+ EXPECT_TRUE(socketExists(socket_path));
// This time we're going to add a plugin to the extension_test registry.
Registry::add<TestExtensionPlugin>("extension_test", "test_item");
EXPECT_TRUE(Registry::exists("extension_test", "test_item"));
EXPECT_FALSE(Registry::exists("extension_test", "test_alias"));
- status = startExtension(kTestManagerSocket, "test", "0.1", "0.0.0", "0.0.1");
+ status = startExtension(socket_path, "test", "0.1", "0.0.0", "0.0.1");
EXPECT_TRUE(status.ok());
RouteUUID uuid;
return;
}
- auto ext_socket = kTestManagerSocket + "." + std::to_string(uuid);
+ auto ext_socket = socket_path + "." + std::to_string(uuid);
EXPECT_TRUE(socketExists(ext_socket));
// Make sure the EM registered the extension (called in start extension).
auto extensions = registeredExtensions();
// Expect two, since `getExtensions` includes the core.
- EXPECT_EQ(extensions.size(), 2);
+ ASSERT_EQ(extensions.size(), 2);
EXPECT_EQ(extensions.count(uuid), 1);
EXPECT_EQ(extensions.at(uuid).name, "test");
EXPECT_EQ(extensions.at(uuid).version, "0.1");
ADD_OSQUERY_LIBRARY(TRUE osquery_logger logger.cpp)
-ADD_OSQUERY_LIBRARY(FALSE osquery_logger_plugins plugins/filesystem.cpp)
+ADD_OSQUERY_LIBRARY(FALSE osquery_logger_plugins plugins/filesystem.cpp
+ plugins/syslog.cpp)
FILE(GLOB OSQUERY_LOGGER_TESTS "tests/*.cpp")
ADD_OSQUERY_TEST(FALSE ${OSQUERY_LOGGER_TESTS})
+
+file(GLOB OSQUERY_LOGGER_PLUGIN_TESTS "plugins/tests/*.cpp")
+ADD_OSQUERY_TEST(FALSE ${OSQUERY_LOGGER_PLUGIN_TESTS})
--- /dev/null
+/*
+ * Copyright (c) 2014, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+#include <syslog.h>
+
+#include <osquery/flags.h>
+#include <osquery/logger.h>
+
+namespace osquery {
+
+// The flag stores the facility as an index (0-23): <syslog.h> encodes
+// facilities as (index << 3), so LOG_LOCAL3 >> 3 recovers the default index.
+FLAG(int32,
+     logger_syslog_facility,
+     LOG_LOCAL3 >> 3,
+     "Syslog facility for status and results logs (0-23, default 19)");
+
+// Logger plugin that forwards osquery result and status logs to syslog(3).
+class SyslogLoggerPlugin : public LoggerPlugin {
+ public:
+  Status logString(const std::string& s);
+  Status init(const std::string& name, const std::vector<StatusLogLine>& log);
+  Status logStatus(const std::vector<StatusLogLine>& log);
+};
+
+REGISTER(SyslogLoggerPlugin, "logger", "syslog");
+
+// Emit each newline-separated chunk of the result string as its own
+// LOG_INFO syslog entry, prefixed with "result=".
+Status SyslogLoggerPlugin::logString(const std::string& s) {
+  for (const auto& line : osquery::split(s, "\n")) {
+    syslog(LOG_INFO, "result=%s", line.c_str());
+  }
+  return Status(0, "OK");
+}
+
+// Translate each osquery status severity (O_INFO..O_FATAL) into a syslog
+// severity and emit one entry per status line.
+Status SyslogLoggerPlugin::logStatus(const std::vector<StatusLogLine>& log) {
+  for (const auto& item : log) {
+    // Unrecognized severities fall through to LOG_NOTICE.
+    int severity = LOG_NOTICE;
+    if (item.severity == O_INFO) {
+      severity = LOG_NOTICE;
+    } else if (item.severity == O_WARNING) {
+      severity = LOG_WARNING;
+    } else if (item.severity == O_ERROR) {
+      severity = LOG_ERR;
+    } else if (item.severity == O_FATAL) {
+      severity = LOG_CRIT;
+    }
+
+    std::string line = "severity=" + std::to_string(item.severity)
+      + " location=" + item.filename + ":" + std::to_string(item.line) +
+      " message=" + item.message;
+
+    syslog(severity, "%s", line.c_str());
+  }
+  return Status(0, "OK");
+}
+
+// Open the syslog connection under the osquery process name and flush any
+// status logs buffered before this plugin was activated.
+Status SyslogLoggerPlugin::init(const std::string& name,
+                                const std::vector<StatusLogLine>& log) {
+  closelog();
+
+  // Clamp an out-of-range facility flag back to the default (LOG_LOCAL3).
+  if (FLAGS_logger_syslog_facility < 0 ||
+      FLAGS_logger_syslog_facility > 23) {
+    FLAGS_logger_syslog_facility = LOG_LOCAL3 >> 3;
+  }
+  openlog(name.c_str(), LOG_PID | LOG_CONS, FLAGS_logger_syslog_facility << 3);
+
+  // Now funnel the intermediate status logs provided to `init`.
+  return logStatus(log);
+}
+}
#include <osquery/core.h>
+// If CMake/gmake did not define a build version set the version to 1.0.
+// clang-format off
+#ifndef OSQUERY_BUILD_VERSION
+#define OSQUERY_BUILD_VERSION 1.0.0-unknown
+#endif
+// clang-format on
+
namespace osquery {
-const std::string kVersion = OSQUERY_VERSION;
+const std::string kVersion = STR(OSQUERY_BUILD_VERSION);
const std::string kSDKVersion = OSQUERY_SDK_VERSION;
+const std::string kSDKPlatform = OSQUERY_PLATFORM;
}
#include <stdio.h>
#include <osquery/core.h>
+#include <osquery/extensions.h>
+#include "osquery/core/watcher.h"
#include "osquery/devtools/devtools.h"
int main(int argc, char *argv[]) {
osquery::FLAGS_L) {
// A query was set as a positional argument for via stdin.
osquery::FLAGS_disable_events = true;
+ // The shell may have loaded table extensions, if not, disable the manager.
+ if (!osquery::Watcher::hasManagedExtensions()) {
+ osquery::FLAGS_disable_extensions = true;
+ }
}
runner.start();
*/
#include <cstdlib>
+#include <chrono>
#include <time.h>
#include <gtest/gtest.h>
-#include <osquery/database.h>
-
#include "osquery/core/test_util.h"
+#include "osquery/database/db_handle.h"
+
+namespace fs = boost::filesystem;
namespace osquery {
+
DECLARE_string(database_path);
DECLARE_string(extensions_socket);
DECLARE_string(modules_autoload);
DECLARE_bool(disable_logging);
DECLARE_bool(verbose);
+typedef std::chrono::high_resolution_clock chrono_clock;
+
void initTesting() {
// Seed the random number generator, some tests generate temporary files
// ports, sockets, etc using random numbers.
- std::chrono::milliseconds ms =
- std::chrono::duration_cast<std::chrono::milliseconds>(
- std::chrono::system_clock::now().time_since_epoch());
- srand(ms.count());
+ std::srand(chrono_clock::now().time_since_epoch().count());
// Set safe default values for path-based flags.
// Specific unittests may edit flags temporarily.
- boost::filesystem::remove_all(kTestWorkingDirectory);
- boost::filesystem::create_directories(kTestWorkingDirectory);
+ fs::remove_all(kTestWorkingDirectory);
+ fs::create_directories(kTestWorkingDirectory);
FLAGS_database_path = kTestWorkingDirectory + "unittests.db";
FLAGS_extensions_socket = kTestWorkingDirectory + "unittests.em";
FLAGS_extensions_autoload = kTestWorkingDirectory + "unittests-ext.load";
}
int main(int argc, char* argv[]) {
+ // Allow unit test execution from anywhere in the osquery source/build tree.
+ while (osquery::kTestDataPath != "/") {
+ if (!fs::exists(osquery::kTestDataPath)) {
+ osquery::kTestDataPath =
+ osquery::kTestDataPath.substr(3, osquery::kTestDataPath.size());
+ } else {
+ break;
+ }
+ }
+
osquery::initTesting();
testing::InitGoogleTest(&argc, argv);
// Optionally enable Google Logging
std::stringstream query;
query << "SELECT * FROM " << table;
- tables::TableColumns columns;
+ TableColumns columns;
getQueryColumns(query.str(), columns);
// Extract column names
--- /dev/null
+/*
+ * Copyright (c) 2014, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+#include <gtest/gtest.h>
+
+#include <osquery/core.h>
+#include <osquery/database.h>
+#include <osquery/enroll.h>
+
+#include "osquery/core/test_util.h"
+
+namespace osquery {
+
+// Each test starts with no cached node key and no key-cache timestamp.
+class EnrollTests : public testing::Test {
+ public:
+  void SetUp() {
+    deleteDatabaseValue(kPersistentSettings, "nodeKey");
+    deleteDatabaseValue(kPersistentSettings, "nodeKeyTime");
+  }
+};
+
+// Test enroll plugin: returns a fixed key normally; when forced, returns a
+// counter value so each forced enroll yields a distinct key.
+class SimpleEnrollPlugin : public EnrollPlugin {
+ public:
+  SimpleEnrollPlugin() : times_forced_(0) {}
+
+ protected:
+  std::string enroll(bool force) {
+    if (force) {
+      forced_response_ = std::to_string(times_forced_);
+      times_forced_++;
+      return forced_response_;
+    }
+    return "fetched_a_node_key";
+  }
+
+ private:
+  // Response returned for the most recent forced enroll.
+  std::string forced_response_;
+  // Number of forced enrolls observed so far.
+  size_t times_forced_;
+};
+
+// Register our simple enroll plugin.
+REGISTER(SimpleEnrollPlugin, "enroll", "test_simple");
+
+TEST_F(EnrollTests, test_enroll_key_retrieval) {
+  FLAGS_disable_enrollment = true;
+  // Without enrollment, and with an empty nodeKey storage value, no node key
+  // will be fetched or returned from the cache.
+  EXPECT_EQ(getNodeKey("test_simple"), "");
+
+  // Turn the enrollment features back on and expect a key.
+  FLAGS_disable_enrollment = false;
+  EXPECT_EQ(getNodeKey("test_simple"), "fetched_a_node_key");
+}
+
+TEST_F(EnrollTests, test_enroll_key_caching) {
+  // Cause a fetch of the node key.
+  auto node_key = getNodeKey("test_simple");
+
+  // Now fetch the time the node key was last cached from the database.
+  std::string key_time;
+  auto status = getDatabaseValue(kPersistentSettings, "nodeKeyTime", key_time);
+  EXPECT_TRUE(status.ok());
+
+  // A subsequent call to getNodeKey will return the same node key.
+  // But, our simple enroll plugin is not enforcing any secret check and is
+  // always returning the same node key.
+  auto node_key2 = getNodeKey("test_simple");
+  // In most scenarios subsequent calls to EnrollPlugin::enroll and the backing
+  // enrollment service will generate and return different node keys.
+  EXPECT_EQ(node_key2, node_key);
+
+  // To work around our contrived example we make sure the node time was not
+  // updated, meaning no call to EnrollPlugin::enroll occurred.
+  std::string key_time2;
+  getDatabaseValue(kPersistentSettings, "nodeKeyTime", key_time2);
+  EXPECT_EQ(key_time2, key_time);
+}
+}
FLAG(int32, value_max, 512, "Maximum returned row value size");
-const std::map<tables::ConstraintOperator, std::string> kSQLOperatorRepr = {
- {tables::EQUALS, "="},
- {tables::GREATER_THAN, ">"},
- {tables::LESS_THAN_OR_EQUALS, "<="},
- {tables::LESS_THAN, "<"},
- {tables::GREATER_THAN_OR_EQUALS, ">="},
+const std::map<ConstraintOperator, std::string> kSQLOperatorRepr = {
+ {EQUALS, "="},
+ {GREATER_THAN, ">"},
+ {LESS_THAN_OR_EQUALS, "<="},
+ {LESS_THAN, "<"},
+ {GREATER_THAN_OR_EQUALS, ">="},
};
SQL::SQL(const std::string& q) { status_ = query(q, results_); }
QueryData SQL::selectAllFrom(const std::string& table,
const std::string& column,
- tables::ConstraintOperator op,
+ ConstraintOperator op,
const std::string& expr) {
PluginResponse response;
PluginRequest request = {{"action", "generate"}};
- tables::QueryContext ctx;
- ctx.constraints[column].add(tables::Constraint(op, expr));
+ QueryContext ctx;
+ ctx.constraints[column].add(Constraint(op, expr));
- tables::TablePlugin::setRequestFromContext(ctx, request);
+ TablePlugin::setRequestFromContext(ctx, request);
Registry::call("table", table, request, response);
return response;
}
if (request.at("action") == "query") {
return this->query(request.at("query"), response);
} else if (request.at("action") == "columns") {
- tables::TableColumns columns;
+ TableColumns columns;
auto status = this->getQueryColumns(request.at("query"), columns);
// Convert columns to response
for (const auto& column : columns) {
"sql", "sql", {{"action", "query"}, {"query", q}}, results);
}
-Status getQueryColumns(const std::string& q, tables::TableColumns& columns) {
+Status getQueryColumns(const std::string& q, TableColumns& columns) {
PluginResponse response;
auto status = Registry::call(
"sql", "sql", {{"action", "columns"}, {"query", q}}, response);
*/
#include <osquery/core.h>
-#include <osquery/database.h>
#include <osquery/flags.h>
#include <osquery/logger.h>
#include <osquery/sql.h>
return status;
}
- auto statement = tables::columnDefinition(response);
- return tables::attachTableInternal(name, statement, dbc.db());
+ auto statement = columnDefinition(response);
+ return attachTableInternal(name, statement, dbc.db());
}
void SQLiteSQLPlugin::detach(const std::string& name) {
if (!dbc.isPrimary()) {
return;
}
- tables::detachTableInternal(name, dbc.db());
+ detachTableInternal(name, dbc.db());
}
SQLiteDBInstance::SQLiteDBInstance() {
primary_ = false;
sqlite3_open(":memory:", &db_);
- tables::attachVirtualTables(db_);
+ attachVirtualTables(db_);
}
SQLiteDBInstance::SQLiteDBInstance(sqlite3*& db) {
if (self.db_ == nullptr) {
// Create primary sqlite DB instance.
sqlite3_open(":memory:", &self.db_);
- tables::attachVirtualTables(self.db_);
+ attachVirtualTables(self.db_);
}
return SQLiteDBInstance(self.db_);
} else {
}
Status getQueryColumnsInternal(const std::string& q,
- tables::TableColumns& columns,
+ TableColumns& columns,
sqlite3* db) {
int rc;
// Get column count
int num_columns = sqlite3_column_count(stmt);
- tables::TableColumns results;
+ TableColumns results;
results.reserve(num_columns);
// Get column names and types
* @return status indicating success or failure of the operation
*/
Status getQueryColumnsInternal(const std::string& q,
- tables::TableColumns& columns,
+ TableColumns& columns,
sqlite3* db);
/// The SQLiteSQLPlugin implements the "sql" registry for internal/core.
return queryInternal(q, results, dbc.db());
}
- Status getQueryColumns(const std::string& q,
- tables::TableColumns& columns) const {
+ Status getQueryColumns(const std::string& q, TableColumns& columns) const {
auto dbc = SQLiteDBManager::get();
return getQueryColumnsInternal(q, columns, dbc.db());
}
EXPECT_EQ(results.size(), 1);
}
-class TestTablePlugin : public tables::TablePlugin {
+class TestTablePlugin : public TablePlugin {
private:
- tables::TableColumns columns() const {
+ TableColumns columns() const {
return {{"test_int", "INTEGER"}, {"test_text", "TEXT"}};
}
- QueryData generate(tables::QueryContext& ctx) {
+ QueryData generate(QueryContext& ctx) {
QueryData results;
if (ctx.constraints["test_int"].existsAndMatches("1")) {
results.push_back({{"test_int", "1"}, {"test_text", "0"}});
results.push_back({{"test_int", "0"}, {"test_text", "1"}});
}
- auto ints = ctx.constraints["test_int"].getAll<int>(tables::EQUALS);
+ auto ints = ctx.constraints["test_int"].getAll<int>(EQUALS);
for (const auto& int_match : ints) {
results.push_back({{"test_int", INTEGER(int_match)}});
}
EXPECT_EQ(results.size(), 1);
EXPECT_EQ(results[0]["test_text"], "1");
- results = SQL::selectAllFrom("test", "test_int", tables::EQUALS, "1");
+ results = SQL::selectAllFrom("test", "test_int", EQUALS, "1");
EXPECT_EQ(results.size(), 2);
- results = SQL::selectAllFrom("test", "test_int", tables::EQUALS, "2");
+ results = SQL::selectAllFrom("test", "test_int", EQUALS, "2");
EXPECT_EQ(results.size(), 2);
EXPECT_EQ(results[0]["test_int"], "0");
}
TEST_F(SQLiteUtilTests, test_get_query_columns) {
auto dbc = getTestDBC();
- tables::TableColumns results;
+ TableColumns results;
std::string query = "SELECT seconds, version FROM time JOIN osquery_info";
auto status = getQueryColumnsInternal(query, results, dbc.db());
#include "osquery/sql/virtual_table.h"
namespace osquery {
-namespace tables {
class VirtualTableTests : public testing::Test {};
auto dbc = SQLiteDBManager::get();
// Virtual tables require the registry/plugin API to query tables.
- auto status =
- tables::attachTableInternal("failed_sample", "(foo INTEGER)", dbc.db());
+ auto status = attachTableInternal("failed_sample", "(foo INTEGER)", dbc.db());
EXPECT_EQ(status.getCode(), SQLITE_ERROR);
// The table attach will complete only when the table name is registered.
EXPECT_TRUE(status.ok());
// Use the table name, plugin-generated schema to attach.
- status = tables::attachTableInternal(
- "sample", tables::columnDefinition(response), dbc.db());
+ status = attachTableInternal("sample", columnDefinition(response), dbc.db());
EXPECT_EQ(status.getCode(), SQLITE_OK);
std::string q = "SELECT sql FROM sqlite_temp_master WHERE tbl_name='sample';";
results[0]["sql"]);
}
}
-}
<< ") to BIGINT";
}
sqlite3_result_int64(ctx, afinite);
+ } else if (type == "DOUBLE") {
+ double afinite;
+ try {
+ afinite = boost::lexical_cast<double>(value);
+ } catch (const boost::bad_lexical_cast &e) {
+ afinite = 0;
+ VLOG(1) << "Error casting" << column_name << " (" << value
+ << ") to DOUBLE";
+ }
+ sqlite3_result_double(ctx, afinite);
}
return SQLITE_OK;
return SQLITE_OK;
}
+}
Status attachTableInternal(const std::string &name,
const std::string &statement,
}
// A static module structure does not need specific logic per-table.
+ // clang-format off
static sqlite3_module module = {
- 0, xCreate, xCreate, xBestIndex, xDestroy, xDestroy, xOpen,
- xClose, xFilter, xNext, xEof, xColumn, xRowid, 0,
- 0, 0, 0, 0, 0, 0,
+ 0,
+ tables::xCreate,
+ tables::xCreate,
+ tables::xBestIndex,
+ tables::xDestroy,
+ tables::xDestroy,
+ tables::xOpen,
+ tables::xClose,
+ tables::xFilter,
+ tables::xNext,
+ tables::xEof,
+ tables::xColumn,
+ tables::xRowid,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
};
+ // clang-format on
// Note, if the clientData API is used then this will save a registry call
// within xCreate.
}
}
}
-}
#include "osquery/sql/sqlite_util.h"
namespace osquery {
-namespace tables {
/**
* @brief osquery cursor object.
/// Attach all table plugins to an in-memory SQLite database.
void attachVirtualTables(sqlite3 *db);
}
-}
-ADD_OSQUERY_LIBRARY(FALSE osquery_tables_linux events/linux/passwd_changes.cpp
- events/linux/file_events.cpp
- events/linux/hardware_events.cpp
- networking/etc_protocols.cpp
- networking/linux/routes.cpp
- networking/linux/process_open_sockets.cpp
- networking/linux/arp_cache.cpp
- system/linux/acpi_tables.cpp
- system/linux/processes.cpp
- system/linux/process_open_files.cpp
- system/linux/shared_memory.cpp
- system/linux/smbios_tables.cpp
- system/linux/sysctl_utils.cpp
- system/linux/users.cpp
- system/linux/groups.cpp
- system/linux/kernel_info.cpp
- system/linux/kernel_modules.cpp
- system/linux/memory_map.cpp
- system/linux/mounts.cpp
- system/linux/os_version.cpp
- system/linux/pci_devices.cpp
- system/linux/usb_devices.cpp)
+FILE(GLOB OSQUERY_LINUX_TABLES "*/linux/*.cpp")
+ADD_OSQUERY_LIBRARY(TRUE osquery_linux_tables ${OSQUERY_LINUX_TABLES})
-ADD_OSQUERY_LIBRARY(TRUE osquery_tables networking/utils.cpp
- networking/etc_hosts.cpp # TODO(sangwan.kwon): Change to FALSE)
- networking/etc_services.cpp
- networking/listening_ports.cpp
- utility/time.cpp
- utility/hash.cpp
- utility/file.cpp
- utility/osquery.cpp
- system/crontab.cpp
- system/smbios_utils.cpp
- system/last.cpp
- system/shell_history.cpp
- system/suid_bin.cpp
- system/system_controls.cpp
- system/logged_in_users.cpp)
+FILE(GLOB OSQUERY_CROSS_TABLES "*/*.cpp")
+ADD_OSQUERY_LIBRARY(TRUE osquery_tables ${OSQUERY_CROSS_TABLES})
-FILE(GLOB OSQUERY_CROSS_TABLES_TESTS "[!u]*/tests/*.cpp")
+FILE(GLOB OSQUERY_CROSS_TABLES_TESTS "[!uo]*/tests/*.cpp")
ADD_OSQUERY_TEST(FALSE ${OSQUERY_CROSS_TABLES_TESTS})
#include "osquery/events/linux/inotify.h"
namespace osquery {
-namespace tables {
/**
* @brief Track time, action changes to /etc/passwd
return Status(0, "OK");
}
}
-}
#include "osquery/events/linux/udev.h"
namespace osquery {
-namespace tables {
/**
* @brief Track udev events in Linux
return Status(0, "OK");
}
}
-}
#include "osquery/events/linux/inotify.h"
namespace osquery {
-namespace tables {
/**
* @brief Track time, action changes to /etc/passwd
return Status(0, "OK");
}
}
-}
--- /dev/null
+/*
+ * 25-Jul-1998 Major changes to allow for ip chain table
+ *
+ * 3-Jan-2000 Named tables to allow packet selection for different uses.
+ */
+
+/*
+ * Format of an IP firewall descriptor
+ *
+ * src, dst, src_mask, dst_mask are always stored in network byte order.
+ * flags are stored in host byte order (of course).
+ * Port numbers are stored in HOST byte order.
+ */
+
+#ifndef _IPTABLES_H
+#define _IPTABLES_H
+
+#include <linux/types.h>
+
+#include <linux/netfilter_ipv4.h>
+
+#include <linux/netfilter/x_tables.h>
+
+#define IPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
+#define IPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN
+#define ipt_match xt_match
+#define ipt_target xt_target
+#define ipt_table xt_table
+#define ipt_get_revision xt_get_revision
+#define ipt_entry_match xt_entry_match
+#define ipt_entry_target xt_entry_target
+#define ipt_standard_target xt_standard_target
+#define ipt_error_target xt_error_target
+#define ipt_counters xt_counters
+#define IPT_CONTINUE XT_CONTINUE
+#define IPT_RETURN XT_RETURN
+
+/* This group is older than old (iptables < v1.4.0-rc1~89) */
+#include <linux/netfilter/xt_tcpudp.h>
+#define ipt_udp xt_udp
+#define ipt_tcp xt_tcp
+#define IPT_TCP_INV_SRCPT XT_TCP_INV_SRCPT
+#define IPT_TCP_INV_DSTPT XT_TCP_INV_DSTPT
+#define IPT_TCP_INV_FLAGS XT_TCP_INV_FLAGS
+#define IPT_TCP_INV_OPTION XT_TCP_INV_OPTION
+#define IPT_TCP_INV_MASK XT_TCP_INV_MASK
+#define IPT_UDP_INV_SRCPT XT_UDP_INV_SRCPT
+#define IPT_UDP_INV_DSTPT XT_UDP_INV_DSTPT
+#define IPT_UDP_INV_MASK XT_UDP_INV_MASK
+
+/* The argument to IPT_SO_ADD_COUNTERS. */
+#define ipt_counters_info xt_counters_info
+/* Standard return verdict, or do jump. */
+#define IPT_STANDARD_TARGET XT_STANDARD_TARGET
+/* Error verdict. */
+#define IPT_ERROR_TARGET XT_ERROR_TARGET
+
+/* fn returns 0 to continue iteration */
+#define IPT_MATCH_ITERATE(e, fn, args...) \
+ XT_MATCH_ITERATE(struct ipt_entry, e, fn, ## args)
+
+/* fn returns 0 to continue iteration */
+#define IPT_ENTRY_ITERATE(entries, size, fn, args...) \
+ XT_ENTRY_ITERATE(struct ipt_entry, entries, size, fn, ## args)
+
+/* Yes, Virginia, you have to zero the padding. */
+struct ipt_ip {
+ /* Source and destination IP addr */
+ struct in_addr src, dst;
+ /* Mask for src and dest IP addr */
+ struct in_addr smsk, dmsk;
+ char iniface[IFNAMSIZ], outiface[IFNAMSIZ];
+ unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ];
+
+ /* Protocol, 0 = ANY */
+ __u16 proto;
+
+ /* Flags word */
+ __u8 flags;
+ /* Inverse flags */
+ __u8 invflags;
+};
+
+/* Values for "flag" field in struct ipt_ip (general ip structure). */
+#define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */
+#define IPT_F_GOTO 0x02 /* Set if jump is a goto */
+#define IPT_F_MASK 0x03 /* All possible flag bits mask. */
+
+/* Values for "inv" field in struct ipt_ip. */
+#define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */
+#define IPT_INV_VIA_OUT 0x02 /* Invert the sense of OUT IFACE */
+#define IPT_INV_TOS 0x04 /* Invert the sense of TOS. */
+#define IPT_INV_SRCIP 0x08 /* Invert the sense of SRC IP. */
+#define IPT_INV_DSTIP 0x10 /* Invert the sense of DST OP. */
+#define IPT_INV_FRAG 0x20 /* Invert the sense of FRAG. */
+#define IPT_INV_PROTO XT_INV_PROTO
+#define IPT_INV_MASK 0x7F /* All possible flag bits mask. */
+
+/* This structure defines each of the firewall rules. Consists of 3
+ parts which are 1) general IP header stuff 2) match specific
+ stuff 3) the target to perform if the rule matches */
+struct ipt_entry {
+ struct ipt_ip ip;
+
+ /* Mark with fields that we care about. */
+ unsigned int nfcache;
+
+ /* Size of ipt_entry + matches */
+ __u16 target_offset;
+ /* Size of ipt_entry + matches + target */
+ __u16 next_offset;
+
+ /* Back pointer */
+ unsigned int comefrom;
+
+ /* Packet and byte counters. */
+ struct xt_counters counters;
+
+ /* The matches (if any), then the target. */
+ unsigned char elems[0];
+};
+
+/*
+ * New IP firewall options for [gs]etsockopt at the RAW IP level.
+ * Unlike BSD Linux inherits IP options so you don't have to use a raw
+ * socket for this. Instead we check rights in the calls.
+ *
+ * ATTENTION: check linux/in.h before adding new number here.
+ */
+#define IPT_BASE_CTL 64
+
+#define IPT_SO_SET_REPLACE (IPT_BASE_CTL)
+#define IPT_SO_SET_ADD_COUNTERS (IPT_BASE_CTL + 1)
+#define IPT_SO_SET_MAX IPT_SO_SET_ADD_COUNTERS
+
+#define IPT_SO_GET_INFO (IPT_BASE_CTL)
+#define IPT_SO_GET_ENTRIES (IPT_BASE_CTL + 1)
+#define IPT_SO_GET_REVISION_MATCH (IPT_BASE_CTL + 2)
+#define IPT_SO_GET_REVISION_TARGET (IPT_BASE_CTL + 3)
+#define IPT_SO_GET_MAX IPT_SO_GET_REVISION_TARGET
+
+/* ICMP matching stuff */
+struct ipt_icmp {
+ __u8 type; /* type to match */
+ __u8 code[2]; /* range of code */
+ __u8 invflags; /* Inverse flags */
+};
+
+/* Values for "inv" field for struct ipt_icmp. */
+#define IPT_ICMP_INV 0x01 /* Invert the sense of type/code test */
+
+/* The argument to IPT_SO_GET_INFO */
+struct ipt_getinfo {
+ /* Which table: caller fills this in. */
+ char name[XT_TABLE_MAXNAMELEN];
+
+ /* Kernel fills these in. */
+ /* Which hook entry points are valid: bitmask */
+ unsigned int valid_hooks;
+
+ /* Hook entry points: one per netfilter hook. */
+ unsigned int hook_entry[NF_INET_NUMHOOKS];
+
+ /* Underflow points. */
+ unsigned int underflow[NF_INET_NUMHOOKS];
+
+ /* Number of entries */
+ unsigned int num_entries;
+
+ /* Size of entries. */
+ unsigned int size;
+};
+
+/* The argument to IPT_SO_SET_REPLACE. */
+struct ipt_replace {
+ /* Which table. */
+ char name[XT_TABLE_MAXNAMELEN];
+
+ /* Which hook entry points are valid: bitmask. You can't
+ change this. */
+ unsigned int valid_hooks;
+
+ /* Number of entries */
+ unsigned int num_entries;
+
+ /* Total size of new entries */
+ unsigned int size;
+
+ /* Hook entry points. */
+ unsigned int hook_entry[NF_INET_NUMHOOKS];
+
+ /* Underflow points. */
+ unsigned int underflow[NF_INET_NUMHOOKS];
+
+ /* Information about old entries: */
+ /* Number of counters (must be equal to current number of entries). */
+ unsigned int num_counters;
+ /* The old entries' counters. */
+ struct xt_counters *counters;
+
+ /* The entries (hang off end: not really an array). */
+ struct ipt_entry entries[0];
+};
+
+/* The argument to IPT_SO_GET_ENTRIES. */
+struct ipt_get_entries {
+ /* Which table: user fills this in. */
+ char name[XT_TABLE_MAXNAMELEN];
+
+ /* User fills this in: total entry size. */
+ unsigned int size;
+
+ /* The entries. */
+ struct ipt_entry entrytable[0];
+};
+
+/* Helper functions */
+static __inline__ struct xt_entry_target *
+ipt_get_target(struct ipt_entry *e)
+{
+ return (struct ipt_entry_target *)((char *)e + e->target_offset);
+}
+
+/*
+ * Main firewall chains definitions and global var's definitions.
+ */
+#endif /* _IPTABLES_H */
--- /dev/null
+/*
+ * Copyright (c) 2014, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+#include <sstream>
+
+#include <arpa/inet.h>
+#include "libiptc.h"
+
+#include <boost/algorithm/string/split.hpp>
+#include <boost/algorithm/string/trim.hpp>
+
+#include <osquery/filesystem.h>
+#include <osquery/logger.h>
+#include <osquery/tables.h>
+
+#include "osquery/tables/networking/utils.h"
+
+namespace osquery {
+namespace tables {
+
+// Kernel-provided list of active iptables table names, one per line.
+const std::string kLinuxIpTablesNames = "/proc/net/ip_tables_names";
+// Hex-digit lookup table used to render interface masks as hex strings.
+const char MAP[] = {'0','1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'};
+// HIGH_BITS is the shift that extracts a byte's high nibble; LOW_BITS (15,
+// i.e. 0x0F) is the low-nibble mask.
+// NOTE(review): LOW_BITS names a mask value, not a bit count - consider
+// renaming for clarity.
+const int HIGH_BITS = 4;
+const int LOW_BITS = 15;
+
+// Fill row `r` with the protocol, interface, address, and mask fields of a
+// single ipt_ip match structure.
+void parseIpEntry(ipt_ip *ip, Row &r) {
+  r["protocol"] = INTEGER(ip->proto);
+  // An empty interface name means the rule applies to all interfaces.
+  if (strlen(ip->iniface)) {
+    r["iniface"] = TEXT(ip->iniface);
+  } else {
+    r["iniface"] = "all";
+  }
+
+  if (strlen(ip->outiface)) {
+    r["outiface"] = TEXT(ip->outiface);
+  } else {
+    r["outiface"] = "all";
+  }
+
+  r["src_ip"] = ipAsString((struct in_addr *)&ip->src);
+  r["dst_ip"] = ipAsString((struct in_addr *)&ip->dst);
+  r["src_mask"] = ipAsString((struct in_addr *)&ip->smsk);
+  r["dst_mask"] = ipAsString((struct in_addr *)&ip->dmsk);
+
+  // Hex-encode the interface masks, two hex digits per byte, stopping at the
+  // first zero byte.
+  // NOTE(review): the array read `ip->iniface_mask[i]` happens before the
+  // `i < IFNAMSIZ` bound check; swap the two conditions to avoid a
+  // one-past-the-end read when every mask byte is non-zero.
+  char aux_char[2] = {0};
+  std::string iniface_mask;
+  for (int i = 0; ip->iniface_mask[i] != 0x00 && i<IFNAMSIZ; i++) {
+    aux_char[0] = MAP[(int) ip->iniface_mask[i] >> HIGH_BITS];
+    aux_char[1] = MAP[(int) ip->iniface_mask[i] & LOW_BITS];
+    iniface_mask += aux_char[0];
+    iniface_mask += aux_char[1];
+  }
+
+  r["iniface_mask"] = TEXT(iniface_mask);
+  std::string outiface_mask = "";
+  // Same encoding (and the same bounds-check ordering concern) as above.
+  for (int i = 0; ip->outiface_mask[i] != 0x00 && i<IFNAMSIZ; i++) {
+    aux_char[0] = MAP[(int) ip->outiface_mask[i] >> HIGH_BITS];
+    aux_char[1] = MAP[(int) ip->outiface_mask[i] & LOW_BITS];
+    outiface_mask += aux_char[0];
+    outiface_mask += aux_char[1];
+  }
+  r["outiface_mask"] = TEXT(outiface_mask);
+}
+
+// Emit one row per rule (plus a per-chain row, see note below) for every
+// chain in the named iptables table ("filter").
+void genIPTablesRules(const std::string &filter, QueryData &results) {
+  Row r;
+  r["filter_name"] = filter;
+
+  // Initialize the access to iptc
+  auto handle = (struct iptc_handle *)iptc_init(filter.c_str());
+  if (handle == nullptr) {
+    return;
+  }
+
+  // Iterate through chains
+  for (auto chain = iptc_first_chain(handle); chain != nullptr;
+       chain = iptc_next_chain(handle)) {
+    r["chain"] = TEXT(chain);
+
+    // Policy and counters exist only for built-in chains.
+    struct ipt_counters counters;
+    auto policy = iptc_get_policy(chain, &counters, handle);
+
+    if (policy != nullptr) {
+      r["policy"] = TEXT(policy);
+      r["packets"] = INTEGER(counters.pcnt);
+      r["bytes"] = INTEGER(counters.bcnt);
+    } else {
+      r["policy"] = "";
+      r["packets"] = "0";
+      r["bytes"] = "0";
+    }
+
+    struct ipt_entry *prev_rule = nullptr;
+    // Iterating through all the rules per chain
+    for (auto chain_rule = iptc_first_rule(chain, handle); chain_rule;
+         chain_rule = iptc_next_rule(prev_rule, handle)) {
+      prev_rule = (struct ipt_entry *)chain_rule;
+
+      auto target = iptc_get_target(chain_rule, handle);
+      if (target != nullptr) {
+        r["target"] = TEXT(target);
+      } else {
+        r["target"] = "";
+      }
+
+      if (chain_rule->target_offset) {
+        r["match"] = "yes";
+      } else {
+        r["match"] = "no";
+      }
+
+      struct ipt_ip *ip = (struct ipt_ip *)&chain_rule->ip;
+      parseIpEntry(ip, r);
+
+      results.push_back(r);
+    } // Rule iteration
+    // NOTE(review): this unconditional push duplicates the last rule's row
+    // for non-empty chains; presumably intended only to emit a row for
+    // chains with no rules - confirm and guard on an empty chain if so.
+    results.push_back(r);
+  } // Chain iteration
+
+  iptc_free(handle);
+}
+
+// Table generator: read the active table names from procfs and collect the
+// rules of each.
+QueryData genIptables(QueryContext& context) {
+  QueryData results;
+
+  // Read in table names
+  std::string content;
+  auto s = osquery::readFile(kLinuxIpTablesNames, content);
+  if (s.ok()) {
+    for (auto &line : split(content, "\n")) {
+      boost::trim(line);
+      if (line.size() > 0) {
+        genIPTablesRules(line, results);
+      }
+    }
+  } else {
+    // Permissions issue or iptables modules are not loaded.
+    TLOG << "Error reading " << kLinuxIpTablesNames << " : " << s.toString();
+  }
+
+  return results;
+}
+}
+}
--- /dev/null
+#ifndef _LIBIPTC_H
+#define _LIBIPTC_H
+/* Library which manipulates filtering rules. */
+
+#include <linux/types.h>
+#include <libiptc/ipt_kernel_headers.h>
+#ifdef __cplusplus
+# include <climits>
+#else
+# include <limits.h> /* INT_MAX in ip_tables.h */
+#endif
+#include "ip_tables.h"
+#include <libiptc/xtcshared.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define iptc_handle xtc_handle
+#define ipt_chainlabel xt_chainlabel
+
+#define IPTC_LABEL_ACCEPT "ACCEPT"
+#define IPTC_LABEL_DROP "DROP"
+#define IPTC_LABEL_QUEUE "QUEUE"
+#define IPTC_LABEL_RETURN "RETURN"
+
+/* Does this chain exist? */
+int iptc_is_chain(const char *chain, struct xtc_handle *const handle);
+
+/* Take a snapshot of the rules. Returns NULL on error. */
+struct xtc_handle *iptc_init(const char *tablename);
+
+/* Cleanup after iptc_init(). */
+void iptc_free(struct xtc_handle *h);
+
+/* Iterator functions to run through the chains. Returns NULL at end. */
+const char *iptc_first_chain(struct xtc_handle *handle);
+const char *iptc_next_chain(struct xtc_handle *handle);
+
+/* Get first rule in the given chain: NULL for empty chain. */
+const struct ipt_entry *iptc_first_rule(const char *chain,
+ struct xtc_handle *handle);
+
+/* Returns NULL when rules run out. */
+const struct ipt_entry *iptc_next_rule(const struct ipt_entry *prev,
+ struct xtc_handle *handle);
+
+/* Returns a pointer to the target name of this entry. */
+const char *iptc_get_target(const struct ipt_entry *e,
+ struct xtc_handle *handle);
+
+/* Is this a built-in chain? */
+int iptc_builtin(const char *chain, struct xtc_handle *const handle);
+
+/* Get the policy of a given built-in chain */
+const char *iptc_get_policy(const char *chain,
+ struct xt_counters *counter,
+ struct xtc_handle *handle);
+
+/* These functions return TRUE for OK or 0 and set errno. If errno ==
+ 0, it means there was a version error (ie. upgrade libiptc). */
+/* Rule numbers start at 1 for the first rule. */
+
+/* Insert the entry `e' in chain `chain' into position `rulenum'. */
+int iptc_insert_entry(const xt_chainlabel chain,
+ const struct ipt_entry *e,
+ unsigned int rulenum,
+ struct xtc_handle *handle);
+
+/* Atomically replace rule `rulenum' in `chain' with `e'. */
+int iptc_replace_entry(const xt_chainlabel chain,
+ const struct ipt_entry *e,
+ unsigned int rulenum,
+ struct xtc_handle *handle);
+
+/* Append entry `e' to chain `chain'. Equivalent to insert with
+ rulenum = length of chain. */
+int iptc_append_entry(const xt_chainlabel chain,
+ const struct ipt_entry *e,
+ struct xtc_handle *handle);
+
+/* Check whether a matching rule exists */
+int iptc_check_entry(const xt_chainlabel chain,
+ const struct ipt_entry *origfw,
+ unsigned char *matchmask,
+ struct xtc_handle *handle);
+
+/* Delete the first rule in `chain' which matches `e', subject to
+ matchmask (array of length == origfw) */
+int iptc_delete_entry(const xt_chainlabel chain,
+ const struct ipt_entry *origfw,
+ unsigned char *matchmask,
+ struct xtc_handle *handle);
+
+/* Delete the rule in position `rulenum' in `chain'. */
+int iptc_delete_num_entry(const xt_chainlabel chain,
+ unsigned int rulenum,
+ struct xtc_handle *handle);
+
+/* Check the packet `e' on chain `chain'. Returns the verdict, or
+ NULL and sets errno. */
+const char *iptc_check_packet(const xt_chainlabel chain,
+ struct ipt_entry *entry,
+ struct xtc_handle *handle);
+
+/* Flushes the entries in the given chain (ie. empties chain). */
+int iptc_flush_entries(const xt_chainlabel chain,
+ struct xtc_handle *handle);
+
+/* Zeroes the counters in a chain. */
+int iptc_zero_entries(const xt_chainlabel chain,
+ struct xtc_handle *handle);
+
+/* Creates a new chain. */
+int iptc_create_chain(const xt_chainlabel chain,
+ struct xtc_handle *handle);
+
+/* Deletes a chain. */
+int iptc_delete_chain(const xt_chainlabel chain,
+ struct xtc_handle *handle);
+
+/* Renames a chain. */
+int iptc_rename_chain(const xt_chainlabel oldname,
+ const xt_chainlabel newname,
+ struct xtc_handle *handle);
+
+/* Sets the policy on a built-in chain. */
+int iptc_set_policy(const xt_chainlabel chain,
+ const xt_chainlabel policy,
+ struct xt_counters *counters,
+ struct xtc_handle *handle);
+
+/* Get the number of references to this chain */
+int iptc_get_references(unsigned int *ref,
+ const xt_chainlabel chain,
+ struct xtc_handle *handle);
+
+/* read packet and byte counters for a specific rule */
+struct xt_counters *iptc_read_counter(const xt_chainlabel chain,
+ unsigned int rulenum,
+ struct xtc_handle *handle);
+
+/* zero packet and byte counters for a specific rule */
+int iptc_zero_counter(const xt_chainlabel chain,
+ unsigned int rulenum,
+ struct xtc_handle *handle);
+
+/* set packet and byte counters for a specific rule */
+int iptc_set_counter(const xt_chainlabel chain,
+ unsigned int rulenum,
+ struct xt_counters *counters,
+ struct xtc_handle *handle);
+
+/* Makes the actual changes. */
+int iptc_commit(struct xtc_handle *handle);
+
+/* Get raw socket. */
+int iptc_get_raw_socket(void);
+
+/* Translates errno numbers into more human-readable form than strerror. */
+const char *iptc_strerror(int err);
+
+extern void dump_entries(struct xtc_handle *const);
+
+extern const struct xtc_ops iptc_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _LIBIPTC_H */
return decoded;
}
-void genSocketsFromProc(const std::map<std::string, std::string> &socket_inodes,
+void genSocketsFromProc(const std::map<std::string, std::string> &inodes,
int protocol,
int family,
QueryData &results) {
std::string path = "/proc/net/";
- path += kLinuxProtocolNames.at(protocol);
- path += (family == AF_INET6) ? "6" : "";
+ if (family == AF_UNIX) {
+ path += "unix";
+ } else {
+ path += kLinuxProtocolNames.at(protocol);
+ path += (family == AF_INET6) ? "6" : "";
+ }
std::string content;
if (!osquery::readFile(path, content).ok()) {
// The system's socket information is tokenized by line.
size_t index = 0;
for (const auto &line : osquery::split(content, "\n")) {
- index += 1;
- if (index == 1) {
+ if (++index == 1) {
// The first line is a textual header and will be ignored.
- if (line.find("sl") != 0) {
+ if (line.find("sl") != 0 && line.find("sk") != 0 &&
+ line.find("Num") != 0) {
// Header fields are unknown, stop parsing.
break;
}
// The socket information is tokenized by spaces, each a field.
auto fields = osquery::split(line, " ");
- if (fields.size() < 10) {
+ // UNIX socket reporting has a smaller number of fields.
+ size_t min_fields = (family == AF_UNIX) ? 7 : 10;
+ if (fields.size() < min_fields) {
// Unknown/malformed socket information.
continue;
}
- // Two of the fields are the local/remote address/port pairs.
- auto locals = osquery::split(fields[1], ":");
- auto remotes = osquery::split(fields[2], ":");
- if (locals.size() != 2 || remotes.size() != 2) {
- // Unknown/malformed socket information.
- continue;
- }
Row r;
- r["socket"] = fields[9];
- r["family"] = INTEGER(family);
- r["protocol"] = INTEGER(protocol);
- r["local_address"] = addressFromHex(locals[0], family);
- r["local_port"] = INTEGER(portFromHex(locals[1]));
- r["remote_address"] = addressFromHex(remotes[0], family);
- r["remote_port"] = INTEGER(portFromHex(remotes[1]));
-
- if (socket_inodes.count(r["socket"]) > 0) {
- r["pid"] = socket_inodes.at(r["socket"]);
+ if (family == AF_UNIX) {
+ r["socket"] = fields[6];
+ r["family"] = "0";
+ r["protocol"] = fields[2];
+ r["local_address"] = "";
+ r["local_port"] = "0";
+ r["remote_address"] = "";
+ r["remote_port"] = "0";
+ r["path"] = (fields.size() >= 8) ? fields[7] : "";
+ } else {
+ // Two of the fields are the local/remote address/port pairs.
+ auto locals = osquery::split(fields[1], ":");
+ auto remotes = osquery::split(fields[2], ":");
+ if (locals.size() != 2 || remotes.size() != 2) {
+ // Unknown/malformed socket information.
+ continue;
+ }
+
+ r["socket"] = fields[9];
+ r["family"] = INTEGER(family);
+ r["protocol"] = INTEGER(protocol);
+ r["local_address"] = addressFromHex(locals[0], family);
+ r["local_port"] = INTEGER(portFromHex(locals[1]));
+ r["remote_address"] = addressFromHex(remotes[0], family);
+ r["remote_port"] = INTEGER(portFromHex(remotes[1]));
+ // Path is only used for UNIX domain sockets.
+ r["path"] = "";
+ }
+
+ if (inodes.count(r["socket"]) > 0) {
+ r["pid"] = inodes.at(r["socket"]);
} else {
r["pid"] = "-1";
}
// If a pid is given then set that as the only item in processes.
std::set<std::string> pids;
- if (context.constraints["pid"].exists()) {
+ if (context.constraints["pid"].exists(EQUALS)) {
pids = context.constraints["pid"].getAll(EQUALS);
} else {
osquery::procProcesses(pids);
std::map<std::string, std::string> descriptors;
if (osquery::procDescriptors(process, descriptors).ok()) {
for (const auto& fd : descriptors) {
- if (fd.second.find("socket:") != std::string::npos) {
- // See #792: std::regex is incomplete until GCC 4.9
- auto inode = fd.second.substr(fd.second.find("socket:") + 8);
+ if (fd.second.find("socket:[") == 0) {
+ // See #792: std::regex is incomplete until GCC 4.9 (skip 8 chars)
+ auto inode = fd.second.substr(8);
socket_inodes[inode.substr(0, inode.size() - 1)] = process;
}
}
genSocketsFromProc(socket_inodes, protocol.first, AF_INET6, results);
}
+ genSocketsFromProc(socket_inodes, IPPROTO_IP, AF_UNIX, results);
return results;
}
}
--- /dev/null
+/*
+ * Copyright (c) 2014, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+#include <gtest/gtest.h>
+
+#include <osquery/logger.h>
+
+#include <libiptc/libiptc.h>
+#include <arpa/inet.h>
+
+#include "osquery/core/test_util.h"
+
+namespace osquery {
+namespace tables {
+
+void parseIpEntry(ipt_ip *ip, Row &row);
+
+ipt_ip* getIpEntryContent() {
+ static ipt_ip ip_entry;
+
+ ip_entry.proto = 6;
+ memset(ip_entry.iniface, 0, IFNAMSIZ);
+ strcpy(ip_entry.outiface, "eth0");
+ inet_aton("123.123.123.123", &ip_entry.src);
+ inet_aton("45.45.45.45", &ip_entry.dst);
+ inet_aton("250.251.252.253", &ip_entry.smsk);
+ inet_aton("253.252.251.250", &ip_entry.dmsk);
+ memset(ip_entry.iniface_mask, 0xfe, IFNAMSIZ );
+ memset(ip_entry.outiface_mask, 0xfa, IFNAMSIZ );
+
+ return &ip_entry;
+}
+
+Row getIpEntryExpectedResults() {
+ Row row;
+
+ row["protocol"] = "6";
+ row["iniface"] = "all";
+ row["outiface"] = "eth0";
+ row["src_ip"] = "123.123.123.123";
+ row["dst_ip"] = "45.45.45.45";
+ row["src_mask"] = "250.251.252.253";
+ row["dst_mask"] = "253.252.251.250";
+ row["iniface_mask"] = "FEFEFEFEFEFEFEFEFEFEFEFEFEFEFEFE";
+ row["outiface_mask"] = "FAFAFAFAFAFAFAFAFAFAFAFAFAFAFAFA";
+
+ return row;
+}
+
+class IptablesTests : public testing::Test {};
+
+TEST_F(IptablesTests, test_iptables_ip_entry) {
+ Row row;
+ parseIpEntry(getIpEntryContent(), row);
+ EXPECT_EQ(row, getIpEntryExpectedResults());
+}
+}
+}
#include <gtest/gtest.h>
#include <osquery/logger.h>
-#include <osquery/database.h>
#include "osquery/core/test_util.h"
#include <gtest/gtest.h>
#include <osquery/logger.h>
-#include <osquery/database.h>
#include "osquery/core/test_util.h"
namespace tables {
std::string ipAsString(const struct sockaddr *in) {
- char dst[INET6_ADDRSTRLEN];
- memset(dst, 0, sizeof(dst));
- void *in_addr;
+ char dst[INET6_ADDRSTRLEN] = {0};
+ // memset(dst, 0, sizeof(dst));
+ void *in_addr = nullptr;
if (in->sa_family == AF_INET) {
in_addr = (void *)&(((struct sockaddr_in *)in)->sin_addr);
inet_ntop(in->sa_family, in_addr, dst, sizeof(dst));
std::string address(dst);
boost::trim(address);
+ return address;
+}
+
+std::string ipAsString(const struct in_addr *in) {
+ char dst[INET6_ADDRSTRLEN] = {0};
+ inet_ntop(AF_INET, in, dst, sizeof(dst));
+ std::string address(dst);
+ boost::trim(address);
return address;
}
}
}
#else
- struct sockaddr_dl *sdl;
+ struct sockaddr_dl *sdl = nullptr;
sdl = (struct sockaddr_dl *)addr->ifa_addr;
if (sdl->sdl_alen != 6) {
// Return a string representation for an IPv4/IPv6 struct.
std::string ipAsString(const struct sockaddr *in);
+std::string ipAsString(const struct in_addr *in);
std::string macAsString(const struct ifaddrs *addr);
std::string macAsString(const char *addr);
int netmaskFromIP(const struct sockaddr *in);
const std::string kSystemCron = "/etc/crontab";
-#ifdef __APPLE__
-const std::string kUserCronsPath = "/var/at/tabs/";
-#else
-const std::string kUserCronsPath = "/var/spool/cron/crontabs/";
-#endif
+const std::vector<std::string> kUserCronPaths = {
+ "/var/at/tabs/", "/var/spool/cron/", "/var/spool/cron/crontabs/",
+};
std::vector<std::string> cronFromFile(const std::string& path) {
std::string content;
}
std::vector<std::string> user_crons;
- auto status = listFilesInDirectory(kUserCronsPath, user_crons);
- if (!status.ok()) {
- LOG(INFO) << "Could not list user crons from: " << kUserCronsPath << " ("
- << status.toString() << ")";
- return results;
+  for (const auto& cron_path : kUserCronPaths) {
+ osquery::listFilesInDirectory(cron_path, user_crons);
}
// The user-based crons are identified by their path.
--- /dev/null
+/*
+ * Copyright (c) 2014, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+#include <dirent.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <osquery/core.h>
+#include <osquery/filesystem.h>
+#include <osquery/logger.h>
+#include <osquery/tables.h>
+
+#define MSR_FILENAME_BUFFER_SIZE 32
+
+#define NO_MASK 0xFFFFFFFFFFFFFFFFULL
+
+// Defines taken from uapi/asm/msr-index.h from the linux kernel.
+#define MSR_PLATFORM_INFO 0x000000ce
+
+#define MSR_IA32_FEATURE_CONTROL 0x0000003a
+
+#define MSR_IA32_PERF_STATUS 0x00000198
+#define MSR_IA32_PERF_CTL 0x00000199
+#define INTEL_PERF_CTL_MASK 0xffff
+
+#define MSR_IA32_MISC_ENABLE 0x000001a0
+
+#define MSR_TURBO_RATIO_LIMIT 0x000001ad
+
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT 38
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE \
+ (1ULL << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT)
+
+// Run Time Average Power Limiting (RAPL).
+#define MSR_RAPL_POWER_UNIT 0x00000606
+#define MSR_PKG_ENERGY_STATUS 0x00000611
+#define MSR_PKG_POWER_LIMIT 0x00000610
+
+namespace osquery {
+namespace tables {
+
+// These are the entries to retrieve from the model specific register
+struct msr_record_t {
+ const char *name;
+ const off_t offset;
+ const uint64_t mask;
+ const int is_flag;
+};
+const static msr_record_t fields[] = {
+ {.name = "turbo_disabled",
+ .offset = MSR_IA32_MISC_ENABLE,
+ .mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE,
+ .is_flag = true},
+ {.name = "turbo_ratio_limit",
+ .offset = MSR_TURBO_RATIO_LIMIT,
+ .mask = NO_MASK,
+ .is_flag = false},
+ {.name = "platform_info",
+ .offset = MSR_PLATFORM_INFO,
+ .mask = NO_MASK,
+ .is_flag = false},
+ {.name = "perf_status",
+ .offset = MSR_IA32_PERF_STATUS,
+ .mask = NO_MASK,
+ .is_flag = false},
+ {.name = "perf_ctl",
+ .offset = MSR_IA32_PERF_CTL,
+ .mask = INTEL_PERF_CTL_MASK,
+ .is_flag = false},
+ {.name = "feature_control",
+ .offset = MSR_IA32_FEATURE_CONTROL,
+ .mask = NO_MASK,
+ .is_flag = false},
+ {.name = "rapl_power_limit",
+ .offset = MSR_PKG_POWER_LIMIT,
+ .mask = NO_MASK,
+ .is_flag = false},
+ {.name = "rapl_energy_status",
+ .offset = MSR_PKG_ENERGY_STATUS,
+ .mask = NO_MASK,
+ .is_flag = false},
+ {.name = "rapl_power_units",
+ .offset = MSR_RAPL_POWER_UNIT,
+ .mask = NO_MASK,
+ .is_flag = false}};
+
+void getModelSpecificRegisterData(QueryData &results, int cpu_number) {
+ auto msr_filename =
+ std::string("/dev/cpu/") + std::to_string(cpu_number) + "/msr";
+
+ int fd = open(msr_filename.c_str(), O_RDONLY);
+ if (fd < 0) {
+ int err = errno;
+ TLOG << "Could not open msr file " << msr_filename
+ << " check the msr kernel module is enabled.";
+ if (err == EACCES) {
+ LOG(WARNING) << "Could not access msr device. Run osquery as root.";
+ }
+ return;
+ }
+
+ Row r;
+ r["processor_number"] = BIGINT(cpu_number);
+ for (const msr_record_t &field : fields) {
+ uint64_t output;
+ ssize_t size = pread(fd, &output, sizeof(uint64_t), field.offset);
+ if (size != sizeof(uint64_t)) {
+ // Processor does not have a record of this type.
+ continue;
+ }
+ if (field.is_flag) {
+ r[field.name] = BIGINT((output & field.mask) ? 1 : 0);
+ } else {
+ r[field.name] = BIGINT(output & field.mask);
+ }
+ }
+ results.push_back(r);
+ close(fd);
+
+ return;
+}
+
+// Filter only for filenames starting with a digit.
+int msrScandirFilter(const struct dirent *entry) {
+ if (isdigit(entry->d_name[0])) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+QueryData genModelSpecificRegister(QueryContext &context) {
+ QueryData results;
+
+ struct dirent **entries = nullptr;
+ int num_entries = scandir("/dev/cpu", &entries, msrScandirFilter, 0);
+ if (num_entries < 1) {
+    LOG(WARNING) << "No msr information; check that the msr kernel module is enabled.";
+ return results;
+ }
+ while (num_entries--) {
+ getModelSpecificRegisterData(results, atoi(entries[num_entries]->d_name));
+ free(entries[num_entries]);
+ }
+ free(entries);
+
+ return results;
+}
+}
+}
QueryData results;
std::set<std::string> pids;
- if (context.constraints["pid"].exists()) {
+ if (context.constraints["pid"].exists(EQUALS)) {
pids = context.constraints["pid"].getAll(EQUALS);
} else {
osquery::procProcesses(pids);
QueryData results;
std::set<std::string> pids;
- if (context.constraints["pid"].exists()) {
+ if (context.constraints["pid"].exists(EQUALS)) {
pids = context.constraints["pid"].getAll(EQUALS);
} else {
osquery::procProcesses(pids);
QueryData results;
std::set<std::string> pids;
- if (context.constraints["pid"].exists()) {
+ if (context.constraints["pid"].exists(EQUALS)) {
pids = context.constraints["pid"].getAll(EQUALS);
} else {
osquery::procProcesses(pids);
QueryData results;
std::set<std::string> pids;
- if (context.constraints["pid"].exists()) {
+ if (context.constraints["pid"].exists(EQUALS)) {
pids = context.constraints["pid"].getAll(EQUALS);
} else {
osquery::procProcesses(pids);
--- /dev/null
+/*
+ * Copyright (c) 2014, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+#include "osquery/tables/system/user_groups.h"
+
+namespace osquery {
+namespace tables {
+
+extern std::mutex pwdEnumerationMutex;
+
+QueryData genUserGroups(QueryContext &context) {
+ QueryData results;
+ struct passwd *pwd = nullptr;
+
+ if (context.constraints["uid"].exists(EQUALS)) {
+ std::set<std::string> uids = context.constraints["uid"].getAll(EQUALS);
+ for (const auto &uid : uids) {
+ pwd = getpwuid(std::strtol(uid.c_str(), NULL, 10));
+ if (pwd != nullptr) {
+ user_t<uid_t, gid_t> user;
+ user.name = pwd->pw_name;
+ user.uid = pwd->pw_uid;
+ user.gid = pwd->pw_gid;
+ getGroupsForUser<uid_t, gid_t>(results, user);
+ }
+ }
+ } else {
+ std::lock_guard<std::mutex> lock(pwdEnumerationMutex);
+ std::set<gid_t> users_in;
+ while ((pwd = getpwent()) != nullptr) {
+ if (std::find(users_in.begin(), users_in.end(), pwd->pw_uid) ==
+ users_in.end()) {
+ user_t<uid_t, gid_t> user;
+ user.name = pwd->pw_name;
+ user.uid = pwd->pw_uid;
+ user.gid = pwd->pw_gid;
+ getGroupsForUser<uid_t, gid_t>(results, user);
+ users_in.insert(pwd->pw_uid);
+ }
+ }
+ endpwent();
+ users_in.clear();
+ }
+
+ return results;
+}
+}
+}
QueryData genShellHistory(QueryContext& context) {
QueryData results;
+ // Select only the home directory for this user.
QueryData users;
- if (!getuid()) {
- // No uid is available, attempt to select from all users.
- users = SQL::selectAllFrom("users");
+ if (!context.constraints["username"].exists(EQUALS)) {
+ users =
+ SQL::selectAllFrom("users", "uid", EQUALS, std::to_string(getuid()));
} else {
- // A uid is available, select only the home directory for this user.
- struct passwd* pwd = getpwuid(getuid());
- if (pwd != nullptr && pwd->pw_name != nullptr) {
- users = SQL::selectAllFrom(
- "users", "username", EQUALS, std::string(pwd->pw_name));
+ auto usernames = context.constraints["username"].getAll(EQUALS);
+ for (const auto& username : usernames) {
+ // Use a predicated select all for each user.
+ auto user = SQL::selectAllFrom("users", "username", EQUALS, username);
+ users.insert(users.end(), user.begin(), user.end());
}
}
}
// Iterate through the sysctl-defined macro of control types.
- if (context.constraints["name"].exists()) {
+ if (context.constraints["name"].exists(EQUALS)) {
// Request MIB information by the description (name).
auto names = context.constraints["name"].getAll(EQUALS);
for (const auto& name : names) {
genControlInfoFromName(name, results, config);
}
- } else if (context.constraints["oid"].exists()) {
+ } else if (context.constraints["oid"].exists(EQUALS)) {
// Request MIB by OID as a string, parse into set of INTs.
auto oids = context.constraints["oid"].getAll(EQUALS);
for (const auto& oid_string : oids) {
genControlInfoFromOIDString(oid_string, results, config);
}
- } else if (context.constraints["subsystem"].exists()) {
+ } else if (context.constraints["subsystem"].exists(EQUALS)) {
// Limit the MIB search to a subsystem name (first find the INT).
auto subsystems = context.constraints["subsystem"].getAll(EQUALS);
for (const auto& subsystem : subsystems) {
--- /dev/null
+/*
+ * Copyright (c) 2014, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+#include <vector>
+#include <string>
+
+#include <grp.h>
+#include <pwd.h>
+
+#include <osquery/core.h>
+#include <osquery/filesystem.h>
+#include <osquery/logger.h>
+#include <osquery/tables.h>
+
+// This is also the max supported number for OS X right now.
+#define EXPECTED_GROUPS_MAX 64
+
+namespace osquery {
+namespace tables {
+
+template <typename T>
+static inline void addGroupsToResults(QueryData &results,
+ int uid,
+ const T *groups,
+ int ngroups) {
+ for (int i = 0; i < ngroups; i++) {
+ Row r;
+ r["uid"] = BIGINT(uid);
+ r["gid"] = BIGINT(groups[i]);
+ results.push_back(r);
+ }
+
+ return;
+}
+
+template <typename uid_type, typename gid_type>
+struct user_t {
+ const char *name;
+ uid_type uid;
+ gid_type gid;
+};
+
+template <typename uid_type, typename gid_type>
+static void getGroupsForUser(QueryData &results,
+ const user_t<uid_type, gid_type> &user) {
+ gid_type groups_buf[EXPECTED_GROUPS_MAX];
+ gid_type *groups = groups_buf;
+ int ngroups = EXPECTED_GROUPS_MAX;
+
+ // GLIBC version before 2.3.3 may have a buffer overrun:
+ // http://man7.org/linux/man-pages/man3/getgrouplist.3.html
+ if (getgrouplist(user.name, user.gid, groups, &ngroups) < 0) {
+ // EXPECTED_GROUPS_MAX was probably not large enough.
+ // Try a larger size buffer.
+ // Darwin appears to not resize ngroups correctly. We can hope
+ // we had enough space to start with.
+ groups = new gid_type[ngroups];
+ if (groups == nullptr) {
+ TLOG << "Could not allocate memory to get user groups";
+ return;
+ }
+
+ if (getgrouplist(user.name, user.gid, groups, &ngroups) < 0) {
+ TLOG << "Could not get users group list";
+ } else {
+ addGroupsToResults(results, user.uid, groups, ngroups);
+ }
+
+ delete[] groups;
+ } else {
+ addGroupsToResults(results, user.uid, groups, ngroups);
+ }
+ return;
+}
+}
+}
#include <osquery/registry.h>
#include <osquery/sql.h>
#include <osquery/tables.h>
+#include <osquery/filesystem.h>
namespace osquery {
namespace tables {
+typedef pt::ptree::value_type tree_node;
+
+void genQueryPack(const tree_node& pack, QueryData& results) {
+ Row r;
+ // Packs are stored by name and contain configuration data.
+ r["name"] = pack.first;
+ r["path"] = pack.second.get("path", "");
+
+ // There are optional restrictions on the set of queries applied pack-wide.
+ auto pack_wide_version = pack.second.get("version", "");
+ auto pack_wide_platform = pack.second.get("platform", "");
+
+ // Iterate through each query in the pack.
+ for (auto const& query : pack.second.get_child("queries")) {
+ r["query_name"] = query.first;
+ r["query"] = query.second.get("query", "");
+ r["interval"] = INTEGER(query.second.get("interval", 0));
+ r["description"] = query.second.get("description", "");
+ r["value"] = query.second.get("value", "");
+
+ // Set the version requirement based on the query-specific or pack-wide.
+ if (query.second.count("version") > 0) {
+ r["version"] = query.second.get("version", "");
+ } else {
+      r["version"] = pack_wide_version;
+ }
+
+ // Set the platform requirement based on the query-specific or pack-wide.
+ if (query.second.count("platform") > 0) {
+ r["platform"] = query.second.get("platform", "");
+ } else {
+ r["platform"] = pack_wide_platform;
+ }
+
+ // Adding a prefix to the pack queries to differentiate packs from schedule.
+ r["scheduled_name"] = "pack_" + r.at("name") + "_" + r.at("query_name");
+ if (Config::checkScheduledQueryName(r.at("scheduled_name"))) {
+ r["scheduled"] = INTEGER(1);
+ } else {
+ r["scheduled"] = INTEGER(0);
+ }
+
+ results.push_back(r);
+ }
+}
+
+QueryData genOsqueryPacks(QueryContext& context) {
+ QueryData results;
+
+ // Get a lock on the config instance.
+ ConfigDataInstance config;
+
+ // Get the loaded data tree from global JSON configuration.
+ const auto& packs_parsed_data = config.getParsedData("packs");
+
+ // Iterate through all the packs to get each configuration and set of queries.
+ for (auto const& pack : packs_parsed_data) {
+ // Make sure the pack data contains queries.
+ if (pack.second.count("queries") == 0) {
+ continue;
+ }
+ genQueryPack(pack, results);
+ }
+
+ return results;
+}
+
void genFlag(const std::string& name,
const FlagInfo& flag,
QueryData& results) {
Row r;
r["pid"] = INTEGER(getpid());
- r["version"] = TEXT(OSQUERY_VERSION);
+ r["version"] = kVersion;
std::string hash_string;
auto s = Config::getMD5(hash_string);
return results;
}
+
}
}
Name: osquery
-Version: 1.4.5
+Version: 1.4.7
Release: 0
License: Apache-2.0 and GPLv2
Summary: A SQL powered operating system instrumentation, monitoring framework.
BuildRequires: pkgconfig(libprocps)
BuildRequires: pkgconfig(libsystemd)
BuildRequires: pkgconfig(openssl)
+BuildRequires: iptables-devel
BuildRequires: python-jinja2
Requires: glog
Requires: gflag
Requires: libreadline
Requires: procps-ng
Requires: libsystemd
+Requires: iptables
%description
Osquery exposes an operating system as a high-performance relational database.
Column("comment", TEXT, "Optional comment for a service."),
])
implementation("etc_services@genEtcServices")
-
Column("groupname", TEXT, "Canonical local group name"),
])
implementation("groups@genGroups")
+examples([
+ "select * from groups where gid = 0",
+ # Group/user_groups is not JOIN optimized
+ #"select g.groupname, ug.uid from groups g, user_groups ug where g.gid = ug.gid",
+])
--- /dev/null
+table_name("iptables")
+description("Linux IP packet filtering and NAT tool.")
+schema([
+ Column("filter_name", TEXT, "Packet matching filter table name."),
+  Column("chain", TEXT, "Name of the chain the rule belongs to."),
+ Column("policy", TEXT, "Policy that applies for this rule."),
+ Column("target", TEXT, "Target that applies for this rule."),
+ Column("protocol", INTEGER, "Protocol number identification."),
+ Column("src_ip", TEXT, "Source IP address."),
+ Column("src_mask", TEXT, "Source IP address mask."),
+ Column("iniface", TEXT, "Input interface for the rule."),
+ Column("iniface_mask", TEXT, "Input interface mask for the rule."),
+ Column("dst_ip", TEXT, "Destination IP address."),
+ Column("dst_mask", TEXT, "Destination IP address mask."),
+ Column("outiface", TEXT, "Output interface for the rule."),
+ Column("outiface_mask", TEXT, "Output interface mask for the rule."),
+ Column("match", TEXT, "Matching rule that applies."),
+ Column("packets", INTEGER, "Number of matching packets for this rule."),
+ Column("bytes", INTEGER, "Number of matching bytes for this rule."),
+])
+implementation("iptables@genIptables")
--- /dev/null
+table_name("msr")
+description("Various pieces of data stored in the model specific register per "
+ "processor. NOTE: the msr kernel module must be enabled, and "
+ "osquery must be run as root.")
+schema([
+ Column("processor_number", BIGINT,
+ "The processor number as reported in /proc/cpuinfo"),
+ Column("turbo_disabled", BIGINT, "Whether the turbo feature is disabled."),
+ Column("turbo_ratio_limit", BIGINT, "The turbo feature ratio limit."),
+ Column("platform_info", BIGINT, "Platform information."),
+ Column("perf_ctl", BIGINT, "Performance setting for the processor."),
+ Column("perf_status", BIGINT, "Performance status for the processor."),
+  Column("feature_control", BIGINT, "Bitfield controlling enabled features."),
+ Column("rapl_power_limit", BIGINT,
+ "Run Time Average Power Limiting power limit."),
+ Column("rapl_energy_status", BIGINT,
+ "Run Time Average Power Limiting energy status."),
+ Column("rapl_power_units", BIGINT,
+ "Run Time Average Power Limiting power units.")
+])
+implementation("model_specific_register@genModelSpecificRegister")
table_name("process_envs")
description("A key/value table of environment variables for each process.")
schema([
- Column("pid", INTEGER, "Process (or thread) ID"),
+ Column("pid", INTEGER, "Process (or thread) ID", index=True),
Column("key", TEXT, "Environment variable name"),
Column("value", TEXT, "Environment variable value"),
- ForeignKey(column="pid", table="processes"),
- ForeignKey(column="pid", table="process_open_files"),
])
implementation("system/processes@genProcessEnvs")
+examples([
+ "select * from process_envs where pid = 1",
+ '''select pe.*
+ from process_envs pe, (select * from processes limit 10) p
+ where p.pid = pe.pid;'''
+])
table_name("process_memory_map")
description("Process memory mapped files and pseudo device/regions.")
schema([
- Column("pid", INTEGER, "Process (or thread) ID"),
+ Column("pid", INTEGER, "Process (or thread) ID", index=True),
Column("start", TEXT, "Virtual start address (hex)"),
Column("end", TEXT, "Virtual end address (hex)"),
Column("permissions", TEXT, "r=read, w=write, x=execute, p=private (cow)"),
Column("pseudo", INTEGER, "1 if path is a pseudo path, else 0"),
])
implementation("processes@genProcessMemoryMap")
+examples([
+ "select * from process_memory_map where pid = 1",
+])
table_name("process_open_files")
description("File descriptors for each process.")
schema([
- Column("pid", BIGINT, "Process (or thread) ID"),
+ Column("pid", BIGINT, "Process (or thread) ID", index=True),
Column("fd", BIGINT, "Process-specific file descriptor number"),
Column("path", TEXT, "Filesystem path of descriptor"),
])
implementation("system/process_open_files@genOpenFiles")
+examples([
+ "select * from process_open_files where pid = 1",
+])
table_name("process_open_sockets")
description("Processes which have open network sockets on the system.")
schema([
- Column("pid", INTEGER, "Process (or thread) ID"),
+ Column("pid", INTEGER, "Process (or thread) ID", index=True),
Column("socket", INTEGER, "Socket descriptor number"),
Column("family", INTEGER, "Network protocol (IPv4, IPv6)"),
Column("protocol", INTEGER, "Transport protocol (TCP/UDP)"),
Column("remote_address", TEXT, "Socket remote address"),
Column("local_port", INTEGER, "Socket local port"),
Column("remote_port", INTEGER, "Socket remote port"),
+ Column("path", TEXT, "For UNIX sockets (family=AF_UNIX), the domain path"),
])
implementation("system/process_open_sockets@genOpenSockets")
-
+examples([
+ "select * from process_open_sockets where pid = 1",
+])
table_name("processes")
description("All running processes on the host system.")
schema([
- Column("pid", INTEGER, "Process (or thread) ID"),
+ Column("pid", INTEGER, "Process (or thread) ID", index=True),
Column("name", TEXT, "The process path or shorthand argv[0]"),
Column("path", TEXT, "Path to executed binary"),
Column("cmdline", TEXT, "Complete argv"),
Column("parent", INTEGER, "Process parent's PID"),
])
implementation("system/processes@genProcesses")
+examples([
+ "select * from processes where pid = 1",
+])
table_name("shell_history")
description("A line-delimited (command) table of per-user .*_history data.")
schema([
- Column("username", TEXT),
- Column("command", TEXT),
+ Column("username", TEXT, "Shell history owner",
+ additional=True),
+ Column("command", TEXT, "Unparsed date/line/command history line"),
Column("history_file", TEXT, "Path to the .*_history for this user"),
ForeignKey(column="username", table="users"),
])
--- /dev/null
+table_name("user_groups")
+description("Local system user group relationships.")
+schema([
+ Column("uid", BIGINT, "User ID"),
+ Column("gid", BIGINT, "Group ID")
+])
+implementation("user_groups@genUserGroups")
Column("uid", BIGINT, "User ID"),
Column("gid", BIGINT, "Group ID (unsigned)"),
Column("uid_signed", BIGINT, "User ID as int64 signed (Apple)"),
- Column("gid_signed", BIGINT, "Group ID as int64 signed (Apple)"),
+ Column("gid_signed", BIGINT, "Default group ID as int64 signed (Apple)"),
Column("username", TEXT),
Column("description", TEXT, "Optional user description"),
Column("directory", TEXT, "User's home directory"),
Column("shell", TEXT, "User's configured default shell"),
])
implementation("users@genUsers")
+examples([
+ "select * from users where uid = 1000",
+ "select * from users where username = 'root'",
+ "select count(*) from users u, user_groups ug where u.uid = ug.uid",
+])
])
attributes(utility=True)
implementation("utility/file@genFile")
+examples([
+ "select * from file where path = '/etc/passwd'",
+ "select * from file where directory = '/etc/'",
+ "select * from file where pattern = '/etc/%'",
+])
\ No newline at end of file
])
attributes(utility=True)
implementation("utility/hash@genHash")
+examples([
+ "select * from hash where path = '/etc/passwd'",
+ "select * from hash where directory = '/etc/'",
+])
--- /dev/null
+table_name("osquery_packs")
+description("Information about the current query packs that are loaded in osquery.")
+schema([
+ Column("name", TEXT, "The given name for this query pack"),
+ Column("path", TEXT, "Path where the pack configuration is found"),
+ Column("query_name", TEXT, "The given name for this query"),
+ Column("query", TEXT, "The exact query to run"),
+ Column("interval", INTEGER, "The interval in seconds to run this query, not an exact interval"),
+ Column("platform", TEXT, "Platforms this query is supported on"),
+ Column("version", TEXT, "Minimum osquery version that this query will run on"),
+ Column("description", TEXT, "Description of the data retrieved by this query"),
+ Column("value", TEXT, "Value of the data retrieved by this query"),
+ Column("scheduled", INTEGER, "Status if query is scheduled to run. If query is scheduled 1, else 0"),
+ Column("scheduled_name", TEXT, "Name of the query in the scheduled table")
+])
+attributes(utility=True)
+implementation("osquery@genOsqueryPacks")
from __future__ import print_function
from __future__ import unicode_literals
+import argparse
import ast
import jinja2
import logging
import os
import sys
-# set DEVELOPING to True for debug statements
-DEVELOPING = False
+SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(SCRIPT_DIR + "/../tests")
# the log format for the logging module
LOG_FORMAT = "%(levelname)s [Line %(lineno)d]: %(message)s"
INTEGER = DataType("INTEGER", "int")
BIGINT = DataType("BIGINT", "long long int")
UNSIGNED_BIGINT = DataType("UNSIGNED_BIGINT", "long long unsigned int")
+DOUBLE = DataType("DOUBLE", "double")
# Define table-category MACROS from the table specs
UNKNOWN = "UNKNOWN"
def is_blacklisted(table_name, path=None, blacklist=None):
"""Allow blacklisting by tablename."""
if blacklist is None:
- specs_path = os.path.dirname(os.path.dirname(path))
+ specs_path = os.path.dirname(path)
+ if os.path.basename(specs_path) != "specs":
+        specs_path = os.path.dirname(specs_path)
blacklist_path = os.path.join(specs_path, "blacklist")
if not os.path.exists(blacklist_path):
return False
return table_name in blacklist if blacklist else False
-def setup_templates(path):
- tables_path = os.path.dirname(os.path.dirname(path))
- templates_path = os.path.join(tables_path, "templates")
+def setup_templates(templates_path):
if not os.path.exists(templates_path):
templates_path = os.path.join(os.path.dirname(tables_path), "templates")
if not os.path.exists(templates_path):
self.class_name = ""
self.description = ""
self.attributes = {}
+ self.examples = []
def columns(self):
return [i for i in self.schema if isinstance(i, Column)]
function=self.function,
class_name=self.class_name,
attributes=self.attributes,
+ examples=self.examples,
)
if self.table_name == "" or self.function == "":
self.name = name
self.type = col_type
self.description = description
+ self.options = kwargs
class ForeignKey(object):
table.table_name = name
table.description = ""
table.attributes = {}
+ table.examples = []
def schema(schema_list):
def description(text):
table.description = text
+def select_all(name=None):
+ if name == None:
+ name = table.table_name
+ return "select count(*) from %s;" % (name)
-def attributes(**kwargs):
+
+def examples(example_queries):
+ table.examples = example_queries
+
+
+def attributes(**kwargs):
for attr in kwargs:
table.attributes[attr] = kwargs[attr]
def main(argc, argv):
- if DEVELOPING:
+ parser = argparse.ArgumentParser("Generate C++ Table Plugin from specfile.")
+ parser.add_argument(
+ "--debug", default=False, action="store_true",
+ help="Output debug messages (when developing)"
+ )
+ parser.add_argument("--templates", default=SCRIPT_DIR + "/templates",
+ help="Path to codegen output .cpp.in templates")
+ parser.add_argument("spec_file", help="Path to input .table spec file")
+ parser.add_argument("output", help="Path to output .cpp file")
+ args = parser.parse_args()
+
+ if args.debug:
logging.basicConfig(format=LOG_FORMAT, level=logging.DEBUG)
else:
logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
usage()
sys.exit(1)
- filename = argv[1]
- output = argv[2]
+ filename = args.spec_file
+ output = args.output
if filename.endswith(".table"):
# Adding a 3rd parameter will enable the blacklist
disable_blacklist = argc > 3
- setup_templates(filename)
+ setup_templates(args.templates)
with open(filename, "rU") as file_handle:
tree = ast.parse(file_handle.read())
exec(compile(tree, "<string>", "exec"))
table.generate(output)
if __name__ == "__main__":
+ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
main(len(sys.argv), sys.argv)
#include <osquery/events.h>
#include <osquery/tables.h>
-namespace osquery { namespace tables {
+
+namespace osquery {
{% for table in tables %}
{{table}}
{% endfor %}
-}}
+}
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
+ * LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
#include <osquery/events.h>
#include <osquery/tables.h>
-namespace osquery { namespace tables {
+namespace osquery {
/// BEGIN[GENTABLE]
+namespace tables {
{% if class_name == "" %}\
osquery::QueryData {{function}}(QueryContext& request);
{% else %}
osquery::QueryData {{function}}(QueryContext& request);
};
{% endif %}\
+}
class {{table_name_cc}}TablePlugin : public TablePlugin {
private:
return {};
}
{% else %}\
- return osquery::tables::{{function}}(request);
+ return tables::{{function}}(request);
{% endif %}\
}
};
{% endif %}
/// END[GENTABLE]
-}}
+}
--- /dev/null
+{
+ // Deprecated query schedule
+ "scheduledQueries": [
+ {
+ "name": "time",
+ "query": "select * from time;",
+ "interval": 1
+ }
+ ],
+
+ // New, recommended query schedule
+ "schedule": {
+ "time2": {"query": "select * from time;", "interval": 1}
+ },
+
+ // Deprecated collection for file monitoring
+ "additional_monitoring" : {
+ "file_paths": {
+ "downloads": [
+ "/tmp/osquery-fstests-pattern/%%"
+ ]
+ }
+ },
+
+ // New, recommended file monitoring (top-level)
+ "file_paths": {
+ "downloads2": [
+ "/tmp/osquery-fstests-pattern/%%"
+ ],
+ "system_binaries": [
+ "/tmp/osquery-fstests-pattern/%",
+ "/tmp/osquery-fstests-pattern/deep11/%"
+ ]
+ }
+}
+
+// The horror!!!
+,
"additional_monitoring" : {
"file_paths": {
"downloads": [
- "/tmp/osquery-fstests-pattern/%%"
+ "/tmp/osquery-tests/fstests-pattern/%%"
]
}
},
// New, recommended file monitoring (top-level)
"file_paths": {
"downloads2": [
- "/tmp/osquery-fstests-pattern/%%"
+ "/tmp/osquery-tests/fstests-pattern/%%"
],
"system_binaries": [
- "/tmp/osquery-fstests-pattern/%",
- "/tmp/osquery-fstests-pattern/deep11/%"
+ "/tmp/osquery-tests/fstests-pattern/%",
+ "/tmp/osquery-tests/fstests-pattern/deep11/%"
]
+ },
+
+ // Add files containing packs of queries.
+ // The queries may have platform and version requirements.
+ "packs": {
+ "test_pack": "/tmp/osquery-tests/test_pack.conf"
}
}
--- /dev/null
+#!/usr/bin/env python
+
+# Copyright (c) 2014, Facebook, Inc.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree. An additional grant
+# of patent rights can be found in the PATENTS file in the same directory.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+import shutil
+import time
+import unittest
+
+# osquery-specific testing utils
+import test_base
+import utils
+
+class AdditionalFeatureTests(test_base.ProcessGenerator, unittest.TestCase):
+ def test_query_packs(self):
+ query_pack_path = test_base.CONFIG_DIR + "/test_pack.conf"
+ utils.write_config({
+ "queries": {
+ "simple_test": {
+ "query": "select * from time",
+ "interval": 60,
+ },
+ "simple_test2": {
+ "query": "select * from time",
+ "interval": 60,
+ "platform": "does_not_exist",
+ }
+ }
+ }, path=query_pack_path)
+
+ # Get a daemon process, loaded with the default test configuration.
+ # We'll add a config override (overwrite) for the "packs" key.
+        # This will point a single pack at the config written above.
+ daemon = self._run_daemon(overwrite={
+ "packs": {
+ "test_pack": query_pack_path
+ }
+ })
+ self.assertTrue(daemon.isAlive())
+
+ # Introspect into the daemon's query packs.
+ client = test_base.EXClient(daemon.options["extensions_socket"])
+ test_base.expectTrue(client.open)
+ self.assertTrue(client.open())
+ em = client.getEM()
+
+ # Every query from the pack(s) is added to the packs table.
+ result = em.query("select * from osquery_packs")
+ self.assertEqual(len(result.response), 2)
+
+ # Only the applicable queries are added to the schedule.
+ # There will be len(pack_queries) - 1 since "simple_test2" is bound
+ # to an unknown/non-existing platform.
+ result = em.query("select * from osquery_schedule")
+ self.assertEqual(len(result.response), 1)
+ daemon.kill()
+
+if __name__ == '__main__':
+ module = test_base.Tester()
+
+ # Find and import the thrift-generated python interface
+ test_base.loadThriftFromBuild(test_base.ARGS.build)
+
+ module.run()
--- /dev/null
+this_is_a_deployment_secret
--- /dev/null
+#!/usr/bin/env python
+
+# Copyright (c) 2014, Facebook, Inc.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree. An additional grant
+# of patent rights can be found in the PATENTS file in the same directory.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+import shutil
+import time
+import unittest
+import sys
+
+# osquery-specific testing utils
+import test_base
+import utils
+
+class ExampleQueryTests(test_base.ProcessGenerator, unittest.TestCase):
+ def setUp(self):
+ self.daemon = self._run_daemon({
+ # The set of queries will hammer the daemon process.
+ "disable_watchdog": True,
+ })
+ self.assertTrue(self.daemon.isAlive())
+
+        # The sets of example tests will use the extensions API.
+ self.client = test_base.EXClient(self.daemon.options["extensions_socket"])
+ test_base.expectTrue(self.client.open)
+ self.assertTrue(self.client.open())
+ self.em = self.client.getEM()
+
+ def tearDown(self):
+ self.client.close()
+ self.daemon.kill()
+
+ def _execute(self, query):
+ try:
+ result = self.em.query(query)
+ self.assertEqual(result.status.code, 0)
+ return result.response
+ except Exception as e:
+ print("General exception executing query: %s" % (
+ utils.lightred(query)))
+ raise e
+
+ def _execute_set(self, queries):
+ for example in queries:
+ start_time = time.time()
+ result = self._execute(example)
+ end_time = time.time()
+ duration_ms = int((end_time - start_time) * 1000)
+ if duration_ms > 2000:
+ # Query took longer than 2 seconds.
+ duration_ms = utils.lightred(duration_ms)
+ print("Query (%sms): %s, rows: %d" % (
+ duration_ms, example, len(result)))
+
+
+ def test_cross_platform_queries(self):
+ self._execute_set(PLATFORM_EXAMPLES["specs"])
+
+ def test_platform_specific_queries(self):
+ self._execute_set(PLATFORM_EXAMPLES[utils.platform()])
+
+ def test_utility_queries(self):
+ self._execute_set(PLATFORM_EXAMPLES["utility"])
+
+if __name__ == '__main__':
+ # Import the API generation code for example query introspection.
+ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
+ SOURCE_DIR = os.path.abspath(SCRIPT_DIR + "/../../")
+ sys.path.append(SOURCE_DIR + "/tools/codegen")
+ from genapi import gen_api
+ API = gen_api(SOURCE_DIR + "/specs")
+
+ # Organize example queries by platform
+ PLATFORM_EXAMPLES = {}
+ for category in API:
+ PLATFORM_EXAMPLES[category["key"]] = []
+ for table in category["tables"]:
+ PLATFORM_EXAMPLES[category["key"]] += table["examples"]
+
+ module = test_base.Tester()
+
+ # Find and import the thrift-generated python interface
+ test_base.loadThriftFromBuild(test_base.ARGS.build)
+
+ module.run()
--- /dev/null
+{
+ "queries": {
+ "launchd": {
+ "query": "select * from launchd",
+ "interval" : "414141",
+ "platform" : "whatever",
+ "version" : "1.0.0",
+ "description" : "Very descriptive description",
+ "value" : "Value overflow"
+ },
+ "evil_things": {
+ "query": "select * from time",
+ "interval" : "666",
+ "platform" : "invalid",
+ "version" : "9.9.9",
+ "description" : "More descriptive description",
+ "value" : "It is dangerous to go alone, take this"
+ },
+ "simple": {
+ "query": "select * from osquery_info",
+ "interval": "10"
+ }
+ }
+}