tests/e2e/DimensionInCondition_e2e_combination_OR_cond_test.cpp \
tests/e2e/DimensionInCondition_e2e_simple_cond_test.cpp \
tests/e2e/Anomaly_count_e2e_test.cpp \
- tests/e2e/Anomaly_duration_sum_e2e_test.cpp
+ tests/e2e/Anomaly_duration_sum_e2e_test.cpp \
+ tests/e2e/ConfigTtl_e2e_test.cpp
LOCAL_STATIC_LIBRARIES := \
$(statsd_common_static_libraries) \
sp<AlarmMonitor> periodicAlarmMonitor;
sp<StatsLogProcessor> processor = new StatsLogProcessor(
uidMap, anomalyAlarmMonitor, periodicAlarmMonitor, timeBaseSec, [](const ConfigKey&){});
- processor->OnConfigUpdated(0, key, config);
+ processor->OnConfigUpdated(timeBaseSec * NS_PER_SEC, key, config);
return processor;
}
const int FIELD_ID_LAST_REPORT_WALL_CLOCK_NANOS = 5;
const int FIELD_ID_CURRENT_REPORT_WALL_CLOCK_NANOS = 6;
+#define NS_PER_HOUR (3600 * (int64_t)NS_PER_SEC)
#define STATS_DATA_DIR "/data/misc/stats-data"
}
void StatsLogProcessor::onAnomalyAlarmFired(
- const uint64_t& timestampNs,
+ const int64_t& timestampNs,
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet) {
std::lock_guard<std::mutex> lock(mMetricsMutex);
for (const auto& itr : mMetricsManagers) {
}
}
void StatsLogProcessor::onPeriodicAlarmFired(
- const uint64_t& timestampNs,
+ const int64_t& timestampNs,
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet) {
std::lock_guard<std::mutex> lock(mMetricsMutex);
void StatsLogProcessor::OnLogEvent(LogEvent* event) {
std::lock_guard<std::mutex> lock(mMetricsMutex);
- if (event->GetElapsedTimestampNs() < mLastLogTimestamp) {
+ const int64_t currentTimestampNs = event->GetElapsedTimestampNs();
+ if (currentTimestampNs < mLastLogTimestamp) {
return;
}
- mLastLogTimestamp = event->GetElapsedTimestampNs();
+
+ resetIfConfigTtlExpiredLocked(currentTimestampNs);
+
+ mLastLogTimestamp = currentTimestampNs;
StatsdStats::getInstance().noteAtomLogged(
event->GetTagId(), event->GetElapsedTimestampNs() / NS_PER_SEC);
return;
}
- uint64_t curTimeSec = getElapsedRealtimeSec();
+ int64_t curTimeSec = getElapsedRealtimeSec();
if (curTimeSec - mLastPullerCacheClearTimeSec > StatsdStats::kPullerCacheClearIntervalSec) {
mStatsPullerManager.ClearPullerCacheIfNecessary(curTimeSec * NS_PER_SEC);
mLastPullerCacheClearTimeSec = curTimeSec;
}
+
if (event->GetTagId() != android::util::ISOLATED_UID_CHANGED) {
// Map the isolated uid to host uid if necessary.
mapIsolatedUidToHostUidIfNecessaryLocked(event);
void StatsLogProcessor::OnConfigUpdated(const int64_t timestampNs, const ConfigKey& key,
const StatsdConfig& config) {
std::lock_guard<std::mutex> lock(mMetricsMutex);
+ OnConfigUpdatedLocked(timestampNs, key, config);
+}
+
+void StatsLogProcessor::OnConfigUpdatedLocked(
+ const int64_t timestampNs, const ConfigKey& key, const StatsdConfig& config) {
VLOG("Updated configuration for key %s", key.ToString().c_str());
sp<MetricsManager> newMetricsManager =
new MetricsManager(key, config, mTimeBaseSec, (timestampNs - 1) / NS_PER_SEC + 1, mUidMap,
// not safe to create wp or sp from this pointer inside its constructor.
mUidMap->addListener(newMetricsManager.get());
}
+ newMetricsManager->refreshTtl(timestampNs);
mMetricsManagers[key] = newMetricsManager;
VLOG("StatsdConfig valid");
} else {
/*
* onDumpReport dumps serialized ConfigMetricsReportList into outData.
*/
-void StatsLogProcessor::onDumpReport(const ConfigKey& key, const uint64_t dumpTimeStampNs,
+void StatsLogProcessor::onDumpReport(const ConfigKey& key, const int64_t dumpTimeStampNs,
vector<uint8_t>* outData) {
std::lock_guard<std::mutex> lock(mMetricsMutex);
* onConfigMetricsReportLocked dumps serialized ConfigMetricsReport into outData.
*/
void StatsLogProcessor::onConfigMetricsReportLocked(const ConfigKey& key,
- const uint64_t dumpTimeStampNs,
+ const int64_t dumpTimeStampNs,
ProtoOutputStream* proto) {
// We already checked whether key exists in mMetricsManagers in
// WriteDataToDisk.
proto->write(FIELD_TYPE_INT64 | FIELD_ID_CURRENT_REPORT_WALL_CLOCK_NANOS,
(long long)getWallClockNs());
+}
+void StatsLogProcessor::resetIfConfigTtlExpiredLocked(const int64_t timestampNs) {
+ std::vector<ConfigKey> configKeysTtlExpired;
+ for (auto it = mMetricsManagers.begin(); it != mMetricsManagers.end(); it++) {
+ if (it->second != nullptr && !it->second->isInTtl(timestampNs)) {
+ configKeysTtlExpired.push_back(it->first);
+ }
+ }
+
+ for (const auto& key : configKeysTtlExpired) {
+ StatsdConfig config;
+ if (StorageManager::readConfigFromDisk(key, &config)) {
+ OnConfigUpdatedLocked(timestampNs, key, config);
+ StatsdStats::getInstance().noteConfigReset(key);
+ } else {
+ ALOGE("Failed to read backup config from disk for : %s", key.ToString().c_str());
+ auto it = mMetricsManagers.find(key);
+ if (it != mMetricsManagers.end()) {
+ it->second->refreshTtl(timestampNs);
+ }
+ }
+ }
}
void StatsLogProcessor::OnConfigRemoved(const ConfigKey& key) {
}
void StatsLogProcessor::flushIfNecessaryLocked(
- uint64_t timestampNs, const ConfigKey& key, MetricsManager& metricsManager) {
+ int64_t timestampNs, const ConfigKey& key, MetricsManager& metricsManager) {
auto lastCheckTime = mLastByteSizeTimes.find(key);
if (lastCheckTime != mLastByteSizeTimes.end()) {
if (timestampNs - lastCheckTime->second < StatsdStats::kMinByteSizeCheckPeriodNs) {
size_t GetMetricsSize(const ConfigKey& key) const;
- void onDumpReport(const ConfigKey& key, const uint64_t dumpTimeNs, vector<uint8_t>* outData);
+ void onDumpReport(const ConfigKey& key, const int64_t dumpTimeNs, vector<uint8_t>* outData);
/* Tells MetricsManager that the alarms in alarmSet have fired. Modifies anomaly alarmSet. */
void onAnomalyAlarmFired(
- const uint64_t& timestampNs,
+ const int64_t& timestampNs,
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet);
/* Tells MetricsManager that the alarms in alarmSet have fired. Modifies periodic alarmSet. */
void onPeriodicAlarmFired(
- const uint64_t& timestampNs,
+ const int64_t& timestampNs,
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet);
/* Flushes data to disk. Data on memory will be gone after written to disk. */
void dumpStates(FILE* out, bool verbose);
+
private:
// For testing only.
inline sp<AlarmMonitor> getAnomalyAlarmMonitor() const {
sp<AlarmMonitor> mPeriodicAlarmMonitor;
- void onConfigMetricsReportLocked(const ConfigKey& key, const uint64_t dumpTimeStampNs,
+ void resetIfConfigTtlExpiredLocked(const int64_t timestampNs);
+
+ void OnConfigUpdatedLocked(
+ const int64_t currentTimestampNs, const ConfigKey& key, const StatsdConfig& config);
+
+ void onConfigMetricsReportLocked(const ConfigKey& key, const int64_t dumpTimeStampNs,
util::ProtoOutputStream* proto);
/* Check if we should send a broadcast if approaching memory limits and if we're over, we
* actually delete the data. */
- void flushIfNecessaryLocked(uint64_t timestampNs, const ConfigKey& key,
+ void flushIfNecessaryLocked(int64_t timestampNs, const ConfigKey& key,
MetricsManager& metricsManager);
// Maps the isolated uid in the log event to host uid if the log event contains uid fields.
FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_single_bucket);
FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_multiple_buckets);
FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_long_refractory_period);
+
FRIEND_TEST(AlarmE2eTest, TestMultipleAlarms);
+ FRIEND_TEST(ConfigTtlE2eTest, TestCountMetric);
};
} // namespace statsd
"Only system uid can call informAnomalyAlarmFired");
}
- uint64_t currentTimeSec = getElapsedRealtimeSec();
+ int64_t currentTimeSec = getElapsedRealtimeSec();
std::unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet =
mAnomalyAlarmMonitor->popSoonerThan(static_cast<uint32_t>(currentTimeSec));
if (alarmSet.size() > 0) {
"Only system uid can call informAlarmForSubscriberTriggeringFired");
}
- uint64_t currentTimeSec = getElapsedRealtimeSec();
+ int64_t currentTimeSec = getElapsedRealtimeSec();
std::unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet =
mPeriodicAlarmMonitor->popSoonerThan(static_cast<uint32_t>(currentTimeSec));
if (alarmSet.size() > 0) {
namespace os {
namespace statsd {
-AlarmTracker::AlarmTracker(const uint64_t startMillis,
- const uint64_t currentMillis,
+AlarmTracker::AlarmTracker(const int64_t startMillis,
+ const int64_t currentMillis,
const Alarm& alarm, const ConfigKey& configKey,
const sp<AlarmMonitor>& alarmMonitor)
: mAlarmConfig(alarm),
}
void AlarmTracker::informAlarmsFired(
- const uint64_t& timestampNs,
+ const int64_t& timestampNs,
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& firedAlarms) {
if (firedAlarms.empty() || mInternalAlarm == nullptr ||
firedAlarms.find(mInternalAlarm) == firedAlarms.end()) {
class AlarmTracker : public virtual RefBase {
public:
- AlarmTracker(const uint64_t startMillis,
- const uint64_t currentMillis,
+ AlarmTracker(const int64_t startMillis,
+ const int64_t currentMillis,
const Alarm& alarm, const ConfigKey& configKey,
const sp<AlarmMonitor>& subscriberAlarmMonitor);
void addSubscription(const Subscription& subscription);
- void informAlarmsFired(const uint64_t& timestampNs,
+ void informAlarmsFired(const int64_t& timestampNs,
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& firedAlarms);
protected:
// For test only. Returns the alarm timestamp in seconds. Otherwise returns 0.
- inline uint32_t getAlarmTimestampSec() const {
+ inline int32_t getAlarmTimestampSec() const {
return mInternalAlarm == nullptr ? 0 : mInternalAlarm->timestampSec;
}
getSumOverPastBuckets(key) + currentBucketValue > mAlert.trigger_if_sum_gt();
}
-void AnomalyTracker::declareAnomaly(const uint64_t& timestampNs, const MetricDimensionKey& key) {
+void AnomalyTracker::declareAnomaly(const int64_t& timestampNs, const MetricDimensionKey& key) {
// TODO: Why receive timestamp? RefractoryPeriod should always be based on real time right now.
if (isInRefractoryPeriod(timestampNs, key)) {
VLOG("Skipping anomaly declaration since within refractory period");
mConfigKey.GetId(), mAlert.id());
}
-void AnomalyTracker::detectAndDeclareAnomaly(const uint64_t& timestampNs,
+void AnomalyTracker::detectAndDeclareAnomaly(const int64_t& timestampNs,
const int64_t& currBucketNum,
const MetricDimensionKey& key,
const int64_t& currentBucketValue) {
}
}
-bool AnomalyTracker::isInRefractoryPeriod(const uint64_t& timestampNs,
+bool AnomalyTracker::isInRefractoryPeriod(const int64_t& timestampNs,
const MetricDimensionKey& key) const {
const auto& it = mRefractoryPeriodEndsSec.find(key);
if (it != mRefractoryPeriodEndsSec.end()) {
- return timestampNs < it->second * NS_PER_SEC;
+ return timestampNs < (it->second * (int64_t)NS_PER_SEC);
}
return false;
}
const int64_t& currentBucketValue);
// Informs incidentd about the detected alert.
- void declareAnomaly(const uint64_t& timestampNs, const MetricDimensionKey& key);
+ void declareAnomaly(const int64_t& timestampNs, const MetricDimensionKey& key);
// Detects if, based on past buckets plus the new currentBucketValue (which generally
// represents the partially-filled current bucket), an anomaly has happened, and if so,
// declares an anomaly and informs relevant subscribers.
// Also advances to currBucketNum-1.
- void detectAndDeclareAnomaly(const uint64_t& timestampNs, const int64_t& currBucketNum,
+ void detectAndDeclareAnomaly(const int64_t& timestampNs, const int64_t& currBucketNum,
const MetricDimensionKey& key, const int64_t& currentBucketValue);
// Init the AlarmMonitor which is shared across anomaly trackers.
// Declares an anomaly for each alarm in firedAlarms that belongs to this AnomalyTracker,
// and removes it from firedAlarms. Does NOT remove the alarm from the AlarmMonitor.
- virtual void informAlarmsFired(const uint64_t& timestampNs,
+ virtual void informAlarmsFired(const int64_t& timestampNs,
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& firedAlarms) {
return; // The base AnomalyTracker class doesn't have alarms.
}
void subtractValueFromSum(const MetricDimensionKey& key, const int64_t& bucketValue);
// Returns true if in the refractory period, else false.
- bool isInRefractoryPeriod(const uint64_t& timestampNs, const MetricDimensionKey& key) const;
+ bool isInRefractoryPeriod(const int64_t& timestampNs, const MetricDimensionKey& key) const;
// Calculates the corresponding bucket index within the circular array.
// Requires bucketNum >= 0.
}
void DurationAnomalyTracker::startAlarm(const MetricDimensionKey& dimensionKey,
- const uint64_t& timestampNs) {
+ const int64_t& timestampNs) {
// Alarms are stored in secs. Must round up, since if it fires early, it is ignored completely.
uint32_t timestampSec = static_cast<uint32_t>((timestampNs -1) / NS_PER_SEC) + 1; // round up
if (isInRefractoryPeriod(timestampNs, dimensionKey)) {
}
void DurationAnomalyTracker::stopAlarm(const MetricDimensionKey& dimensionKey,
- const uint64_t& timestampNs) {
+ const int64_t& timestampNs) {
const auto itr = mAlarms.find(dimensionKey);
if (itr == mAlarms.end()) {
return;
}
// If the alarm is set in the past but hasn't fired yet (due to lag), catch it now.
- if (itr->second != nullptr && timestampNs >= NS_PER_SEC * itr->second->timestampSec) {
+ if (itr->second != nullptr && timestampNs >= (int64_t)NS_PER_SEC * itr->second->timestampSec) {
declareAnomaly(timestampNs, dimensionKey);
}
if (mAlarmMonitor != nullptr) {
mAlarms.clear();
}
-void DurationAnomalyTracker::informAlarmsFired(const uint64_t& timestampNs,
+void DurationAnomalyTracker::informAlarmsFired(const int64_t& timestampNs,
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& firedAlarms) {
if (firedAlarms.empty() || mAlarms.empty()) return;
// Sets an alarm for the given timestamp.
// Replaces previous alarm if one already exists.
- void startAlarm(const MetricDimensionKey& dimensionKey, const uint64_t& eventTime);
+ void startAlarm(const MetricDimensionKey& dimensionKey, const int64_t& eventTime);
// Stops the alarm.
// If it should have already fired, but hasn't yet (e.g. because the AlarmManager is delayed),
// declare the anomaly now.
- void stopAlarm(const MetricDimensionKey& dimensionKey, const uint64_t& timestampNs);
+ void stopAlarm(const MetricDimensionKey& dimensionKey, const int64_t& timestampNs);
// Stop all the alarms owned by this tracker. Does not declare any anomalies.
void cancelAllAlarms();
// and removes it from firedAlarms. The AlarmMonitor is not informed.
// Note that this will generally be called from a different thread from the other functions;
// the caller is responsible for thread safety.
- void informAlarmsFired(const uint64_t& timestampNs,
+ void informAlarmsFired(const int64_t& timestampNs,
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& firedAlarms) override;
protected:
#include "guardrail/StatsdStats.h"
#include "stats_log_util.h"
#include "stats_util.h"
#include <android-base/file.h>
#include <dirent.h>
const int64_t timestampNs = getElapsedRealtimeNs();
// Tell everyone
- for (sp<ConfigListener> listener:broadcastList) {
+ for (sp<ConfigListener> listener : broadcastList) {
listener->OnConfigUpdated(timestampNs, key, config);
}
}
const int FIELD_ID_CONFIG_STATS_UID = 1;
const int FIELD_ID_CONFIG_STATS_ID = 2;
const int FIELD_ID_CONFIG_STATS_CREATION = 3;
+const int FIELD_ID_CONFIG_STATS_RESET = 18;
const int FIELD_ID_CONFIG_STATS_DELETION = 4;
const int FIELD_ID_CONFIG_STATS_METRIC_COUNT = 5;
const int FIELD_ID_CONFIG_STATS_CONDITION_COUNT = 6;
noteConfigRemovedInternalLocked(key);
}
+void StatsdStats::noteConfigResetInternalLocked(const ConfigKey& key) {
+ auto it = mConfigStats.find(key);
+ if (it != mConfigStats.end()) {
+ it->second->reset_time_sec = getWallClockSec();
+ }
+}
+
+void StatsdStats::noteConfigReset(const ConfigKey& key) {
+ lock_guard<std::mutex> lock(mLock);
+ noteConfigResetInternalLocked(key);
+}
+
void StatsdStats::noteBroadcastSent(const ConfigKey& key) {
noteBroadcastSent(key, getWallClockSec());
}
fprintf(out, "%lu Config in icebox: \n", (unsigned long)mIceBox.size());
for (const auto& configStats : mIceBox) {
fprintf(out,
- "Config {%d_%lld}: creation=%d, deletion=%d, #metric=%d, #condition=%d, "
+ "Config {%d_%lld}: creation=%d, deletion=%d, reset=%d, #metric=%d, #condition=%d, "
"#matcher=%d, #alert=%d, valid=%d\n",
configStats->uid, (long long)configStats->id, configStats->creation_time_sec,
- configStats->deletion_time_sec, configStats->metric_count,
+ configStats->deletion_time_sec, configStats->reset_time_sec,
+ configStats->metric_count,
configStats->condition_count, configStats->matcher_count, configStats->alert_count,
configStats->is_valid);
proto->write(FIELD_TYPE_INT32 | FIELD_ID_CONFIG_STATS_UID, configStats.uid);
proto->write(FIELD_TYPE_INT64 | FIELD_ID_CONFIG_STATS_ID, (long long)configStats.id);
proto->write(FIELD_TYPE_INT32 | FIELD_ID_CONFIG_STATS_CREATION, configStats.creation_time_sec);
+ if (configStats.reset_time_sec != 0) {
+ proto->write(FIELD_TYPE_INT32 | FIELD_ID_CONFIG_STATS_RESET, configStats.reset_time_sec);
+ }
if (configStats.deletion_time_sec != 0) {
proto->write(FIELD_TYPE_INT32 | FIELD_ID_CONFIG_STATS_DELETION,
configStats.deletion_time_sec);
int64_t id;
int32_t creation_time_sec;
int32_t deletion_time_sec = 0;
+ int32_t reset_time_sec = 0;
int32_t metric_count;
int32_t condition_count;
int32_t matcher_count;
const static size_t kMaxBytesUsedUidMap = 50 * 1024;
/* Minimum period between two broadcasts in nanoseconds. */
- static const unsigned long long kMinBroadcastPeriodNs = 60 * NS_PER_SEC;
+ static const int64_t kMinBroadcastPeriodNs = 60 * NS_PER_SEC;
/* Min period between two checks of byte size per config key in nanoseconds. */
- static const unsigned long long kMinByteSizeCheckPeriodNs = 10 * NS_PER_SEC;
+ static const int64_t kMinByteSizeCheckPeriodNs = 10 * NS_PER_SEC;
// Maximum age (30 days) that files on disk can exist in seconds.
static const int kMaxAgeSecond = 60 * 60 * 24 * 30;
* Report a config has been removed.
*/
void noteConfigRemoved(const ConfigKey& key);
+ /**
+ * Report a config has been reset when ttl expires.
+ */
+ void noteConfigReset(const ConfigKey& key);
/**
* Report a broadcast has been sent to a config owner to collect the data.
// Stores the number of times statsd registers the periodic alarm changes
int mPeriodicAlarmRegisteredStats = 0;
+ void noteConfigResetInternalLocked(const ConfigKey& key);
void noteConfigRemovedInternalLocked(const ConfigKey& key);
CountMetricProducer::CountMetricProducer(const ConfigKey& key, const CountMetric& metric,
const int conditionIndex,
const sp<ConditionWizard>& wizard,
- const uint64_t startTimeNs)
+ const int64_t startTimeNs)
: MetricProducer(metric.id(), key, startTimeNs, conditionIndex, wizard) {
// TODO: evaluate initial conditions. and set mConditionMet.
if (metric.has_bucket()) {
}
void CountMetricProducer::onSlicedConditionMayChangeLocked(bool overallCondition,
- const uint64_t eventTime) {
+ const int64_t eventTime) {
VLOG("Metric %lld onSlicedConditionMayChange", (long long)mMetricId);
}
-void CountMetricProducer::onDumpReportLocked(const uint64_t dumpTimeNs,
+void CountMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
ProtoOutputStream* protoOutput) {
flushIfNeededLocked(dumpTimeNs);
if (mPastBuckets.empty()) {
mPastBuckets.clear();
}
-void CountMetricProducer::dropDataLocked(const uint64_t dropTimeNs) {
+void CountMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
flushIfNeededLocked(dropTimeNs);
mPastBuckets.clear();
}
void CountMetricProducer::onConditionChangedLocked(const bool conditionMet,
- const uint64_t eventTime) {
+ const int64_t eventTime) {
VLOG("Metric %lld onConditionChanged", (long long)mMetricId);
mCondition = conditionMet;
}
const size_t matcherIndex, const MetricDimensionKey& eventKey,
const ConditionKey& conditionKey, bool condition,
const LogEvent& event) {
- uint64_t eventTimeNs = event.GetElapsedTimestampNs();
+ int64_t eventTimeNs = event.GetElapsedTimestampNs();
flushIfNeededLocked(eventTimeNs);
if (condition == false) {
// When a new matched event comes in, we check if event falls into the current
// bucket. If not, flush the old counter to past buckets and initialize the new bucket.
-void CountMetricProducer::flushIfNeededLocked(const uint64_t& eventTimeNs) {
- uint64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
+void CountMetricProducer::flushIfNeededLocked(const int64_t& eventTimeNs) {
+ int64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
if (eventTimeNs < currentBucketEndTimeNs) {
return;
}
flushCurrentBucketLocked(eventTimeNs);
// Setup the bucket start time and number.
- uint64_t numBucketsForward = 1 + (eventTimeNs - currentBucketEndTimeNs) / mBucketSizeNs;
+ int64_t numBucketsForward = 1 + (eventTimeNs - currentBucketEndTimeNs) / mBucketSizeNs;
mCurrentBucketStartTimeNs = currentBucketEndTimeNs + (numBucketsForward - 1) * mBucketSizeNs;
mCurrentBucketNum += numBucketsForward;
VLOG("metric %lld: new bucket start time: %lld", (long long)mMetricId,
(long long)mCurrentBucketStartTimeNs);
}
-void CountMetricProducer::flushCurrentBucketLocked(const uint64_t& eventTimeNs) {
- uint64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
+void CountMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs) {
+ int64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
CountBucket info;
info.mBucketStartNs = mCurrentBucketStartTimeNs;
if (eventTimeNs < fullBucketEndTimeNs) {
// TODO: Pass in the start time from MetricsManager, it should be consistent for all metrics.
CountMetricProducer(const ConfigKey& key, const CountMetric& countMetric,
const int conditionIndex, const sp<ConditionWizard>& wizard,
- const uint64_t startTimeNs);
+ const int64_t startTimeNs);
virtual ~CountMetricProducer();
const LogEvent& event) override;
private:
- void onDumpReportLocked(const uint64_t dumpTimeNs,
+ void onDumpReportLocked(const int64_t dumpTimeNs,
android::util::ProtoOutputStream* protoOutput) override;
// Internal interface to handle condition change.
- void onConditionChangedLocked(const bool conditionMet, const uint64_t eventTime) override;
+ void onConditionChangedLocked(const bool conditionMet, const int64_t eventTime) override;
// Internal interface to handle sliced condition change.
- void onSlicedConditionMayChangeLocked(bool overallCondition, const uint64_t eventTime) override;
+ void onSlicedConditionMayChangeLocked(bool overallCondition, const int64_t eventTime) override;
// Internal function to calculate the current used bytes.
size_t byteSizeLocked() const override;
void dumpStatesLocked(FILE* out, bool verbose) const override;
- void dropDataLocked(const uint64_t dropTimeNs) override;
+ void dropDataLocked(const int64_t dropTimeNs) override;
// Util function to flush the old packet.
- void flushIfNeededLocked(const uint64_t& newEventTime) override;
+ void flushIfNeededLocked(const int64_t& newEventTime) override;
- void flushCurrentBucketLocked(const uint64_t& eventTimeNs) override;
+ void flushCurrentBucketLocked(const int64_t& eventTimeNs) override;
// TODO: Add a lock to mPastBuckets.
std::unordered_map<MetricDimensionKey, std::vector<CountBucket>> mPastBuckets;
const bool nesting,
const sp<ConditionWizard>& wizard,
const FieldMatcher& internalDimensions,
- const uint64_t startTimeNs)
+ const int64_t startTimeNs)
: MetricProducer(metric.id(), key, startTimeNs, conditionIndex, wizard),
mAggregationType(metric.aggregation_type()),
mStartIndex(startIndex),
// 2. No condition in dimension
// 3. The links covers all dimension fields in the sliced child condition predicate.
void DurationMetricProducer::onSlicedConditionMayChangeLocked_opt1(bool condition,
- const uint64_t eventTime) {
+ const int64_t eventTime) {
if (mMetric2ConditionLinks.size() != 1 ||
!mHasLinksToAllConditionDimensionsInTracker ||
!mDimensionsInCondition.empty()) {
// 1. If combination condition, logical operation is AND, only one sliced child predicate.
// 2. Has dimensions_in_condition and it equals to the output dimensions of the sliced predicate.
void DurationMetricProducer::onSlicedConditionMayChangeLocked_opt2(bool condition,
- const uint64_t eventTime) {
+ const int64_t eventTime) {
if (mMetric2ConditionLinks.size() > 1 || !mSameConditionDimensionsInTracker) {
return;
}
}
void DurationMetricProducer::onSlicedConditionMayChangeLocked(bool overallCondition,
- const uint64_t eventTime) {
+ const int64_t eventTime) {
VLOG("Metric %lld onSlicedConditionMayChange", (long long)mMetricId);
flushIfNeededLocked(eventTime);
}
void DurationMetricProducer::onConditionChangedLocked(const bool conditionMet,
- const uint64_t eventTime) {
+ const int64_t eventTime) {
VLOG("Metric %lld onConditionChanged", (long long)mMetricId);
mCondition = conditionMet;
flushIfNeededLocked(eventTime);
}
}
-void DurationMetricProducer::dropDataLocked(const uint64_t dropTimeNs) {
+void DurationMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
flushIfNeededLocked(dropTimeNs);
mPastBuckets.clear();
}
-void DurationMetricProducer::onDumpReportLocked(const uint64_t dumpTimeNs,
+void DurationMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
ProtoOutputStream* protoOutput) {
flushIfNeededLocked(dumpTimeNs);
if (mPastBuckets.empty()) {
mPastBuckets.clear();
}
-void DurationMetricProducer::flushIfNeededLocked(const uint64_t& eventTimeNs) {
- uint64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
+void DurationMetricProducer::flushIfNeededLocked(const int64_t& eventTimeNs) {
+ int64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
if (currentBucketEndTimeNs > eventTimeNs) {
return;
mCurrentBucketNum += numBucketsForward;
}
-void DurationMetricProducer::flushCurrentBucketLocked(const uint64_t& eventTimeNs) {
+void DurationMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs) {
for (auto whatIt = mCurrentSlicedDurationTrackerMap.begin();
whatIt != mCurrentSlicedDurationTrackerMap.end();) {
for (auto it = whatIt->second.begin(); it != whatIt->second.end();) {
void DurationMetricProducer::onMatchedLogEventLocked(const size_t matcherIndex,
const LogEvent& event) {
- uint64_t eventTimeNs = event.GetElapsedTimestampNs();
+ int64_t eventTimeNs = event.GetElapsedTimestampNs();
if (eventTimeNs < mStartTimeNs) {
return;
}
const int conditionIndex, const size_t startIndex,
const size_t stopIndex, const size_t stopAllIndex, const bool nesting,
const sp<ConditionWizard>& wizard,
- const FieldMatcher& internalDimensions, const uint64_t startTimeNs);
+ const FieldMatcher& internalDimensions, const int64_t startTimeNs);
virtual ~DurationMetricProducer();
void handleStartEvent(const MetricDimensionKey& eventKey, const ConditionKey& conditionKeys,
bool condition, const LogEvent& event);
- void onDumpReportLocked(const uint64_t dumpTimeNs,
+ void onDumpReportLocked(const int64_t dumpTimeNs,
android::util::ProtoOutputStream* protoOutput) override;
// Internal interface to handle condition change.
- void onConditionChangedLocked(const bool conditionMet, const uint64_t eventTime) override;
+ void onConditionChangedLocked(const bool conditionMet, const int64_t eventTime) override;
// Internal interface to handle sliced condition change.
- void onSlicedConditionMayChangeLocked(bool overallCondition, const uint64_t eventTime) override;
+ void onSlicedConditionMayChangeLocked(bool overallCondition, const int64_t eventTime) override;
- void onSlicedConditionMayChangeLocked_opt1(bool overallCondition, const uint64_t eventTime);
- void onSlicedConditionMayChangeLocked_opt2(bool overallCondition, const uint64_t eventTime);
+ void onSlicedConditionMayChangeLocked_opt1(bool overallCondition, const int64_t eventTime);
+ void onSlicedConditionMayChangeLocked_opt2(bool overallCondition, const int64_t eventTime);
// Internal function to calculate the current used bytes.
size_t byteSizeLocked() const override;
void dumpStatesLocked(FILE* out, bool verbose) const override;
- void dropDataLocked(const uint64_t dropTimeNs) override;
+ void dropDataLocked(const int64_t dropTimeNs) override;
// Util function to flush the old packet.
- void flushIfNeededLocked(const uint64_t& eventTime);
+ void flushIfNeededLocked(const int64_t& eventTime);
- void flushCurrentBucketLocked(const uint64_t& eventTimeNs) override;
+ void flushCurrentBucketLocked(const int64_t& eventTimeNs) override;
const DurationMetric_AggregationType mAggregationType;
EventMetricProducer::EventMetricProducer(const ConfigKey& key, const EventMetric& metric,
const int conditionIndex,
const sp<ConditionWizard>& wizard,
- const uint64_t startTimeNs)
+ const int64_t startTimeNs)
: MetricProducer(metric.id(), key, startTimeNs, conditionIndex, wizard) {
if (metric.links().size() > 0) {
for (const auto& link : metric.links()) {
VLOG("~EventMetricProducer() called");
}
-void EventMetricProducer::dropDataLocked(const uint64_t dropTimeNs) {
+void EventMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
mProto->clear();
}
void EventMetricProducer::onSlicedConditionMayChangeLocked(bool overallCondition,
- const uint64_t eventTime) {
+ const int64_t eventTime) {
}
std::unique_ptr<std::vector<uint8_t>> serializeProtoLocked(ProtoOutputStream& protoOutput) {
return buffer;
}
-void EventMetricProducer::onDumpReportLocked(const uint64_t dumpTimeNs,
+void EventMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
ProtoOutputStream* protoOutput) {
if (mProto->size() <= 0) {
return;
}
void EventMetricProducer::onConditionChangedLocked(const bool conditionMet,
- const uint64_t eventTime) {
+ const int64_t eventTime) {
VLOG("Metric %lld onConditionChanged", (long long)mMetricId);
mCondition = conditionMet;
}
// TODO: Pass in the start time from MetricsManager, it should be consistent for all metrics.
EventMetricProducer(const ConfigKey& key, const EventMetric& eventMetric,
const int conditionIndex, const sp<ConditionWizard>& wizard,
- const uint64_t startTimeNs);
+ const int64_t startTimeNs);
virtual ~EventMetricProducer();
const ConditionKey& conditionKey, bool condition,
const LogEvent& event) override;
- void onDumpReportLocked(const uint64_t dumpTimeNs,
+ void onDumpReportLocked(const int64_t dumpTimeNs,
android::util::ProtoOutputStream* protoOutput) override;
// Internal interface to handle condition change.
- void onConditionChangedLocked(const bool conditionMet, const uint64_t eventTime) override;
+ void onConditionChangedLocked(const bool conditionMet, const int64_t eventTime) override;
// Internal interface to handle sliced condition change.
- void onSlicedConditionMayChangeLocked(bool overallCondition, const uint64_t eventTime) override;
+ void onSlicedConditionMayChangeLocked(bool overallCondition, const int64_t eventTime) override;
- void dropDataLocked(const uint64_t dropTimeNs) override;
+ void dropDataLocked(const int64_t dropTimeNs) override;
// Internal function to calculate the current used bytes.
size_t byteSizeLocked() const override;
// NOTE(review): condensed diff hunk (GaugeMetricProducer.cpp); interior lines are elided.
// All visible changes are the uint64_t -> int64_t timestamp migration; the comparisons
// (eventTimeNs < mCurrentBucketStartTimeNs etc.) are unchanged and remain correct under
// signed types.
GaugeMetricProducer::GaugeMetricProducer(const ConfigKey& key, const GaugeMetric& metric,
const int conditionIndex,
const sp<ConditionWizard>& wizard, const int pullTagId,
- const uint64_t startTimeNs,
+ const int64_t startTimeNs,
shared_ptr<StatsPullerManager> statsPullerManager)
: MetricProducer(metric.id(), key, startTimeNs, conditionIndex, wizard),
mStatsPullerManager(statsPullerManager),
}
}
-void GaugeMetricProducer::onDumpReportLocked(const uint64_t dumpTimeNs,
+void GaugeMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
ProtoOutputStream* protoOutput) {
VLOG("Gauge metric %lld report now...", (long long)mMetricId);
}
void GaugeMetricProducer::onConditionChangedLocked(const bool conditionMet,
- const uint64_t eventTime) {
+ const int64_t eventTime) {
VLOG("GaugeMetric %lld onConditionChanged", (long long)mMetricId);
flushIfNeededLocked(eventTime);
mCondition = conditionMet;
}
void GaugeMetricProducer::onSlicedConditionMayChangeLocked(bool overallCondition,
- const uint64_t eventTime) {
+ const int64_t eventTime) {
VLOG("GaugeMetric %lld onSlicedConditionMayChange overall condition %d", (long long)mMetricId,
overallCondition);
flushIfNeededLocked(eventTime);
if (condition == false) {
return;
}
- uint64_t eventTimeNs = event.GetElapsedTimestampNs();
+ int64_t eventTimeNs = event.GetElapsedTimestampNs();
mTagId = event.GetTagId();
if (eventTimeNs < mCurrentBucketStartTimeNs) {
VLOG("Gauge Skip event due to late arrival: %lld vs %lld", (long long)eventTimeNs,
}
}
-void GaugeMetricProducer::dropDataLocked(const uint64_t dropTimeNs) {
+void GaugeMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
flushIfNeededLocked(dropTimeNs);
mPastBuckets.clear();
}
// bucket.
// if data is pushed, onMatchedLogEvent will only be called through onConditionChanged() inside
// the GaugeMetricProducer while holding the lock.
-void GaugeMetricProducer::flushIfNeededLocked(const uint64_t& eventTimeNs) {
- uint64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
+void GaugeMetricProducer::flushIfNeededLocked(const int64_t& eventTimeNs) {
+ int64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
if (eventTimeNs < currentBucketEndTimeNs) {
// NOTE(review): this VLOG shows two %lld specifiers but only one argument is visible
// below — the (long long)eventTimeNs argument line was likely elided by the diff;
// verify against the full source that both arguments are supplied.
VLOG("Gauge eventTime is %lld, less than next bucket start time %lld",
(long long)mCurrentBucketStartTimeNs);
}
-void GaugeMetricProducer::flushCurrentBucketLocked(const uint64_t& eventTimeNs) {
- uint64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
+void GaugeMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs) {
+ int64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
GaugeBucket info;
info.mBucketStartNs = mCurrentBucketStartTimeNs;
// NOTE(review): condensed diff hunk (GaugeMetricProducer header). Declarations track the
// int64_t migration; all overrides must change together with the MetricProducer base-class
// virtuals in this same change, otherwise `override` will fail to compile — which is a
// useful safety net here.
void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data) override;
// GaugeMetric needs to immediately trigger another pull when we create the partial bucket.
- void notifyAppUpgrade(const uint64_t& eventTimeNs, const string& apk, const int uid,
+ void notifyAppUpgrade(const int64_t& eventTimeNs, const string& apk, const int uid,
const int64_t version) override {
std::lock_guard<std::mutex> lock(mMutex);
const LogEvent& event) override;
private:
- void onDumpReportLocked(const uint64_t dumpTimeNs,
+ void onDumpReportLocked(const int64_t dumpTimeNs,
android::util::ProtoOutputStream* protoOutput) override;
// for testing
GaugeMetricProducer(const ConfigKey& key, const GaugeMetric& gaugeMetric,
const int conditionIndex, const sp<ConditionWizard>& wizard,
- const int pullTagId, const uint64_t startTimeNs,
+ const int pullTagId, const int64_t startTimeNs,
std::shared_ptr<StatsPullerManager> statsPullerManager);
// Internal interface to handle condition change.
- void onConditionChangedLocked(const bool conditionMet, const uint64_t eventTime) override;
+ void onConditionChangedLocked(const bool conditionMet, const int64_t eventTime) override;
// Internal interface to handle sliced condition change.
- void onSlicedConditionMayChangeLocked(bool overallCondition, const uint64_t eventTime) override;
+ void onSlicedConditionMayChangeLocked(bool overallCondition, const int64_t eventTime) override;
// Internal function to calculate the current used bytes.
size_t byteSizeLocked() const override;
void dumpStatesLocked(FILE* out, bool verbose) const override;
- void dropDataLocked(const uint64_t dropTimeNs) override;
+ void dropDataLocked(const int64_t dropTimeNs) override;
// Util function to flush the old packet.
- void flushIfNeededLocked(const uint64_t& eventTime) override;
+ void flushIfNeededLocked(const int64_t& eventTime) override;
- void flushCurrentBucketLocked(const uint64_t& eventTimeNs) override;
+ void flushCurrentBucketLocked(const int64_t& eventTimeNs) override;
void pullLocked();
// NOTE(review): condensed diff hunk (MetricProducer base class, .cpp + header). This is
// the root of the int64_t migration: the base-class virtuals and the mStartTimeNs /
// mCurrentBucketStartTimeNs / mCurrentBucketNum members change type, forcing every
// subclass override in this change to follow.
using std::map;
void MetricProducer::onMatchedLogEventLocked(const size_t matcherIndex, const LogEvent& event) {
- uint64_t eventTimeNs = event.GetElapsedTimestampNs();
+ int64_t eventTimeNs = event.GetElapsedTimestampNs();
// this is old event, maybe statsd restarted?
if (eventTimeNs < mStartTimeNs) {
return;
* the flush again when the end timestamp is forced to be now, and then after flushing, update
* the start timestamp to be now.
*/
- void notifyAppUpgrade(const uint64_t& eventTimeNs, const string& apk, const int uid,
+ void notifyAppUpgrade(const int64_t& eventTimeNs, const string& apk, const int uid,
const int64_t version) override {
std::lock_guard<std::mutex> lock(mMutex);
// is a partial bucket and can merge it with the previous bucket.
};
- void notifyAppRemoved(const uint64_t& eventTimeNs, const string& apk, const int uid) override{
+ void notifyAppRemoved(const int64_t& eventTimeNs, const string& apk, const int uid) override{
// TODO: Implement me.
};
- void onUidMapReceived(const uint64_t& eventTimeNs) override{
+ void onUidMapReceived(const int64_t& eventTimeNs) override{
// TODO: Implement me.
};
onMatchedLogEventLocked(matcherIndex, event);
}
- void onConditionChanged(const bool condition, const uint64_t eventTime) {
+ void onConditionChanged(const bool condition, const int64_t eventTime) {
std::lock_guard<std::mutex> lock(mMutex);
onConditionChangedLocked(condition, eventTime);
}
- void onSlicedConditionMayChange(bool overallCondition, const uint64_t eventTime) {
+ void onSlicedConditionMayChange(bool overallCondition, const int64_t eventTime) {
std::lock_guard<std::mutex> lock(mMutex);
onSlicedConditionMayChangeLocked(overallCondition, eventTime);
}
// Output the metrics data to [protoOutput]. All metrics reports end with the same timestamp.
// This method clears all the past buckets.
- void onDumpReport(const uint64_t dumpTimeNs, android::util::ProtoOutputStream* protoOutput) {
+ void onDumpReport(const int64_t dumpTimeNs, android::util::ProtoOutputStream* protoOutput) {
std::lock_guard<std::mutex> lock(mMutex);
return onDumpReportLocked(dumpTimeNs, protoOutput);
}
// We still need to keep future data valid and anomaly tracking work, which means we will
// have to flush old data, informing anomaly trackers then safely drop old data.
// We still keep current bucket data for future metrics' validity.
- void dropData(const uint64_t dropTimeNs) {
+ void dropData(const int64_t dropTimeNs) {
std::lock_guard<std::mutex> lock(mMutex);
dropDataLocked(dropTimeNs);
}
protected:
- virtual void onConditionChangedLocked(const bool condition, const uint64_t eventTime) = 0;
+ virtual void onConditionChangedLocked(const bool condition, const int64_t eventTime) = 0;
virtual void onSlicedConditionMayChangeLocked(bool overallCondition,
- const uint64_t eventTime) = 0;
- virtual void onDumpReportLocked(const uint64_t dumpTimeNs,
+ const int64_t eventTime) = 0;
+ virtual void onDumpReportLocked(const int64_t dumpTimeNs,
android::util::ProtoOutputStream* protoOutput) = 0;
virtual size_t byteSizeLocked() const = 0;
virtual void dumpStatesLocked(FILE* out, bool verbose) const = 0;
/**
* Flushes the current bucket if the eventTime is after the current bucket's end time.
*/
- virtual void flushIfNeededLocked(const uint64_t& eventTime){};
+ virtual void flushIfNeededLocked(const int64_t& eventTime){};
/**
* For metrics that aggregate (ie, every metric producer except for EventMetricProducer),
* flushIfNeededLocked or the app upgrade handler; the caller MUST update the bucket timestamp
* and bucket number as needed.
*/
- virtual void flushCurrentBucketLocked(const uint64_t& eventTimeNs){};
+ virtual void flushCurrentBucketLocked(const int64_t& eventTimeNs){};
// Convenience to compute the current bucket's end time, which is always aligned with the
// start time of the metric.
- uint64_t getCurrentBucketEndTimeNs() const {
+ int64_t getCurrentBucketEndTimeNs() const {
return mStartTimeNs + (mCurrentBucketNum + 1) * mBucketSizeNs;
}
- virtual void dropDataLocked(const uint64_t dropTimeNs) = 0;
+ virtual void dropDataLocked(const int64_t dropTimeNs) = 0;
const int64_t mMetricId;
// The time when this metric producer was first created. The end time for the current bucket
// can be computed from this based on mCurrentBucketNum.
- uint64_t mStartTimeNs;
+ int64_t mStartTimeNs;
// Start time may not be aligned with the start of statsd if there is an app upgrade in the
// middle of a bucket.
- uint64_t mCurrentBucketStartTimeNs;
+ int64_t mCurrentBucketStartTimeNs;
// Used by anomaly detector to track which bucket we are in. This is not sent with the produced
// report.
- uint64_t mCurrentBucketNum;
+ int64_t mCurrentBucketNum;
int64_t mBucketSizeNs;
// NOTE(review): condensed diff hunk (MetricsManager.cpp). Adds the config-TTL feature:
// mTtlNs is ttl_in_seconds * NS_PER_SEC when the config sets a TTL, otherwise -1 as the
// "no TTL" sentinel; mTtlEndNs starts at -1 and is seeded from the timebase via
// refreshTtl() before config init runs.
// NOTE(review): ttl_in_seconds() * NS_PER_SEC can overflow int64 for absurdly large
// configured TTLs (> ~292 years in ns) — presumably out of range in practice, but worth
// a clamp or validation; confirm whether the proto bounds this field.
// NOTE(review): the companion `#define NS_PER_HOUR 3600 * NS_PER_SEC` added earlier in
// this change is unparenthesized — wrap it as (3600 * NS_PER_SEC) to avoid operator-
// precedence bugs at expansion sites.
const sp<AlarmMonitor>& anomalyAlarmMonitor,
const sp<AlarmMonitor>& periodicAlarmMonitor)
: mConfigKey(key), mUidMap(uidMap),
+ mTtlNs(config.has_ttl_in_seconds() ? config.ttl_in_seconds() * NS_PER_SEC : -1),
+ mTtlEndNs(-1),
mLastReportTimeNs(timeBaseSec * NS_PER_SEC),
mLastReportWallClockNs(getWallClockNs()) {
+ // Init the ttl end timestamp.
+ refreshTtl(timeBaseSec * NS_PER_SEC);
+
mConfigValid =
initStatsdConfig(key, config, *uidMap, anomalyAlarmMonitor, periodicAlarmMonitor,
timeBaseSec, currentTimeSec, mTagIds, mAllAtomMatchers,
return mConfigValid;
}
-void MetricsManager::notifyAppUpgrade(const uint64_t& eventTimeNs, const string& apk, const int uid,
+void MetricsManager::notifyAppUpgrade(const int64_t& eventTimeNs, const string& apk, const int uid,
const int64_t version) {
// check if we care this package
if (std::find(mAllowedPkg.begin(), mAllowedPkg.end(), apk) == mAllowedPkg.end()) {
initLogSourceWhiteList();
}
-void MetricsManager::notifyAppRemoved(const uint64_t& eventTimeNs, const string& apk,
+void MetricsManager::notifyAppRemoved(const int64_t& eventTimeNs, const string& apk,
const int uid) {
// check if we care this package
if (std::find(mAllowedPkg.begin(), mAllowedPkg.end(), apk) == mAllowedPkg.end()) {
initLogSourceWhiteList();
}
-void MetricsManager::onUidMapReceived(const uint64_t& eventTimeNs) {
+void MetricsManager::onUidMapReceived(const int64_t& eventTimeNs) {
if (mAllowedPkg.size() == 0) {
return;
}
}
}
-void MetricsManager::dropData(const uint64_t dropTimeNs) {
+void MetricsManager::dropData(const int64_t dropTimeNs) {
for (const auto& producer : mAllMetricProducers) {
producer->dropData(dropTimeNs);
}
}
-void MetricsManager::onDumpReport(const uint64_t dumpTimeStampNs, ProtoOutputStream* protoOutput) {
+void MetricsManager::onDumpReport(const int64_t dumpTimeStampNs, ProtoOutputStream* protoOutput) {
VLOG("=========================Metric Reports Start==========================");
// one StatsLogReport per MetricProduer
for (const auto& producer : mAllMetricProducers) {
}
int tagId = event.GetTagId();
- uint64_t eventTime = event.GetElapsedTimestampNs();
+ int64_t eventTime = event.GetElapsedTimestampNs();
if (mTagIds.find(tagId) == mTagIds.end()) {
// not interesting...
return;
}
void MetricsManager::onAnomalyAlarmFired(
- const uint64_t& timestampNs,
+ const int64_t& timestampNs,
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& alarmSet) {
for (const auto& itr : mAllAnomalyTrackers) {
itr->informAlarmsFired(timestampNs, alarmSet);
}
void MetricsManager::onPeriodicAlarmFired(
- const uint64_t& timestampNs,
+ const int64_t& timestampNs,
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& alarmSet) {
for (const auto& itr : mAllPeriodicAlarmTrackers) {
itr->informAlarmsFired(timestampNs, alarmSet);
// NOTE(review): condensed diff hunk (MetricsManager header). New TTL API:
//  - isInTtl(t): true when TTL is disabled (mTtlNs <= 0) or t is strictly before
//    mTtlEndNs — i.e. the expiry boundary is half-open: t == mTtlEndNs counts as expired.
//  - refreshTtl(t): re-arms mTtlEndNs = t + mTtlNs, a no-op when TTL is disabled.
// Minor nit: the `};` after both inline member-function bodies carries a redundant
// trailing semicolon (harmless inside a class, but lint-worthy).
void onLogEvent(const LogEvent& event);
void onAnomalyAlarmFired(
- const uint64_t& timestampNs,
+ const int64_t& timestampNs,
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& alarmSet);
void onPeriodicAlarmFired(
- const uint64_t& timestampNs,
+ const int64_t& timestampNs,
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& alarmSet);
- void notifyAppUpgrade(const uint64_t& eventTimeNs, const string& apk, const int uid,
+ void notifyAppUpgrade(const int64_t& eventTimeNs, const string& apk, const int uid,
const int64_t version) override;
- void notifyAppRemoved(const uint64_t& eventTimeNs, const string& apk, const int uid) override;
+ void notifyAppRemoved(const int64_t& eventTimeNs, const string& apk, const int uid) override;
- void onUidMapReceived(const uint64_t& eventTimeNs) override;
+ void onUidMapReceived(const int64_t& eventTimeNs) override;
bool shouldAddUidMapListener() const {
return !mAllowedPkg.empty();
void dumpStates(FILE* out, bool verbose);
+ inline bool isInTtl(const int64_t timestampNs) const {
+ return mTtlNs <= 0 || timestampNs < mTtlEndNs;
+ };
+
+ void refreshTtl(const int64_t currentTimestampNs) {
+ if (mTtlNs > 0) {
+ mTtlEndNs = currentTimestampNs + mTtlNs;
+ }
+ };
+
// Returns the elapsed realtime when this metric manager last reported metrics.
inline int64_t getLastReportTimeNs() const {
return mLastReportTimeNs;
return mLastReportWallClockNs;
};
- virtual void dropData(const uint64_t dropTimeNs);
+ virtual void dropData(const int64_t dropTimeNs);
// Config source owner can call onDumpReport() to get all the metrics collected.
- virtual void onDumpReport(const uint64_t dumpTimeNs,
+ virtual void onDumpReport(const int64_t dumpTimeNs,
android::util::ProtoOutputStream* protoOutput);
// Computes the total byte size of all metrics managed by a single config source.
// Does not change the state.
virtual size_t byteSize();
+
private:
+ // For test only.
+ inline int64_t getTtlEndNs() const { return mTtlEndNs; }
+
const ConfigKey mConfigKey;
sp<UidMap> mUidMap;
bool mConfigValid = false;
+ const int64_t mTtlNs;
+ int64_t mTtlEndNs;
+
int64_t mLastReportTimeNs;
int64_t mLastReportWallClockNs;
FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_long_refractory_period);
FRIEND_TEST(AlarmE2eTest, TestMultipleAlarms);
+ FRIEND_TEST(ConfigTtlE2eTest, TestCountMetric);
};
// NOTE(review): condensed diff hunk (ValueMetricProducer .cpp + header). Same int64_t
// migration. The bucket-snap arithmetic (realEventTime - mStartTimeNs) now subtracts
// signed values; a late event (realEventTime < mStartTimeNs) would previously have
// wrapped under uint64_t — with int64_t the quotient is small/negative instead, which is
// at least detectable. Confirm callers guard against pulls older than mStartTimeNs.
ValueMetricProducer::ValueMetricProducer(const ConfigKey& key, const ValueMetric& metric,
const int conditionIndex,
const sp<ConditionWizard>& wizard, const int pullTagId,
- const uint64_t startTimeNs,
+ const int64_t startTimeNs,
shared_ptr<StatsPullerManager> statsPullerManager)
: MetricProducer(metric.id(), key, startTimeNs, conditionIndex, wizard),
mValueField(metric.value_field()),
ValueMetricProducer::ValueMetricProducer(const ConfigKey& key, const ValueMetric& metric,
const int conditionIndex,
const sp<ConditionWizard>& wizard, const int pullTagId,
- const uint64_t startTimeNs)
+ const int64_t startTimeNs)
: ValueMetricProducer(key, metric, conditionIndex, wizard, pullTagId, startTimeNs,
make_shared<StatsPullerManager>()) {
}
}
void ValueMetricProducer::onSlicedConditionMayChangeLocked(bool overallCondition,
- const uint64_t eventTime) {
+ const int64_t eventTime) {
VLOG("Metric %lld onSlicedConditionMayChange", (long long)mMetricId);
}
-void ValueMetricProducer::dropDataLocked(const uint64_t dropTimeNs) {
+void ValueMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
flushIfNeededLocked(dropTimeNs);
mPastBuckets.clear();
}
-void ValueMetricProducer::onDumpReportLocked(const uint64_t dumpTimeNs,
+void ValueMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
ProtoOutputStream* protoOutput) {
VLOG("metric %lld dump report now...", (long long)mMetricId);
flushIfNeededLocked(dumpTimeNs);
}
void ValueMetricProducer::onConditionChangedLocked(const bool condition,
- const uint64_t eventTimeNs) {
+ const int64_t eventTimeNs) {
mCondition = condition;
if (eventTimeNs < mCurrentBucketStartTimeNs) {
}
// For scheduled pulled data, the effective event time is snap to the nearest
// bucket boundary to make bucket finalize.
- uint64_t realEventTime = allData.at(0)->GetElapsedTimestampNs();
- uint64_t eventTime = mStartTimeNs +
+ int64_t realEventTime = allData.at(0)->GetElapsedTimestampNs();
+ int64_t eventTime = mStartTimeNs +
((realEventTime - mStartTimeNs) / mBucketSizeNs) * mBucketSizeNs;
mCondition = false;
const size_t matcherIndex, const MetricDimensionKey& eventKey,
const ConditionKey& conditionKey, bool condition,
const LogEvent& event) {
- uint64_t eventTimeNs = event.GetElapsedTimestampNs();
+ int64_t eventTimeNs = event.GetElapsedTimestampNs();
if (eventTimeNs < mCurrentBucketStartTimeNs) {
VLOG("Skip event due to late arrival: %lld vs %lld", (long long)eventTimeNs,
(long long)mCurrentBucketStartTimeNs);
}
}
-void ValueMetricProducer::flushIfNeededLocked(const uint64_t& eventTimeNs) {
- uint64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
+void ValueMetricProducer::flushIfNeededLocked(const int64_t& eventTimeNs) {
+ int64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
if (currentBucketEndTimeNs > eventTimeNs) {
VLOG("eventTime is %lld, less than next bucket start time %lld", (long long)eventTimeNs,
(long long)mCurrentBucketStartTimeNs);
}
-void ValueMetricProducer::flushCurrentBucketLocked(const uint64_t& eventTimeNs) {
+void ValueMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs) {
// NOTE(review): the "%ld" / (long) cast below truncates the ns timestamp on 32-bit
// targets where long is 32 bits; prefer "%lld" / (long long) as used elsewhere in this
// file — verify whether 32-bit targets matter here.
VLOG("finalizing bucket for %ld, dumping %d slices", (long)mCurrentBucketStartTimeNs,
(int)mCurrentSlicedBucket.size());
- uint64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
+ int64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
ValueBucket info;
info.mBucketStartNs = mCurrentBucketStartTimeNs;
public:
ValueMetricProducer(const ConfigKey& key, const ValueMetric& valueMetric,
const int conditionIndex, const sp<ConditionWizard>& wizard,
- const int pullTagId, const uint64_t startTimeNs);
+ const int pullTagId, const int64_t startTimeNs);
virtual ~ValueMetricProducer();
void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data) override;
// ValueMetric needs special logic if it's a pulled atom.
- void notifyAppUpgrade(const uint64_t& eventTimeNs, const string& apk, const int uid,
+ void notifyAppUpgrade(const int64_t& eventTimeNs, const string& apk, const int uid,
const int64_t version) override {
std::lock_guard<std::mutex> lock(mMutex);
const LogEvent& event) override;
private:
- void onDumpReportLocked(const uint64_t dumpTimeNs,
+ void onDumpReportLocked(const int64_t dumpTimeNs,
android::util::ProtoOutputStream* protoOutput) override;
// Internal interface to handle condition change.
- void onConditionChangedLocked(const bool conditionMet, const uint64_t eventTime) override;
+ void onConditionChangedLocked(const bool conditionMet, const int64_t eventTime) override;
// Internal interface to handle sliced condition change.
- void onSlicedConditionMayChangeLocked(bool overallCondition, const uint64_t eventTime) override;
+ void onSlicedConditionMayChangeLocked(bool overallCondition, const int64_t eventTime) override;
// Internal function to calculate the current used bytes.
size_t byteSizeLocked() const override;
void dumpStatesLocked(FILE* out, bool verbose) const override;
// Util function to flush the old packet.
- void flushIfNeededLocked(const uint64_t& eventTime) override;
+ void flushIfNeededLocked(const int64_t& eventTime) override;
- void flushCurrentBucketLocked(const uint64_t& eventTimeNs) override;
+ void flushCurrentBucketLocked(const int64_t& eventTimeNs) override;
- void dropDataLocked(const uint64_t dropTimeNs) override;
+ void dropDataLocked(const int64_t dropTimeNs) override;
const FieldMatcher mValueField;
// for testing
ValueMetricProducer(const ConfigKey& key, const ValueMetric& valueMetric,
const int conditionIndex, const sp<ConditionWizard>& wizard,
- const int pullTagId, const uint64_t startTimeNs,
+ const int pullTagId, const int64_t startTimeNs,
std::shared_ptr<StatsPullerManager> statsPullerManager);
// tagId for pulled data. -1 if this is not pulled
};
// NOTE(review): condensed diff hunk (duration_helper: DurationBucket + DurationTracker
// base). The signed migration makes the `alarmTimestampNs > 0` guard below meaningful:
// predictAnomalyTimestampNs returns int64_t (see MaxDurationTracker declaration), so a
// non-positive prediction can now be detected instead of wrapping to a huge unsigned
// value.
struct DurationBucket {
- uint64_t mBucketStartNs;
- uint64_t mBucketEndNs;
- uint64_t mDuration;
+ int64_t mBucketStartNs;
+ int64_t mBucketEndNs;
+ int64_t mDuration;
};
class DurationTracker {
DurationTracker(const ConfigKey& key, const int64_t& id, const MetricDimensionKey& eventKey,
sp<ConditionWizard> wizard, int conditionIndex,
const std::vector<Matcher>& dimensionInCondition, bool nesting,
- uint64_t currentBucketStartNs, uint64_t currentBucketNum, uint64_t startTimeNs,
- uint64_t bucketSizeNs, bool conditionSliced, bool fullLink,
+ int64_t currentBucketStartNs, int64_t currentBucketNum, int64_t startTimeNs,
+ int64_t bucketSizeNs, bool conditionSliced, bool fullLink,
const std::vector<sp<DurationAnomalyTracker>>& anomalyTrackers)
: mConfigKey(key),
mTrackerId(id),
virtual ~DurationTracker(){};
- virtual unique_ptr<DurationTracker> clone(const uint64_t eventTime) = 0;
+ virtual unique_ptr<DurationTracker> clone(const int64_t eventTime) = 0;
virtual void noteStart(const HashableDimensionKey& key, bool condition,
- const uint64_t eventTime, const ConditionKey& conditionKey) = 0;
- virtual void noteStop(const HashableDimensionKey& key, const uint64_t eventTime,
+ const int64_t eventTime, const ConditionKey& conditionKey) = 0;
+ virtual void noteStop(const HashableDimensionKey& key, const int64_t eventTime,
const bool stopAll) = 0;
- virtual void noteStopAll(const uint64_t eventTime) = 0;
+ virtual void noteStopAll(const int64_t eventTime) = 0;
- virtual void onSlicedConditionMayChange(bool overallCondition, const uint64_t timestamp) = 0;
- virtual void onConditionChanged(bool condition, const uint64_t timestamp) = 0;
+ virtual void onSlicedConditionMayChange(bool overallCondition, const int64_t timestamp) = 0;
+ virtual void onConditionChanged(bool condition, const int64_t timestamp) = 0;
// Flush stale buckets if needed, and return true if the tracker has no on-going duration
// events, so that the owner can safely remove the tracker.
virtual bool flushIfNeeded(
- uint64_t timestampNs,
+ int64_t timestampNs,
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) = 0;
// Should only be called during an app upgrade or from this tracker's flushIfNeeded. If from
// an app upgrade, we assume that we're trying to form a partial bucket.
virtual bool flushCurrentBucket(
- const uint64_t& eventTimeNs,
+ const int64_t& eventTimeNs,
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) = 0;
// Predict the anomaly timestamp given the current status.
}
protected:
- uint64_t getCurrentBucketEndTimeNs() const {
+ int64_t getCurrentBucketEndTimeNs() const {
return mStartTimeNs + (mCurrentBucketNum + 1) * mBucketSizeNs;
}
// Starts the anomaly alarm.
- void startAnomalyAlarm(const uint64_t eventTime) {
+ void startAnomalyAlarm(const int64_t eventTime) {
for (auto& anomalyTracker : mAnomalyTrackers) {
if (anomalyTracker != nullptr) {
- const uint64_t alarmTimestampNs =
+ const int64_t alarmTimestampNs =
predictAnomalyTimestampNs(*anomalyTracker, eventTime);
if (alarmTimestampNs > 0) {
anomalyTracker->startAlarm(mEventKey, alarmTimestampNs);
}
// Stops the anomaly alarm. If it should have already fired, declare the anomaly now.
- void stopAnomalyAlarm(const uint64_t timestamp) {
+ void stopAnomalyAlarm(const int64_t timestamp) {
for (auto& anomalyTracker : mAnomalyTrackers) {
if (anomalyTracker != nullptr) {
anomalyTracker->stopAlarm(mEventKey, timestamp);
}
}
- void detectAndDeclareAnomaly(const uint64_t& timestamp, const int64_t& currBucketNum,
+ void detectAndDeclareAnomaly(const int64_t& timestamp, const int64_t& currBucketNum,
const int64_t& currentBucketValue) {
for (auto& anomalyTracker : mAnomalyTrackers) {
if (anomalyTracker != nullptr) {
// Convenience to compute the current bucket's end time, which is always aligned with the
// start time of the metric.
- uint64_t getCurrentBucketEndTimeNs() {
+ int64_t getCurrentBucketEndTimeNs() {
return mStartTimeNs + (mCurrentBucketNum + 1) * mBucketSizeNs;
}
const bool mNested;
- uint64_t mCurrentBucketStartTimeNs;
+ int64_t mCurrentBucketStartTimeNs;
int64_t mDuration; // current recorded duration result (for partial bucket)
int64_t mDurationFullBucket; // Sum of past partial buckets in current full bucket.
- uint64_t mCurrentBucketNum;
+ int64_t mCurrentBucketNum;
- const uint64_t mStartTimeNs;
+ const int64_t mStartTimeNs;
const bool mConditionSliced;
// NOTE(review): condensed diff hunk (MaxDurationTracker .cpp + header). In
// flushCurrentBucket, `numBucketsForward` stays a plain int while the bucket-gap
// division is now int64 — a pathological gap would narrow silently; presumably bounded
// in practice, but consider int64_t for consistency. The subtraction is guarded by
// `eventTimeNs >= fullBucketEnd`, so it cannot go negative.
const MetricDimensionKey& eventKey,
sp<ConditionWizard> wizard, int conditionIndex,
const vector<Matcher>& dimensionInCondition, bool nesting,
- uint64_t currentBucketStartNs, uint64_t currentBucketNum,
- uint64_t startTimeNs, uint64_t bucketSizeNs,
+ int64_t currentBucketStartNs, int64_t currentBucketNum,
+ int64_t startTimeNs, int64_t bucketSizeNs,
bool conditionSliced, bool fullLink,
const vector<sp<DurationAnomalyTracker>>& anomalyTrackers)
: DurationTracker(key, id, eventKey, wizard, conditionIndex, dimensionInCondition, nesting,
}
}
-unique_ptr<DurationTracker> MaxDurationTracker::clone(const uint64_t eventTime) {
+unique_ptr<DurationTracker> MaxDurationTracker::clone(const int64_t eventTime) {
auto clonedTracker = make_unique<MaxDurationTracker>(*this);
for (auto it = clonedTracker->mInfos.begin(); it != clonedTracker->mInfos.end();) {
if (it->second.state != kStopped) {
}
void MaxDurationTracker::noteStart(const HashableDimensionKey& key, bool condition,
- const uint64_t eventTime, const ConditionKey& conditionKey) {
+ const int64_t eventTime, const ConditionKey& conditionKey) {
// this will construct a new DurationInfo if this key didn't exist.
if (hitGuardRail(key)) {
return;
}
-void MaxDurationTracker::noteStop(const HashableDimensionKey& key, const uint64_t eventTime,
+void MaxDurationTracker::noteStop(const HashableDimensionKey& key, const int64_t eventTime,
bool forceStop) {
VLOG("MaxDuration: key %s stop", key.toString().c_str());
if (mInfos.find(key) == mInfos.end()) {
return false;
}
-void MaxDurationTracker::noteStopAll(const uint64_t eventTime) {
+void MaxDurationTracker::noteStopAll(const int64_t eventTime) {
std::set<HashableDimensionKey> keys;
for (const auto& pair : mInfos) {
keys.insert(pair.first);
}
bool MaxDurationTracker::flushCurrentBucket(
- const uint64_t& eventTimeNs,
+ const int64_t& eventTimeNs,
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) {
VLOG("MaxDurationTracker flushing.....");
// adjust the bucket start time
int numBucketsForward = 0;
- uint64_t fullBucketEnd = getCurrentBucketEndTimeNs();
- uint64_t currentBucketEndTimeNs;
+ int64_t fullBucketEnd = getCurrentBucketEndTimeNs();
+ int64_t currentBucketEndTimeNs;
if (eventTimeNs >= fullBucketEnd) {
numBucketsForward = 1 + (eventTimeNs - fullBucketEnd) / mBucketSizeNs;
currentBucketEndTimeNs = fullBucketEnd;
}
bool MaxDurationTracker::flushIfNeeded(
- uint64_t eventTimeNs, unordered_map<MetricDimensionKey, vector<DurationBucket>>* output) {
+ int64_t eventTimeNs, unordered_map<MetricDimensionKey, vector<DurationBucket>>* output) {
if (eventTimeNs < getCurrentBucketEndTimeNs()) {
return false;
}
}
void MaxDurationTracker::onSlicedConditionMayChange(bool overallCondition,
- const uint64_t timestamp) {
+ const int64_t timestamp) {
// Now for each of the on-going event, check if the condition has changed for them.
for (auto& pair : mInfos) {
if (pair.second.state == kStopped) {
}
}
-void MaxDurationTracker::onConditionChanged(bool condition, const uint64_t timestamp) {
+void MaxDurationTracker::onConditionChanged(bool condition, const int64_t timestamp) {
for (auto& pair : mInfos) {
noteConditionChanged(pair.first, condition, timestamp);
}
}
void MaxDurationTracker::noteConditionChanged(const HashableDimensionKey& key, bool conditionMet,
- const uint64_t timestamp) {
+ const int64_t timestamp) {
auto it = mInfos.find(key);
if (it == mInfos.end()) {
return;
MaxDurationTracker(const ConfigKey& key, const int64_t& id, const MetricDimensionKey& eventKey,
sp<ConditionWizard> wizard, int conditionIndex,
const std::vector<Matcher>& dimensionInCondition, bool nesting,
- uint64_t currentBucketStartNs, uint64_t currentBucketNum,
- uint64_t startTimeNs, uint64_t bucketSizeNs, bool conditionSliced,
+ int64_t currentBucketStartNs, int64_t currentBucketNum,
+ int64_t startTimeNs, int64_t bucketSizeNs, bool conditionSliced,
bool fullLink,
const std::vector<sp<DurationAnomalyTracker>>& anomalyTrackers);
MaxDurationTracker(const MaxDurationTracker& tracker) = default;
- unique_ptr<DurationTracker> clone(const uint64_t eventTime) override;
+ unique_ptr<DurationTracker> clone(const int64_t eventTime) override;
- void noteStart(const HashableDimensionKey& key, bool condition, const uint64_t eventTime,
+ void noteStart(const HashableDimensionKey& key, bool condition, const int64_t eventTime,
const ConditionKey& conditionKey) override;
- void noteStop(const HashableDimensionKey& key, const uint64_t eventTime,
+ void noteStop(const HashableDimensionKey& key, const int64_t eventTime,
const bool stopAll) override;
- void noteStopAll(const uint64_t eventTime) override;
+ void noteStopAll(const int64_t eventTime) override;
bool flushIfNeeded(
- uint64_t timestampNs,
+ int64_t timestampNs,
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) override;
bool flushCurrentBucket(
- const uint64_t& eventTimeNs,
+ const int64_t& eventTimeNs,
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>*) override;
- void onSlicedConditionMayChange(bool overallCondition, const uint64_t timestamp) override;
- void onConditionChanged(bool condition, const uint64_t timestamp) override;
+ void onSlicedConditionMayChange(bool overallCondition, const int64_t timestamp) override;
+ void onConditionChanged(bool condition, const int64_t timestamp) override;
int64_t predictAnomalyTimestampNs(const DurationAnomalyTracker& anomalyTracker,
const int64_t currentTimestamp) const override;
std::unordered_map<HashableDimensionKey, DurationInfo> mInfos;
void noteConditionChanged(const HashableDimensionKey& key, bool conditionMet,
- const uint64_t timestamp);
+ const int64_t timestamp);
// return true if we should not allow newKey to be tracked because we are above the threshold
bool hitGuardRail(const HashableDimensionKey& newKey);
// NOTE(review): condensed diff hunk (OringDurationTracker .cpp + header). Same int64_t
// migration as MaxDurationTracker. In noteStopAll, `(long long)timestamp - mLastStartTime`
// casts only `timestamp` before the subtraction; the result is still long long so the
// %lld is satisfied, but `(long long)(timestamp - mLastStartTime)` would state the intent
// more clearly.
OringDurationTracker::OringDurationTracker(
const ConfigKey& key, const int64_t& id, const MetricDimensionKey& eventKey,
sp<ConditionWizard> wizard, int conditionIndex, const vector<Matcher>& dimensionInCondition,
- bool nesting, uint64_t currentBucketStartNs, uint64_t currentBucketNum,
- uint64_t startTimeNs, uint64_t bucketSizeNs, bool conditionSliced, bool fullLink,
+ bool nesting, int64_t currentBucketStartNs, int64_t currentBucketNum,
+ int64_t startTimeNs, int64_t bucketSizeNs, bool conditionSliced, bool fullLink,
const vector<sp<DurationAnomalyTracker>>& anomalyTrackers)
: DurationTracker(key, id, eventKey, wizard, conditionIndex, dimensionInCondition, nesting,
currentBucketStartNs, currentBucketNum, startTimeNs, bucketSizeNs,
}
}
-unique_ptr<DurationTracker> OringDurationTracker::clone(const uint64_t eventTime) {
+unique_ptr<DurationTracker> OringDurationTracker::clone(const int64_t eventTime) {
auto clonedTracker = make_unique<OringDurationTracker>(*this);
clonedTracker->mLastStartTime = eventTime;
clonedTracker->mDuration = 0;
}
void OringDurationTracker::noteStart(const HashableDimensionKey& key, bool condition,
- const uint64_t eventTime, const ConditionKey& conditionKey) {
+ const int64_t eventTime, const ConditionKey& conditionKey) {
if (hitGuardRail(key)) {
return;
}
VLOG("Oring: %s start, condition %d", key.toString().c_str(), condition);
}
-void OringDurationTracker::noteStop(const HashableDimensionKey& key, const uint64_t timestamp,
+void OringDurationTracker::noteStop(const HashableDimensionKey& key, const int64_t timestamp,
const bool stopAll) {
VLOG("Oring: %s stop", key.toString().c_str());
auto it = mStarted.find(key);
}
}
-void OringDurationTracker::noteStopAll(const uint64_t timestamp) {
+void OringDurationTracker::noteStopAll(const int64_t timestamp) {
if (!mStarted.empty()) {
mDuration += (timestamp - mLastStartTime);
VLOG("Oring Stop all: record duration %lld %lld ", (long long)timestamp - mLastStartTime,
}
bool OringDurationTracker::flushCurrentBucket(
- const uint64_t& eventTimeNs,
+ const int64_t& eventTimeNs,
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) {
VLOG("OringDurationTracker Flushing.............");
// MetricProducer#notifyAppUpgrade.
int numBucketsForward = 0;
- uint64_t fullBucketEnd = getCurrentBucketEndTimeNs();
- uint64_t currentBucketEndTimeNs;
+ int64_t fullBucketEnd = getCurrentBucketEndTimeNs();
+ int64_t currentBucketEndTimeNs;
if (eventTimeNs >= fullBucketEnd) {
numBucketsForward = 1 + (eventTimeNs - fullBucketEnd) / mBucketSizeNs;
}
bool OringDurationTracker::flushIfNeeded(
- uint64_t eventTimeNs, unordered_map<MetricDimensionKey, vector<DurationBucket>>* output) {
+ int64_t eventTimeNs, unordered_map<MetricDimensionKey, vector<DurationBucket>>* output) {
if (eventTimeNs < getCurrentBucketEndTimeNs()) {
return false;
}
}
void OringDurationTracker::onSlicedConditionMayChange(bool overallCondition,
- const uint64_t timestamp) {
+ const int64_t timestamp) {
vector<pair<HashableDimensionKey, int>> startedToPaused;
vector<pair<HashableDimensionKey, int>> pausedToStarted;
if (!mStarted.empty()) {
}
}
-void OringDurationTracker::onConditionChanged(bool condition, const uint64_t timestamp) {
+void OringDurationTracker::onConditionChanged(bool condition, const int64_t timestamp) {
if (condition) {
if (!mPaused.empty()) {
VLOG("Condition true, all started");
OringDurationTracker(const ConfigKey& key, const int64_t& id,
const MetricDimensionKey& eventKey, sp<ConditionWizard> wizard,
int conditionIndex, const std::vector<Matcher>& dimensionInCondition,
- bool nesting, uint64_t currentBucketStartNs, uint64_t currentBucketNum,
- uint64_t startTimeNs, uint64_t bucketSizeNs, bool conditionSliced,
+ bool nesting, int64_t currentBucketStartNs, int64_t currentBucketNum,
+ int64_t startTimeNs, int64_t bucketSizeNs, bool conditionSliced,
bool fullLink,
const std::vector<sp<DurationAnomalyTracker>>& anomalyTrackers);
OringDurationTracker(const OringDurationTracker& tracker) = default;
- unique_ptr<DurationTracker> clone(const uint64_t eventTime) override;
+ unique_ptr<DurationTracker> clone(const int64_t eventTime) override;
- void noteStart(const HashableDimensionKey& key, bool condition, const uint64_t eventTime,
+ void noteStart(const HashableDimensionKey& key, bool condition, const int64_t eventTime,
const ConditionKey& conditionKey) override;
- void noteStop(const HashableDimensionKey& key, const uint64_t eventTime,
+ void noteStop(const HashableDimensionKey& key, const int64_t eventTime,
const bool stopAll) override;
- void noteStopAll(const uint64_t eventTime) override;
+ void noteStopAll(const int64_t eventTime) override;
- void onSlicedConditionMayChange(bool overallCondition, const uint64_t timestamp) override;
- void onConditionChanged(bool condition, const uint64_t timestamp) override;
+ void onSlicedConditionMayChange(bool overallCondition, const int64_t timestamp) override;
+ void onConditionChanged(bool condition, const int64_t timestamp) override;
bool flushCurrentBucket(
- const uint64_t& eventTimeNs,
+ const int64_t& eventTimeNs,
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) override;
bool flushIfNeeded(
- uint64_t timestampNs,
+ int64_t timestampNs,
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) override;
int64_t predictAnomalyTimestampNs(const DurationAnomalyTracker& anomalyTracker,
public:
// Uid map will notify this listener that the app with apk name and uid has been upgraded to
// the specified version.
- virtual void notifyAppUpgrade(const uint64_t& eventTimeNs, const std::string& apk,
+ virtual void notifyAppUpgrade(const int64_t& eventTimeNs, const std::string& apk,
const int uid, const int64_t version) = 0;
// Notify interested listeners that the given apk and uid combination no longer exits.
- virtual void notifyAppRemoved(const uint64_t& eventTimeNs, const std::string& apk,
+ virtual void notifyAppRemoved(const int64_t& eventTimeNs, const std::string& apk,
const int uid) = 0;
// Notify the listener that the UidMap snapshot is available.
- virtual void onUidMapReceived(const uint64_t& eventTimeNs) = 0;
+ virtual void onUidMapReceived(const int64_t& eventTimeNs) = 0;
};
} // namespace statsd
optional int64 id = 2;
optional int32 creation_time_sec = 3;
optional int32 deletion_time_sec = 4;
+ optional int32 reset_time_sec = 19;
optional int32 metric_count = 5;
optional int32 condition_count = 6;
optional int32 matcher_count = 7;
}
repeated Annotation annotation = 14;
+ optional int64 ttl_in_seconds = 15;
+
// Field number 1000 is reserved for later use.
reserved 1000;
}
}
}
-bool StorageManager::hasIdenticalConfig(const ConfigKey& key,
- const vector<uint8_t>& config) {
+// Loads the serialized config stored for |key| and parses it into |config|.
+// Returns false when |config| is null, no config file exists for the key, or
+// the stored bytes fail to parse as a StatsdConfig.
+bool StorageManager::readConfigFromDisk(const ConfigKey& key, StatsdConfig* config) {
+    if (config == nullptr) {
+        return false;
+    }
+    string serialized;
+    if (!StorageManager::readConfigFromDisk(key, &serialized)) {
+        return false;
+    }
+    return config->ParseFromString(serialized);
+}
+
+// Reads the raw serialized config for |key| from the stats service directory
+// into |content|. Returns true only when a matching file was read successfully.
+bool StorageManager::readConfigFromDisk(const ConfigKey& key, string* content) {
     unique_ptr<DIR, decltype(&closedir)> dir(opendir(STATS_SERVICE_DIR),
                                              closedir);
     if (dir == NULL) {
     }
     string suffix = StringPrintf("%d_%lld", key.GetUid(), (long long)key.GetId());
-
     dirent* de;
     while ((de = readdir(dir.get()))) {
         char* name = de->d_name;
         int fd = open(StringPrintf("%s/%s", STATS_SERVICE_DIR, name).c_str(),
                       O_RDONLY | O_CLOEXEC);
         if (fd != -1) {
-            string content;
-            if (android::base::ReadFdToString(fd, &content)) {
-                vector<uint8_t> vec(content.begin(), content.end());
-                if (vec == config) {
-                    close(fd);
-                    return true;
-                }
+            if (android::base::ReadFdToString(fd, content)) {
+                // Must close the fd on the success path as well; the close()
+                // below is skipped by the early return (the pre-refactor code
+                // closed it before returning true). ReadFdToString does not
+                // take ownership of the descriptor.
+                close(fd);
+                return true;
+            }
             close(fd);
         }
     return false;
 }
+// Returns true iff the config already stored on disk for |key| is
+// byte-for-byte identical to |config|.
+bool StorageManager::hasIdenticalConfig(const ConfigKey& key,
+                                        const vector<uint8_t>& config) {
+    string onDisk;
+    if (!StorageManager::readConfigFromDisk(key, &onDisk)) {
+        return false;
+    }
+    return vector<uint8_t>(onDisk.begin(), onDisk.end()) == config;
+}
+
void StorageManager::trimToFit(const char* path) {
unique_ptr<DIR, decltype(&closedir)> dir(opendir(path), closedir);
if (dir == NULL) {
static void readConfigFromDisk(std::map<ConfigKey, StatsdConfig>& configsMap);
/**
+     * Call to load the specified config from disk. Returns false if the config file does not
+     * exist or an error occurs while reading the file.
+ */
+ static bool readConfigFromDisk(const ConfigKey& key, StatsdConfig* config);
+ static bool readConfigFromDisk(const ConfigKey& key, string* config);
+
+ /**
* Trims files in the provided directory to limit the total size, number of
* files, accumulation of outdated files.
*/
MOCK_METHOD0(byteSize, size_t());
- MOCK_METHOD1(dropData, void(const uint64_t dropTimeNs));
+ MOCK_METHOD1(dropData, void(const int64_t dropTimeNs));
};
TEST(StatsLogProcessorTest, TestRateLimitByteSize) {
--- /dev/null
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "src/StatsLogProcessor.h"
+#include "src/stats_log_util.h"
+#include "tests/statsd_test_util.h"
+
+#include <vector>
+
+namespace android {
+namespace os {
+namespace statsd {
+
+#ifdef __ANDROID__
+
+namespace {
+
+// Builds a test config: a count metric over wakelock-acquire events (sliced by
+// the first attribution uid), one anomaly alert on that metric, and a 2-hour
+// config TTL so the ConfigTtl e2e test can exercise TTL expiry/reset.
+StatsdConfig CreateStatsdConfig(int num_buckets, int threshold) {
+    StatsdConfig config;
+    config.add_allowed_log_source("AID_ROOT"); // LogEvent defaults to UID of root.
+    auto wakelockAcquireMatcher = CreateAcquireWakelockAtomMatcher();
+
+    *config.add_atom_matcher() = wakelockAcquireMatcher;
+
+    // Count metric id 123456 is referenced below by the alert's metric_id.
+    auto countMetric = config.add_count_metric();
+    countMetric->set_id(123456);
+    countMetric->set_what(wakelockAcquireMatcher.id());
+    *countMetric->mutable_dimensions_in_what() = CreateAttributionUidDimensions(
+        android::util::WAKELOCK_STATE_CHANGED, {Position::FIRST});
+    countMetric->set_bucket(FIVE_MINUTES);
+
+    // Alert fires when the sum over num_buckets buckets exceeds threshold.
+    auto alert = config.add_alert();
+    alert->set_id(StringToId("alert"));
+    alert->set_metric_id(123456);
+    alert->set_num_buckets(num_buckets);
+    alert->set_refractory_period_secs(10);
+    alert->set_trigger_if_sum_gt(threshold);
+
+    // Two hours
+    config.set_ttl_in_seconds(2 * 3600);
+    return config;
+}
+
+} // namespace
+
+// Verifies config-TTL handling end to end: the third wakelock event arrives
+// 25 five-minute buckets (125 min) after the start, past the 2-hour TTL, so
+// the config is reset and its new TTL end is anchored at that event's
+// timestamp plus ttl_in_seconds.
+TEST(ConfigTtlE2eTest, TestCountMetric) {
+    const int num_buckets = 1;
+    const int threshold = 3;
+    auto config = CreateStatsdConfig(num_buckets, threshold);
+
+    int64_t bucketStartTimeNs = 10000000000;
+    int64_t bucketSizeNs =
+        TimeUnitToBucketSizeInMillis(config.count_metric(0).bucket()) * 1000000;
+
+    ConfigKey cfgKey;
+    auto processor = CreateStatsLogProcessor(bucketStartTimeNs / NS_PER_SEC, config, cfgKey);
+    EXPECT_EQ(processor->mMetricsManagers.size(), 1u);
+    EXPECT_TRUE(processor->mMetricsManagers.begin()->second->isConfigValid());
+
+    std::vector<AttributionNodeInternal> attributions1 = {CreateAttribution(111, "App1")};
+
+    // Two events within the TTL window...
+    auto event = CreateAcquireWakelockEvent(attributions1, "wl1", bucketStartTimeNs + 2);
+    processor->OnLogEvent(event.get());
+
+    event = CreateAcquireWakelockEvent(attributions1, "wl2", bucketStartTimeNs + bucketSizeNs + 2);
+    processor->OnLogEvent(event.get());
+
+    // ...then one past the 2-hour TTL (25 buckets = 125 min), triggering a
+    // config reset.
+    event = CreateAcquireWakelockEvent(
+        attributions1, "wl1", bucketStartTimeNs + 25 * bucketSizeNs + 2);
+    processor->OnLogEvent(event.get());
+
+    // New TTL end = timestamp of the resetting event + 2 hours.
+    EXPECT_EQ((int64_t)(bucketStartTimeNs + 25 * bucketSizeNs + 2 + 2 * 3600 * NS_PER_SEC),
+              processor->mMetricsManagers.begin()->second->getTtlEndNs());
+}
+
+
+#else
+GTEST_LOG_(INFO) << "This test does nothing.\n";
+#endif
+
+} // namespace statsd
+} // namespace os
+} // namespace android
TEST(CountMetricProducerTest, TestEventWithAppUpgrade) {
sp<AlarmMonitor> alarmMonitor;
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
- uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
+ int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
int tagId = 1;
int conditionTagId = 2;
// Anomaly tracker only contains full buckets.
EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
- uint64_t lastEndTimeNs = countProducer.getCurrentBucketEndTimeNs();
+ int64_t lastEndTimeNs = countProducer.getCurrentBucketEndTimeNs();
// Next event occurs in same bucket as partial bucket created.
LogEvent event2(tagId, bucketStartTimeNs + 59 * NS_PER_SEC + 10);
event2.write("222"); // uid
}
TEST(CountMetricProducerTest, TestEventWithAppUpgradeInNextBucket) {
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
- uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
+ int64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
int tagId = 1;
int conditionTagId = 2;
EXPECT_EQ((int64_t)bucketStartTimeNs,
countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketStartNs);
EXPECT_EQ(bucketStartTimeNs + bucketSizeNs,
- (uint64_t)countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketEndNs);
+ countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketEndNs);
EXPECT_EQ(eventUpgradeTimeNs, countProducer.mCurrentBucketStartTimeNs);
// Next event occurs in same bucket as partial bucket created.
EXPECT_EQ((int64_t)eventUpgradeTimeNs,
countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mBucketStartNs);
EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs,
- (uint64_t)countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mBucketEndNs);
+ countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mBucketEndNs);
}
TEST(CountMetricProducerTest, TestAnomalyDetectionUnSliced) {
TEST(DurationMetricTrackerTest, TestNoCondition) {
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- uint64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketStartTimeNs = 10000000000;
int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
DurationMetric metric;
EXPECT_EQ(2UL, buckets.size());
EXPECT_EQ(bucketStartTimeNs, buckets[0].mBucketStartNs);
EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets[0].mBucketEndNs);
- EXPECT_EQ(bucketSizeNs - 1ULL, buckets[0].mDuration);
+ EXPECT_EQ(bucketSizeNs - 1LL, buckets[0].mDuration);
EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets[1].mBucketStartNs);
EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs, buckets[1].mBucketEndNs);
- EXPECT_EQ(2ULL, buckets[1].mDuration);
+ EXPECT_EQ(2LL, buckets[1].mDuration);
}
TEST(DurationMetricTrackerTest, TestNonSlicedCondition) {
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- uint64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketStartTimeNs = 10000000000;
int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
DurationMetric metric;
EXPECT_EQ(1UL, buckets2.size());
EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets2[0].mBucketStartNs);
EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs, buckets2[0].mBucketEndNs);
- EXPECT_EQ(1ULL, buckets2[0].mDuration);
+ EXPECT_EQ(1LL, buckets2[0].mDuration);
}
TEST(DurationMetricTrackerTest, TestSumDurationWithUpgrade) {
* - [70,130]: All 60 secs
* - [130, 210]: Only 5 secs (event ended at 135sec)
*/
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
- uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
- uint64_t startTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
- uint64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
+ int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
+ int64_t startTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
+ int64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
int tagId = 1;
* - [70,75]: 5 sec
* - [75,130]: 55 secs
*/
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
- uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
- uint64_t startTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
- uint64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
+ int64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
+ int64_t startTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
+ int64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
int tagId = 1;
TEST(DurationMetricTrackerTest, TestSumDurationAnomalyWithUpgrade) {
sp<AlarmMonitor> alarmMonitor;
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
- uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
- uint64_t startTimeNs = bucketStartTimeNs + 1;
- uint64_t endTimeNs = startTimeNs + 65 * NS_PER_SEC;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
+ int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
+ int64_t startTimeNs = bucketStartTimeNs + 1;
+ int64_t endTimeNs = startTimeNs + 65 * NS_PER_SEC;
int tagId = 1;
durationProducer.onMatchedLogEvent(2 /* stop index*/, end_event);
EXPECT_EQ(bucketStartTimeNs + bucketSizeNs - startTimeNs,
- (uint64_t)anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
+ anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
}
TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgrade) {
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
- uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
- uint64_t startTimeNs = bucketStartTimeNs + 1;
- uint64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
+ int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
+ int64_t startTimeNs = bucketStartTimeNs + 1;
+ int64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
int tagId = 1;
}
TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgradeInNextBucket) {
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
- uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
- uint64_t startTimeNs = bucketStartTimeNs + 1;
- uint64_t endTimeNs = startTimeNs + 115 * NS_PER_SEC;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
+ int64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
+ int64_t startTimeNs = bucketStartTimeNs + 1;
+ int64_t endTimeNs = startTimeNs + 115 * NS_PER_SEC;
int tagId = 1;
const ConfigKey kConfigKey(0, 12345);
TEST(EventMetricProducerTest, TestNoCondition) {
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
- uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t eventStartTimeNs = bucketStartTimeNs + 1;
+ int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
EventMetric metric;
metric.set_id(1);
}
TEST(EventMetricProducerTest, TestEventsWithNonSlicedCondition) {
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
- uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t eventStartTimeNs = bucketStartTimeNs + 1;
+ int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
EventMetric metric;
metric.set_id(1);
}
TEST(EventMetricProducerTest, TestEventsWithSlicedCondition) {
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
int tagId = 1;
int conditionTagId = 2;
const int64_t bucket2StartTimeNs = bucketStartTimeNs + bucketSizeNs;
const int64_t bucket3StartTimeNs = bucketStartTimeNs + 2 * bucketSizeNs;
const int64_t bucket4StartTimeNs = bucketStartTimeNs + 3 * bucketSizeNs;
-const uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
+const int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
TEST(GaugeMetricProducerTest, TestNoCondition) {
GaugeMetric metric;
gaugeProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
EXPECT_EQ(0UL, (*gaugeProducer.mCurrentSlicedBucket).count(DEFAULT_METRIC_DIMENSION_KEY));
EXPECT_EQ(1UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
- EXPECT_EQ(0UL, gaugeProducer.mCurrentBucketNum);
+ EXPECT_EQ(0L, gaugeProducer.mCurrentBucketNum);
EXPECT_EQ(eventUpgradeTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
// Partial buckets are not sent to anomaly tracker.
EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
event2->write(10);
event2->init();
gaugeProducer.onMatchedLogEvent(1 /*log matcher index*/, *event2);
- EXPECT_EQ(0UL, gaugeProducer.mCurrentBucketNum);
+ EXPECT_EQ(0L, gaugeProducer.mCurrentBucketNum);
EXPECT_EQ(1UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
- EXPECT_EQ((uint64_t)eventUpgradeTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
+ EXPECT_EQ((int64_t)eventUpgradeTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
// Partial buckets are not sent to anomaly tracker.
EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
event3->write(10);
event3->init();
gaugeProducer.onMatchedLogEvent(1 /*log matcher index*/, *event3);
- EXPECT_EQ(1UL, gaugeProducer.mCurrentBucketNum);
+ EXPECT_EQ(1L, gaugeProducer.mCurrentBucketNum);
EXPECT_EQ(2UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
- EXPECT_EQ((uint64_t)bucketStartTimeNs + bucketSizeNs, gaugeProducer.mCurrentBucketStartTimeNs);
+ EXPECT_EQ((int64_t)bucketStartTimeNs + bucketSizeNs, gaugeProducer.mCurrentBucketStartTimeNs);
EXPECT_EQ(1, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
// Next event should trigger creation of new bucket.
event4->write(10);
event4->init();
gaugeProducer.onMatchedLogEvent(1 /*log matcher index*/, *event4);
- EXPECT_EQ(2UL, gaugeProducer.mCurrentBucketNum);
+ EXPECT_EQ(2L, gaugeProducer.mCurrentBucketNum);
EXPECT_EQ(3UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
EXPECT_EQ(2, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
}
gaugeProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
EXPECT_EQ(1UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
- EXPECT_EQ(0UL, gaugeProducer.mCurrentBucketNum);
- EXPECT_EQ((uint64_t)eventUpgradeTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
+ EXPECT_EQ(0L, gaugeProducer.mCurrentBucketNum);
+ EXPECT_EQ((int64_t)eventUpgradeTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
EXPECT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
EXPECT_EQ(2, gaugeProducer.mCurrentSlicedBucket->begin()
->second.front()
const HashableDimensionKey conditionKey = getMockedDimensionKey(TagId, 4, "1");
const HashableDimensionKey key1 = getMockedDimensionKey(TagId, 1, "1");
const HashableDimensionKey key2 = getMockedDimensionKey(TagId, 1, "2");
-const uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+const int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
TEST(MaxDurationTrackerTest, TestSimpleMaxDuration) {
const MetricDimensionKey eventKey = getMockedMetricDimensionKey(TagId, 0, "1");
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
- uint64_t bucketNum = 0;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+ int64_t bucketNum = 0;
int64_t metricId = 1;
MaxDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, -1, dimensionInCondition,
tracker.flushIfNeeded(bucketStartTimeNs + bucketSizeNs + 1, &buckets);
EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
EXPECT_EQ(1u, buckets[eventKey].size());
- EXPECT_EQ(20ULL, buckets[eventKey][0].mDuration);
+ EXPECT_EQ(20LL, buckets[eventKey][0].mDuration);
}
TEST(MaxDurationTrackerTest, TestStopAll) {
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
- uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
- uint64_t bucketNum = 0;
+ int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+ int64_t bucketNum = 0;
int64_t metricId = 1;
MaxDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, -1, dimensionInCondition,
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
- uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
- uint64_t bucketNum = 0;
+ int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+ int64_t bucketNum = 0;
int64_t metricId = 1;
MaxDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, -1, dimensionInCondition,
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
- uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
- uint64_t bucketNum = 0;
+ int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+ int64_t bucketNum = 0;
int64_t metricId = 1;
MaxDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, -1, dimensionInCondition,
Start in first bucket, stop in second bucket. Condition turns on and off in the first bucket
and again turns on and off in the second bucket.
*/
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
- uint64_t eventStartTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
- uint64_t conditionStarts1 = bucketStartTimeNs + 11 * NS_PER_SEC;
- uint64_t conditionStops1 = bucketStartTimeNs + 14 * NS_PER_SEC;
- uint64_t conditionStarts2 = bucketStartTimeNs + bucketSizeNs + 5 * NS_PER_SEC;
- uint64_t conditionStops2 = conditionStarts2 + 10 * NS_PER_SEC;
- uint64_t eventStopTimeNs = conditionStops2 + 8 * NS_PER_SEC;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+ int64_t eventStartTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
+ int64_t conditionStarts1 = bucketStartTimeNs + 11 * NS_PER_SEC;
+ int64_t conditionStops1 = bucketStartTimeNs + 14 * NS_PER_SEC;
+ int64_t conditionStarts2 = bucketStartTimeNs + bucketSizeNs + 5 * NS_PER_SEC;
+ int64_t conditionStops2 = conditionStarts2 + 10 * NS_PER_SEC;
+ int64_t eventStopTimeNs = conditionStops2 + 8 * NS_PER_SEC;
int64_t metricId = 1;
MaxDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
EXPECT_EQ(1U, buckets.size());
vector<DurationBucket> item = buckets.begin()->second;
EXPECT_EQ(1UL, item.size());
- EXPECT_EQ(13ULL * NS_PER_SEC, item[0].mDuration);
+ EXPECT_EQ((int64_t)(13LL * NS_PER_SEC), item[0].mDuration);
}
TEST(MaxDurationTrackerTest, TestAnomalyDetection) {
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
- uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
- uint64_t bucketNum = 0;
- uint64_t eventStartTimeNs = 13000000000;
+ int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+ int64_t bucketNum = 0;
+ int64_t eventStartTimeNs = 13000000000;
int64_t durationTimeNs = 2 * 1000;
int64_t metricId = 1;
* dimension has already been running for 4 seconds. Thus, we have 40-4=36 seconds remaining
* before we trigger the anomaly.
*/
- uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
- uint64_t bucketNum = 0;
- uint64_t eventStartTimeNs = bucketStartTimeNs + 5 * NS_PER_SEC; // Condition is off at start.
- uint64_t conditionStarts1 = bucketStartTimeNs + 11 * NS_PER_SEC;
- uint64_t conditionStops1 = bucketStartTimeNs + 14 * NS_PER_SEC;
- uint64_t conditionStarts2 = bucketStartTimeNs + 20 * NS_PER_SEC;
- uint64_t eventStartTimeNs2 = conditionStarts2 - 4 * NS_PER_SEC;
+ int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+ int64_t bucketNum = 0;
+ int64_t eventStartTimeNs = bucketStartTimeNs + 5 * NS_PER_SEC; // Condition is off at start.
+ int64_t conditionStarts1 = bucketStartTimeNs + 11 * NS_PER_SEC;
+ int64_t conditionStops1 = bucketStartTimeNs + 14 * NS_PER_SEC;
+ int64_t conditionStarts2 = bucketStartTimeNs + 20 * NS_PER_SEC;
+ int64_t eventStartTimeNs2 = conditionStarts2 - 4 * NS_PER_SEC;
int64_t metricId = 1;
Alert alert;
tracker.noteConditionChanged(key1, true, conditionStarts2);
EXPECT_EQ(1U, anomalyTracker->mAlarms.size());
auto alarm = anomalyTracker->mAlarms.begin()->second;
- uint64_t anomalyFireTimeSec = alarm->timestampSec;
+ int64_t anomalyFireTimeSec = alarm->timestampSec;
EXPECT_EQ(conditionStarts2 + 36 * NS_PER_SEC,
- (unsigned long long)anomalyFireTimeSec * NS_PER_SEC);
+ (long long)anomalyFireTimeSec * NS_PER_SEC);
// Now we test the calculation now that there's a refractory period.
// At the correct time, declare the anomaly. This will set a refractory period. Make sure it
std::unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> firedAlarms({alarm});
anomalyTracker->informAlarmsFired(anomalyFireTimeSec * NS_PER_SEC, firedAlarms);
EXPECT_EQ(0u, anomalyTracker->mAlarms.size());
- uint64_t refractoryPeriodEndsSec = anomalyFireTimeSec + refPeriodSec;
+ int64_t refractoryPeriodEndsSec = anomalyFireTimeSec + refPeriodSec;
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(eventKey), refractoryPeriodEndsSec);
// Now stop and start again. Make sure the new predictAnomalyTimestampNs takes into account
// the refractory period correctly.
- uint64_t eventStopTimeNs = anomalyFireTimeSec * NS_PER_SEC + 10;
+ int64_t eventStopTimeNs = anomalyFireTimeSec * NS_PER_SEC + 10;
tracker.noteStop(key1, eventStopTimeNs, false);
tracker.noteStop(key2, eventStopTimeNs, false);
tracker.noteStart(key1, true, eventStopTimeNs + 1000000, conditionKey1);
// Anomaly is ongoing, but we're still in the refractory period.
EXPECT_EQ(1U, anomalyTracker->mAlarms.size());
alarm = anomalyTracker->mAlarms.begin()->second;
- EXPECT_EQ(refractoryPeriodEndsSec, (unsigned long long)(alarm->timestampSec));
+ EXPECT_EQ(refractoryPeriodEndsSec, (long long)(alarm->timestampSec));
// Makes sure it is correct after the refractory period is over.
tracker.noteStop(key1, eventStopTimeNs + 2000000, false);
- uint64_t justBeforeRefPeriodNs = (refractoryPeriodEndsSec - 2) * NS_PER_SEC;
+ int64_t justBeforeRefPeriodNs = (refractoryPeriodEndsSec - 2) * NS_PER_SEC;
tracker.noteStart(key1, true, justBeforeRefPeriodNs, conditionKey1);
alarm = anomalyTracker->mAlarms.begin()->second;
EXPECT_EQ(justBeforeRefPeriodNs + 40 * NS_PER_SEC,
* nested dimensions, are started for 8 seconds. When we stop, the other nested dimension has
* been started for 5 seconds. So we can only allow 35 more seconds from now.
*/
- uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
- uint64_t bucketNum = 0;
- uint64_t eventStartTimeNs1 = bucketStartTimeNs + 5 * NS_PER_SEC; // Condition is off at start.
- uint64_t eventStopTimeNs1 = bucketStartTimeNs + 13 * NS_PER_SEC;
- uint64_t eventStartTimeNs2 = bucketStartTimeNs + 8 * NS_PER_SEC;
+ int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+ int64_t bucketNum = 0;
+ int64_t eventStartTimeNs1 = bucketStartTimeNs + 5 * NS_PER_SEC; // Condition is off at start.
+ int64_t eventStopTimeNs1 = bucketStartTimeNs + 13 * NS_PER_SEC;
+ int64_t eventStartTimeNs2 = bucketStartTimeNs + 8 * NS_PER_SEC;
int64_t metricId = 1;
Alert alert;
const HashableDimensionKey kConditionKey1 = getMockedDimensionKey(TagId, 1, "maps");
const HashableDimensionKey kEventKey1 = getMockedDimensionKey(TagId, 2, "maps");
const HashableDimensionKey kEventKey2 = getMockedDimensionKey(TagId, 3, "maps");
-const uint64_t bucketSizeNs = 30 * NS_PER_SEC;
+const int64_t bucketSizeNs = 30 * NS_PER_SEC;
TEST(OringDurationTrackerTest, TestDurationOverlap) {
const MetricDimensionKey eventKey = getMockedMetricDimensionKey(TagId, 0, "event");
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
- uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketNum = 0;
- uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
- uint64_t durationTimeNs = 2 * 1000;
+ int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketNum = 0;
+ int64_t eventStartTimeNs = bucketStartTimeNs + 1;
+ int64_t durationTimeNs = 2 * 1000;
OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
false, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
- uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketNum = 0;
- uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
+ int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketNum = 0;
+ int64_t eventStartTimeNs = bucketStartTimeNs + 1;
OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
tracker.flushIfNeeded(bucketStartTimeNs + bucketSizeNs + 1, &buckets);
EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
EXPECT_EQ(1u, buckets[eventKey].size());
- EXPECT_EQ(2003ULL, buckets[eventKey][0].mDuration);
+ EXPECT_EQ(2003LL, buckets[eventKey][0].mDuration);
}
TEST(OringDurationTrackerTest, TestStopAll) {
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
- uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketNum = 0;
- uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
+ int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketNum = 0;
+ int64_t eventStartTimeNs = bucketStartTimeNs + 1;
OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
tracker.flushIfNeeded(bucketStartTimeNs + bucketSizeNs + 1, &buckets);
EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
EXPECT_EQ(1u, buckets[eventKey].size());
- EXPECT_EQ(2003ULL, buckets[eventKey][0].mDuration);
+ EXPECT_EQ(2003LL, buckets[eventKey][0].mDuration);
}
TEST(OringDurationTrackerTest, TestCrossBucketBoundary) {
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
- uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketNum = 0;
- uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
- uint64_t durationTimeNs = 2 * 1000;
+ int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketNum = 0;
+ int64_t eventStartTimeNs = bucketStartTimeNs + 1;
+ int64_t durationTimeNs = 2 * 1000;
OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
- uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketNum = 0;
- uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
- uint64_t durationTimeNs = 2 * 1000;
+ int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketNum = 0;
+ int64_t eventStartTimeNs = bucketStartTimeNs + 1;
+ int64_t durationTimeNs = 2 * 1000;
OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
false, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
tracker.flushIfNeeded(bucketStartTimeNs + bucketSizeNs + 1, &buckets);
EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
EXPECT_EQ(1u, buckets[eventKey].size());
- EXPECT_EQ(5ULL, buckets[eventKey][0].mDuration);
+ EXPECT_EQ(5LL, buckets[eventKey][0].mDuration);
}
TEST(OringDurationTrackerTest, TestDurationConditionChange2) {
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
- uint64_t bucketNum = 0;
- uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
- uint64_t durationTimeNs = 2 * 1000;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+ int64_t bucketNum = 0;
+ int64_t eventStartTimeNs = bucketStartTimeNs + 1;
+ int64_t durationTimeNs = 2 * 1000;
OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
false, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
tracker.flushIfNeeded(bucketStartTimeNs + bucketSizeNs + 1, &buckets);
EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
EXPECT_EQ(1u, buckets[eventKey].size());
- EXPECT_EQ(1005ULL, buckets[eventKey][0].mDuration);
+ EXPECT_EQ(1005LL, buckets[eventKey][0].mDuration);
}
TEST(OringDurationTrackerTest, TestDurationConditionChangeNested) {
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
- uint64_t bucketStartTimeNs = 10000000000;
- uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
- uint64_t bucketNum = 0;
- uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
+ int64_t bucketStartTimeNs = 10000000000;
+ int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+ int64_t bucketNum = 0;
+ int64_t eventStartTimeNs = bucketStartTimeNs + 1;
OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
tracker.flushIfNeeded(bucketStartTimeNs + bucketSizeNs + 1, &buckets);
EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
EXPECT_EQ(1u, buckets[eventKey].size());
- EXPECT_EQ(15ULL, buckets[eventKey][0].mDuration);
+ EXPECT_EQ(15LL, buckets[eventKey][0].mDuration);
}
TEST(OringDurationTrackerTest, TestPredictAnomalyTimestamp) {
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- uint64_t bucketStartTimeNs = 10 * NS_PER_SEC;
- uint64_t bucketNum = 0;
- uint64_t eventStartTimeNs = bucketStartTimeNs + NS_PER_SEC + 1;
+ int64_t bucketStartTimeNs = 10 * NS_PER_SEC;
+ int64_t bucketNum = 0;
+ int64_t eventStartTimeNs = bucketStartTimeNs + NS_PER_SEC + 1;
sp<AlarmMonitor> alarmMonitor;
sp<DurationAnomalyTracker> anomalyTracker =
tracker.noteStop(DEFAULT_DIMENSION_KEY, eventStartTimeNs + 3, false);
EXPECT_EQ(0u, buckets[eventKey].size());
- uint64_t event1StartTimeNs = eventStartTimeNs + 10;
+ int64_t event1StartTimeNs = eventStartTimeNs + 10;
tracker.noteStart(kEventKey1, true, event1StartTimeNs, ConditionKey());
// No past buckets. The anomaly will happen in bucket #0.
EXPECT_EQ((long long)(event1StartTimeNs + alert.trigger_if_sum_gt() - 3),
tracker.predictAnomalyTimestampNs(*anomalyTracker, event1StartTimeNs));
- uint64_t event1StopTimeNs = eventStartTimeNs + bucketSizeNs + 10;
+ int64_t event1StopTimeNs = eventStartTimeNs + bucketSizeNs + 10;
tracker.flushIfNeeded(event1StopTimeNs, &buckets);
tracker.noteStop(kEventKey1, event1StopTimeNs, false);
EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
EXPECT_EQ(1u, buckets[eventKey].size());
- EXPECT_EQ(3ULL + bucketStartTimeNs + bucketSizeNs - eventStartTimeNs - 10,
+ EXPECT_EQ(3LL + bucketStartTimeNs + bucketSizeNs - eventStartTimeNs - 10,
buckets[eventKey][0].mDuration);
const int64_t bucket0Duration = 3ULL + bucketStartTimeNs + bucketSizeNs - eventStartTimeNs - 10;
const int64_t bucket1Duration = eventStartTimeNs + 10 - bucketStartTimeNs;
// One past buckets. The anomaly will happen in bucket #1.
- uint64_t event2StartTimeNs = eventStartTimeNs + bucketSizeNs + 15;
+ int64_t event2StartTimeNs = eventStartTimeNs + bucketSizeNs + 15;
tracker.noteStart(kEventKey1, true, event2StartTimeNs, ConditionKey());
EXPECT_EQ((long long)(event2StartTimeNs + alert.trigger_if_sum_gt() - bucket0Duration -
bucket1Duration),
// Only one past buckets is applicable. Bucket +0 should be trashed. The anomaly will happen in
// bucket #2.
- uint64_t event3StartTimeNs = bucketStartTimeNs + 2 * bucketSizeNs - 9 * NS_PER_SEC;
+ int64_t event3StartTimeNs = bucketStartTimeNs + 2 * bucketSizeNs - 9 * NS_PER_SEC;
tracker.noteStart(kEventKey1, true, event3StartTimeNs, ConditionKey());
EXPECT_EQ((long long)(event3StartTimeNs + alert.trigger_if_sum_gt() - bucket1Duration - 1LL),
tracker.predictAnomalyTimestampNs(*anomalyTracker, event3StartTimeNs));
alert.set_num_buckets(1);
alert.set_refractory_period_secs(20);
- uint64_t bucketStartTimeNs = 10 * NS_PER_SEC;
- uint64_t bucketNum = 0;
+ int64_t bucketStartTimeNs = 10 * NS_PER_SEC;
+ int64_t bucketNum = 0;
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
sp<AlarmMonitor> alarmMonitor;
true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
bucketSizeNs, true, false, {anomalyTracker});
- uint64_t eventStartTimeNs = bucketStartTimeNs + 9 * NS_PER_SEC;
+ int64_t eventStartTimeNs = bucketStartTimeNs + 9 * NS_PER_SEC;
tracker.noteStart(DEFAULT_DIMENSION_KEY, true, eventStartTimeNs, ConditionKey());
// Anomaly happens in the bucket #1.
EXPECT_EQ((long long)(bucketStartTimeNs + 14 * NS_PER_SEC),
EXPECT_EQ((long long)(bucketStartTimeNs + 34 * NS_PER_SEC) / NS_PER_SEC,
anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_METRIC_DIMENSION_KEY));
- uint64_t event2StartTimeNs = bucketStartTimeNs + 22 * NS_PER_SEC;
+ int64_t event2StartTimeNs = bucketStartTimeNs + 22 * NS_PER_SEC;
EXPECT_EQ((long long)(bucketStartTimeNs + 34 * NS_PER_SEC) / NS_PER_SEC,
anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_METRIC_DIMENSION_KEY));
EXPECT_EQ((long long)(bucketStartTimeNs + 35 * NS_PER_SEC),
// Test the cases where the refractory period is smaller than the bucket size, longer than
// the bucket size, and longer than 2x of the anomaly detection window.
for (int j = 0; j < 3; j++) {
- uint64_t thresholdNs = j * bucketSizeNs + 5 * NS_PER_SEC;
+ int64_t thresholdNs = j * bucketSizeNs + 5 * NS_PER_SEC;
for (int i = 0; i <= 7; ++i) {
vector<Matcher> dimensionInCondition;
Alert alert;
alert.set_refractory_period_secs(
bucketSizeNs / NS_PER_SEC / 2 + i * bucketSizeNs / NS_PER_SEC);
- uint64_t bucketStartTimeNs = 10 * NS_PER_SEC;
- uint64_t bucketNum = 101;
+ int64_t bucketStartTimeNs = 10 * NS_PER_SEC;
+ int64_t bucketNum = 101;
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
sp<AlarmMonitor> alarmMonitor;
true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
bucketSizeNs, true, false, {anomalyTracker});
- uint64_t eventStartTimeNs = bucketStartTimeNs + 9 * NS_PER_SEC;
+ int64_t eventStartTimeNs = bucketStartTimeNs + 9 * NS_PER_SEC;
tracker.noteStart(DEFAULT_DIMENSION_KEY, true, eventStartTimeNs, ConditionKey());
EXPECT_EQ((long long)(eventStartTimeNs + thresholdNs),
tracker.predictAnomalyTimestampNs(*anomalyTracker, eventStartTimeNs));
- uint64_t eventStopTimeNs = eventStartTimeNs + thresholdNs + NS_PER_SEC;
+ int64_t eventStopTimeNs = eventStartTimeNs + thresholdNs + NS_PER_SEC;
tracker.noteStop(DEFAULT_DIMENSION_KEY, eventStopTimeNs, false);
- uint64_t refractoryPeriodEndSec =
+ int64_t refractoryPeriodEndSec =
anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_METRIC_DIMENSION_KEY);
- EXPECT_EQ((long long)(eventStopTimeNs) / NS_PER_SEC + alert.refractory_period_secs(),
+ EXPECT_EQ(eventStopTimeNs / (int64_t)NS_PER_SEC + alert.refractory_period_secs(),
refractoryPeriodEndSec);
// Acquire and release a wakelock in the next bucket.
- uint64_t event2StartTimeNs = eventStopTimeNs + bucketSizeNs;
+ int64_t event2StartTimeNs = eventStopTimeNs + bucketSizeNs;
tracker.noteStart(DEFAULT_DIMENSION_KEY, true, event2StartTimeNs, ConditionKey());
- uint64_t event2StopTimeNs = event2StartTimeNs + 4 * NS_PER_SEC;
+ int64_t event2StopTimeNs = event2StartTimeNs + 4 * NS_PER_SEC;
tracker.noteStop(DEFAULT_DIMENSION_KEY, event2StopTimeNs, false);
// Test the alarm prediction works well when seeing another wakelock start event.
for (int k = 0; k <= 2; ++k) {
- uint64_t event3StartTimeNs = event2StopTimeNs + NS_PER_SEC + k * bucketSizeNs;
- uint64_t alarmTimestampNs =
+ int64_t event3StartTimeNs = event2StopTimeNs + NS_PER_SEC + k * bucketSizeNs;
+ int64_t alarmTimestampNs =
tracker.predictAnomalyTimestampNs(*anomalyTracker, event3StartTimeNs);
EXPECT_GT(alarmTimestampNs, 0u);
EXPECT_GE(alarmTimestampNs, event3StartTimeNs);
- EXPECT_GE(alarmTimestampNs, refractoryPeriodEndSec * NS_PER_SEC);
+ EXPECT_GE(alarmTimestampNs, refractoryPeriodEndSec * (int64_t)NS_PER_SEC);
}
}
}
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- uint64_t bucketStartTimeNs = 10 * NS_PER_SEC;
- uint64_t bucketNum = 0;
- uint64_t eventStartTimeNs = bucketStartTimeNs + NS_PER_SEC + 1;
+ int64_t bucketStartTimeNs = 10 * NS_PER_SEC;
+ int64_t bucketNum = 0;
+ int64_t eventStartTimeNs = bucketStartTimeNs + NS_PER_SEC + 1;
sp<AlarmMonitor> alarmMonitor;
sp<DurationAnomalyTracker> anomalyTracker =
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
ConditionKey conkey;
conkey[StringToId("APP_BACKGROUND")] = kConditionKey1;
- uint64_t bucketStartTimeNs = 10 * NS_PER_SEC;
- uint64_t bucketSizeNs = 30 * NS_PER_SEC;
+ int64_t bucketStartTimeNs = 10 * NS_PER_SEC;
+ int64_t bucketSizeNs = 30 * NS_PER_SEC;
sp<AlarmMonitor> alarmMonitor;
sp<DurationAnomalyTracker> anomalyTracker =
valueProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
EXPECT_EQ(1UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
- EXPECT_EQ((uint64_t)eventUpgradeTimeNs, valueProducer.mCurrentBucketStartTimeNs);
+ EXPECT_EQ(eventUpgradeTimeNs, valueProducer.mCurrentBucketStartTimeNs);
shared_ptr<LogEvent> event2 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 59 * NS_PER_SEC);
event2->write(1);
event2->init();
valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event2);
EXPECT_EQ(1UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
- EXPECT_EQ((uint64_t)eventUpgradeTimeNs, valueProducer.mCurrentBucketStartTimeNs);
+ EXPECT_EQ(eventUpgradeTimeNs, valueProducer.mCurrentBucketStartTimeNs);
// Next value should create a new bucket.
shared_ptr<LogEvent> event3 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 65 * NS_PER_SEC);
event3->init();
valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event3);
EXPECT_EQ(2UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
- EXPECT_EQ((uint64_t)bucketStartTimeNs + bucketSizeNs, valueProducer.mCurrentBucketStartTimeNs);
+ EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, valueProducer.mCurrentBucketStartTimeNs);
}
TEST(ValueMetricProducerTest, TestPulledValueWithUpgrade) {
valueProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
EXPECT_EQ(1UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
- EXPECT_EQ((uint64_t)eventUpgradeTimeNs, valueProducer.mCurrentBucketStartTimeNs);
+ EXPECT_EQ(eventUpgradeTimeNs, valueProducer.mCurrentBucketStartTimeNs);
EXPECT_EQ(20L, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mValue);
allData.clear();
allData.push_back(event);
valueProducer.onDataPulled(allData);
EXPECT_EQ(2UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
- EXPECT_EQ((uint64_t)bucket2StartTimeNs, valueProducer.mCurrentBucketStartTimeNs);
+ EXPECT_EQ(bucket2StartTimeNs, valueProducer.mCurrentBucketStartTimeNs);
EXPECT_EQ(30L, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mValue);
}
[](const sp<IStatsCompanionService>&){});
sp<StatsLogProcessor> processor = new StatsLogProcessor(
uidMap, anomalyAlarmMonitor, periodicAlarmMonitor, timeBaseSec, [](const ConfigKey&){});
- processor->OnConfigUpdated(timeBaseSec, key, config);
+ processor->OnConfigUpdated(timeBaseSec * NS_PER_SEC, key, config);
return processor;
}