Diffstat (limited to 'cmds')
-rw-r--r--  cmds/bmgr/src/com/android/commands/bmgr/Bmgr.java | 260
-rw-r--r--  cmds/bootanimation/Android.mk | 7
-rw-r--r--  cmds/bootanimation/BootAnimation.cpp | 4
-rw-r--r--  cmds/bootanimation/BootAnimation.h | 1
-rw-r--r--  cmds/bootanimation/BootAnimationUtil.cpp | 43
-rw-r--r--  cmds/bootanimation/BootAnimationUtil.h | 7
-rw-r--r--  cmds/bootanimation/audioplay.cpp | 81
-rw-r--r--  cmds/bootanimation/audioplay.h | 5
-rw-r--r--  cmds/bootanimation/bootanimation_main.cpp | 111
-rw-r--r--  cmds/bootanimation/iot/Android.mk | 43
-rw-r--r--  cmds/bootanimation/iot/BootAction.cpp | 33
-rw-r--r--  cmds/bootanimation/iot/BootAction.h | 9
-rw-r--r--  cmds/bootanimation/iot/BootParameters.cpp | 179
-rw-r--r--  cmds/bootanimation/iot/BootParameters.h | 49
-rw-r--r--  cmds/bootanimation/iot/BootParameters_test.cpp | 263
-rw-r--r--  cmds/bootanimation/iot/iotbootanimation_main.cpp | 14
-rw-r--r--  cmds/content/src/com/android/commands/content/Content.java | 2
-rw-r--r--  cmds/dpm/src/com/android/commands/dpm/Dpm.java | 19
-rw-r--r--  cmds/hid/jni/com_android_commands_hid_Device.cpp | 51
-rw-r--r--  cmds/hid/jni/com_android_commands_hid_Device.h | 6
-rw-r--r--  cmds/incident_helper/src/main.cpp | 3
-rw-r--r--  cmds/incident_helper/src/parsers/PageTypeInfoParser.cpp | 9
-rw-r--r--  cmds/incident_helper/testdata/pagetypeinfo.txt | 4
-rw-r--r--  cmds/incident_helper/tests/PageTypeInfoParser_test.cpp | 4
-rw-r--r--  cmds/incidentd/src/FdBuffer.cpp | 14
-rw-r--r--  cmds/incidentd/src/IncidentService.cpp | 18
-rw-r--r--  cmds/incidentd/src/PrivacyBuffer.cpp | 4
-rw-r--r--  cmds/incidentd/src/Section.cpp | 122
-rw-r--r--  cmds/incidentd/src/Section.h | 6
-rw-r--r--  cmds/incidentd/src/Throttler.cpp | 11
-rw-r--r--  cmds/incidentd/tests/Reporter_test.cpp | 2
-rw-r--r--  cmds/incidentd/tests/Section_test.cpp | 8
-rw-r--r--  cmds/input/src/com/android/commands/input/Input.java | 3
-rw-r--r--  cmds/statsd/Android.mk | 27
-rw-r--r--  cmds/statsd/benchmark/metric_util.cpp | 7
-rw-r--r--  cmds/statsd/src/FieldValue.cpp | 147
-rw-r--r--  cmds/statsd/src/FieldValue.h | 27
-rw-r--r--  cmds/statsd/src/HashableDimensionKey.cpp | 4
-rw-r--r--  cmds/statsd/src/StatsLogProcessor.cpp | 57
-rw-r--r--  cmds/statsd/src/StatsLogProcessor.h | 17
-rw-r--r--  cmds/statsd/src/StatsService.cpp | 62
-rw-r--r--  cmds/statsd/src/StatsService.h | 8
-rw-r--r--  cmds/statsd/src/anomaly/AlarmMonitor.cpp | 2
-rw-r--r--  cmds/statsd/src/anomaly/AnomalyTracker.cpp | 8
-rw-r--r--  cmds/statsd/src/anomaly/subscriber_util.cpp | 7
-rw-r--r--  cmds/statsd/src/atoms.proto | 244
-rw-r--r--  cmds/statsd/src/condition/condition_util.cpp | 6
-rw-r--r--  cmds/statsd/src/config/ConfigManager.h | 3
-rw-r--r--  cmds/statsd/src/external/Perfetto.cpp | 2
-rw-r--r--  cmds/statsd/src/external/Perfprofd.cpp | 74
-rw-r--r--  cmds/statsd/src/external/Perfprofd.h | 38
-rw-r--r--  cmds/statsd/src/external/ResourceHealthManagerPuller.cpp | 2
-rw-r--r--  cmds/statsd/src/external/StatsPuller.cpp | 4
-rw-r--r--  cmds/statsd/src/external/StatsPuller.h | 2
-rw-r--r--  cmds/statsd/src/external/StatsPullerManager.cpp (renamed from cmds/statsd/src/external/StatsPullerManagerImpl.cpp) | 44
-rw-r--r--  cmds/statsd/src/external/StatsPullerManager.h | 105
-rw-r--r--  cmds/statsd/src/external/StatsPullerManagerImpl.h | 102
-rw-r--r--  cmds/statsd/src/external/puller_util.cpp | 10
-rw-r--r--  cmds/statsd/src/guardrail/StatsdStats.cpp | 47
-rw-r--r--  cmds/statsd/src/guardrail/StatsdStats.h | 15
-rw-r--r--  cmds/statsd/src/logd/LogEvent.cpp | 49
-rw-r--r--  cmds/statsd/src/logd/LogEvent.h | 9
-rw-r--r--  cmds/statsd/src/logd/LogListener.cpp | 12
-rw-r--r--  cmds/statsd/src/logd/LogListener.h | 3
-rw-r--r--  cmds/statsd/src/logd/LogReader.cpp | 129
-rw-r--r--  cmds/statsd/src/logd/LogReader.h | 69
-rw-r--r--  cmds/statsd/src/main.cpp | 66
-rw-r--r--  cmds/statsd/src/matchers/LogMatchingTracker.h | 2
-rw-r--r--  cmds/statsd/src/metrics/CountMetricProducer.cpp | 9
-rw-r--r--  cmds/statsd/src/metrics/CountMetricProducer.h | 5
-rw-r--r--  cmds/statsd/src/metrics/DurationMetricProducer.cpp | 12
-rw-r--r--  cmds/statsd/src/metrics/DurationMetricProducer.h | 4
-rw-r--r--  cmds/statsd/src/metrics/EventMetricProducer.h | 1
-rw-r--r--  cmds/statsd/src/metrics/GaugeMetricProducer.cpp | 56
-rw-r--r--  cmds/statsd/src/metrics/GaugeMetricProducer.h | 39
-rw-r--r--  cmds/statsd/src/metrics/MetricsManager.cpp | 18
-rw-r--r--  cmds/statsd/src/metrics/MetricsManager.h | 8
-rw-r--r--  cmds/statsd/src/metrics/ValueMetricProducer.cpp | 192
-rw-r--r--  cmds/statsd/src/metrics/ValueMetricProducer.h | 53
-rw-r--r--  cmds/statsd/src/metrics/duration_helper/DurationTracker.h | 1
-rw-r--r--  cmds/statsd/src/metrics/duration_helper/OringDurationTracker.cpp | 1
-rw-r--r--  cmds/statsd/src/metrics/metrics_manager_util.cpp | 73
-rw-r--r--  cmds/statsd/src/metrics/metrics_manager_util.h | 13
-rw-r--r--  cmds/statsd/src/packages/UidMap.cpp | 2
-rw-r--r--  cmds/statsd/src/packages/UidMap.h | 3
-rw-r--r--  cmds/statsd/src/perfetto/perfetto_config.proto | 60
-rwxr-xr-x  cmds/statsd/src/socket/StatsSocketListener.cpp | 20
-rw-r--r--  cmds/statsd/src/stats_log.proto | 8
-rw-r--r--  cmds/statsd/src/stats_util.h | 1
-rw-r--r--  cmds/statsd/src/statsd_config.proto | 24
-rw-r--r--  cmds/statsd/src/storage/StorageManager.cpp | 2
-rw-r--r--  cmds/statsd/tests/FieldValue_test.cpp | 5
-rw-r--r--  cmds/statsd/tests/LogEvent_test.cpp | 90
-rw-r--r--  cmds/statsd/tests/MetricsManager_test.cpp | 93
-rw-r--r--  cmds/statsd/tests/StatsLogProcessor_test.cpp | 181
-rw-r--r--  cmds/statsd/tests/UidMap_test.cpp | 5
-rw-r--r--  cmds/statsd/tests/anomaly/AnomalyTracker_test.cpp | 16
-rw-r--r--  cmds/statsd/tests/condition/CombinationConditionTracker_test.cpp | 6
-rw-r--r--  cmds/statsd/tests/e2e/GaugeMetric_e2e_pull_test.cpp | 24
-rw-r--r--  cmds/statsd/tests/e2e/MetricConditionLink_e2e_test.cpp | 1
-rw-r--r--  cmds/statsd/tests/e2e/ValueMetric_pull_e2e_test.cpp | 38
-rw-r--r--  cmds/statsd/tests/metrics/CountMetricProducer_test.cpp | 31
-rw-r--r--  cmds/statsd/tests/metrics/DurationMetricProducer_test.cpp | 38
-rw-r--r--  cmds/statsd/tests/metrics/EventMetricProducer_test.cpp | 4
-rw-r--r--  cmds/statsd/tests/metrics/GaugeMetricProducer_test.cpp | 182
-rw-r--r--  cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp | 511
-rw-r--r--  cmds/statsd/tests/statsd_test_util.cpp | 6
-rw-r--r--  cmds/uiautomator/library/testrunner-src/com/android/uiautomator/core/ShellUiAutomatorBridge.java | 2
108 files changed, 2933 insertions, 1710 deletions
diff --git a/cmds/bmgr/src/com/android/commands/bmgr/Bmgr.java b/cmds/bmgr/src/com/android/commands/bmgr/Bmgr.java
index 84a04e5ad6e3..a826ec7c717e 100644
--- a/cmds/bmgr/src/com/android/commands/bmgr/Bmgr.java
+++ b/cmds/bmgr/src/com/android/commands/bmgr/Bmgr.java
@@ -16,10 +16,13 @@
package com.android.commands.bmgr;
+import android.annotation.IntDef;
import android.app.backup.BackupManager;
+import android.app.backup.BackupManagerMonitor;
import android.app.backup.BackupProgress;
import android.app.backup.BackupTransport;
import android.app.backup.IBackupManager;
+import android.app.backup.IBackupManagerMonitor;
import android.app.backup.IBackupObserver;
import android.app.backup.IRestoreObserver;
import android.app.backup.IRestoreSession;
@@ -28,6 +31,7 @@ import android.app.backup.RestoreSet;
import android.content.ComponentName;
import android.content.pm.IPackageManager;
import android.content.pm.PackageInfo;
+import android.os.Bundle;
import android.os.RemoteException;
import android.os.ServiceManager;
import android.os.SystemClock;
@@ -36,10 +40,13 @@ import android.util.ArraySet;
import com.android.internal.annotations.GuardedBy;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
+import java.util.Set;
import java.util.concurrent.CountDownLatch;
public final class Bmgr {
@@ -71,9 +78,7 @@ public final class Bmgr {
return;
}
- mBmgr = IBackupManager.Stub.asInterface(ServiceManager.getService("backup"));
- if (mBmgr == null) {
- System.err.println(BMGR_NOT_RUNNING_ERR);
+ if (!isBmgrActive()) {
return;
}
@@ -150,6 +155,27 @@ public final class Bmgr {
showUsage();
}
+ private boolean isBmgrActive() {
+ mBmgr = IBackupManager.Stub.asInterface(ServiceManager.getService("backup"));
+ if (mBmgr == null) {
+ System.err.println(BMGR_NOT_RUNNING_ERR);
+ return false;
+ }
+
+ try {
+ if (!mBmgr.isBackupServiceActive(UserHandle.USER_SYSTEM)) {
+ System.err.println(BMGR_NOT_RUNNING_ERR);
+ return false;
+ }
+ } catch (RemoteException e) {
+ System.err.println(e.toString());
+ System.err.println(BMGR_NOT_RUNNING_ERR);
+ return false;
+ }
+
+ return true;
+ }
+
private String enableToString(boolean enabled) {
return enabled ? "enabled" : "disabled";
}
@@ -228,7 +254,7 @@ public final class Bmgr {
}
// IBackupObserver generically usable for any backup/init operation
- abstract class Observer extends IBackupObserver.Stub {
+ private static abstract class Observer extends IBackupObserver.Stub {
private final Object trigger = new Object();
@GuardedBy("trigger")
@@ -276,7 +302,7 @@ public final class Bmgr {
}
}
- class BackupObserver extends Observer {
+ private static class BackupObserver extends Observer {
@Override
public void onUpdate(String currentPackage, BackupProgress backupProgress) {
super.onUpdate(currentPackage, backupProgress);
@@ -328,7 +354,7 @@ public final class Bmgr {
}
}
- private void backupNowAllPackages(boolean nonIncrementalBackup) {
+ private void backupNowAllPackages(boolean nonIncrementalBackup, @Monitor int monitorState) {
int userId = UserHandle.USER_SYSTEM;
IPackageManager mPm =
IPackageManager.Stub.asInterface(ServiceManager.getService("package"));
@@ -353,20 +379,27 @@ public final class Bmgr {
System.err.println(e.toString());
System.err.println(BMGR_NOT_RUNNING_ERR);
}
- backupNowPackages(Arrays.asList(filteredPackages), nonIncrementalBackup);
+ backupNowPackages(Arrays.asList(filteredPackages), nonIncrementalBackup, monitorState);
}
}
- private void backupNowPackages(List<String> packages, boolean nonIncrementalBackup) {
+ private void backupNowPackages(
+ List<String> packages, boolean nonIncrementalBackup, @Monitor int monitorState) {
int flags = 0;
if (nonIncrementalBackup) {
flags |= BackupManager.FLAG_NON_INCREMENTAL_BACKUP;
}
try {
BackupObserver observer = new BackupObserver();
- // TODO: implement monitor here?
- int err = mBmgr.requestBackup(packages.toArray(new String[packages.size()]), observer,
- null, flags);
+ BackupMonitor monitor =
+ (monitorState != Monitor.OFF)
+ ? new BackupMonitor(monitorState == Monitor.VERBOSE)
+ : null;
+ int err = mBmgr.requestBackup(
+ packages.toArray(new String[packages.size()]),
+ observer,
+ monitor,
+ flags);
if (err == 0) {
// Off and running -- wait for the backup to complete
observer.waitForCompletion();
@@ -383,6 +416,7 @@ public final class Bmgr {
String pkg;
boolean backupAll = false;
boolean nonIncrementalBackup = false;
+ @Monitor int monitor = Monitor.OFF;
ArrayList<String> allPkgs = new ArrayList<String>();
while ((pkg = nextArg()) != null) {
if (pkg.equals("--all")) {
@@ -391,6 +425,10 @@ public final class Bmgr {
nonIncrementalBackup = true;
} else if (pkg.equals("--incremental")) {
nonIncrementalBackup = false;
+ } else if (pkg.equals("--monitor")) {
+ monitor = Monitor.NORMAL;
+ } else if (pkg.equals("--monitor-verbose")) {
+ monitor = Monitor.VERBOSE;
} else {
if (!allPkgs.contains(pkg)) {
allPkgs.add(pkg);
@@ -401,14 +439,14 @@ public final class Bmgr {
if (allPkgs.size() == 0) {
System.out.println("Running " + (nonIncrementalBackup ? "non-" : "") +
"incremental backup for all packages.");
- backupNowAllPackages(nonIncrementalBackup);
+ backupNowAllPackages(nonIncrementalBackup, monitor);
} else {
System.err.println("Provide only '--all' flag or list of packages.");
}
} else if (allPkgs.size() > 0) {
System.out.println("Running " + (nonIncrementalBackup ? "non-" : "") +
"incremental backup for " + allPkgs.size() +" requested packages.");
- backupNowPackages(allPkgs, nonIncrementalBackup);
+ backupNowPackages(allPkgs, nonIncrementalBackup, monitor);
} else {
System.err.println("Provide '--all' flag or list of packages.");
}
@@ -704,34 +742,11 @@ public final class Bmgr {
return;
}
}
-
- System.out.println("done");
}
private void doRestorePackage(String pkg) {
- try {
- mRestore = mBmgr.beginRestoreSession(pkg, null);
- if (mRestore == null) {
- System.err.println(BMGR_NOT_RUNNING_ERR);
- return;
- }
-
- RestoreObserver observer = new RestoreObserver();
- // TODO implement monitor here
- int err = mRestore.restorePackage(pkg, observer, null );
- if (err == 0) {
- // Off and running -- wait for the restore to complete
- observer.waitForCompletion();
- } else {
- System.err.println("Unable to restore package " + pkg);
- }
-
- // And finally shut down the session
- mRestore.endRestoreSession();
- } catch (RemoteException e) {
- System.err.println(e.toString());
- System.err.println(BMGR_NOT_RUNNING_ERR);
- }
+ System.err.println("The syntax 'restore <package>' is no longer supported, please use ");
+ System.err.println("'restore <token> <package>'.");
}
private void doRestoreAll(long token, HashSet<String> filter) {
@@ -784,6 +799,8 @@ public final class Bmgr {
// once the restore has finished, close down the session and we're done
mRestore.endRestoreSession();
+
+ System.out.println("done");
} catch (RemoteException e) {
System.err.println(e.toString());
System.err.println(BMGR_NOT_RUNNING_ERR);
@@ -823,12 +840,12 @@ public final class Bmgr {
System.err.println(" bmgr transport WHICH|-c WHICH_COMPONENT");
System.err.println(" bmgr restore TOKEN");
System.err.println(" bmgr restore TOKEN PACKAGE...");
- System.err.println(" bmgr restore PACKAGE");
System.err.println(" bmgr run");
System.err.println(" bmgr wipe TRANSPORT PACKAGE");
System.err.println(" bmgr fullbackup PACKAGE...");
- System.err.println(" bmgr backupnow --all|PACKAGE...");
+ System.err.println(" bmgr backupnow [--monitor|--monitor-verbose] --all|PACKAGE...");
System.err.println(" bmgr cancel backups");
+ System.err.println(" bmgr init TRANSPORT...");
System.err.println("");
System.err.println("The 'backup' command schedules a backup pass for the named package.");
System.err.println("Note that the backup pass will effectively be a no-op if the package");
@@ -867,10 +884,6 @@ public final class Bmgr {
System.err.println("'restore' operation supplying only a token, but applies a filter to the");
System.err.println("set of applications to be restored.");
System.err.println("");
- System.err.println("The 'restore' command when given just a package name intiates a restore of");
- System.err.println("just that one package according to the restore set selection algorithm");
- System.err.println("used by the RestoreSession.restorePackage() method.");
- System.err.println("");
System.err.println("The 'run' command causes any scheduled backup operation to be initiated");
System.err.println("immediately, without the usual waiting period for batching together");
System.err.println("data changes.");
@@ -885,9 +898,168 @@ public final class Bmgr {
System.err.println("");
System.err.println("The 'backupnow' command runs an immediate backup for one or more packages.");
System.err.println(" --all flag runs backup for all eligible packages.");
+ System.err.println(" --monitor flag prints monitor events.");
+ System.err.println(" --monitor-verbose flag prints monitor events with all keys.");
System.err.println("For each package it will run key/value or full data backup ");
System.err.println("depending on the package's manifest declarations.");
System.err.println("The data is sent via the currently active transport.");
+ System.err.println("");
System.err.println("The 'cancel backups' command cancels all running backups.");
+ System.err.println("");
+ System.err.println("The 'init' command initializes the given transports, wiping all data");
+ System.err.println("from their backing data stores.");
+ }
+
+ private static class BackupMonitor extends IBackupManagerMonitor.Stub {
+ private final boolean mVerbose;
+
+ private BackupMonitor(boolean verbose) {
+ mVerbose = verbose;
+ }
+
+ @Override
+ public void onEvent(Bundle event) throws RemoteException {
+ StringBuilder out = new StringBuilder();
+ int id = event.getInt(BackupManagerMonitor.EXTRA_LOG_EVENT_ID);
+ int category = event.getInt(BackupManagerMonitor.EXTRA_LOG_EVENT_CATEGORY);
+ out.append("=> Event{").append(eventCategoryToString(category));
+ out.append(" / ").append(eventIdToString(id));
+ String packageName = event.getString(BackupManagerMonitor.EXTRA_LOG_EVENT_PACKAGE_NAME);
+ if (packageName != null) {
+ out.append(" : package = ").append(packageName);
+ if (event.containsKey(BackupManagerMonitor.EXTRA_LOG_EVENT_PACKAGE_LONG_VERSION)) {
+ long version =
+ event.getLong(
+ BackupManagerMonitor.EXTRA_LOG_EVENT_PACKAGE_LONG_VERSION);
+ out.append("(v").append(version).append(")");
+ }
+ }
+ if (mVerbose) {
+ Set<String> remainingKeys = new ArraySet<>(event.keySet());
+ remainingKeys.remove(BackupManagerMonitor.EXTRA_LOG_EVENT_ID);
+ remainingKeys.remove(BackupManagerMonitor.EXTRA_LOG_EVENT_CATEGORY);
+ remainingKeys.remove(BackupManagerMonitor.EXTRA_LOG_EVENT_PACKAGE_NAME);
+ remainingKeys.remove(BackupManagerMonitor.EXTRA_LOG_EVENT_PACKAGE_LONG_VERSION);
+ remainingKeys.remove(BackupManagerMonitor.EXTRA_LOG_EVENT_PACKAGE_VERSION);
+ if (!remainingKeys.isEmpty()) {
+ out.append(", other keys =");
+ for (String key : remainingKeys) {
+ out.append(" ").append(key);
+ }
+ }
+ }
+ out.append("}");
+ System.out.println(out.toString());
+ }
+ }
+
+ private static String eventCategoryToString(int eventCategory) {
+ switch (eventCategory) {
+ case BackupManagerMonitor.LOG_EVENT_CATEGORY_TRANSPORT:
+ return "TRANSPORT";
+ case BackupManagerMonitor.LOG_EVENT_CATEGORY_AGENT:
+ return "AGENT";
+ case BackupManagerMonitor.LOG_EVENT_CATEGORY_BACKUP_MANAGER_POLICY:
+ return "BACKUP_MANAGER_POLICY";
+ default:
+ return "UNKNOWN_CATEGORY";
+ }
+ }
+
+ private static String eventIdToString(int eventId) {
+ switch (eventId) {
+ case BackupManagerMonitor.LOG_EVENT_ID_FULL_BACKUP_CANCEL:
+ return "FULL_BACKUP_CANCEL";
+ case BackupManagerMonitor.LOG_EVENT_ID_ILLEGAL_KEY:
+ return "ILLEGAL_KEY";
+ case BackupManagerMonitor.LOG_EVENT_ID_NO_DATA_TO_SEND:
+ return "NO_DATA_TO_SEND";
+ case BackupManagerMonitor.LOG_EVENT_ID_PACKAGE_INELIGIBLE:
+ return "PACKAGE_INELIGIBLE";
+ case BackupManagerMonitor.LOG_EVENT_ID_PACKAGE_KEY_VALUE_PARTICIPANT:
+ return "PACKAGE_KEY_VALUE_PARTICIPANT";
+ case BackupManagerMonitor.LOG_EVENT_ID_PACKAGE_STOPPED:
+ return "PACKAGE_STOPPED";
+ case BackupManagerMonitor.LOG_EVENT_ID_PACKAGE_NOT_FOUND:
+ return "PACKAGE_NOT_FOUND";
+ case BackupManagerMonitor.LOG_EVENT_ID_BACKUP_DISABLED:
+ return "BACKUP_DISABLED";
+ case BackupManagerMonitor.LOG_EVENT_ID_DEVICE_NOT_PROVISIONED:
+ return "DEVICE_NOT_PROVISIONED";
+ case BackupManagerMonitor.LOG_EVENT_ID_PACKAGE_TRANSPORT_NOT_PRESENT:
+ return "PACKAGE_TRANSPORT_NOT_PRESENT";
+ case BackupManagerMonitor.LOG_EVENT_ID_ERROR_PREFLIGHT:
+ return "ERROR_PREFLIGHT";
+ case BackupManagerMonitor.LOG_EVENT_ID_QUOTA_HIT_PREFLIGHT:
+ return "QUOTA_HIT_PREFLIGHT";
+ case BackupManagerMonitor.LOG_EVENT_ID_EXCEPTION_FULL_BACKUP:
+ return "EXCEPTION_FULL_BACKUP";
+ case BackupManagerMonitor.LOG_EVENT_ID_KEY_VALUE_BACKUP_CANCEL:
+ return "KEY_VALUE_BACKUP_CANCEL";
+ case BackupManagerMonitor.LOG_EVENT_ID_NO_RESTORE_METADATA_AVAILABLE:
+ return "NO_RESTORE_METADATA_AVAILABLE";
+ case BackupManagerMonitor.LOG_EVENT_ID_NO_PM_METADATA_RECEIVED:
+ return "NO_PM_METADATA_RECEIVED";
+ case BackupManagerMonitor.LOG_EVENT_ID_PM_AGENT_HAS_NO_METADATA:
+ return "PM_AGENT_HAS_NO_METADATA";
+ case BackupManagerMonitor.LOG_EVENT_ID_LOST_TRANSPORT:
+ return "LOST_TRANSPORT";
+ case BackupManagerMonitor.LOG_EVENT_ID_PACKAGE_NOT_PRESENT:
+ return "PACKAGE_NOT_PRESENT";
+ case BackupManagerMonitor.LOG_EVENT_ID_RESTORE_VERSION_HIGHER:
+ return "RESTORE_VERSION_HIGHER";
+ case BackupManagerMonitor.LOG_EVENT_ID_APP_HAS_NO_AGENT:
+ return "APP_HAS_NO_AGENT";
+ case BackupManagerMonitor.LOG_EVENT_ID_SIGNATURE_MISMATCH:
+ return "SIGNATURE_MISMATCH";
+ case BackupManagerMonitor.LOG_EVENT_ID_CANT_FIND_AGENT:
+ return "CANT_FIND_AGENT";
+ case BackupManagerMonitor.LOG_EVENT_ID_KEY_VALUE_RESTORE_TIMEOUT:
+ return "KEY_VALUE_RESTORE_TIMEOUT";
+ case BackupManagerMonitor.LOG_EVENT_ID_RESTORE_ANY_VERSION:
+ return "RESTORE_ANY_VERSION";
+ case BackupManagerMonitor.LOG_EVENT_ID_VERSIONS_MATCH:
+ return "VERSIONS_MATCH";
+ case BackupManagerMonitor.LOG_EVENT_ID_VERSION_OF_BACKUP_OLDER:
+ return "VERSION_OF_BACKUP_OLDER";
+ case BackupManagerMonitor.LOG_EVENT_ID_FULL_RESTORE_SIGNATURE_MISMATCH:
+ return "FULL_RESTORE_SIGNATURE_MISMATCH";
+ case BackupManagerMonitor.LOG_EVENT_ID_SYSTEM_APP_NO_AGENT:
+ return "SYSTEM_APP_NO_AGENT";
+ case BackupManagerMonitor.LOG_EVENT_ID_FULL_RESTORE_ALLOW_BACKUP_FALSE:
+ return "FULL_RESTORE_ALLOW_BACKUP_FALSE";
+ case BackupManagerMonitor.LOG_EVENT_ID_APK_NOT_INSTALLED:
+ return "APK_NOT_INSTALLED";
+ case BackupManagerMonitor.LOG_EVENT_ID_CANNOT_RESTORE_WITHOUT_APK:
+ return "CANNOT_RESTORE_WITHOUT_APK";
+ case BackupManagerMonitor.LOG_EVENT_ID_MISSING_SIGNATURE:
+ return "MISSING_SIGNATURE";
+ case BackupManagerMonitor.LOG_EVENT_ID_EXPECTED_DIFFERENT_PACKAGE:
+ return "EXPECTED_DIFFERENT_PACKAGE";
+ case BackupManagerMonitor.LOG_EVENT_ID_UNKNOWN_VERSION:
+ return "UNKNOWN_VERSION";
+ case BackupManagerMonitor.LOG_EVENT_ID_FULL_RESTORE_TIMEOUT:
+ return "FULL_RESTORE_TIMEOUT";
+ case BackupManagerMonitor.LOG_EVENT_ID_CORRUPT_MANIFEST:
+ return "CORRUPT_MANIFEST";
+ case BackupManagerMonitor.LOG_EVENT_ID_WIDGET_METADATA_MISMATCH:
+ return "WIDGET_METADATA_MISMATCH";
+ case BackupManagerMonitor.LOG_EVENT_ID_WIDGET_UNKNOWN_VERSION:
+ return "WIDGET_UNKNOWN_VERSION";
+ case BackupManagerMonitor.LOG_EVENT_ID_NO_PACKAGES:
+ return "NO_PACKAGES";
+ case BackupManagerMonitor.LOG_EVENT_ID_TRANSPORT_IS_NULL:
+ return "TRANSPORT_IS_NULL";
+ default:
+ return "UNKNOWN_ID";
+ }
+ }
+
+ @IntDef({Monitor.OFF, Monitor.NORMAL, Monitor.VERBOSE})
+ @Retention(RetentionPolicy.SOURCE)
+ private @interface Monitor {
+ int OFF = 0;
+ int NORMAL = 1;
+ int VERBOSE = 2;
}
}
diff --git a/cmds/bootanimation/Android.mk b/cmds/bootanimation/Android.mk
index e5d35b3b8a0e..6943dab0acbe 100644
--- a/cmds/bootanimation/Android.mk
+++ b/cmds/bootanimation/Android.mk
@@ -27,7 +27,12 @@ ifeq ($(PRODUCT_IOT),true)
LOCAL_SHARED_LIBRARIES += \
libandroidthings \
+ libandroidthings_protos \
libchrome \
+ libprotobuf-cpp-lite \
+
+LOCAL_STATIC_LIBRARIES += \
+ libjsoncpp
LOCAL_SRC_FILES += \
iot/iotbootanimation_main.cpp \
@@ -94,3 +99,5 @@ LOCAL_32_BIT_ONLY := true
endif
include ${BUILD_SHARED_LIBRARY}
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/cmds/bootanimation/BootAnimation.cpp b/cmds/bootanimation/BootAnimation.cpp
index ed6c25dc49c3..e915cc811676 100644
--- a/cmds/bootanimation/BootAnimation.cpp
+++ b/cmds/bootanimation/BootAnimation.cpp
@@ -860,12 +860,12 @@ bool BootAnimation::movie()
mTimeCheckThread = nullptr;
}
- releaseAnimation(animation);
-
if (clockFontInitialized) {
glDeleteTextures(1, &animation->clockFont.texture.name);
}
+ releaseAnimation(animation);
+
return false;
}
diff --git a/cmds/bootanimation/BootAnimation.h b/cmds/bootanimation/BootAnimation.h
index b4699d884681..498eebce5999 100644
--- a/cmds/bootanimation/BootAnimation.h
+++ b/cmds/bootanimation/BootAnimation.h
@@ -22,6 +22,7 @@
#include <androidfw/AssetManager.h>
#include <utils/Thread.h>
+#include <binder/IBinder.h>
#include <EGL/egl.h>
#include <GLES/gl.h>
diff --git a/cmds/bootanimation/BootAnimationUtil.cpp b/cmds/bootanimation/BootAnimationUtil.cpp
index 7718daf61d81..1e417e938359 100644
--- a/cmds/bootanimation/BootAnimationUtil.cpp
+++ b/cmds/bootanimation/BootAnimationUtil.cpp
@@ -16,14 +16,30 @@
#include "BootAnimationUtil.h"
+#include <vector>
#include <inttypes.h>
#include <binder/IServiceManager.h>
#include <cutils/properties.h>
#include <utils/Log.h>
#include <utils/SystemClock.h>
+#include <android-base/properties.h>
namespace android {
+namespace {
+
+static constexpr char PLAY_SOUND_PROP_NAME[] = "persist.sys.bootanim.play_sound";
+static constexpr char BOOT_COMPLETED_PROP_NAME[] = "sys.boot_completed";
+static constexpr char POWER_CTL_PROP_NAME[] = "sys.powerctl";
+static constexpr char BOOTREASON_PROP_NAME[] = "ro.boot.bootreason";
+static const std::vector<std::string> PLAY_SOUND_BOOTREASON_BLACKLIST {
+ "kernel_panic",
+ "Panic",
+ "Watchdog",
+};
+
+} // namespace
+
bool bootAnimationDisabled() {
char value[PROPERTY_VALUE_MAX];
@@ -58,4 +74,31 @@ void waitForSurfaceFlinger() {
}
}
+bool playSoundsAllowed() {
+ // Only play sounds for system boots, not runtime restarts.
+ if (android::base::GetBoolProperty(BOOT_COMPLETED_PROP_NAME, false)) {
+ return false;
+ }
+ // no audio while shutting down
+ if (!android::base::GetProperty(POWER_CTL_PROP_NAME, "").empty()) {
+ return false;
+ }
+ // Read the system property to see if we should play the sound.
+ // If it's not present, default to allowed.
+ if (!property_get_bool(PLAY_SOUND_PROP_NAME, 1)) {
+ return false;
+ }
+
+ // Don't play sounds if this is a reboot due to an error.
+ char bootreason[PROPERTY_VALUE_MAX];
+ if (property_get(BOOTREASON_PROP_NAME, bootreason, nullptr) > 0) {
+ for (const auto& str : PLAY_SOUND_BOOTREASON_BLACKLIST) {
+ if (strcasecmp(str.c_str(), bootreason) == 0) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
} // namespace android
diff --git a/cmds/bootanimation/BootAnimationUtil.h b/cmds/bootanimation/BootAnimationUtil.h
index 60987cd1ccd1..1e1140a51763 100644
--- a/cmds/bootanimation/BootAnimationUtil.h
+++ b/cmds/bootanimation/BootAnimationUtil.h
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#ifndef ANDROID_BOOTANIMATION_UTIL_H
+#define ANDROID_BOOTANIMATION_UTIL_H
+
namespace android {
// Returns true if boot animation is disabled.
@@ -22,4 +25,8 @@ bool bootAnimationDisabled();
// Waits until the surface flinger is up.
void waitForSurfaceFlinger();
+// Returns whether sounds should be played during current boot.
+bool playSoundsAllowed();
} // namespace android
+
+#endif // ANDROID_BOOTANIMATION_UTIL_H
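The header above now exposes three helpers shared by the generic and IoT boot animations. A condensed, illustrative sketch of how a caller strings them together, assuming the wiring shown later in this diff (bootanimation_main.cpp for startup, audioplay.cpp's playPart() for the audio gate); the main() body here is not part of the change:

    #include "BootAnimationUtil.h"

    int main() {
        if (android::bootAnimationDisabled()) {
            return 0;  // nothing to display on this boot
        }
        android::waitForSurfaceFlinger();  // block until SurfaceFlinger is registered

        // ... create and run the BootAnimation here ...

        if (android::playSoundsAllowed()) {
            // Safe to start audio: this is a real boot (not a runtime restart), the
            // device is not shutting down, persist.sys.bootanim.play_sound allows it,
            // and the boot reason is not kernel_panic/Panic/Watchdog.
        }
        return 0;
    }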
diff --git a/cmds/bootanimation/audioplay.cpp b/cmds/bootanimation/audioplay.cpp
index c546072e733a..874aab08862e 100644
--- a/cmds/bootanimation/audioplay.cpp
+++ b/cmds/bootanimation/audioplay.cpp
@@ -17,22 +17,27 @@
// cribbed from samples/native-audio
-#include "audioplay.h"
-
#define CHATTY ALOGD
#define LOG_TAG "audioplay"
+#include "audioplay.h"
+
#include <string.h>
#include <utils/Log.h>
+#include <utils/threads.h>
// for native audio
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
+#include "BootAnimationUtil.h"
+
namespace audioplay {
namespace {
+using namespace android;
+
// engine interfaces
static SLObjectItf engineObject = NULL;
static SLEngineItf engineEngine;
@@ -305,6 +310,74 @@ bool parseClipBuf(const uint8_t* clipBuf, int clipBufSize, const ChunkFormat** o
return true;
}
+class InitAudioThread : public Thread {
+public:
+ InitAudioThread(uint8_t* exampleAudioData, int exampleAudioLength)
+ : Thread(false),
+ mExampleAudioData(exampleAudioData),
+ mExampleAudioLength(exampleAudioLength) {}
+private:
+ virtual bool threadLoop() {
+ audioplay::create(mExampleAudioData, mExampleAudioLength);
+ // Exit immediately
+ return false;
+ }
+
+ uint8_t* mExampleAudioData;
+ int mExampleAudioLength;
+};
+
+// Typedef to aid readability.
+typedef android::BootAnimation::Animation Animation;
+
+class AudioAnimationCallbacks : public android::BootAnimation::Callbacks {
+public:
+ void init(const Vector<Animation::Part>& parts) override {
+ const Animation::Part* partWithAudio = nullptr;
+ for (const Animation::Part& part : parts) {
+ if (part.audioData != nullptr) {
+ partWithAudio = &part;
+ break;
+ }
+ }
+
+ if (partWithAudio == nullptr) {
+ return;
+ }
+
+ ALOGD("found audio.wav, creating playback engine");
+ // The audioData is used to initialize the audio system. Different data
+ // can be played later for other parts BUT the assumption is that they
+ // will all be the same format and only the format of this audioData
+ // will work correctly.
+ initAudioThread = new InitAudioThread(partWithAudio->audioData,
+ partWithAudio->audioLength);
+ initAudioThread->run("BootAnimation::InitAudioThread", PRIORITY_NORMAL);
+ };
+
+ void playPart(int partNumber, const Animation::Part& part, int playNumber) override {
+ // only play audio file the first time we animate the part
+ if (playNumber == 0 && part.audioData && playSoundsAllowed()) {
+ ALOGD("playing clip for part%d, size=%d",
+ partNumber, part.audioLength);
+ // Block until the audio engine is finished initializing.
+ if (initAudioThread != nullptr) {
+ initAudioThread->join();
+ }
+ audioplay::playClip(part.audioData, part.audioLength);
+ }
+ };
+
+ void shutdown() override {
+ // we've finally played everything we're going to play
+ audioplay::setPlaying(false);
+ audioplay::destroy();
+ };
+
+private:
+ sp<InitAudioThread> initAudioThread = nullptr;
+};
+
} // namespace
bool create(const uint8_t* exampleClipBuf, int exampleClipBufSize) {
@@ -397,4 +470,8 @@ void destroy() {
}
}
+sp<BootAnimation::Callbacks> createAnimationCallbacks() {
+ return new AudioAnimationCallbacks();
+}
+
} // namespace audioplay
diff --git a/cmds/bootanimation/audioplay.h b/cmds/bootanimation/audioplay.h
index 0e5705af0ad0..4704a702d50b 100644
--- a/cmds/bootanimation/audioplay.h
+++ b/cmds/bootanimation/audioplay.h
@@ -20,6 +20,8 @@
#include <string.h>
+#include "BootAnimation.h"
+
namespace audioplay {
// Initializes the engine with an example of the type of WAV clip to play.
@@ -32,6 +34,9 @@ bool playClip(const uint8_t* buf, int size);
void setPlaying(bool isPlaying);
void destroy();
+// Generates callbacks to integrate the audioplay system with the BootAnimation.
+android::sp<android::BootAnimation::Callbacks> createAnimationCallbacks();
+
}
#endif // AUDIOPLAY_H_
diff --git a/cmds/bootanimation/bootanimation_main.cpp b/cmds/bootanimation/bootanimation_main.cpp
index 8501982d071c..a52a5e92a840 100644
--- a/cmds/bootanimation/bootanimation_main.cpp
+++ b/cmds/bootanimation/bootanimation_main.cpp
@@ -26,8 +26,6 @@
#include <sys/resource.h>
#include <utils/Log.h>
#include <utils/SystemClock.h>
-#include <utils/threads.h>
-#include <android-base/properties.h>
#include "BootAnimation.h"
#include "BootAnimationUtil.h"
@@ -35,113 +33,6 @@
using namespace android;
-// ---------------------------------------------------------------------------
-
-namespace {
-
-// Create a typedef for readability.
-typedef android::BootAnimation::Animation Animation;
-
-static const char PLAY_SOUND_PROP_NAME[] = "persist.sys.bootanim.play_sound";
-static const char BOOT_COMPLETED_PROP_NAME[] = "sys.boot_completed";
-static const char POWER_CTL_PROP_NAME[] = "sys.powerctl";
-static const char BOOTREASON_PROP_NAME[] = "ro.boot.bootreason";
-static const std::vector<std::string> PLAY_SOUND_BOOTREASON_BLACKLIST {
- "kernel_panic",
- "Panic",
- "Watchdog",
-};
-
-class InitAudioThread : public Thread {
-public:
- InitAudioThread(uint8_t* exampleAudioData, int exampleAudioLength)
- : Thread(false),
- mExampleAudioData(exampleAudioData),
- mExampleAudioLength(exampleAudioLength) {}
-private:
- virtual bool threadLoop() {
- audioplay::create(mExampleAudioData, mExampleAudioLength);
- // Exit immediately
- return false;
- }
-
- uint8_t* mExampleAudioData;
- int mExampleAudioLength;
-};
-
-bool playSoundsAllowed() {
- // Only play sounds for system boots, not runtime restarts.
- if (android::base::GetBoolProperty(BOOT_COMPLETED_PROP_NAME, false)) {
- return false;
- }
- // no audio while shutting down
- if (!android::base::GetProperty(POWER_CTL_PROP_NAME, "").empty()) {
- return false;
- }
- // Read the system property to see if we should play the sound.
- // If it's not present, default to allowed.
- if (!property_get_bool(PLAY_SOUND_PROP_NAME, 1)) {
- return false;
- }
-
- // Don't play sounds if this is a reboot due to an error.
- char bootreason[PROPERTY_VALUE_MAX];
- if (property_get(BOOTREASON_PROP_NAME, bootreason, nullptr) > 0) {
- for (const auto& str : PLAY_SOUND_BOOTREASON_BLACKLIST) {
- if (strcasecmp(str.c_str(), bootreason) == 0) {
- return false;
- }
- }
- }
- return true;
-}
-
-class AudioAnimationCallbacks : public android::BootAnimation::Callbacks {
-public:
- void init(const Vector<Animation::Part>& parts) override {
- const Animation::Part* partWithAudio = nullptr;
- for (const Animation::Part& part : parts) {
- if (part.audioData != nullptr) {
- partWithAudio = &part;
- }
- }
-
- if (partWithAudio == nullptr) {
- return;
- }
-
- ALOGD("found audio.wav, creating playback engine");
- initAudioThread = new InitAudioThread(partWithAudio->audioData,
- partWithAudio->audioLength);
- initAudioThread->run("BootAnimation::InitAudioThread", PRIORITY_NORMAL);
- };
-
- void playPart(int partNumber, const Animation::Part& part, int playNumber) override {
- // only play audio file the first time we animate the part
- if (playNumber == 0 && part.audioData && playSoundsAllowed()) {
- ALOGD("playing clip for part%d, size=%d",
- partNumber, part.audioLength);
- // Block until the audio engine is finished initializing.
- if (initAudioThread != nullptr) {
- initAudioThread->join();
- }
- audioplay::playClip(part.audioData, part.audioLength);
- }
- };
-
- void shutdown() override {
- // we've finally played everything we're going to play
- audioplay::setPlaying(false);
- audioplay::destroy();
- };
-
-private:
- sp<InitAudioThread> initAudioThread = nullptr;
-};
-
-} // namespace
-
-
int main()
{
setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_DISPLAY);
@@ -156,7 +47,7 @@ int main()
waitForSurfaceFlinger();
// create the boot animation object
- sp<BootAnimation> boot = new BootAnimation(new AudioAnimationCallbacks());
+ sp<BootAnimation> boot = new BootAnimation(audioplay::createAnimationCallbacks());
ALOGV("Boot animation set up. Joining pool.");
IPCThreadState::self()->joinThreadPool();
diff --git a/cmds/bootanimation/iot/Android.mk b/cmds/bootanimation/iot/Android.mk
new file mode 100644
index 000000000000..3d288e4e111b
--- /dev/null
+++ b/cmds/bootanimation/iot/Android.mk
@@ -0,0 +1,43 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+ifeq ($(PRODUCT_IOT),true)
+
+# libbootanimation_iot_test
+# ===========================================================
+include $(CLEAR_VARS)
+LOCAL_MODULE := libbootanimation_iot_test
+LOCAL_CFLAGS := -Wall -Werror -Wunused -Wunreachable-code
+
+LOCAL_SHARED_LIBRARIES := \
+ libandroidthings \
+ libandroidthings_protos \
+ libbase \
+ libchrome \
+ liblog \
+ libprotobuf-cpp-lite \
+
+LOCAL_STATIC_LIBRARIES += \
+ libjsoncpp
+
+LOCAL_SRC_FILES := \
+ BootParameters.cpp \
+ BootParameters_test.cpp \
+
+include $(BUILD_NATIVE_TEST)
+
+endif # PRODUCT_IOT
diff --git a/cmds/bootanimation/iot/BootAction.cpp b/cmds/bootanimation/iot/BootAction.cpp
index fa797444d569..8b55147110bc 100644
--- a/cmds/bootanimation/iot/BootAction.cpp
+++ b/cmds/bootanimation/iot/BootAction.cpp
@@ -32,7 +32,7 @@ BootAction::~BootAction() {
}
bool BootAction::init(const std::string& libraryPath,
- const std::vector<ABootActionParameter>& parameters) {
+ const std::unique_ptr<BootParameters>& bootParameters) {
APeripheralManagerClient* client = nullptr;
ALOGD("Connecting to peripheralmanager");
// Wait for peripheral manager to come up.
@@ -77,9 +77,32 @@ bool BootAction::init(const std::string& libraryPath,
mLibStartPart = reinterpret_cast<libStartPart>(loaded);
}
- ALOGD("Entering boot_action_init");
- bool result = mLibInit(parameters.data(), parameters.size());
- ALOGD("Returned from boot_action_init");
+ // SilentBoot is considered optional, if it isn't exported by the library
+ // and the boot is silent, no method is called.
+ loaded = nullptr;
+ if (!loadSymbol("boot_action_silent_boot", &loaded) || loaded == nullptr) {
+ ALOGW("No boot_action_silent_boot found, boot action will not be "
+ "executed during a silent boot.");
+ } else {
+ mLibSilentBoot = reinterpret_cast<libInit>(loaded);
+ }
+
+ bool result = true;
+ const auto& parameters = bootParameters->getParameters();
+ if (bootParameters->isSilentBoot()) {
+ if (mLibSilentBoot != nullptr) {
+ ALOGD("Entering boot_action_silent_boot");
+ result = mLibSilentBoot(parameters.data(), parameters.size());
+ ALOGD("Returned from boot_action_silent_boot");
+ } else {
+ ALOGW("Skipping missing boot_action_silent_boot");
+ }
+ } else {
+ ALOGD("Entering boot_action_init");
+ result = mLibInit(parameters.data(), parameters.size());
+ ALOGD("Returned from boot_action_init");
+ }
+
return result;
}
@@ -99,7 +122,7 @@ void BootAction::shutdown() {
bool BootAction::loadSymbol(const char* symbol, void** loaded) {
*loaded = dlsym(mLibHandle, symbol);
- if (loaded == nullptr) {
+ if (*loaded == nullptr) {
ALOGE("Unable to load symbol : %s :: %s", symbol, dlerror());
return false;
}
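The last hunk above fixes the dlsym() error check: the old code tested the out-parameter pointer 'loaded', which is never null, instead of the symbol address written through it, so missing symbols went undetected. A self-contained sketch of the corrected pattern; the library name in main() is a placeholder:

    #include <dlfcn.h>
    #include <cstdio>

    // Corrected loadSymbol() pattern: dlsym() returns nullptr for a missing symbol,
    // so check the value written through the out parameter, not the out parameter
    // itself ('loaded' is never null here).
    static bool loadSymbol(void* libHandle, const char* symbol, void** loaded) {
        *loaded = dlsym(libHandle, symbol);
        if (*loaded == nullptr) {
            std::fprintf(stderr, "Unable to load symbol %s: %s\n", symbol, dlerror());
            return false;
        }
        return true;
    }

    int main() {
        // "libexample.so" is hypothetical; BootAction receives the real path from its caller.
        void* handle = dlopen("libexample.so", RTLD_NOW);
        if (handle == nullptr) {
            return 1;
        }

        void* loaded = nullptr;
        if (loadSymbol(handle, "boot_action_init", &loaded)) {
            // reinterpret_cast 'loaded' to the expected function type and call it.
        }
        dlclose(handle);
        return 0;
    }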
diff --git a/cmds/bootanimation/iot/BootAction.h b/cmds/bootanimation/iot/BootAction.h
index 5e2495fe6c51..7119c35db0f9 100644
--- a/cmds/bootanimation/iot/BootAction.h
+++ b/cmds/bootanimation/iot/BootAction.h
@@ -20,6 +20,8 @@
#include <string>
#include <vector>
+#include "BootParameters.h"
+
#include <boot_action/boot_action.h> // libandroidthings native API.
#include <utils/RefBase.h>
@@ -31,7 +33,7 @@ public:
// libraryPath is a fully qualified path to the target .so library.
bool init(const std::string& libraryPath,
- const std::vector<ABootActionParameter>& parameters);
+ const std::unique_ptr<BootParameters>& bootParameters);
// The animation is going to start playing partNumber for the playCount'th
// time, update the action as needed.
@@ -45,7 +47,7 @@ public:
private:
typedef bool (*libInit)(const ABootActionParameter* parameters,
- size_t num_parameters);
+ size_t numParameters);
typedef void (*libStartPart)(int partNumber, int playNumber);
typedef void (*libShutdown)();
@@ -55,6 +57,9 @@ private:
libInit mLibInit = nullptr;
libStartPart mLibStartPart = nullptr;
libShutdown mLibShutdown = nullptr;
+
+ // Called only if the boot is silent.
+ libInit mLibSilentBoot = nullptr;
};
} // namespace android
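BootAction resolves its entry points against the typedefs above, and the new optional boot_action_silent_boot hook shares the libInit signature. A hedged sketch of what a boot-action library could export, limited to the two symbols named in this diff; the extern "C" linkage is an assumption (the symbols are looked up by their unmangled names), and the key/value fields of ABootActionParameter are used the way BootParameters.cpp fills them in:

    #include <stddef.h>
    #include <boot_action/boot_action.h>  // libandroidthings native API, as in BootAction.h

    // Looked up by unmangled name via dlsym(), hence extern "C".
    extern "C" {

    // Matches the libInit typedef: bool (*)(const ABootActionParameter*, size_t).
    // A false return is treated by BootAction::init() as an initialization failure.
    bool boot_action_init(const ABootActionParameter* parameters, size_t numParameters) {
        // Each parameters[i] carries a key/value pair of C strings holding the
        // persisted user parameters (see how BootParameters.cpp fills them in).
        (void) parameters;
        (void) numParameters;
        return true;
    }

    // Optional: invoked instead of boot_action_init() when the boot is silent.
    // If the symbol is absent, BootAction logs a warning and calls nothing.
    bool boot_action_silent_boot(const ABootActionParameter* parameters, size_t numParameters) {
        (void) parameters;
        (void) numParameters;
        return true;
    }

    }  // extern "C"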
diff --git a/cmds/bootanimation/iot/BootParameters.cpp b/cmds/bootanimation/iot/BootParameters.cpp
index da6ad0d1f08f..30a9b2895c44 100644
--- a/cmds/bootanimation/iot/BootParameters.cpp
+++ b/cmds/bootanimation/iot/BootParameters.cpp
@@ -18,45 +18,52 @@
#define LOG_TAG "BootParameters"
+#include <errno.h>
#include <fcntl.h>
-#include <string>
-
#include <android-base/file.h>
-#include <base/json/json_parser.h>
-#include <base/json/json_reader.h>
-#include <base/json/json_value_converter.h>
+#include <json/json.h>
#include <utils/Log.h>
-using android::base::RemoveFileIfExists;
using android::base::ReadFileToString;
-using base::JSONReader;
-using base::JSONValueConverter;
-using base::Value;
+using android::base::RemoveFileIfExists;
+using android::base::WriteStringToFile;
+using Json::ArrayIndex;
+using Json::Reader;
+using Json::Value;
namespace android {
namespace {
-// Brightness and volume are stored as integer strings in next_boot.json.
-// They are divided by this constant to produce the actual float values in
-// range [0.0, 1.0]. This constant must match its counterpart in
-// DeviceManager.
-constexpr const float kFloatScaleFactor = 1000.0f;
+// Keys for deprecated parameters. Devices that OTA from N to O and that used
+// the hidden BootParameters API will store these in the JSON blob. To support
+// the transition from N to O, these keys are mapped to the new parameters.
+constexpr const char *kKeyLegacyVolume = "volume";
+constexpr const char *kKeyLegacyAnimationsDisabled = "boot_animation_disabled";
+constexpr const char *kKeyLegacyParamNames = "param_names";
+constexpr const char *kKeyLegacyParamValues = "param_values";
+
+constexpr const char *kNextBootFile = "/data/misc/bootanimation/next_boot.proto";
+constexpr const char *kLastBootFile = "/data/misc/bootanimation/last_boot.proto";
-constexpr const char* kNextBootFile = "/data/misc/bootanimation/next_boot.json";
-constexpr const char* kLastBootFile = "/data/misc/bootanimation/last_boot.json";
+constexpr const char *kLegacyNextBootFile = "/data/misc/bootanimation/next_boot.json";
+constexpr const char *kLegacyLastBootFile = "/data/misc/bootanimation/last_boot.json";
-void swapBootConfigs() {
- // rename() will fail if next_boot.json doesn't exist, so delete
- // last_boot.json manually first.
+void removeLegacyFiles() {
std::string err;
- if (!RemoveFileIfExists(kLastBootFile, &err))
- ALOGE("Unable to delete last boot file: %s", err.c_str());
+ if (!RemoveFileIfExists(kLegacyLastBootFile, &err)) {
+ ALOGW("Unable to delete %s: %s", kLegacyLastBootFile, err.c_str());
+ }
- if (rename(kNextBootFile, kLastBootFile) && errno != ENOENT)
- ALOGE("Unable to swap boot files: %s", strerror(errno));
+ err.clear();
+ if (!RemoveFileIfExists(kLegacyNextBootFile, &err)) {
+ ALOGW("Unable to delete %s: %s", kLegacyNextBootFile, err.c_str());
+ }
+}
+void createNextBootFile() {
+ errno = 0;
int fd = open(kNextBootFile, O_CREAT, DEFFILEMODE);
if (fd == -1) {
ALOGE("Unable to create next boot file: %s", strerror(errno));
@@ -71,54 +78,120 @@ void swapBootConfigs() {
} // namespace
-BootParameters::SavedBootParameters::SavedBootParameters()
- : brightness(-kFloatScaleFactor), volume(-kFloatScaleFactor) {}
-
-void BootParameters::SavedBootParameters::RegisterJSONConverter(
- JSONValueConverter<SavedBootParameters>* converter) {
- converter->RegisterIntField("brightness", &SavedBootParameters::brightness);
- converter->RegisterIntField("volume", &SavedBootParameters::volume);
- converter->RegisterRepeatedString("param_names",
- &SavedBootParameters::param_names);
- converter->RegisterRepeatedString("param_values",
- &SavedBootParameters::param_values);
+// Renames the 'next' boot file to the 'last' file and reads its contents.
+bool BootParameters::swapAndLoadBootConfigContents(const char *lastBootFile,
+ const char *nextBootFile,
+ std::string *contents) {
+ if (!ReadFileToString(nextBootFile, contents)) {
+ RemoveFileIfExists(lastBootFile);
+ return false;
+ }
+
+ errno = 0;
+ if (rename(nextBootFile, lastBootFile) && errno != ENOENT)
+ ALOGE("Unable to swap boot files: %s", strerror(errno));
+
+ return true;
}
BootParameters::BootParameters() {
- swapBootConfigs();
loadParameters();
}
+// Saves the boot parameters state to disk so the framework can read it.
+void BootParameters::storeParameters() {
+ errno = 0;
+ if (!WriteStringToFile(mProto.SerializeAsString(), kLastBootFile)) {
+ ALOGE("Failed to write boot parameters to %s: %s", kLastBootFile, strerror(errno));
+ }
+
+ // WriteStringToFile sets the file permissions to 0666, but these are not
+ // honored by the system.
+ errno = 0;
+ if (chmod(kLastBootFile, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH)) {
+ ALOGE("Failed to set permissions for %s: %s", kLastBootFile, strerror(errno));
+ }
+}
+
+// Load the boot parameters from disk, try the old location and format if the
+// file does not exist. Note:
+// - Parse errors result in defaults being used (a normal boot).
+// - Legacy boot parameters default to a silent boot.
void BootParameters::loadParameters() {
+ // Precedence is given to the new file format (.proto).
std::string contents;
- if (!ReadFileToString(kLastBootFile, &contents)) {
- if (errno != ENOENT)
- ALOGE("Unable to read from %s: %s", kLastBootFile, strerror(errno));
+ if (swapAndLoadBootConfigContents(kLastBootFile, kNextBootFile, &contents)) {
+ parseBootParameters(contents);
+ } else if (swapAndLoadBootConfigContents(kLegacyLastBootFile, kLegacyNextBootFile, &contents)) {
+ parseLegacyBootParameters(contents);
+ storeParameters();
+ removeLegacyFiles();
+ }
+
+ createNextBootFile();
+}
+void BootParameters::parseBootParameters(const std::string &contents) {
+ if (!mProto.ParseFromString(contents)) {
+ ALOGW("Failed to parse parameters from %s", kLastBootFile);
return;
}
- std::unique_ptr<Value> json = JSONReader::Read(contents);
- if (json.get() == nullptr) {
+ loadStateFromProto();
+}
+
+// Parses the JSON in the proto.
+void BootParameters::parseLegacyBootParameters(const std::string &contents) {
+ Value json;
+ if (!Reader().parse(contents, json)) {
+ ALOGW("Failed to parse parameters from %s", kLegacyLastBootFile);
return;
}
- JSONValueConverter<SavedBootParameters> converter;
- if (converter.Convert(*(json.get()), &mRawParameters)) {
- mBrightness = mRawParameters.brightness / kFloatScaleFactor;
- mVolume = mRawParameters.volume / kFloatScaleFactor;
-
- if (mRawParameters.param_names.size() == mRawParameters.param_values.size()) {
- for (size_t i = 0; i < mRawParameters.param_names.size(); i++) {
- mParameters.push_back({
- .key = mRawParameters.param_names[i]->c_str(),
- .value = mRawParameters.param_values[i]->c_str()
- });
+ int volume = 0;
+ bool bootAnimationDisabled = true;
+
+ Value &jsonValue = json[kKeyLegacyVolume];
+ if (jsonValue.isIntegral()) {
+ volume = jsonValue.asInt();
+ }
+
+ jsonValue = json[kKeyLegacyAnimationsDisabled];
+ if (jsonValue.isIntegral()) {
+ bootAnimationDisabled = jsonValue.asInt() == 1;
+ }
+
+ // Assume a silent boot unless all of the following are true -
+ // 1. The volume is neither 0 nor -1000 (the legacy default value).
+ // 2. The boot animations are explicitly enabled.
+ // Note: brightness was never used.
+ mProto.set_silent_boot((volume == 0) || (volume == -1000) || bootAnimationDisabled);
+
+ Value &keys = json[kKeyLegacyParamNames];
+ Value &values = json[kKeyLegacyParamValues];
+ if (keys.isArray() && values.isArray() && (keys.size() == values.size())) {
+ for (ArrayIndex i = 0; i < keys.size(); ++i) {
+ auto &key = keys[i];
+ auto &value = values[i];
+ if (key.isString() && value.isString()) {
+ auto userParameter = mProto.add_user_parameter();
+ userParameter->set_key(key.asString());
+ userParameter->set_value(value.asString());
}
- } else {
- ALOGW("Parameter names and values size mismatch");
}
}
+
+ loadStateFromProto();
+}
+
+void BootParameters::loadStateFromProto() {
+ // A missing key returns a safe, default value.
+ // Ignore invalid or missing parameters.
+ mIsSilentBoot = mProto.silent_boot();
+
+ for (const auto &param : mProto.user_parameter()) {
+ mParameters.push_back({.key = param.key().c_str(), .value = param.value().c_str()});
+ }
}
} // namespace android
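The N-to-O migration logic above boils the legacy JSON fields down to a single silent-boot predicate. A restatement of that rule as a standalone helper (illustrative only, not a function in this change), with the cases exercised by BootParameters_test.cpp below:

    // Legacy silent-boot rule from parseLegacyBootParameters(): silent unless the
    // saved volume is a real, non-default value AND boot animations were enabled.
    // 0 is an explicit mute and -1000 is the legacy "unset" default.
    bool isLegacySilentBoot(int volume, bool bootAnimationDisabled) {
        return (volume == 0) || (volume == -1000) || bootAnimationDisabled;
    }

    // Cases mirroring BootParameters_test.cpp below:
    //   isLegacySilentBoot(100,   true)  -> true   (animations disabled)
    //   isLegacySilentBoot(0,     false) -> true   (muted volume)
    //   isLegacySilentBoot(-1000, false) -> true   (legacy default volume)
    //   isLegacySilentBoot(500,   false) -> false  (audible boot, animations on)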
diff --git a/cmds/bootanimation/iot/BootParameters.h b/cmds/bootanimation/iot/BootParameters.h
index c10bd44bc2ca..cbd1ca61cfc3 100644
--- a/cmds/bootanimation/iot/BootParameters.h
+++ b/cmds/bootanimation/iot/BootParameters.h
@@ -18,10 +18,11 @@
#define _BOOTANIMATION_BOOT_PARAMETERS_H_
#include <list>
+#include <string>
#include <vector>
-#include <base/json/json_value_converter.h>
#include <boot_action/boot_action.h> // libandroidthings native API.
+#include <boot_parameters.pb.h>
namespace android {
@@ -32,39 +33,39 @@ public:
// to clear the parameters for next boot.
BootParameters();
- // Returns true if volume/brightness were explicitly set on reboot.
- bool hasVolume() const { return mVolume >= 0; }
- bool hasBrightness() const { return mBrightness >= 0; }
-
- // Returns volume/brightness in [0,1], or -1 if unset.
- float getVolume() const { return mVolume; }
- float getBrightness() const { return mBrightness; }
+ // Returns whether or not this is a silent boot.
+ bool isSilentBoot() const { return mIsSilentBoot; }
// Returns the additional boot parameters that were set on reboot.
const std::vector<ABootActionParameter>& getParameters() const { return mParameters; }
-private:
- // Raw boot saved_parameters loaded from .json.
- struct SavedBootParameters {
- int brightness;
- int volume;
- std::vector<std::unique_ptr<std::string>> param_names;
- std::vector<std::unique_ptr<std::string>> param_values;
+ // Exposed for testing. Sets the parameters to the serialized proto.
+ void parseBootParameters(const std::string &contents);
+
+ // For devices that OTA from N to O.
+ // Exposed for testing. Sets the parameters to the raw JSON.
+ void parseLegacyBootParameters(const std::string &contents);
- SavedBootParameters();
- static void RegisterJSONConverter(
- ::base::JSONValueConverter<SavedBootParameters>* converter);
- };
+ // Exposed for testing. Loads the contents from |nextBootFile| and replaces
+ // |lastBootFile| with |nextBootFile|.
+ static bool swapAndLoadBootConfigContents(const char *lastBootFile, const char *nextBootFile,
+ std::string *contents);
+ private:
void loadParameters();
- float mVolume = -1.f;
- float mBrightness = -1.f;
+ // Replaces the legacy JSON blob with the updated version, allowing the
+ // framework to read it.
+ void storeParameters();
+
+ void loadStateFromProto();
+
+ bool mIsSilentBoot = false;
+
std::vector<ABootActionParameter> mParameters;
- // ABootActionParameter is just a raw pointer so we need to keep the
- // original strings around to avoid losing them.
- SavedBootParameters mRawParameters;
+ // Store the proto because mParameters makes a shallow copy.
+ android::things::proto::BootParameters mProto;
};
} // namespace android
diff --git a/cmds/bootanimation/iot/BootParameters_test.cpp b/cmds/bootanimation/iot/BootParameters_test.cpp
new file mode 100644
index 000000000000..d55bce6eecc3
--- /dev/null
+++ b/cmds/bootanimation/iot/BootParameters_test.cpp
@@ -0,0 +1,263 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BootParameters.h"
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <android-base/file.h>
+#include <android-base/test_utils.h>
+#include <boot_parameters.pb.h>
+#include <gtest/gtest.h>
+
+namespace android {
+
+namespace {
+
+TEST(BootParametersTest, TestNoBootParametersIsNotSilent) {
+ android::things::proto::BootParameters proto;
+
+ BootParameters bootParameters = BootParameters();
+ bootParameters.parseBootParameters(proto.SerializeAsString());
+
+ ASSERT_FALSE(bootParameters.isSilentBoot());
+ ASSERT_EQ(0u, bootParameters.getParameters().size());
+}
+
+TEST(BootParametersTest, TestParseIsSilent) {
+ android::things::proto::BootParameters proto;
+ proto.set_silent_boot(true);
+
+ BootParameters bootParameters = BootParameters();
+ bootParameters.parseBootParameters(proto.SerializeAsString());
+
+ ASSERT_TRUE(bootParameters.isSilentBoot());
+}
+
+TEST(BootParametersTest, TestParseIsNotSilent) {
+ android::things::proto::BootParameters proto;
+ proto.set_silent_boot(false);
+
+ BootParameters bootParameters = BootParameters();
+ bootParameters.parseBootParameters(proto.SerializeAsString());
+
+ ASSERT_FALSE(bootParameters.isSilentBoot());
+}
+
+TEST(BootParametersTest, TestParseBootParameters) {
+ android::things::proto::BootParameters proto;
+ proto.set_silent_boot(false);
+
+ auto userParameter = proto.add_user_parameter();
+ userParameter->set_key("key1");
+ userParameter->set_value("value1");
+
+ userParameter = proto.add_user_parameter();
+ userParameter->set_key("key2");
+ userParameter->set_value("value2");
+
+ BootParameters bootParameters = BootParameters();
+ bootParameters.parseBootParameters(proto.SerializeAsString());
+
+ auto &parameters = bootParameters.getParameters();
+ ASSERT_EQ(2u, parameters.size());
+ ASSERT_STREQ(parameters[0].key, "key1");
+ ASSERT_STREQ(parameters[0].value, "value1");
+ ASSERT_STREQ(parameters[1].key, "key2");
+ ASSERT_STREQ(parameters[1].value, "value2");
+}
+
+TEST(BootParametersTest, TestParseLegacyDisableBootAnimationIsSilent) {
+ BootParameters bootParameters = BootParameters();
+ bootParameters.parseLegacyBootParameters(R"(
+ {
+ "brightness":200,
+ "volume":100,
+ "boot_animation_disabled":1,
+ "param_names":[],
+ "param_values":[]
+ }
+ )");
+
+ ASSERT_TRUE(bootParameters.isSilentBoot());
+}
+
+TEST(BootParametersTest, TestParseLegacyZeroVolumeIsSilent) {
+ BootParameters bootParameters = BootParameters();
+ bootParameters.parseLegacyBootParameters(R"(
+ {
+ "brightness":200,
+ "volume":0,
+ "boot_animation_disabled":0,
+ "param_names":[],
+ "param_values":[]
+ }
+ )");
+
+ ASSERT_TRUE(bootParameters.isSilentBoot());
+}
+
+TEST(BootParametersTest, TestParseLegacyDefaultVolumeIsSilent) {
+ BootParameters bootParameters = BootParameters();
+ bootParameters.parseLegacyBootParameters(R"(
+ {
+ "brightness":200,
+ "volume":-1000,
+ "boot_animation_disabled":0,
+ "param_names":[],
+ "param_values":[]
+ }
+ )");
+
+ ASSERT_TRUE(bootParameters.isSilentBoot());
+}
+
+TEST(BootParametersTest, TestParseLegacyNotSilent) {
+ BootParameters bootParameters = BootParameters();
+ bootParameters.parseLegacyBootParameters(R"(
+ {
+ "brightness":200,
+ "volume":500,
+ "boot_animation_disabled":0,
+ "param_names":[],
+ "param_values":[]
+ }
+ )");
+
+ ASSERT_FALSE(bootParameters.isSilentBoot());
+}
+
+TEST(BootParametersTest, TestParseLegacyParameters) {
+ BootParameters bootParameters = BootParameters();
+ bootParameters.parseLegacyBootParameters(R"(
+ {
+ "brightness":200,
+ "volume":100,
+ "boot_animation_disabled":1,
+ "param_names":["key1", "key2"],
+ "param_values":["value1", "value2"]
+ }
+ )");
+
+ auto parameters = bootParameters.getParameters();
+ ASSERT_EQ(2u, parameters.size());
+ ASSERT_STREQ(parameters[0].key, "key1");
+ ASSERT_STREQ(parameters[0].value, "value1");
+ ASSERT_STREQ(parameters[1].key, "key2");
+ ASSERT_STREQ(parameters[1].value, "value2");
+}
+
+TEST(BootParametersTest, TestParseLegacyZeroParameters) {
+ BootParameters bootParameters = BootParameters();
+ bootParameters.parseLegacyBootParameters(R"(
+ {
+ "brightness":200,
+ "volume":100,
+ "boot_animation_disabled":1,
+ "param_names":[],
+ "param_values":[]
+ }
+ )");
+
+ ASSERT_EQ(0u, bootParameters.getParameters().size());
+}
+
+TEST(BootParametersTest, TestMalformedLegacyParametersAreSkipped) {
+ BootParameters bootParameters = BootParameters();
+ bootParameters.parseLegacyBootParameters(R"(
+ {
+ "brightness":500,
+ "volume":500,
+ "boot_animation_disabled":0,
+ "param_names":["key1", "key2"],
+ "param_values":[1, "value2"]
+ }
+ )");
+
+ auto parameters = bootParameters.getParameters();
+ ASSERT_EQ(1u, parameters.size());
+ ASSERT_STREQ(parameters[0].key, "key2");
+ ASSERT_STREQ(parameters[0].value, "value2");
+}
+
+TEST(BootParametersTest, TestLegacyUnequalParameterSizesAreSkipped) {
+ BootParameters bootParameters = BootParameters();
+ bootParameters.parseLegacyBootParameters(R"(
+ {
+ "brightness":500,
+ "volume":500,
+ "boot_animation_disabled":0,
+ "param_names":["key1", "key2"],
+ "param_values":["value1"]
+ }
+ )");
+
+ ASSERT_EQ(0u, bootParameters.getParameters().size());
+}
+
+TEST(BootParametersTest, TestMissingLegacyBootParametersIsSilent) {
+ BootParameters bootParameters = BootParameters();
+ bootParameters.parseLegacyBootParameters(R"(
+ {
+ "brightness":500
+ }
+ )");
+
+ EXPECT_TRUE(bootParameters.isSilentBoot());
+ ASSERT_EQ(0u, bootParameters.getParameters().size());
+}
+
+TEST(BootParametersTest, TestLastFileIsRemovedOnError) {
+ TemporaryFile lastFile;
+ TemporaryDir tempDir;
+ std::string nonExistentFilePath(std::string(tempDir.path) + "/nonexistent");
+ std::string contents;
+
+ BootParameters::swapAndLoadBootConfigContents(lastFile.path, nonExistentFilePath.c_str(),
+ &contents);
+
+ struct stat buf;
+ ASSERT_EQ(-1, lstat(lastFile.path, &buf));
+ ASSERT_TRUE(contents.empty());
+}
+
+TEST(BootParametersTest, TestNextFileIsRemovedLastFileExistsOnSuccess) {
+ TemporaryFile lastFile;
+ TemporaryFile nextFile;
+
+ base::WriteStringToFile("foo", nextFile.path);
+
+ std::string contents;
+ // Expected side effects:
+ // - |next_file| is moved to |last_file|
+ // - |contents| is the contents of |next_file| before being moved.
+ BootParameters::swapAndLoadBootConfigContents(lastFile.path, nextFile.path, &contents);
+
+ struct stat buf;
+ ASSERT_EQ(0, lstat(lastFile.path, &buf));
+ ASSERT_EQ(-1, lstat(nextFile.path, &buf));
+ ASSERT_EQ(contents, "foo");
+
+ contents.clear();
+ ASSERT_TRUE(base::ReadFileToString(lastFile.path, &contents));
+ ASSERT_EQ(contents, "foo");
+}
+
+} // namespace
+
+} // namespace android
diff --git a/cmds/bootanimation/iot/iotbootanimation_main.cpp b/cmds/bootanimation/iot/iotbootanimation_main.cpp
index 00cef430135e..2a3d3766ab38 100644
--- a/cmds/bootanimation/iot/iotbootanimation_main.cpp
+++ b/cmds/bootanimation/iot/iotbootanimation_main.cpp
@@ -59,7 +59,7 @@ public:
}
mBootAction = new BootAction();
- if (!mBootAction->init(library_path, mBootParameters->getParameters())) {
+ if (!mBootAction->init(library_path, mBootParameters)) {
mBootAction = NULL;
}
};
@@ -116,8 +116,16 @@ int main() {
sp<ProcessState> proc(ProcessState::self());
ProcessState::self()->startThreadPool();
- sp<BootAnimation> boot = new BootAnimation(
- new BootActionAnimationCallbacks(std::move(bootParameters)));
+ bool isSilentBoot = bootParameters->isSilentBoot();
+ sp<BootActionAnimationCallbacks> callbacks =
+ new BootActionAnimationCallbacks(std::move(bootParameters));
+
+ // On silent boot, animations aren't displayed.
+ if (isSilentBoot) {
+ callbacks->init({});
+ } else {
+ sp<BootAnimation> boot = new BootAnimation(callbacks);
+ }
IPCThreadState::self()->joinThreadPool();
return 0;
diff --git a/cmds/content/src/com/android/commands/content/Content.java b/cmds/content/src/com/android/commands/content/Content.java
index 6e0bd3a81d84..36e51b9703c9 100644
--- a/cmds/content/src/com/android/commands/content/Content.java
+++ b/cmds/content/src/com/android/commands/content/Content.java
@@ -462,7 +462,7 @@ public class Content {
IBinder token = new Binder();
try {
ContentProviderHolder holder = activityManager.getContentProviderExternal(
- providerName, mUserId, token);
+ providerName, mUserId, token, "*cmd*");
if (holder == null) {
throw new IllegalStateException("Could not find provider: " + providerName);
}
diff --git a/cmds/dpm/src/com/android/commands/dpm/Dpm.java b/cmds/dpm/src/com/android/commands/dpm/Dpm.java
index 7c1a5557a1e9..376b13cd371e 100644
--- a/cmds/dpm/src/com/android/commands/dpm/Dpm.java
+++ b/cmds/dpm/src/com/android/commands/dpm/Dpm.java
@@ -46,6 +46,7 @@ public final class Dpm extends BaseCommand {
private static final String COMMAND_SET_PROFILE_OWNER = "set-profile-owner";
private static final String COMMAND_REMOVE_ACTIVE_ADMIN = "remove-active-admin";
private static final String COMMAND_CLEAR_FREEZE_PERIOD_RECORD = "clear-freeze-period-record";
+ private static final String COMMAND_FORCE_NETWORK_LOGS = "force-network-logs";
private static final String COMMAND_FORCE_SECURITY_LOGS = "force-security-logs";
private IDevicePolicyManager mDevicePolicyManager;
@@ -84,6 +85,9 @@ public final class Dpm extends BaseCommand {
"feature development to prevent triggering restriction on setting freeze " +
"periods.\n" +
"\n" +
+ "dpm " + COMMAND_FORCE_NETWORK_LOGS + ": makes all network logs available to " +
+ "the DPC and triggers DeviceAdminReceiver.onNetworkLogsAvailable() if needed.\n" +
+ "\n" +
"dpm " + COMMAND_FORCE_SECURITY_LOGS + ": makes all security logs available to " +
"the DPC and triggers DeviceAdminReceiver.onSecurityLogsAvailable() if needed.");
}
@@ -114,6 +118,9 @@ public final class Dpm extends BaseCommand {
case COMMAND_CLEAR_FREEZE_PERIOD_RECORD:
runClearFreezePeriodRecord();
break;
+ case COMMAND_FORCE_NETWORK_LOGS:
+ runForceNetworkLogs();
+ break;
case COMMAND_FORCE_SECURITY_LOGS:
runForceSecurityLogs();
break;
@@ -122,6 +129,18 @@ public final class Dpm extends BaseCommand {
}
}
+ private void runForceNetworkLogs() throws RemoteException, InterruptedException {
+ while (true) {
+ final long toWait = mDevicePolicyManager.forceNetworkLogs();
+ if (toWait == 0) {
+ break;
+ }
+ System.out.println("We have to wait for " + toWait + " milliseconds...");
+ Thread.sleep(toWait);
+ }
+ System.out.println("Success");
+ }
+
private void runForceSecurityLogs() throws RemoteException, InterruptedException {
while (true) {
final long toWait = mDevicePolicyManager.forceSecurityLogs();
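
Note: the new force-network-logs handler mirrors the existing force-security-logs loop — it retries until forceNetworkLogs() reports a zero wait time, printing the remaining wait on each pass, and finishes with "Success". Assuming the stock dpm shell wrapper is installed on the device, an illustrative invocation is:

    adb shell dpm force-network-logs
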
diff --git a/cmds/hid/jni/com_android_commands_hid_Device.cpp b/cmds/hid/jni/com_android_commands_hid_Device.cpp
index 5cc4fc4c16b2..b3e287bae76a 100644
--- a/cmds/hid/jni/com_android_commands_hid_Device.cpp
+++ b/cmds/hid/jni/com_android_commands_hid_Device.cpp
@@ -42,7 +42,6 @@ namespace android {
namespace uhid {
static const char* UHID_PATH = "/dev/uhid";
-static const size_t UHID_MAX_NAME_LENGTH = 128;
static struct {
jmethodID onDeviceOpen;
@@ -90,8 +89,13 @@ JNIEnv* DeviceCallback::getJNIEnv() {
}
Device* Device::open(int32_t id, const char* name, int32_t vid, int32_t pid,
- std::unique_ptr<uint8_t[]> descriptor, size_t descriptorSize,
- std::unique_ptr<DeviceCallback> callback) {
+ std::vector<uint8_t> descriptor, std::unique_ptr<DeviceCallback> callback) {
+
+ size_t size = descriptor.size();
+ if (size > HID_MAX_DESCRIPTOR_SIZE) {
+ LOGE("Received invalid hid report with descriptor size %zu, skipping", size);
+ return nullptr;
+ }
int fd = ::open(UHID_PATH, O_RDWR | O_CLOEXEC);
if (fd < 0) {
@@ -102,10 +106,10 @@ Device* Device::open(int32_t id, const char* name, int32_t vid, int32_t pid,
struct uhid_event ev;
memset(&ev, 0, sizeof(ev));
ev.type = UHID_CREATE2;
- strncpy((char*)ev.u.create2.name, name, UHID_MAX_NAME_LENGTH);
- memcpy(&ev.u.create2.rd_data, descriptor.get(),
- descriptorSize * sizeof(ev.u.create2.rd_data[0]));
- ev.u.create2.rd_size = descriptorSize;
+ strlcpy(reinterpret_cast<char*>(ev.u.create2.name), name, sizeof(ev.u.create2.name));
+ memcpy(&ev.u.create2.rd_data, descriptor.data(),
+ size * sizeof(ev.u.create2.rd_data[0]));
+ ev.u.create2.rd_size = size;
ev.u.create2.bus = BUS_BLUETOOTH;
ev.u.create2.vendor = vid;
ev.u.create2.product = pid;
@@ -156,12 +160,17 @@ Device::~Device() {
mFd = -1;
}
-void Device::sendReport(uint8_t* report, size_t reportSize) {
+void Device::sendReport(const std::vector<uint8_t>& report) const {
+ if (report.size() > UHID_DATA_MAX) {
+ LOGE("Received invalid report of size %zu, skipping", report.size());
+ return;
+ }
+
struct uhid_event ev;
memset(&ev, 0, sizeof(ev));
ev.type = UHID_INPUT2;
- ev.u.input2.size = reportSize;
- memcpy(&ev.u.input2.data, report, reportSize);
+ ev.u.input2.size = report.size();
+ memcpy(&ev.u.input2.data, report.data(), report.size() * sizeof(ev.u.input2.data[0]));
ssize_t ret = TEMP_FAILURE_RETRY(::write(mFd, &ev, sizeof(ev)));
if (ret < 0 || ret != sizeof(ev)) {
LOGE("Failed to send hid event: %s", strerror(errno));
@@ -191,12 +200,13 @@ int Device::handleEvents(int events) {
} // namespace uhid
-std::unique_ptr<uint8_t[]> getData(JNIEnv* env, jbyteArray javaArray, size_t& outSize) {
+std::vector<uint8_t> getData(JNIEnv* env, jbyteArray javaArray) {
ScopedByteArrayRO scopedArray(env, javaArray);
- outSize = scopedArray.size();
- std::unique_ptr<uint8_t[]> data(new uint8_t[outSize]);
- for (size_t i = 0; i < outSize; i++) {
- data[i] = static_cast<uint8_t>(scopedArray[i]);
+ size_t size = scopedArray.size();
+ std::vector<uint8_t> data;
+ data.reserve(size);
+ for (size_t i = 0; i < size; i++) {
+ data.push_back(static_cast<uint8_t>(scopedArray[i]));
}
return data;
}
@@ -208,23 +218,20 @@ static jlong openDevice(JNIEnv* env, jclass /* clazz */, jstring rawName, jint i
return 0;
}
- size_t size;
- std::unique_ptr<uint8_t[]> desc = getData(env, rawDescriptor, size);
+ std::vector<uint8_t> desc = getData(env, rawDescriptor);
std::unique_ptr<uhid::DeviceCallback> cb(new uhid::DeviceCallback(env, callback));
uhid::Device* d = uhid::Device::open(
- id, reinterpret_cast<const char*>(name.c_str()), vid, pid,
- std::move(desc), size, std::move(cb));
+ id, reinterpret_cast<const char*>(name.c_str()), vid, pid, desc, std::move(cb));
return reinterpret_cast<jlong>(d);
}
static void sendReport(JNIEnv* env, jclass /* clazz */, jlong ptr, jbyteArray rawReport) {
- size_t size;
- std::unique_ptr<uint8_t[]> report = getData(env, rawReport, size);
+ std::vector<uint8_t> report = getData(env, rawReport);
uhid::Device* d = reinterpret_cast<uhid::Device*>(ptr);
if (d) {
- d->sendReport(report.get(), size);
+ d->sendReport(report);
} else {
LOGE("Could not send report, Device* is null!");
}
diff --git a/cmds/hid/jni/com_android_commands_hid_Device.h b/cmds/hid/jni/com_android_commands_hid_Device.h
index 149456d8c10d..61a1f760697f 100644
--- a/cmds/hid/jni/com_android_commands_hid_Device.h
+++ b/cmds/hid/jni/com_android_commands_hid_Device.h
@@ -15,6 +15,7 @@
*/
#include <memory>
+#include <vector>
#include <jni.h>
@@ -38,13 +39,12 @@ private:
class Device {
public:
static Device* open(int32_t id, const char* name, int32_t vid, int32_t pid,
- std::unique_ptr<uint8_t[]> descriptor, size_t descriptorSize,
- std::unique_ptr<DeviceCallback> callback);
+ std::vector<uint8_t> descriptor, std::unique_ptr<DeviceCallback> callback);
Device(int32_t id, int fd, std::unique_ptr<DeviceCallback> callback);
~Device();
- void sendReport(uint8_t* report, size_t reportSize);
+ void sendReport(const std::vector<uint8_t>& report) const;
void close();
int handleEvents(int events);
diff --git a/cmds/incident_helper/src/main.cpp b/cmds/incident_helper/src/main.cpp
index 091410bce2dd..809a77163fb4 100644
--- a/cmds/incident_helper/src/main.cpp
+++ b/cmds/incident_helper/src/main.cpp
@@ -73,7 +73,8 @@ static TextParserBase* selectParser(int section) {
case 2006:
return new BatteryTypeParser();
default:
- return NULL;
+            // Return a no-op parser when no specific one is implemented.
+ return new NoopParser();
}
}
diff --git a/cmds/incident_helper/src/parsers/PageTypeInfoParser.cpp b/cmds/incident_helper/src/parsers/PageTypeInfoParser.cpp
index 0615c74b8d64..2a89c920c119 100644
--- a/cmds/incident_helper/src/parsers/PageTypeInfoParser.cpp
+++ b/cmds/incident_helper/src/parsers/PageTypeInfoParser.cpp
@@ -75,18 +75,13 @@ PageTypeInfoParser::Parse(const int in, const int out) const
} else return BAD_VALUE;
// expect part 2 starts with "type"
if (stripPrefix(&record[2], "type")) {
- // expect the rest of part 2 has number of (pageBlockOrder + 2) parts
// An example looks like:
// header line: type 0 1 2 3 4 5 6 7 8 9 10
// record line: Unmovable 426 279 226 1 1 1 0 0 2 2 0
- // The pageBlockOrder = 10 and it's zero-indexed. so total parts
- // are 10 + 1(zero-indexed) + 1(the type part) = 12.
record_t pageCounts = parseRecord(record[2]);
- int pageCountsSize = pageBlockOrder + 2;
- if ((int)pageCounts.size() != pageCountsSize) return BAD_VALUE;
proto.write(PageTypeInfoProto::MigrateType::TYPE, pageCounts[0]);
- for (auto i=1; i<pageCountsSize; i++) {
+ for (size_t i=1; i<pageCounts.size(); i++) {
proto.write(PageTypeInfoProto::MigrateType::FREE_PAGES_COUNT, toInt(pageCounts[i]));
}
} else return BAD_VALUE;
@@ -125,4 +120,4 @@ PageTypeInfoParser::Parse(const int in, const int out) const
fprintf(stderr, "[%s]Proto size: %zu bytes\n", this->name.string(), proto.size());
return NO_ERROR;
-}
\ No newline at end of file
+}
diff --git a/cmds/incident_helper/testdata/pagetypeinfo.txt b/cmds/incident_helper/testdata/pagetypeinfo.txt
index d45ddc408c0f..c65b5a1fa1e1 100644
--- a/cmds/incident_helper/testdata/pagetypeinfo.txt
+++ b/cmds/incident_helper/testdata/pagetypeinfo.txt
@@ -1,5 +1,5 @@
-Page block order: 10
-Pages per block: 1024
+Page block order: 9
+Pages per block: 512
Free pages count per migrate type at order 0 1 2 3 4 5 6 7 8 9 10
Node 0, zone DMA, type Unmovable 426 279 226 1 1 1 0 0 2 2 0
diff --git a/cmds/incident_helper/tests/PageTypeInfoParser_test.cpp b/cmds/incident_helper/tests/PageTypeInfoParser_test.cpp
index 9bad7be4a07e..5688681e45fd 100644
--- a/cmds/incident_helper/tests/PageTypeInfoParser_test.cpp
+++ b/cmds/incident_helper/tests/PageTypeInfoParser_test.cpp
@@ -54,8 +54,8 @@ TEST_F(PageTypeInfoParserTest, Success) {
PageTypeInfoParser parser;
PageTypeInfoProto expected;
- expected.set_page_block_order(10);
- expected.set_pages_per_block(1024);
+ expected.set_page_block_order(9);
+ expected.set_pages_per_block(512);
PageTypeInfoProto::MigrateType* mt1 = expected.add_migrate_types();
mt1->set_node(0);
diff --git a/cmds/incidentd/src/FdBuffer.cpp b/cmds/incidentd/src/FdBuffer.cpp
index 0885b13483c6..a8ef8311720d 100644
--- a/cmds/incidentd/src/FdBuffer.cpp
+++ b/cmds/incidentd/src/FdBuffer.cpp
@@ -71,7 +71,8 @@ status_t FdBuffer::read(int fd, int64_t timeout) {
VLOG("return event has error %s", strerror(errno));
return errno != 0 ? -errno : UNKNOWN_ERROR;
} else {
- ssize_t amt = ::read(fd, mBuffer.writeBuffer(), mBuffer.currentToWrite());
+ ssize_t amt = TEMP_FAILURE_RETRY(
+ ::read(fd, mBuffer.writeBuffer(), mBuffer.currentToWrite()));
if (amt < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
continue;
@@ -182,9 +183,9 @@ status_t FdBuffer::readProcessedDataInStream(int fd, unique_fd toFd, unique_fd f
if (cirSize != BUFFER_SIZE && pfds[0].fd != -1) {
ssize_t amt;
if (rpos >= wpos) {
- amt = ::read(fd, cirBuf + rpos, BUFFER_SIZE - rpos);
+ amt = TEMP_FAILURE_RETRY(::read(fd, cirBuf + rpos, BUFFER_SIZE - rpos));
} else {
- amt = ::read(fd, cirBuf + rpos, wpos - rpos);
+ amt = TEMP_FAILURE_RETRY(::read(fd, cirBuf + rpos, wpos - rpos));
}
if (amt < 0) {
if (!(errno == EAGAIN || errno == EWOULDBLOCK)) {
@@ -204,9 +205,9 @@ status_t FdBuffer::readProcessedDataInStream(int fd, unique_fd toFd, unique_fd f
if (cirSize > 0 && pfds[1].fd != -1) {
ssize_t amt;
if (rpos > wpos) {
- amt = ::write(toFd.get(), cirBuf + wpos, rpos - wpos);
+ amt = TEMP_FAILURE_RETRY(::write(toFd.get(), cirBuf + wpos, rpos - wpos));
} else {
- amt = ::write(toFd.get(), cirBuf + wpos, BUFFER_SIZE - wpos);
+ amt = TEMP_FAILURE_RETRY(::write(toFd.get(), cirBuf + wpos, BUFFER_SIZE - wpos));
}
if (amt < 0) {
if (!(errno == EAGAIN || errno == EWOULDBLOCK)) {
@@ -235,7 +236,8 @@ status_t FdBuffer::readProcessedDataInStream(int fd, unique_fd toFd, unique_fd f
}
// read from parsing process
- ssize_t amt = ::read(fromFd.get(), mBuffer.writeBuffer(), mBuffer.currentToWrite());
+ ssize_t amt = TEMP_FAILURE_RETRY(
+ ::read(fromFd.get(), mBuffer.writeBuffer(), mBuffer.currentToWrite()));
if (amt < 0) {
if (!(errno == EAGAIN || errno == EWOULDBLOCK)) {
VLOG("Fail to read fromFd %d: %s", fromFd.get(), strerror(errno));
diff --git a/cmds/incidentd/src/IncidentService.cpp b/cmds/incidentd/src/IncidentService.cpp
index e305b5462b77..e92cf9444e15 100644
--- a/cmds/incidentd/src/IncidentService.cpp
+++ b/cmds/incidentd/src/IncidentService.cpp
@@ -314,6 +314,19 @@ status_t IncidentService::command(FILE* in, FILE* out, FILE* err, Vector<String8
mThrottler->dump(out);
return NO_ERROR;
}
+ if (!args[0].compare(String8("section"))) {
+ int id = atoi(args[1]);
+ int idx = 0;
+ while (SECTION_LIST[idx] != NULL) {
+ const Section* section = SECTION_LIST[idx];
+ if (section->id == id) {
+ fprintf(out, "Section[%d] %s\n", id, section->name.string());
+ break;
+ }
+ idx++;
+ }
+ return NO_ERROR;
+ }
}
return cmd_help(out);
}
@@ -321,8 +334,9 @@ status_t IncidentService::command(FILE* in, FILE* out, FILE* err, Vector<String8
status_t IncidentService::cmd_help(FILE* out) {
fprintf(out, "usage: adb shell cmd incident privacy print <section_id>\n");
fprintf(out, "usage: adb shell cmd incident privacy parse <section_id> < proto.txt\n");
- fprintf(out, " Prints/parses for the section id.\n");
- fprintf(out, "\n");
+ fprintf(out, " Prints/parses for the section id.\n\n");
+ fprintf(out, "usage: adb shell cmd incident section <section_id>\n");
+ fprintf(out, " Prints section id and its name.\n\n");
fprintf(out, "usage: adb shell cmd incident throttler\n");
fprintf(out, " Prints the current throttler state\n");
return NO_ERROR;
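
Note: the new "section" subcommand walks SECTION_LIST until it finds a matching id and prints nothing for ids that are not registered. As a quick sanity check (3000 is only an illustrative id, not one defined by this change):

    adb shell cmd incident section 3000

prints a line of the form "Section[3000] <name>" when such a section exists.
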
diff --git a/cmds/incidentd/src/PrivacyBuffer.cpp b/cmds/incidentd/src/PrivacyBuffer.cpp
index d753e5e6404e..7a8ebe394d51 100644
--- a/cmds/incidentd/src/PrivacyBuffer.cpp
+++ b/cmds/incidentd/src/PrivacyBuffer.cpp
@@ -86,8 +86,8 @@ status_t PrivacyBuffer::stripField(const Privacy* parentPolicy, const PrivacySpe
// iterator will point to head of next field
size_t currentAt = mData.rp()->pos();
writeFieldOrSkip(fieldTag, skip);
- VLOG("[Depth %2d]Field %d %ss %d bytes", depth, fieldId, skip ? "skip" : "write",
- (int)(get_varint_size(fieldTag) + mData.rp()->pos() - currentAt));
+ VLOG("[Depth %2d]Field %d %ss %zu bytes", depth, fieldId, skip ? "skip" : "write",
+ get_varint_size(fieldTag) + mData.rp()->pos() - currentAt);
return NO_ERROR;
}
// current field is message type and its sub-fields have extra privacy policies
diff --git a/cmds/incidentd/src/Section.cpp b/cmds/incidentd/src/Section.cpp
index 4bbe04204a54..87799b38906c 100644
--- a/cmds/incidentd/src/Section.cpp
+++ b/cmds/incidentd/src/Section.cpp
@@ -151,11 +151,10 @@ DONE:
}
// ================================================================================
-Section::Section(int i, int64_t timeoutMs, bool userdebugAndEngOnly, bool deviceSpecific)
+Section::Section(int i, int64_t timeoutMs, bool userdebugAndEngOnly)
: id(i),
timeoutMs(timeoutMs),
- userdebugAndEngOnly(userdebugAndEngOnly),
- deviceSpecific(deviceSpecific) {}
+ userdebugAndEngOnly(userdebugAndEngOnly) {}
Section::~Section() {}
@@ -240,10 +239,10 @@ status_t MetadataSection::Execute(ReportRequestSet* requests) const {
// ================================================================================
static inline bool isSysfs(const char* filename) { return strncmp(filename, "/sys/", 5) == 0; }
-FileSection::FileSection(int id, const char* filename, const bool deviceSpecific,
- const int64_t timeoutMs)
- : Section(id, timeoutMs, false, deviceSpecific), mFilename(filename) {
- name = filename;
+FileSection::FileSection(int id, const char* filename, const int64_t timeoutMs)
+ : Section(id, timeoutMs, false), mFilename(filename) {
+ name = "file ";
+ name += filename;
mIsSysfs = isSysfs(filename);
}
@@ -254,8 +253,10 @@ status_t FileSection::Execute(ReportRequestSet* requests) const {
// add O_CLOEXEC to make sure it is closed when exec incident helper
unique_fd fd(open(mFilename, O_RDONLY | O_CLOEXEC));
if (fd.get() == -1) {
- ALOGW("FileSection '%s' failed to open file", this->name.string());
- return this->deviceSpecific ? NO_ERROR : -errno;
+ ALOGW("[%s] failed to open file", this->name.string());
+ // There may be some devices/architectures that won't have the file.
+ // Just return here without an error.
+ return NO_ERROR;
}
FdBuffer buffer;
@@ -263,13 +264,13 @@ status_t FileSection::Execute(ReportRequestSet* requests) const {
Fpipe c2pPipe;
// initiate pipes to pass data to/from incident_helper
if (!p2cPipe.init() || !c2pPipe.init()) {
- ALOGW("FileSection '%s' failed to setup pipes", this->name.string());
+ ALOGW("[%s] failed to setup pipes", this->name.string());
return -errno;
}
pid_t pid = fork_execute_incident_helper(this->id, &p2cPipe, &c2pPipe);
if (pid == -1) {
- ALOGW("FileSection '%s' failed to fork", this->name.string());
+ ALOGW("[%s] failed to fork", this->name.string());
return -errno;
}
@@ -279,7 +280,7 @@ status_t FileSection::Execute(ReportRequestSet* requests) const {
this->timeoutMs, mIsSysfs);
write_section_stats(requests->sectionStats(this->id), buffer);
if (readStatus != NO_ERROR || buffer.timedOut()) {
- ALOGW("FileSection '%s' failed to read data from incident helper: %s, timedout: %s",
+ ALOGW("[%s] failed to read data from incident helper: %s, timedout: %s",
this->name.string(), strerror(-readStatus), buffer.timedOut() ? "true" : "false");
kill_child(pid);
return readStatus;
@@ -287,20 +288,11 @@ status_t FileSection::Execute(ReportRequestSet* requests) const {
status_t ihStatus = wait_child(pid);
if (ihStatus != NO_ERROR) {
- ALOGW("FileSection '%s' abnormal child process: %s", this->name.string(),
- strerror(-ihStatus));
+ ALOGW("[%s] abnormal child process: %s", this->name.string(), strerror(-ihStatus));
return ihStatus;
}
- VLOG("FileSection '%s' wrote %zd bytes in %d ms", this->name.string(), buffer.size(),
- (int)buffer.durationMs());
- status_t err = write_report_requests(this->id, buffer, requests);
- if (err != NO_ERROR) {
- ALOGW("FileSection '%s' failed writing: %s", this->name.string(), strerror(-err));
- return err;
- }
-
- return NO_ERROR;
+ return write_report_requests(this->id, buffer, requests);
}
// ================================================================================
GZipSection::GZipSection(int id, const char* filename, ...) : Section(id) {
@@ -329,9 +321,8 @@ status_t GZipSection::Execute(ReportRequestSet* requests) const {
ALOGW("GZipSection failed to open file %s", mFilenames[index]);
index++; // look at the next file.
}
- VLOG("GZipSection is using file %s, fd=%d", mFilenames[index], fd.get());
if (fd.get() == -1) {
- ALOGW("GZipSection %s can't open all the files", this->name.string());
+ ALOGW("[%s] can't open all the files", this->name.string());
return NO_ERROR; // e.g. LAST_KMSG will reach here in user build.
}
FdBuffer buffer;
@@ -339,13 +330,13 @@ status_t GZipSection::Execute(ReportRequestSet* requests) const {
Fpipe c2pPipe;
// initiate pipes to pass data to/from gzip
if (!p2cPipe.init() || !c2pPipe.init()) {
- ALOGW("GZipSection '%s' failed to setup pipes", this->name.string());
+ ALOGW("[%s] failed to setup pipes", this->name.string());
return -errno;
}
pid_t pid = fork_execute_cmd((char* const*)GZIP, &p2cPipe, &c2pPipe);
if (pid == -1) {
- ALOGW("GZipSection '%s' failed to fork", this->name.string());
+ ALOGW("[%s] failed to fork", this->name.string());
return -errno;
}
// parent process
@@ -364,24 +355,22 @@ status_t GZipSection::Execute(ReportRequestSet* requests) const {
size_t editPos = internalBuffer->wp()->pos();
internalBuffer->wp()->move(8); // reserve 8 bytes for the varint of the data size.
size_t dataBeginAt = internalBuffer->wp()->pos();
- VLOG("GZipSection '%s' editPos=%zd, dataBeginAt=%zd", this->name.string(), editPos,
- dataBeginAt);
+ VLOG("[%s] editPos=%zu, dataBeginAt=%zu", this->name.string(), editPos, dataBeginAt);
status_t readStatus = buffer.readProcessedDataInStream(
fd.get(), std::move(p2cPipe.writeFd()), std::move(c2pPipe.readFd()), this->timeoutMs,
isSysfs(mFilenames[index]));
write_section_stats(requests->sectionStats(this->id), buffer);
if (readStatus != NO_ERROR || buffer.timedOut()) {
- ALOGW("GZipSection '%s' failed to read data from gzip: %s, timedout: %s",
- this->name.string(), strerror(-readStatus), buffer.timedOut() ? "true" : "false");
+ ALOGW("[%s] failed to read data from gzip: %s, timedout: %s", this->name.string(),
+ strerror(-readStatus), buffer.timedOut() ? "true" : "false");
kill_child(pid);
return readStatus;
}
status_t gzipStatus = wait_child(pid);
if (gzipStatus != NO_ERROR) {
- ALOGW("GZipSection '%s' abnormal child process: %s", this->name.string(),
- strerror(-gzipStatus));
+ ALOGW("[%s] abnormal child process: %s", this->name.string(), strerror(-gzipStatus));
return gzipStatus;
}
// Revisit the actual size from gzip result and edit the internal buffer accordingly.
@@ -389,15 +378,8 @@ status_t GZipSection::Execute(ReportRequestSet* requests) const {
internalBuffer->wp()->rewind()->move(editPos);
internalBuffer->writeRawVarint32(dataSize);
internalBuffer->copy(dataBeginAt, dataSize);
- VLOG("GZipSection '%s' wrote %zd bytes in %d ms, dataSize=%zd", this->name.string(),
- buffer.size(), (int)buffer.durationMs(), dataSize);
- status_t err = write_report_requests(this->id, buffer, requests);
- if (err != NO_ERROR) {
- ALOGW("GZipSection '%s' failed writing: %s", this->name.string(), strerror(-err));
- return err;
- }
- return NO_ERROR;
+ return write_report_requests(this->id, buffer, requests);
}
// ================================================================================
@@ -482,8 +464,7 @@ status_t WorkerThreadSection::Execute(ReportRequestSet* requests) const {
err = buffer.read(data->pipe.readFd().get(), this->timeoutMs);
if (err != NO_ERROR) {
// TODO: Log this error into the incident report.
- ALOGW("WorkerThreadSection '%s' reader failed with error '%s'", this->name.string(),
- strerror(-err));
+ ALOGW("[%s] reader failed with error '%s'", this->name.string(), strerror(-err));
}
// Done with the read fd. The worker thread closes the write one so
@@ -501,39 +482,25 @@ status_t WorkerThreadSection::Execute(ReportRequestSet* requests) const {
if (data->workerError != NO_ERROR) {
err = data->workerError;
// TODO: Log this error into the incident report.
- ALOGW("WorkerThreadSection '%s' worker failed with error '%s'", this->name.string(),
- strerror(-err));
+ ALOGW("[%s] worker failed with error '%s'", this->name.string(), strerror(-err));
}
}
}
write_section_stats(requests->sectionStats(this->id), buffer);
if (timedOut || buffer.timedOut()) {
- ALOGW("WorkerThreadSection '%s' timed out", this->name.string());
+ ALOGW("[%s] timed out", this->name.string());
return NO_ERROR;
}
- if (buffer.truncated()) {
- // TODO: Log this into the incident report.
- }
-
// TODO: There was an error with the command or buffering. Report that. For now
    // just exit with a log message.
if (err != NO_ERROR) {
- ALOGW("WorkerThreadSection '%s' failed with error '%s'", this->name.string(),
- strerror(-err));
+ ALOGW("[%s] failed with error '%s'", this->name.string(), strerror(-err));
return NO_ERROR;
}
// Write the data that was collected
- VLOG("WorkerThreadSection '%s' wrote %zd bytes in %d ms", name.string(), buffer.size(),
- (int)buffer.durationMs());
- err = write_report_requests(this->id, buffer, requests);
- if (err != NO_ERROR) {
- ALOGW("WorkerThreadSection '%s' failed writing: '%s'", this->name.string(), strerror(-err));
- return err;
- }
-
- return NO_ERROR;
+ return write_report_requests(this->id, buffer, requests);
}
// ================================================================================
@@ -570,18 +537,18 @@ status_t CommandSection::Execute(ReportRequestSet* requests) const {
Fpipe ihPipe;
if (!cmdPipe.init() || !ihPipe.init()) {
- ALOGW("CommandSection '%s' failed to setup pipes", this->name.string());
+ ALOGW("[%s] failed to setup pipes", this->name.string());
return -errno;
}
pid_t cmdPid = fork_execute_cmd((char* const*)mCommand, NULL, &cmdPipe);
if (cmdPid == -1) {
- ALOGW("CommandSection '%s' failed to fork", this->name.string());
+ ALOGW("[%s] failed to fork", this->name.string());
return -errno;
}
pid_t ihPid = fork_execute_incident_helper(this->id, &cmdPipe, &ihPipe);
if (ihPid == -1) {
- ALOGW("CommandSection '%s' failed to fork", this->name.string());
+ ALOGW("[%s] failed to fork", this->name.string());
return -errno;
}
@@ -589,7 +556,7 @@ status_t CommandSection::Execute(ReportRequestSet* requests) const {
status_t readStatus = buffer.read(ihPipe.readFd().get(), this->timeoutMs);
write_section_stats(requests->sectionStats(this->id), buffer);
if (readStatus != NO_ERROR || buffer.timedOut()) {
- ALOGW("CommandSection '%s' failed to read data from incident helper: %s, timedout: %s",
+ ALOGW("[%s] failed to read data from incident helper: %s, timedout: %s",
this->name.string(), strerror(-readStatus), buffer.timedOut() ? "true" : "false");
kill_child(cmdPid);
kill_child(ihPid);
@@ -601,20 +568,13 @@ status_t CommandSection::Execute(ReportRequestSet* requests) const {
status_t cmdStatus = wait_child(cmdPid);
status_t ihStatus = wait_child(ihPid);
if (cmdStatus != NO_ERROR || ihStatus != NO_ERROR) {
- ALOGW("CommandSection '%s' abnormal child processes, return status: command: %s, incident "
+ ALOGW("[%s] abnormal child processes, return status: command: %s, incident "
"helper: %s",
this->name.string(), strerror(-cmdStatus), strerror(-ihStatus));
return cmdStatus != NO_ERROR ? cmdStatus : ihStatus;
}
- VLOG("CommandSection '%s' wrote %zd bytes in %d ms", this->name.string(), buffer.size(),
- (int)buffer.durationMs());
- status_t err = write_report_requests(this->id, buffer, requests);
- if (err != NO_ERROR) {
- ALOGW("CommandSection '%s' failed writing: %s", this->name.string(), strerror(-err));
- return err;
- }
- return NO_ERROR;
+ return write_report_requests(this->id, buffer, requests);
}
// ================================================================================
@@ -664,7 +624,7 @@ status_t DumpsysSection::BlockingCall(int pipeWriteFd) const {
map<log_id_t, log_time> LogSection::gLastLogsRetrieved;
LogSection::LogSection(int id, log_id_t logID) : WorkerThreadSection(id), mLogID(logID) {
- name += "logcat ";
+ name = "logcat ";
name += android_log_id_to_name(logID);
switch (logID) {
case LOG_ID_EVENTS:
@@ -705,7 +665,7 @@ status_t LogSection::BlockingCall(int pipeWriteFd) const {
android_logger_list_free);
if (android_logger_open(loggers.get(), mLogID) == NULL) {
- ALOGE("LogSection %s: Can't get logger.", this->name.string());
+ ALOGE("[%s] Can't get logger.", this->name.string());
return -1;
}
@@ -721,7 +681,7 @@ status_t LogSection::BlockingCall(int pipeWriteFd) const {
        // err = -EAGAIN, graceful indication for ANDROID_LOG_NONBLOCK that this is the end of data.
if (err <= 0) {
if (err != -EAGAIN) {
- ALOGW("LogSection %s: fails to read a log_msg.\n", this->name.string());
+ ALOGW("[%s] fails to read a log_msg.\n", this->name.string());
}
// dump previous logs and don't consider this error a failure.
break;
@@ -792,7 +752,7 @@ status_t LogSection::BlockingCall(int pipeWriteFd) const {
AndroidLogEntry entry;
err = android_log_processLogBuffer(&msg.entry_v1, &entry);
if (err != NO_ERROR) {
- ALOGW("LogSection %s: fails to process to an entry.\n", this->name.string());
+ ALOGW("[%s] fails to process to an entry.\n", this->name.string());
break;
}
lastTimestamp.tv_sec = entry.tv_sec;
@@ -821,7 +781,7 @@ status_t LogSection::BlockingCall(int pipeWriteFd) const {
TombstoneSection::TombstoneSection(int id, const char* type, const int64_t timeoutMs)
: WorkerThreadSection(id, timeoutMs), mType(type) {
- name += "tombstone ";
+ name = "tombstone ";
name += type;
}
@@ -876,7 +836,7 @@ status_t TombstoneSection::BlockingCall(int pipeWriteFd) const {
Fpipe dumpPipe;
if (!dumpPipe.init()) {
- ALOGW("TombstoneSection '%s' failed to setup dump pipe", this->name.string());
+ ALOGW("[%s] failed to setup dump pipe", this->name.string());
err = -errno;
break;
}
@@ -910,7 +870,7 @@ status_t TombstoneSection::BlockingCall(int pipeWriteFd) const {
// Wait on the child to avoid it becoming a zombie process.
status_t cStatus = wait_child(child);
if (err != NO_ERROR) {
- ALOGW("TombstoneSection '%s' failed to read stack dump: %d", this->name.string(), err);
+ ALOGW("[%s] failed to read stack dump: %d", this->name.string(), err);
dumpPipe.readFd().reset();
break;
}
diff --git a/cmds/incidentd/src/Section.h b/cmds/incidentd/src/Section.h
index a031a15fe7c9..302b4ef7ae34 100644
--- a/cmds/incidentd/src/Section.h
+++ b/cmds/incidentd/src/Section.h
@@ -41,11 +41,9 @@ public:
const int id;
const int64_t timeoutMs; // each section must have a timeout
const bool userdebugAndEngOnly;
- const bool deviceSpecific;
String8 name;
- Section(int id, int64_t timeoutMs = REMOTE_CALL_TIMEOUT_MS, bool userdebugAndEngOnly = false,
- bool deviceSpecific = false);
+ Section(int id, int64_t timeoutMs = REMOTE_CALL_TIMEOUT_MS, bool userdebugAndEngOnly = false);
virtual ~Section();
virtual status_t Execute(ReportRequestSet* requests) const = 0;
@@ -78,7 +76,7 @@ public:
*/
class FileSection : public Section {
public:
- FileSection(int id, const char* filename, bool deviceSpecific = false,
+ FileSection(int id, const char* filename,
int64_t timeoutMs = 5000 /* 5 seconds */);
virtual ~FileSection();
diff --git a/cmds/incidentd/src/Throttler.cpp b/cmds/incidentd/src/Throttler.cpp
index 2b790ca14176..11136ecca091 100644
--- a/cmds/incidentd/src/Throttler.cpp
+++ b/cmds/incidentd/src/Throttler.cpp
@@ -18,6 +18,7 @@
#include "Throttler.h"
+#include <inttypes.h>
#include <utils/SystemClock.h>
namespace android {
@@ -42,15 +43,15 @@ bool Throttler::shouldThrottle() {
}
void Throttler::addReportSize(size_t reportByteSize) {
- VLOG("The current request took %d bytes to dropbox", (int)reportByteSize);
+ VLOG("The current request took %zu bytes to dropbox", reportByteSize);
mAccumulatedSize += reportByteSize;
}
void Throttler::dump(FILE* out) {
- fprintf(out, "mSizeLimit=%d\n", (int)mSizeLimit);
- fprintf(out, "mAccumulatedSize=%d\n", (int)mAccumulatedSize);
- fprintf(out, "mRefractoryPeriodMs=%d\n", (int)mRefractoryPeriodMs);
- fprintf(out, "mLastRefractoryMs=%d\n", (int)mLastRefractoryMs);
+ fprintf(out, "mSizeLimit=%zu\n", mSizeLimit);
+ fprintf(out, "mAccumulatedSize=%zu\n", mAccumulatedSize);
+ fprintf(out, "mRefractoryPeriodMs=%" PRIi64 "\n", mRefractoryPeriodMs);
+ fprintf(out, "mLastRefractoryMs=%" PRIi64 "\n", mLastRefractoryMs);
}
} // namespace incidentd
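
Note: replacing the "(int)" casts with "%zu" and PRIi64 keeps the dump output correct on both 32-bit and 64-bit builds — the size_t counters are printed with a matching length modifier, and the int64_t millisecond fields use the PRIi64 macro from <inttypes.h> instead of being truncated to int.
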
diff --git a/cmds/incidentd/tests/Reporter_test.cpp b/cmds/incidentd/tests/Reporter_test.cpp
index cf107c858cca..108690844280 100644
--- a/cmds/incidentd/tests/Reporter_test.cpp
+++ b/cmds/incidentd/tests/Reporter_test.cpp
@@ -176,7 +176,7 @@ TEST_F(ReporterTest, RunReportToGivenDirectory) {
ASSERT_EQ(Reporter::REPORT_FINISHED, reporter->runReport(&size));
vector<string> results = InspectFiles();
- ASSERT_EQ((int)results.size(), 1);
+ ASSERT_EQ(results.size(), 1UL);
EXPECT_EQ(results[0],
"\n\x2"
"\b\f\n\x6"
diff --git a/cmds/incidentd/tests/Section_test.cpp b/cmds/incidentd/tests/Section_test.cpp
index 3c338b3a36c8..9b684a060286 100644
--- a/cmds/incidentd/tests/Section_test.cpp
+++ b/cmds/incidentd/tests/Section_test.cpp
@@ -144,15 +144,15 @@ TEST_F(SectionTest, FileSection) {
}
TEST_F(SectionTest, FileSectionNotExist) {
- FileSection fs1(NOOP_PARSER, "notexist", false, QUICK_TIMEOUT_MS);
- ASSERT_EQ(NAME_NOT_FOUND, fs1.Execute(&requests));
+ FileSection fs1(NOOP_PARSER, "notexist", QUICK_TIMEOUT_MS);
+ ASSERT_EQ(NO_ERROR, fs1.Execute(&requests));
- FileSection fs2(NOOP_PARSER, "notexist", true, QUICK_TIMEOUT_MS);
+ FileSection fs2(NOOP_PARSER, "notexist", QUICK_TIMEOUT_MS);
ASSERT_EQ(NO_ERROR, fs2.Execute(&requests));
}
TEST_F(SectionTest, FileSectionTimeout) {
- FileSection fs(TIMEOUT_PARSER, tf.path, false, QUICK_TIMEOUT_MS);
+ FileSection fs(TIMEOUT_PARSER, tf.path, QUICK_TIMEOUT_MS);
ASSERT_EQ(NO_ERROR, fs.Execute(&requests));
ASSERT_TRUE(requests.sectionStats(TIMEOUT_PARSER)->timed_out());
}
diff --git a/cmds/input/src/com/android/commands/input/Input.java b/cmds/input/src/com/android/commands/input/Input.java
index d3ec32076292..74edffb4738d 100644
--- a/cmds/input/src/com/android/commands/input/Input.java
+++ b/cmds/input/src/com/android/commands/input/Input.java
@@ -91,9 +91,6 @@ public class Input {
if (args.length > start) {
for (int i = start; i < args.length; i++) {
int keyCode = KeyEvent.keyCodeFromString(args[i]);
- if (keyCode == KeyEvent.KEYCODE_UNKNOWN) {
- keyCode = KeyEvent.keyCodeFromString("KEYCODE_" + args[i]);
- }
sendKeyEvent(inputSource, keyCode, longpress);
}
return;
diff --git a/cmds/statsd/Android.mk b/cmds/statsd/Android.mk
index 091268e15ad4..ba2aaad875d0 100644
--- a/cmds/statsd/Android.mk
+++ b/cmds/statsd/Android.mk
@@ -35,16 +35,16 @@ statsd_common_src := \
src/config/ConfigListener.cpp \
src/config/ConfigManager.cpp \
src/external/Perfetto.cpp \
+ src/external/Perfprofd.cpp \
src/external/StatsPuller.cpp \
src/external/StatsCompanionServicePuller.cpp \
src/external/SubsystemSleepStatePuller.cpp \
src/external/ResourceHealthManagerPuller.cpp \
src/external/ResourceThermalManagerPuller.cpp \
- src/external/StatsPullerManagerImpl.cpp \
+ src/external/StatsPullerManager.cpp \
src/external/puller_util.cpp \
src/logd/LogEvent.cpp \
src/logd/LogListener.cpp \
- src/logd/LogReader.cpp \
src/matchers/CombinationLogMatchingTracker.cpp \
src/matchers/matcher_util.cpp \
src/matchers/SimpleLogMatchingTracker.cpp \
@@ -59,7 +59,6 @@ statsd_common_src := \
src/metrics/MetricsManager.cpp \
src/metrics/metrics_manager_util.cpp \
src/packages/UidMap.cpp \
- src/perfetto/perfetto_config.proto \
src/storage/StorageManager.cpp \
src/StatsLogProcessor.cpp \
src/StatsService.cpp \
@@ -70,6 +69,10 @@ statsd_common_src := \
src/guardrail/StatsdStats.cpp \
src/socket/StatsSocketListener.cpp
+# TODO(b/110563449): Once statsd is using a blueprint file, migrate to the proper filegroups.
+statsd_common_src += \
+ ../../../../system/extras/perfprofd/binder_interface/aidl/android/os/IPerfProfd.aidl
+
statsd_common_c_includes := \
$(LOCAL_PATH)/src \
$(LOCAL_PATH)/../../libs/services/include
@@ -78,8 +81,7 @@ statsd_common_aidl_includes := \
$(LOCAL_PATH)/../../core/java
statsd_common_static_libraries := \
- libhealthhalutils \
- libplatformprotos \
+ libhealthhalutils
statsd_common_shared_libraries := \
libbase \
@@ -141,10 +143,14 @@ LOCAL_SHARED_LIBRARIES := $(statsd_common_shared_libraries) \
LOCAL_MODULE_CLASS := EXECUTABLES
-# Enable sanitizer and allow very verbose printing on eng builds
+# Enable sanitizer ONLY on eng builds.
ifeq ($(TARGET_BUILD_VARIANT),eng)
LOCAL_CLANG := true
LOCAL_SANITIZE := address
+endif
+
+# Add a flag to enable stats log printing from statsd on debug builds.
+ifneq (,$(filter userdebug eng, $(TARGET_BUILD_VARIANT)))
LOCAL_CFLAGS += \
-DVERY_VERBOSE_PRINTING
endif
@@ -187,7 +193,6 @@ LOCAL_SRC_FILES := \
tests/external/puller_util_test.cpp \
tests/indexed_priority_queue_test.cpp \
tests/LogEntryMatcher_test.cpp \
- tests/LogReader_test.cpp \
tests/LogEvent_test.cpp \
tests/MetricsManager_test.cpp \
tests/StatsLogProcessor_test.cpp \
@@ -224,7 +229,8 @@ LOCAL_SRC_FILES := \
LOCAL_STATIC_LIBRARIES := \
$(statsd_common_static_libraries) \
- libgmock
+ libgmock \
+ libplatformprotos
LOCAL_PROTOC_OPTIMIZE_TYPE := full
@@ -246,7 +252,6 @@ LOCAL_MODULE := statsdprotolite
LOCAL_SRC_FILES := \
src/stats_log.proto \
src/statsd_config.proto \
- src/perfetto/perfetto_config.proto \
src/atoms.proto
LOCAL_PROTOC_OPTIMIZE_TYPE := lite
@@ -290,7 +295,6 @@ LOCAL_PROTOC_FLAGS := \
LOCAL_SHARED_LIBRARIES := $(statsd_common_shared_libraries) \
libprotobuf-cpp-full
-
LOCAL_STATIC_JAVA_LIBRARIES := \
platformprotoslite
@@ -308,7 +312,8 @@ LOCAL_CFLAGS += -Wno-varargs
LOCAL_AIDL_INCLUDES := $(statsd_common_aidl_includes)
LOCAL_STATIC_LIBRARIES := \
- $(statsd_common_static_libraries)
+ $(statsd_common_static_libraries) \
+ libplatformprotos
LOCAL_SHARED_LIBRARIES := $(statsd_common_shared_libraries) \
libgtest_prod \
diff --git a/cmds/statsd/benchmark/metric_util.cpp b/cmds/statsd/benchmark/metric_util.cpp
index 50ed18d3e2b0..067b6eddf254 100644
--- a/cmds/statsd/benchmark/metric_util.cpp
+++ b/cmds/statsd/benchmark/metric_util.cpp
@@ -362,11 +362,12 @@ std::unique_ptr<LogEvent> CreateSyncEndEvent(
sp<StatsLogProcessor> CreateStatsLogProcessor(const long timeBaseSec, const StatsdConfig& config,
const ConfigKey& key) {
sp<UidMap> uidMap = new UidMap();
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> periodicAlarmMonitor;
- sp<StatsLogProcessor> processor = new StatsLogProcessor(
- uidMap, anomalyAlarmMonitor, periodicAlarmMonitor, timeBaseSec * NS_PER_SEC,
- [](const ConfigKey&){return true;});
+ sp<StatsLogProcessor> processor =
+ new StatsLogProcessor(uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
+ timeBaseSec * NS_PER_SEC, [](const ConfigKey&) { return true; });
processor->OnConfigUpdated(timeBaseSec * NS_PER_SEC, key, config);
return processor;
}
diff --git a/cmds/statsd/src/FieldValue.cpp b/cmds/statsd/src/FieldValue.cpp
index f150f074c52b..7b6d29b905bd 100644
--- a/cmds/statsd/src/FieldValue.cpp
+++ b/cmds/statsd/src/FieldValue.cpp
@@ -141,6 +141,9 @@ Value::Value(const Value& from) {
case FLOAT:
float_value = from.float_value;
break;
+ case DOUBLE:
+ double_value = from.double_value;
+ break;
case STRING:
str_value = from.str_value;
break;
@@ -157,6 +160,8 @@ std::string Value::toString() const {
return std::to_string(long_value) + "[L]";
case FLOAT:
return std::to_string(float_value) + "[F]";
+ case DOUBLE:
+ return std::to_string(double_value) + "[D]";
case STRING:
return str_value + "[S]";
default:
@@ -174,6 +179,8 @@ bool Value::operator==(const Value& that) const {
return long_value == that.long_value;
case FLOAT:
return float_value == that.float_value;
+ case DOUBLE:
+ return double_value == that.double_value;
case STRING:
return str_value == that.str_value;
default:
@@ -190,6 +197,8 @@ bool Value::operator!=(const Value& that) const {
return long_value != that.long_value;
case FLOAT:
return float_value != that.float_value;
+ case DOUBLE:
+ return double_value != that.double_value;
case STRING:
return str_value != that.str_value;
default:
@@ -207,6 +216,8 @@ bool Value::operator<(const Value& that) const {
return long_value < that.long_value;
case FLOAT:
return float_value < that.float_value;
+ case DOUBLE:
+ return double_value < that.double_value;
case STRING:
return str_value < that.str_value;
default:
@@ -214,6 +225,142 @@ bool Value::operator<(const Value& that) const {
}
}
+bool Value::operator>(const Value& that) const {
+ if (type != that.getType()) return type > that.getType();
+
+ switch (type) {
+ case INT:
+ return int_value > that.int_value;
+ case LONG:
+ return long_value > that.long_value;
+ case FLOAT:
+ return float_value > that.float_value;
+ case DOUBLE:
+ return double_value > that.double_value;
+ case STRING:
+ return str_value > that.str_value;
+ default:
+ return false;
+ }
+}
+
+bool Value::operator>=(const Value& that) const {
+ if (type != that.getType()) return type >= that.getType();
+
+ switch (type) {
+ case INT:
+ return int_value >= that.int_value;
+ case LONG:
+ return long_value >= that.long_value;
+ case FLOAT:
+ return float_value >= that.float_value;
+ case DOUBLE:
+ return double_value >= that.double_value;
+ case STRING:
+ return str_value >= that.str_value;
+ default:
+ return false;
+ }
+}
+
+Value Value::operator-(const Value& that) const {
+ Value v;
+ if (type != that.type) {
+ ALOGE("Can't operate on different value types, %d, %d", type, that.type);
+ return v;
+ }
+ if (type == STRING) {
+ ALOGE("Can't operate on string value type");
+ return v;
+ }
+
+ switch (type) {
+ case INT:
+ v.setInt(int_value - that.int_value);
+ break;
+ case LONG:
+ v.setLong(long_value - that.long_value);
+ break;
+ case FLOAT:
+ v.setFloat(float_value - that.float_value);
+ break;
+ case DOUBLE:
+ v.setDouble(double_value - that.double_value);
+ break;
+ default:
+ break;
+ }
+ return v;
+}
+
+Value& Value::operator=(const Value& that) {
+ type = that.type;
+ switch (type) {
+ case INT:
+ int_value = that.int_value;
+ break;
+ case LONG:
+ long_value = that.long_value;
+ break;
+ case FLOAT:
+ float_value = that.float_value;
+ break;
+ case DOUBLE:
+ double_value = that.double_value;
+ break;
+ case STRING:
+ str_value = that.str_value;
+ break;
+ default:
+ break;
+ }
+ return *this;
+}
+
+Value& Value::operator+=(const Value& that) {
+ if (type != that.type) {
+ ALOGE("Can't operate on different value types, %d, %d", type, that.type);
+ return *this;
+ }
+ if (type == STRING) {
+ ALOGE("Can't operate on string value type");
+ return *this;
+ }
+
+ switch (type) {
+ case INT:
+ int_value += that.int_value;
+ break;
+ case LONG:
+ long_value += that.long_value;
+ break;
+ case FLOAT:
+ float_value += that.float_value;
+ break;
+ case DOUBLE:
+ double_value += that.double_value;
+ break;
+ default:
+ break;
+ }
+ return *this;
+}
+
+double Value::getDouble() const {
+ switch (type) {
+ case INT:
+ return int_value;
+ case LONG:
+ return long_value;
+ case FLOAT:
+ return float_value;
+ case DOUBLE:
+ return double_value;
+ default:
+ return 0;
+ }
+}
+
bool equalDimensions(const std::vector<Matcher>& dimension_a,
const std::vector<Matcher>& dimension_b) {
bool eq = dimension_a.size() == dimension_b.size();
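
The following is a minimal sketch, not part of the change, showing how the new DOUBLE support in Value is meant to behave; it assumes compilation inside the statsd tree with FieldValue.h on the include path, and the function name is purely illustrative.

    #include "FieldValue.h"

    using android::os::statsd::Value;

    void valueDoubleSketch() {
        Value start(1.5);           // DOUBLE-typed, via the new double constructor
        Value end(4.0);

        Value delta = end - start;  // same-type subtraction -> DOUBLE 2.5
        delta += start;             // in-place addition     -> DOUBLE 4.0

        // getDouble() widens INT/LONG/FLOAT values; DOUBLE is returned as-is.
        Value count;
        count.setInt(7);
        double widened = count.getDouble();  // 7.0

        // Mixing types is rejected: operator- logs an error and returns a
        // default-constructed Value, and operator+= leaves the left side untouched.
        Value rejected = delta - count;
        (void)widened;
        (void)rejected;
    }
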
diff --git a/cmds/statsd/src/FieldValue.h b/cmds/statsd/src/FieldValue.h
index 02c49b99c583..b1b885ec4efa 100644
--- a/cmds/statsd/src/FieldValue.h
+++ b/cmds/statsd/src/FieldValue.h
@@ -32,7 +32,7 @@ const int32_t kLastBitMask = 0x80;
const int32_t kClearLastBitDeco = 0x7f;
const int32_t kClearAllPositionMatcherMask = 0xffff00ff;
-enum Type { UNKNOWN, INT, LONG, FLOAT, STRING };
+enum Type { UNKNOWN, INT, LONG, FLOAT, DOUBLE, STRING };
int32_t getEncodedField(int32_t pos[], int32_t depth, bool includeDepth);
@@ -212,7 +212,7 @@ public:
 * the result is equal to the Matcher Field. That's a bitwise AND operation + check if 2 ints are
* equal. Nothing can beat the performance of this matching algorithm.
*
- * TODO: ADD EXAMPLE HERE.
+ * TODO(b/110561213): ADD EXAMPLE HERE.
*/
struct Matcher {
Matcher(const Field& matcher, int32_t mask) : mMatcher(matcher), mMask(mask){};
@@ -283,6 +283,11 @@ struct Value {
type = FLOAT;
}
+ Value(double v) {
+ double_value = v;
+ type = DOUBLE;
+ }
+
Value(const std::string& v) {
str_value = v;
type = STRING;
@@ -298,10 +303,21 @@ struct Value {
type = LONG;
}
+ void setFloat(float v) {
+ float_value = v;
+ type = FLOAT;
+ }
+
+ void setDouble(double v) {
+ double_value = v;
+ type = DOUBLE;
+ }
+
union {
int32_t int_value;
int64_t long_value;
float float_value;
+ double double_value;
};
std::string str_value;
@@ -313,12 +329,19 @@ struct Value {
return type;
}
+ double getDouble() const;
+
Value(const Value& from);
bool operator==(const Value& that) const;
bool operator!=(const Value& that) const;
bool operator<(const Value& that) const;
+ bool operator>(const Value& that) const;
+ bool operator>=(const Value& that) const;
+ Value operator-(const Value& that) const;
+ Value& operator+=(const Value& that);
+ Value& operator=(const Value& that);
};
/**
diff --git a/cmds/statsd/src/HashableDimensionKey.cpp b/cmds/statsd/src/HashableDimensionKey.cpp
index 71030345b0aa..af8b3af6ea61 100644
--- a/cmds/statsd/src/HashableDimensionKey.cpp
+++ b/cmds/statsd/src/HashableDimensionKey.cpp
@@ -65,8 +65,6 @@ bool filterValues(const vector<Matcher>& matcherFields, const vector<FieldValue>
for (const auto& value : values) {
for (size_t i = 0; i < matcherFields.size(); ++i) {
const auto& matcher = matcherFields[i];
- // TODO: potential optimization here to break early because all fields are naturally
- // sorted.
if (value.mField.matches(matcher)) {
output->addValue(value);
output->mutableValue(num_matches)->mField.setTag(value.mField.getTag());
@@ -196,4 +194,4 @@ bool MetricDimensionKey::operator<(const MetricDimensionKey& that) const {
} // namespace statsd
} // namespace os
-} // namespace android
\ No newline at end of file
+} // namespace android
diff --git a/cmds/statsd/src/StatsLogProcessor.cpp b/cmds/statsd/src/StatsLogProcessor.cpp
index e7f1caf26932..8e02f9cb7628 100644
--- a/cmds/statsd/src/StatsLogProcessor.cpp
+++ b/cmds/statsd/src/StatsLogProcessor.cpp
@@ -72,18 +72,20 @@ const int FIELD_ID_STRINGS = 9;
#define STATS_DATA_DIR "/data/misc/stats-data"
StatsLogProcessor::StatsLogProcessor(const sp<UidMap>& uidMap,
+ const sp<StatsPullerManager>& pullerManager,
const sp<AlarmMonitor>& anomalyAlarmMonitor,
const sp<AlarmMonitor>& periodicAlarmMonitor,
const int64_t timeBaseNs,
const std::function<bool(const ConfigKey&)>& sendBroadcast)
: mUidMap(uidMap),
+ mPullerManager(pullerManager),
mAnomalyAlarmMonitor(anomalyAlarmMonitor),
mPeriodicAlarmMonitor(periodicAlarmMonitor),
mSendBroadcast(sendBroadcast),
mTimeBaseNs(timeBaseNs),
mLargestTimestampSeen(0),
mLastTimestampSeen(0) {
- mStatsPullerManager.ForceClearPullerCache();
+ mPullerManager->ForceClearPullerCache();
}
StatsLogProcessor::~StatsLogProcessor() {
@@ -152,17 +154,13 @@ void StatsLogProcessor::onIsolatedUidChangedEventLocked(const LogEvent& event) {
if (is_create) {
mUidMap->assignIsolatedUid(isolated_uid, parent_uid);
} else {
- mUidMap->removeIsolatedUid(isolated_uid, parent_uid);
+ mUidMap->removeIsolatedUid(isolated_uid);
}
} else {
ALOGE("Failed to parse uid in the isolated uid change event.");
}
}
-void StatsLogProcessor::OnLogEvent(LogEvent* event) {
- OnLogEvent(event, false);
-}
-
void StatsLogProcessor::resetConfigs() {
std::lock_guard<std::mutex> lock(mMetricsMutex);
resetConfigsLocked(getElapsedRealtimeNs());
@@ -176,7 +174,7 @@ void StatsLogProcessor::resetConfigsLocked(const int64_t timestampNs) {
resetConfigsLocked(timestampNs, configKeys);
}
-void StatsLogProcessor::OnLogEvent(LogEvent* event, bool reconnected) {
+void StatsLogProcessor::OnLogEvent(LogEvent* event) {
std::lock_guard<std::mutex> lock(mMetricsMutex);
#ifdef VERY_VERBOSE_PRINTING
@@ -186,41 +184,6 @@ void StatsLogProcessor::OnLogEvent(LogEvent* event, bool reconnected) {
#endif
const int64_t currentTimestampNs = event->GetElapsedTimestampNs();
- if (reconnected && mLastTimestampSeen != 0) {
- // LogReader tells us the connection has just been reset. Now we need
- // to enter reconnection state to find the last CP.
- mInReconnection = true;
- }
-
- if (mInReconnection) {
- // We see the checkpoint
- if (currentTimestampNs == mLastTimestampSeen) {
- mInReconnection = false;
- // Found the CP. ignore this event, and we will start to read from next event.
- return;
- }
- if (currentTimestampNs > mLargestTimestampSeen) {
- // We see a new log but CP has not been found yet. Give up now.
- mLogLossCount++;
- mInReconnection = false;
- StatsdStats::getInstance().noteLogLost(currentTimestampNs);
- // Persist the data before we reset. Do we want this?
- WriteDataToDiskLocked(CONFIG_RESET);
- // We see fresher event before we see the checkpoint. We might have lost data.
- // The best we can do is to reset.
- resetConfigsLocked(currentTimestampNs);
- } else {
- // Still in search of the CP. Keep going.
- return;
- }
- }
-
- mLogCount++;
- mLastTimestampSeen = currentTimestampNs;
- if (mLargestTimestampSeen < currentTimestampNs) {
- mLargestTimestampSeen = currentTimestampNs;
- }
-
resetIfConfigTtlExpiredLocked(currentTimestampNs);
StatsdStats::getInstance().noteAtomLogged(
@@ -238,7 +201,7 @@ void StatsLogProcessor::OnLogEvent(LogEvent* event, bool reconnected) {
int64_t curTimeSec = getElapsedRealtimeSec();
if (curTimeSec - mLastPullerCacheClearTimeSec > StatsdStats::kPullerCacheClearIntervalSec) {
- mStatsPullerManager.ClearPullerCacheIfNecessary(curTimeSec * NS_PER_SEC);
+ mPullerManager->ClearPullerCacheIfNecessary(curTimeSec * NS_PER_SEC);
mLastPullerCacheClearTimeSec = curTimeSec;
}
@@ -266,8 +229,8 @@ void StatsLogProcessor::OnConfigUpdatedLocked(
const int64_t timestampNs, const ConfigKey& key, const StatsdConfig& config) {
VLOG("Updated configuration for key %s", key.ToString().c_str());
sp<MetricsManager> newMetricsManager =
- new MetricsManager(key, config, mTimeBaseNs, timestampNs, mUidMap,
- mAnomalyAlarmMonitor, mPeriodicAlarmMonitor);
+ new MetricsManager(key, config, mTimeBaseNs, timestampNs, mUidMap, mPullerManager,
+ mAnomalyAlarmMonitor, mPeriodicAlarmMonitor);
if (newMetricsManager->isConfigValid()) {
mUidMap->OnConfigUpdated(key);
if (newMetricsManager->shouldAddUidMapListener()) {
@@ -453,7 +416,7 @@ void StatsLogProcessor::OnConfigRemoved(const ConfigKey& key) {
mLastBroadcastTimes.erase(key);
if (mMetricsManagers.empty()) {
- mStatsPullerManager.ForceClearPullerCache();
+ mPullerManager->ForceClearPullerCache();
}
}
@@ -538,7 +501,7 @@ void StatsLogProcessor::WriteDataToDisk(const DumpReportReason dumpReportReason)
void StatsLogProcessor::informPullAlarmFired(const int64_t timestampNs) {
std::lock_guard<std::mutex> lock(mMetricsMutex);
- mStatsPullerManager.OnAlarmFired(timestampNs);
+ mPullerManager->OnAlarmFired(timestampNs);
}
int64_t StatsLogProcessor::getLastReportTimeNs(const ConfigKey& key) {
diff --git a/cmds/statsd/src/StatsLogProcessor.h b/cmds/statsd/src/StatsLogProcessor.h
index b175b3c544b5..df80b8e6e052 100644
--- a/cmds/statsd/src/StatsLogProcessor.h
+++ b/cmds/statsd/src/StatsLogProcessor.h
@@ -18,7 +18,6 @@
#include <gtest/gtest_prod.h>
#include "config/ConfigListener.h"
-#include "logd/LogReader.h"
#include "metrics/MetricsManager.h"
#include "packages/UidMap.h"
#include "external/StatsPullerManager.h"
@@ -45,15 +44,13 @@ enum DumpReportReason {
class StatsLogProcessor : public ConfigListener {
public:
- StatsLogProcessor(const sp<UidMap>& uidMap, const sp<AlarmMonitor>& anomalyAlarmMonitor,
+ StatsLogProcessor(const sp<UidMap>& uidMap, const sp<StatsPullerManager>& pullerManager,
+ const sp<AlarmMonitor>& anomalyAlarmMonitor,
const sp<AlarmMonitor>& subscriberTriggerAlarmMonitor,
const int64_t timeBaseNs,
const std::function<bool(const ConfigKey&)>& sendBroadcast);
virtual ~StatsLogProcessor();
- void OnLogEvent(LogEvent* event, bool reconnectionStarts);
-
- // for testing only.
void OnLogEvent(LogEvent* event);
void OnConfigUpdated(const int64_t timestampNs, const ConfigKey& key,
@@ -126,7 +123,7 @@ private:
sp<UidMap> mUidMap; // Reference to the UidMap to lookup app name and version for each uid.
- StatsPullerManager mStatsPullerManager;
+ sp<StatsPullerManager> mPullerManager; // Reference to StatsPullerManager
sp<AlarmMonitor> mAnomalyAlarmMonitor;
@@ -173,14 +170,6 @@ private:
int64_t mLastTimestampSeen = 0;
- bool mInReconnection = false;
-
- // Processed log count
- uint64_t mLogCount = 0;
-
- // Log loss detected count
- int mLogLossCount = 0;
-
long mLastPullerCacheClearTimeSec = 0;
#ifdef VERY_VERBOSE_PRINTING
diff --git a/cmds/statsd/src/StatsService.cpp b/cmds/statsd/src/StatsService.cpp
index 10c04f67ca05..9a7934576abd 100644
--- a/cmds/statsd/src/StatsService.cpp
+++ b/cmds/statsd/src/StatsService.cpp
@@ -150,25 +150,26 @@ StatsService::StatsService(const sp<Looper>& handlerLooper)
})) {
mUidMap = new UidMap();
+ mPullerManager = new StatsPullerManager();
StatsPuller::SetUidMap(mUidMap);
mConfigManager = new ConfigManager();
- mProcessor = new StatsLogProcessor(mUidMap, mAnomalyAlarmMonitor, mPeriodicAlarmMonitor,
- getElapsedRealtimeNs(), [this](const ConfigKey& key) {
- sp<IStatsCompanionService> sc = getStatsCompanionService();
- auto receiver = mConfigManager->GetConfigReceiver(key);
- if (sc == nullptr) {
- VLOG("Could not find StatsCompanionService");
- return false;
- } else if (receiver == nullptr) {
- VLOG("Statscompanion could not find a broadcast receiver for %s",
- key.ToString().c_str());
- return false;
- } else {
- sc->sendDataBroadcast(receiver, mProcessor->getLastReportTimeNs(key));
- return true;
- }
- }
- );
+ mProcessor = new StatsLogProcessor(
+ mUidMap, mPullerManager, mAnomalyAlarmMonitor, mPeriodicAlarmMonitor,
+ getElapsedRealtimeNs(), [this](const ConfigKey& key) {
+ sp<IStatsCompanionService> sc = getStatsCompanionService();
+ auto receiver = mConfigManager->GetConfigReceiver(key);
+ if (sc == nullptr) {
+ VLOG("Could not find StatsCompanionService");
+ return false;
+ } else if (receiver == nullptr) {
+ VLOG("Statscompanion could not find a broadcast receiver for %s",
+ key.ToString().c_str());
+ return false;
+ } else {
+ sc->sendDataBroadcast(receiver, mProcessor->getLastReportTimeNs(key));
+ return true;
+ }
+ });
mConfigManager->AddListener(mProcessor);
@@ -402,7 +403,7 @@ void StatsService::print_cmd_help(FILE* out) {
fprintf(out, "\n *Note: If both UID and NAME are omitted then all configs will\n");
fprintf(out, "\n be removed from memory and disk!\n");
fprintf(out, "\n");
- fprintf(out, "usage: adb shell cmd stats dump-report [UID] NAME [--proto]\n");
+ fprintf(out, "usage: adb shell cmd stats dump-report [UID] NAME [--include_current_bucket] [--proto]\n");
fprintf(out, " Dump all metric data for a configuration.\n");
fprintf(out, " UID The uid of the configuration. It is only possible to pass\n");
fprintf(out, " the UID parameter on eng builds. If UID is omitted the\n");
@@ -439,7 +440,6 @@ status_t StatsService::cmd_trigger_broadcast(FILE* out, Vector<String8>& args) {
if (argCount == 2) {
// Automatically pick the UID
uid = IPCThreadState::self()->getCallingUid();
- // TODO: What if this isn't a binder call? Should we fail?
name.assign(args[1].c_str(), args[1].size());
good = true;
} else if (argCount == 3) {
@@ -492,7 +492,6 @@ status_t StatsService::cmd_config(FILE* in, FILE* out, FILE* err, Vector<String8
if (argCount == 3) {
// Automatically pick the UID
uid = IPCThreadState::self()->getCallingUid();
- // TODO: What if this isn't a binder call? Should we fail?
name.assign(args[2].c_str(), args[2].size());
good = true;
} else if (argCount == 4) {
@@ -568,16 +567,20 @@ status_t StatsService::cmd_dump_report(FILE* out, FILE* err, const Vector<String
int argCount = args.size();
bool good = false;
bool proto = false;
+ bool includeCurrentBucket = false;
int uid;
string name;
if (!std::strcmp("--proto", args[argCount-1].c_str())) {
proto = true;
argCount -= 1;
}
+ if (!std::strcmp("--include_current_bucket", args[argCount-1].c_str())) {
+ includeCurrentBucket = true;
+ argCount -= 1;
+ }
if (argCount == 2) {
// Automatically pick the UID
uid = IPCThreadState::self()->getCallingUid();
- // TODO: What if this isn't a binder call? Should we fail?
name.assign(args[1].c_str(), args[1].size());
good = true;
} else if (argCount == 3) {
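
Note on the flag handling above: the optional flags are peeled off the end of the argument list, "--proto" first and then "--include_current_bucket", so when both are supplied "--proto" must come last, exactly as the updated usage line shows. An illustrative invocation (UID 1000 and config id 12345 are placeholder values):

    adb shell cmd stats dump-report 1000 12345 --include_current_bucket --proto
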
@@ -602,8 +605,7 @@ status_t StatsService::cmd_dump_report(FILE* out, FILE* err, const Vector<String
if (good) {
vector<uint8_t> data;
mProcessor->onDumpReport(ConfigKey(uid, StrToInt64(name)), getElapsedRealtimeNs(),
- false /* include_current_bucket*/, ADB_DUMP, &data);
- // TODO: print the returned StatsLogReport to file instead of printing to logcat.
+ includeCurrentBucket, ADB_DUMP, &data);
if (proto) {
for (size_t i = 0; i < data.size(); i ++) {
fprintf(out, "%c", data[i]);
@@ -711,7 +713,7 @@ status_t StatsService::cmd_log_app_breadcrumb(FILE* out, const Vector<String8>&
status_t StatsService::cmd_print_pulled_metrics(FILE* out, const Vector<String8>& args) {
int s = atoi(args[1].c_str());
vector<shared_ptr<LogEvent> > stats;
- if (mStatsPullerManager.Pull(s, getElapsedRealtimeNs(), &stats)) {
+ if (mPullerManager->Pull(s, getElapsedRealtimeNs(), &stats)) {
for (const auto& it : stats) {
fprintf(out, "Pull from %d: %s\n", s, it->ToString().c_str());
}
@@ -739,7 +741,7 @@ status_t StatsService::cmd_clear_puller_cache(FILE* out) {
VLOG("StatsService::cmd_clear_puller_cache with Pid %i, Uid %i",
ipc->getCallingPid(), ipc->getCallingUid());
if (checkCallingPermission(String16(kPermissionDump))) {
- int cleared = mStatsPullerManager.ForceClearPullerCache();
+ int cleared = mPullerManager->ForceClearPullerCache();
fprintf(out, "Puller removed %d cached data!\n", cleared);
return NO_ERROR;
} else {
@@ -870,7 +872,7 @@ Status StatsService::statsCompanionReady() {
}
VLOG("StatsService::statsCompanionReady linking to statsCompanion.");
IInterface::asBinder(statsCompanion)->linkToDeath(this);
- mStatsPullerManager.SetStatsCompanionService(statsCompanion);
+ mPullerManager->SetStatsCompanionService(statsCompanion);
mAnomalyAlarmMonitor->setStatsCompanionService(statsCompanion);
mPeriodicAlarmMonitor->setStatsCompanionService(statsCompanion);
SubscriberReporter::getInstance().setStatsCompanionService(statsCompanion);
@@ -881,8 +883,8 @@ void StatsService::Startup() {
mConfigManager->Startup();
}
-void StatsService::OnLogEvent(LogEvent* event, bool reconnectionStarts) {
- mProcessor->OnLogEvent(event, reconnectionStarts);
+void StatsService::OnLogEvent(LogEvent* event) {
+ mProcessor->OnLogEvent(event);
}
Status StatsService::getData(int64_t key, const String16& packageName, vector<uint8_t>* output) {
@@ -1007,14 +1009,14 @@ void StatsService::binderDied(const wp <IBinder>& who) {
ALOGW("statscompanion service died");
StatsdStats::getInstance().noteSystemServerRestart(getWallClockSec());
if (mProcessor != nullptr) {
- ALOGW("Reset statsd upon system server restars.");
+ ALOGW("Reset statsd upon system server restarts.");
mProcessor->WriteDataToDisk(STATSCOMPANION_DIED);
mProcessor->resetConfigs();
}
mAnomalyAlarmMonitor->setStatsCompanionService(nullptr);
mPeriodicAlarmMonitor->setStatsCompanionService(nullptr);
SubscriberReporter::getInstance().setStatsCompanionService(nullptr);
- mStatsPullerManager.SetStatsCompanionService(nullptr);
+ mPullerManager->SetStatsCompanionService(nullptr);
}
} // namespace statsd
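Note on the new dump-report flag: optional flags are peeled off the end of the argument list, and "--proto" is checked before "--include_current_bucket", so when both are given "--proto" has to be the last argument. A minimal standalone sketch of that trailing-flag parsing pattern (names are illustrative and not part of this change):

    #include <string>
    #include <vector>

    // Sketch of the trailing-flag parsing used by cmd_dump_report above:
    // optional flags are stripped from the end of the argument vector, so
    // "--include_current_bucket --proto" is the order that enables both.
    static void parseDumpReportFlags(const std::vector<std::string>& args,
                                     size_t* argCount, bool* proto,
                                     bool* includeCurrentBucket) {
        *argCount = args.size();
        if (*argCount > 0 && args[*argCount - 1] == "--proto") {
            *proto = true;
            (*argCount)--;
        }
        if (*argCount > 0 && args[*argCount - 1] == "--include_current_bucket") {
            *includeCurrentBucket = true;
            (*argCount)--;
        }
        // args[0 .. *argCount) now holds the command name, UID and NAME.
    }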
diff --git a/cmds/statsd/src/StatsService.h b/cmds/statsd/src/StatsService.h
index b3a477645b73..613f509e915a 100644
--- a/cmds/statsd/src/StatsService.h
+++ b/cmds/statsd/src/StatsService.h
@@ -22,6 +22,7 @@
#include "anomaly/AlarmMonitor.h"
#include "config/ConfigManager.h"
#include "external/StatsPullerManager.h"
+#include "logd/LogListener.h"
#include "packages/UidMap.h"
#include "statscompanion_util.h"
@@ -49,7 +50,6 @@ public:
virtual ~StatsService();
/** The anomaly alarm registered with AlarmManager won't be updated by less than this. */
- // TODO: Consider making this configurable. And choose a good number.
const uint32_t MIN_DIFF_TO_UPDATE_REGISTERED_ALARM_SECS = 5;
virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
@@ -76,7 +76,7 @@ public:
/**
* Called by LogReader when there's a log event to process.
*/
- virtual void OnLogEvent(LogEvent* event, bool reconnectionStarts);
+ virtual void OnLogEvent(LogEvent* event);
/**
* Binder call for clients to request data for this configuration key.
@@ -246,9 +246,9 @@ private:
sp<UidMap> mUidMap;
/**
- * Fetches external metrics.
+ * Fetches external metrics
*/
- StatsPullerManager mStatsPullerManager;
+ sp<StatsPullerManager> mPullerManager;
/**
* Tracks the configurations that have been passed to statsd.
diff --git a/cmds/statsd/src/anomaly/AlarmMonitor.cpp b/cmds/statsd/src/anomaly/AlarmMonitor.cpp
index 78f0c2b09537..bc36dadacddb 100644
--- a/cmds/statsd/src/anomaly/AlarmMonitor.cpp
+++ b/cmds/statsd/src/anomaly/AlarmMonitor.cpp
@@ -60,7 +60,7 @@ void AlarmMonitor::add(sp<const InternalAlarm> alarm) {
ALOGW("Asked to add a 0-time alarm.");
return;
}
- // TODO: Ensure that refractory period is respected.
+ // TODO(b/110563466): Ensure that refractory period is respected.
VLOG("Adding alarm with time %u", alarm->timestampSec);
mPq.push(alarm);
if (mRegisteredAlarmTimeSec < 1 ||
diff --git a/cmds/statsd/src/anomaly/AnomalyTracker.cpp b/cmds/statsd/src/anomaly/AnomalyTracker.cpp
index f32efee56d64..ee111cddcfd7 100644
--- a/cmds/statsd/src/anomaly/AnomalyTracker.cpp
+++ b/cmds/statsd/src/anomaly/AnomalyTracker.cpp
@@ -208,7 +208,8 @@ bool AnomalyTracker::detectAnomaly(const int64_t& currentBucketNum,
}
void AnomalyTracker::declareAnomaly(const int64_t& timestampNs, const MetricDimensionKey& key) {
- // TODO: Why receive timestamp? RefractoryPeriod should always be based on real time right now.
+ // TODO(b/110563466): Why receive timestamp? RefractoryPeriod should always be based on
+ // real time right now.
if (isInRefractoryPeriod(timestampNs, key)) {
VLOG("Skipping anomaly declaration since within refractory period");
return;
@@ -216,7 +217,8 @@ void AnomalyTracker::declareAnomaly(const int64_t& timestampNs, const MetricDime
if (mAlert.has_refractory_period_secs()) {
mRefractoryPeriodEndsSec[key] = ((timestampNs + NS_PER_SEC - 1) / NS_PER_SEC) // round up
+ mAlert.refractory_period_secs();
- // TODO: If we had access to the bucket_size_millis, consider calling resetStorage()
+ // TODO(b/110563466): If we had access to the bucket_size_millis, consider
+ // calling resetStorage()
// if (mAlert.refractory_period_secs() > mNumOfPastBuckets * bucketSizeNs) {resetStorage();}
}
@@ -230,7 +232,7 @@ void AnomalyTracker::declareAnomaly(const int64_t& timestampNs, const MetricDime
StatsdStats::getInstance().noteAnomalyDeclared(mConfigKey, mAlert.id());
- // TODO: This should also take in the const MetricDimensionKey& key?
+ // TODO(b/110564268): This should also take in the const MetricDimensionKey& key?
android::util::stats_write(android::util::ANOMALY_DETECTED, mConfigKey.GetUid(),
mConfigKey.GetId(), mAlert.id());
}
diff --git a/cmds/statsd/src/anomaly/subscriber_util.cpp b/cmds/statsd/src/anomaly/subscriber_util.cpp
index ee9e9c01a60a..9d37cdb2d4d7 100644
--- a/cmds/statsd/src/anomaly/subscriber_util.cpp
+++ b/cmds/statsd/src/anomaly/subscriber_util.cpp
@@ -22,6 +22,7 @@
#include <binder/IServiceManager.h>
#include "external/Perfetto.h"
+#include "external/Perfprofd.h"
#include "frameworks/base/libs/incident/proto/android/os/header.pb.h"
#include "subscriber/IncidentdReporter.h"
#include "subscriber/SubscriberReporter.h"
@@ -64,6 +65,12 @@ void triggerSubscribers(const int64_t rule_id,
SubscriberReporter::getInstance().alertBroadcastSubscriber(configKey, subscription,
dimensionKey);
break;
+ case Subscription::SubscriberInformationCase::kPerfprofdDetails:
+ if (!CollectPerfprofdTraceAndUploadToDropbox(subscription.perfprofd_details(),
+ rule_id, configKey)) {
+ ALOGW("Failed to generate perfprofd traces.");
+ }
+ break;
default:
break;
}
diff --git a/cmds/statsd/src/atoms.proto b/cmds/statsd/src/atoms.proto
index ab9c7e81a88e..97891b0929b4 100644
--- a/cmds/statsd/src/atoms.proto
+++ b/cmds/statsd/src/atoms.proto
@@ -48,7 +48,7 @@ message Atom {
oneof pushed {
// For StatsLog reasons, 1 is illegal and will not work. Must start at 2.
BleScanStateChanged ble_scan_state_changed = 2;
- // 3 is available for use
+ ProcessStateChanged process_state_changed = 3;
BleScanResultReceived ble_scan_result_received = 4;
SensorStateChanged sensor_state_changed = 5;
GpsScanStateChanged gps_scan_state_changed = 6;
@@ -59,7 +59,12 @@ message Atom {
LongPartialWakelockStateChanged long_partial_wakelock_state_changed = 11;
MobileRadioPowerStateChanged mobile_radio_power_state_changed = 12;
WifiRadioPowerStateChanged wifi_radio_power_state_changed = 13;
- // 14 - 19 are available
+ ActivityManagerSleepStateChanged activity_manager_sleep_state_changed = 14;
+ MemoryFactorStateChanged memory_factor_state_changed = 15;
+ ExcessiveCpuUsageReported excessive_cpu_usage_reported = 16;
+ CachedKillReported cached_kill_reported = 17;
+ ProcessMemoryStatReported process_memory_stat_reported = 18;
+ // 19 is available
BatterySaverModeStateChanged battery_saver_mode_state_changed = 20;
DeviceIdleModeStateChanged device_idle_mode_state_changed = 21;
DeviceIdlingModeStateChanged device_idling_mode_state_changed = 22;
@@ -121,12 +126,13 @@ message Atom {
ANROccurred anr_occurred = 79;
WTFOccurred wtf_occurred = 80;
LowMemReported low_mem_reported = 81;
-
-
+ GenericAtom generic_atom = 82;
+ KeyValuePairsAtom key_value_pairs_atom = 83;
+ VibratorStateChanged vibrator_state_changed = 84;
}
// Pulled events will start at field 10000.
- // Next: 10022
+ // Next: 10024
oneof pulled {
WifiBytesTransfer wifi_bytes_transfer = 10000;
WifiBytesTransferByFgBg wifi_bytes_transfer_by_fg_bg = 10001;
@@ -150,6 +156,8 @@ message Atom {
RemainingBatteryCapacity remaining_battery_capacity = 10019;
FullBatteryCapacity full_battery_capacity = 10020;
Temperature temperature = 10021;
+ BinderCalls binder_calls = 10022;
+ BinderCallsExceptions binder_calls_exceptions = 10023;
}
// DO NOT USE field numbers above 100,000 in AOSP. Field numbers above
@@ -171,6 +179,20 @@ message AttributionNode {
optional string tag = 2;
}
+message KeyValuePair {
+ optional int32 key = 1;
+ oneof value {
+ int64 value_int = 2;
+ string value_str = 3;
+ float value_float = 4;
+ }
+}
+
+message KeyValuePairsAtom {
+ optional int32 uid = 1;
+ repeated KeyValuePair pairs = 2;
+}
+
/*
* *****************************************************************************
* Below are all of the individual atoms that are logged by Android via statsd.
@@ -209,7 +231,8 @@ message ScreenStateChanged {
}
/**
- * Logs that the state of a process state, as per the activity manager, has changed.
+ * Logs that the process state of the uid, as determined by ActivityManager
+ * (i.e. the highest process state of that uid's processes) has changed.
*
* Logged from:
* frameworks/base/services/core/java/com/android/server/am/BatteryStatsService.java
@@ -222,6 +245,112 @@ message UidProcessStateChanged {
}
/**
+ * Logs a process state change for a single process, as per the activity manager.
+ *
+ * Logged from:
+ * frameworks/base/services/core/java/com/android/server/am/ProcessRecord.java
+ */
+message ProcessStateChanged {
+ optional int32 uid = 1;
+ optional string process_name = 2;
+ optional string package_name = 3;
+ // TODO: remove this when validation is done
+ optional int64 version = 5;
+ // The state, from frameworks/base/core/proto/android/app/enums.proto.
+ optional android.app.ProcessStateEnum state = 4;
+}
+
+/**
+ * Logs when ActivityManagerService sleep state is changed.
+ *
+ * Logged from:
+ * frameworks/base/services/core/java/com/android/server/am/ActivityTaskManagerService.java
+ */
+message ActivityManagerSleepStateChanged {
+ // TODO: import frameworks proto
+ enum State {
+ UNKNOWN = 0;
+ ASLEEP = 1;
+ AWAKE = 2;
+ }
+ optional State state = 1 [(stateFieldOption).option = EXCLUSIVE];
+}
+
+/**
+ * Logs when system memory state changes.
+ *
+ * Logged from:
+ * frameworks/base/services/core/java/com/android/server/am/ActivityManagerService.java
+ */
+message MemoryFactorStateChanged {
+ // TODO: import frameworks proto
+ enum State {
+ MEMORY_UNKNOWN = 0;
+ NORMAL = 1; // normal.
+ MODERATE = 2; // moderate memory pressure.
+ LOW = 3; // low memory.
+ CRITICAL = 4; // critical memory.
+
+ }
+ optional State factor = 1 [(stateFieldOption).option = EXCLUSIVE];
+}
+
+/**
+ * Logs when an app is using too much CPU, according to ActivityManagerService.
+ *
+ * Logged from:
+ * frameworks/base/services/core/java/com/android/server/am/ActivityManagerService.java
+ */
+message ExcessiveCpuUsageReported {
+ optional int32 uid = 1;
+ optional string process_name = 2;
+ optional string package_name = 3;
+ // package version. TODO: remove this when validation is done
+ optional int64 version = 4;
+}
+
+/**
+ * Logs when a cached process is killed, along with its pss.
+ *
+ * Logged from:
+ * frameworks/base/services/core/java/com/android/server/am/ActivityManagerService.java
+ */
+message CachedKillReported {
+ optional int32 uid = 1;
+ optional string process_name = 2;
+ optional string package_name = 3;
+ // TODO: remove this when validation is done
+ optional int64 version = 5;
+ optional int64 pss = 4;
+}
+
+/**
+ * Logs when the memory stats of a process are reported.
+ *
+ * Logged from:
+ * frameworks/base/services/core/java/com/android/server/am/ProcessRecord.java
+ */
+message ProcessMemoryStatReported {
+ optional int32 uid = 1;
+ optional string process_name = 2;
+ optional string package_name = 3;
+ // TODO: remove this when validation is done
+ optional int64 version = 9;
+ optional int64 pss = 4;
+ optional int64 uss = 5;
+ optional int64 rss = 6;
+ enum Type {
+ ADD_PSS_INTERNAL_SINGLE = 0;
+ ADD_PSS_INTERNAL_ALL_MEM = 1;
+ ADD_PSS_INTERNAL_ALL_POLL = 2;
+ ADD_PSS_EXTERNAL = 3;
+ ADD_PSS_EXTERNAL_SLOW = 4;
+ }
+ optional Type type = 7;
+ optional int64 duration = 8;
+}
+
+/**
* Logs that a process started, finished, crashed, or ANRed.
*
* Logged from:
@@ -579,7 +708,7 @@ message WakeupAlarmOccurred {
* Changing from LOW to MEDIUM or HIGH can be considered the app waking the mobile radio.
*
* Logged from:
- * frameworks/base/core/java/com/android/internal/os/BatteryStatsImpl.java
+ * frameworks/base/services/core/java/com/android/server/NetworkManagementService.java
*/
message MobileRadioPowerStateChanged {
repeated AttributionNode attribution_node = 1;
@@ -593,7 +722,7 @@ message MobileRadioPowerStateChanged {
* Changing from LOW to MEDIUM or HIGH can be considered the app waking the wifi radio.
*
* Logged from:
- * frameworks/base/core/java/com/android/internal/os/BatteryStatsImpl.java
+ * frameworks/base/services/core/java/com/android/server/NetworkManagementService.java
*/
message WifiRadioPowerStateChanged {
repeated AttributionNode attribution_node = 1;
@@ -1282,6 +1411,25 @@ message ANROccurred {
optional ForegroundState foreground_state = 6;
}
+/**
+ * Logs when the vibrator state changes.
+ * Logged from:
+ * frameworks/base/services/core/java/com/android/server/VibratorService.java
+ */
+message VibratorStateChanged {
+ repeated AttributionNode attribution_node = 1;
+
+ enum State {
+ OFF = 0;
+ ON = 1;
+ }
+ optional State state = 2;
+
+ // Duration (in milliseconds) requested to keep the vibrator on.
+ // Only applicable for State == ON.
+ optional int64 duration_millis = 3;
+}
+
/*
* Allows other apps to push events into statsd.
* Logged from:
@@ -1484,6 +1632,7 @@ message ForegroundServiceStateChanged {
message IsolatedUidChanged {
// The host UID. Generally, we should attribute metrics from the isolated uid to the host uid.
// NOTE: DO NOT annotate uid field in this atom. This atom is specially handled in statsd.
+ // This field is ignored when event == REMOVED.
optional int32 parent_uid = 1;
optional int32 isolated_uid = 2;
@@ -1613,6 +1762,17 @@ message AppDied {
optional uint64 timestamp_millis = 1 [(stateFieldOption).option = EXCLUSIVE];
}
+/**
+ * An atom for generic metrics logging. Available from Android Q.
+ */
+message GenericAtom {
+ // The uid of the application that sent this custom atom.
+ optional int32 uid = 1 [(is_uid) = true];
+
+ // An event_id indicates the type of event.
+ optional int32 event_id = 2;
+}
+
//////////////////////////////////////////////////////////////////////
// Pulled atoms below this line //
//////////////////////////////////////////////////////////////////////
@@ -1974,3 +2134,71 @@ message Temperature {
// Temperature in tenths of a degree C.
optional int32 temperature_dC = 3;
}
+
+/**
+ * Pulls the statistics of calls to Binder.
+ *
+ * Binder stats are cumulative from boot unless somebody resets the data using
+ * > adb shell dumpsys binder_calls_stats --reset
+ *
+ * Next tag: 14
+ */
+message BinderCalls {
+ optional int32 uid = 1 [(is_uid) = true];
+ // Fully qualified class name of the API call.
+ //
+ // This is a system server class name.
+ //
+ // TODO(gaillard): figure out whether binder call stats include data from isolated uids; if a uid
+ // gets recycled and we have isolated uids, we might attribute the data incorrectly.
+ // TODO(gaillard): the dimension cardinality is high; figure out if we should drop the less
+ // commonly used APIs.
+ optional string service_class_name = 2;
+ // Method name of the API call. It can also be a transaction code if we cannot
+ // resolve it to a name. See Binder#getTransactionName.
+ //
+ // This is a system server method name.
+ optional string service_method_name = 3;
+ // Total number of API calls.
+ optional int64 call_count = 4;
+ // True if the screen was interactive (PowerManager#isInteractive) at the end of the call.
+ optional bool screen_interactive = 13;
+ // Total number of API calls we have data recorded for. If we collected data for all the calls,
+ // call_count will be equal to recorded_call_count.
+ //
+ // If recorded_call_count is different than call_count, it means data collection has been
+ // sampled. All the fields below will be sampled in this case.
+ optional int64 recorded_call_count = 12;
+ // Number of exceptions thrown by the API.
+ optional int64 recorded_exception_count = 5;
+ // Total latency of all API calls.
+ // Average can be computed using total_latency_micros / recorded_call_count.
+ optional int64 recorded_total_latency_micros = 6;
+ // Maximum latency of one API call.
+ optional int64 recorded_max_latency_micros = 7;
+ // Total CPU usage of all API calls.
+ // Average can be computed using total_cpu_micros / recorded_call_count.
+ // Total can be computed using total_cpu_micros / recorded_call_count * call_count.
+ optional int64 recorded_total_cpu_micros = 8;
+ // Maximum CPU usage of one API call.
+ optional int64 recorded_max_cpu_micros = 9;
+ // Maximum parcel reply size of one API call.
+ optional int64 recorded_max_reply_size_bytes = 10;
+ // Maximum parcel request size of one API call.
+ optional int64 recorded_max_request_size_bytes = 11;
+}
+
+/**
+ * Pulls the statistics of exceptions during calls to Binder.
+ *
+ * Binder stats are cumulative from boot unless somebody resets the data using
+ * > adb shell dumpsys binder_calls_stats --reset
+ */
+message BinderCallsExceptions {
+ // Exception class name, e.g. java.lang.IllegalArgumentException.
+ //
+ // This is an exception class name thrown by the system server.
+ optional string exception_class_name = 1;
+ // Total number of exceptions.
+ optional int64 exception_count = 2;
+}
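Worked example for the BinderCalls comments above (the numbers are made up; this is not statsd code): average latency is recorded_total_latency_micros / recorded_call_count, and when the data was sampled (recorded_call_count < call_count) the recorded CPU total can be extrapolated by the call_count ratio.

    #include <cstdint>
    #include <cstdio>

    int main() {
        int64_t call_count = 1000;                     // all calls observed
        int64_t recorded_call_count = 100;             // sampled subset
        int64_t recorded_total_latency_micros = 5000;  // latency over the sample
        int64_t recorded_total_cpu_micros = 2000;      // CPU time over the sample

        double avg_latency_micros =
                (double)recorded_total_latency_micros / recorded_call_count;  // 50.0
        double estimated_total_cpu_micros =
                (double)recorded_total_cpu_micros / recorded_call_count * call_count;  // 20000.0

        printf("avg latency: %.1f us, estimated total CPU: %.1f us\n",
               avg_latency_micros, estimated_total_cpu_micros);
        return 0;
    }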
diff --git a/cmds/statsd/src/condition/condition_util.cpp b/cmds/statsd/src/condition/condition_util.cpp
index 691356b5edc6..35e03e45c785 100644
--- a/cmds/statsd/src/condition/condition_util.cpp
+++ b/cmds/statsd/src/condition/condition_util.cpp
@@ -76,9 +76,9 @@ ConditionState evaluateCombinationCondition(const std::vector<int>& children,
break;
}
case LogicalOperation::NOT:
- newCondition = (conditionCache[children[0]] == ConditionState::kFalse)
- ? ConditionState::kTrue
- : ConditionState::kFalse;
+ newCondition = children.empty() ? ConditionState::kUnknown :
+ ((conditionCache[children[0]] == ConditionState::kFalse) ?
+ ConditionState::kTrue : ConditionState::kFalse);
break;
case LogicalOperation::NAND:
newCondition = hasFalse ? ConditionState::kTrue : ConditionState::kFalse;
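The NOT case now guards against an empty child list; without the guard, conditionCache[children[0]] would be an out-of-bounds read. A standalone sketch of the guarded evaluation (the enum here is a stand-in for statsd's ConditionState):

    #include <vector>

    enum class ConditionState { kUnknown, kFalse, kTrue };

    // Mirrors the guarded NOT evaluation above: no children -> kUnknown,
    // otherwise the negation of the first child's cached state.
    ConditionState evaluateNot(const std::vector<int>& children,
                               const std::vector<ConditionState>& conditionCache) {
        if (children.empty()) {
            return ConditionState::kUnknown;
        }
        return conditionCache[children[0]] == ConditionState::kFalse
                       ? ConditionState::kTrue
                       : ConditionState::kFalse;
    }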
diff --git a/cmds/statsd/src/config/ConfigManager.h b/cmds/statsd/src/config/ConfigManager.h
index 611c34250a38..122e669057b0 100644
--- a/cmds/statsd/src/config/ConfigManager.h
+++ b/cmds/statsd/src/config/ConfigManager.h
@@ -31,9 +31,6 @@ namespace android {
namespace os {
namespace statsd {
-// Util function to build a hard coded config with test metrics.
-StatsdConfig build_fake_config();
-
/**
* Keeps track of which configurations have been set from various sources.
*/
diff --git a/cmds/statsd/src/external/Perfetto.cpp b/cmds/statsd/src/external/Perfetto.cpp
index 05544837b752..e44351b39769 100644
--- a/cmds/statsd/src/external/Perfetto.cpp
+++ b/cmds/statsd/src/external/Perfetto.cpp
@@ -113,7 +113,7 @@ bool CollectPerfettoTraceAndUploadToDropbox(const PerfettoDetails& config,
return false;
}
- std::string cfgProto = config.trace_config().SerializeAsString();
+ const std::string& cfgProto = config.trace_config();
size_t bytesWritten = fwrite(cfgProto.data(), 1, cfgProto.size(), writePipeStream);
fclose(writePipeStream);
if (bytesWritten != cfgProto.size() || cfgProto.size() == 0) {
diff --git a/cmds/statsd/src/external/Perfprofd.cpp b/cmds/statsd/src/external/Perfprofd.cpp
new file mode 100644
index 000000000000..1678f104a07a
--- /dev/null
+++ b/cmds/statsd/src/external/Perfprofd.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Perfprofd.h"
+
+#define DEBUG false // STOPSHIP if true
+#include "config/ConfigKey.h"
+#include "Log.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <string>
+
+#include <binder/IServiceManager.h>
+
+#include "frameworks/base/cmds/statsd/src/statsd_config.pb.h" // Alert
+
+#include "android/os/IPerfProfd.h"
+
+namespace android {
+namespace os {
+namespace statsd {
+
+bool CollectPerfprofdTraceAndUploadToDropbox(const PerfprofdDetails& config,
+ int64_t alert_id,
+ const ConfigKey& configKey) {
+ VLOG("Starting trace collection through perfprofd");
+
+ if (!config.has_perfprofd_config()) {
+ ALOGE("The perfprofd trace config is empty, aborting");
+ return false;
+ }
+
+ sp<IPerfProfd> service = interface_cast<IPerfProfd>(
+ defaultServiceManager()->getService(android::String16("perfprofd")));
+ if (service == NULL) {
+ ALOGE("Could not find perfprofd service");
+ return false;
+ }
+
+ auto* data = reinterpret_cast<const uint8_t*>(config.perfprofd_config().data());
+ std::vector<uint8_t> proto_serialized(data, data + config.perfprofd_config().size());
+
+ // TODO: alert-id etc?
+
+ binder::Status status = service->startProfilingProtobuf(proto_serialized);
+ if (status.isOk()) {
+ return true;
+ }
+
+ ALOGE("Error starting perfprofd profiling: %s", status.toString8().c_str());
+ return false;
+}
+
+} // namespace statsd
+} // namespace os
+} // namespace android
diff --git a/cmds/statsd/src/external/Perfprofd.h b/cmds/statsd/src/external/Perfprofd.h
new file mode 100644
index 000000000000..b93fdf8e1cb2
--- /dev/null
+++ b/cmds/statsd/src/external/Perfprofd.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <inttypes.h>
+
+namespace android {
+namespace os {
+namespace statsd {
+
+class ConfigKey;
+class PerfprofdDetails; // Declared in statsd_config.pb.h
+
+// Starts the collection of a Perfprofd trace with the given |config|.
+// The trace is uploaded to Dropbox by the perfprofd service once done.
+// This method returns immediately after passing the config and does NOT wait
+// for the full duration of the trace.
+bool CollectPerfprofdTraceAndUploadToDropbox(const PerfprofdDetails& config,
+ int64_t alert_id,
+ const ConfigKey& configKey);
+
+} // namespace statsd
+} // namespace os
+} // namespace android
diff --git a/cmds/statsd/src/external/ResourceHealthManagerPuller.cpp b/cmds/statsd/src/external/ResourceHealthManagerPuller.cpp
index 3741202763b3..ae97d7a2fc15 100644
--- a/cmds/statsd/src/external/ResourceHealthManagerPuller.cpp
+++ b/cmds/statsd/src/external/ResourceHealthManagerPuller.cpp
@@ -54,7 +54,7 @@ bool getHealthHal() {
ResourceHealthManagerPuller::ResourceHealthManagerPuller(int tagId) : StatsPuller(tagId) {
}
-// TODO: add other health atoms (eg. Temperature).
+// TODO(b/110565992): add other health atoms (eg. Temperature).
bool ResourceHealthManagerPuller::PullInternal(vector<shared_ptr<LogEvent>>* data) {
if (!getHealthHal()) {
ALOGE("Health Hal not loaded");
diff --git a/cmds/statsd/src/external/StatsPuller.cpp b/cmds/statsd/src/external/StatsPuller.cpp
index b29e979b5236..436a8801896f 100644
--- a/cmds/statsd/src/external/StatsPuller.cpp
+++ b/cmds/statsd/src/external/StatsPuller.cpp
@@ -18,10 +18,10 @@
#include "Log.h"
#include "StatsPuller.h"
+#include "StatsPullerManager.h"
#include "guardrail/StatsdStats.h"
#include "puller_util.h"
#include "stats_log_util.h"
-#include "StatsPullerManagerImpl.h"
namespace android {
namespace os {
@@ -35,7 +35,7 @@ void StatsPuller::SetUidMap(const sp<UidMap>& uidMap) { mUidMap = uidMap; }
// ValueMetric has a minimum bucket size of 10min so that we don't pull too frequently
StatsPuller::StatsPuller(const int tagId)
: mTagId(tagId) {
- mCoolDownNs = StatsPullerManagerImpl::kAllPullAtomInfo.find(tagId)->second.coolDownNs;
+ mCoolDownNs = StatsPullerManager::kAllPullAtomInfo.find(tagId)->second.coolDownNs;
VLOG("Puller for tag %d created. Cooldown set to %lld", mTagId, (long long)mCoolDownNs);
}
diff --git a/cmds/statsd/src/external/StatsPuller.h b/cmds/statsd/src/external/StatsPuller.h
index caac677ee215..22cb2f5c2175 100644
--- a/cmds/statsd/src/external/StatsPuller.h
+++ b/cmds/statsd/src/external/StatsPuller.h
@@ -37,6 +37,8 @@ public:
virtual ~StatsPuller() {}
+ // Pulls the data. The returned data will have elapsedTimeNs set to timeNs
+ // and wallClockTimeNs set to the current wall clock time.
bool Pull(const int64_t timeNs, std::vector<std::shared_ptr<LogEvent>>* data);
// Clear cache immediately
diff --git a/cmds/statsd/src/external/StatsPullerManagerImpl.cpp b/cmds/statsd/src/external/StatsPullerManager.cpp
index c020f9c12b87..e6e84550cf5f 100644
--- a/cmds/statsd/src/external/StatsPullerManagerImpl.cpp
+++ b/cmds/statsd/src/external/StatsPullerManager.cpp
@@ -29,7 +29,7 @@
#include "ResourceHealthManagerPuller.h"
#include "ResourceThermalManagerPuller.h"
#include "StatsCompanionServicePuller.h"
-#include "StatsPullerManagerImpl.h"
+#include "StatsPullerManager.h"
#include "SubsystemSleepStatePuller.h"
#include "statslog.h"
@@ -49,7 +49,7 @@ namespace statsd {
// Values smaller than this may require to update the alarm.
const int64_t NO_ALARM_UPDATE = INT64_MAX;
-const std::map<int, PullAtomInfo> StatsPullerManagerImpl::kAllPullAtomInfo = {
+const std::map<int, PullAtomInfo> StatsPullerManager::kAllPullAtomInfo = {
// wifi_bytes_transfer
{android::util::WIFI_BYTES_TRANSFER,
{{2, 3, 4, 5},
@@ -171,12 +171,25 @@ const std::map<int, PullAtomInfo> StatsPullerManagerImpl::kAllPullAtomInfo = {
1 * NS_PER_SEC,
new StatsCompanionServicePuller(android::util::PROCESS_MEMORY_STATE)}},
// temperature
- {android::util::TEMPERATURE, {{}, {}, 1, new ResourceThermalManagerPuller()}}};
+ {android::util::TEMPERATURE, {{}, {}, 1, new ResourceThermalManagerPuller()}},
+ // binder_calls
+ {android::util::BINDER_CALLS,
+ {{4, 5, 6, 8, 12},
+ {2, 3, 7, 9, 10, 11, 13},
+ 1 * NS_PER_SEC,
+ new StatsCompanionServicePuller(android::util::BINDER_CALLS)}},
+ // binder_calls_exceptions
+ {android::util::BINDER_CALLS_EXCEPTIONS,
+ {{},
+ {},
+ 1 * NS_PER_SEC,
+ new StatsCompanionServicePuller(android::util::BINDER_CALLS_EXCEPTIONS)}}
+ };
-StatsPullerManagerImpl::StatsPullerManagerImpl() : mNextPullTimeNs(NO_ALARM_UPDATE) {
+StatsPullerManager::StatsPullerManager() : mNextPullTimeNs(NO_ALARM_UPDATE) {
}
-bool StatsPullerManagerImpl::Pull(const int tagId, const int64_t timeNs,
+bool StatsPullerManager::Pull(const int tagId, const int64_t timeNs,
vector<shared_ptr<LogEvent>>* data) {
VLOG("Initiating pulling %d", tagId);
@@ -190,16 +203,11 @@ bool StatsPullerManagerImpl::Pull(const int tagId, const int64_t timeNs,
}
}
-StatsPullerManagerImpl& StatsPullerManagerImpl::GetInstance() {
- static StatsPullerManagerImpl instance;
- return instance;
-}
-
-bool StatsPullerManagerImpl::PullerForMatcherExists(int tagId) const {
+bool StatsPullerManager::PullerForMatcherExists(int tagId) const {
return kAllPullAtomInfo.find(tagId) != kAllPullAtomInfo.end();
}
-void StatsPullerManagerImpl::updateAlarmLocked() {
+void StatsPullerManager::updateAlarmLocked() {
if (mNextPullTimeNs == NO_ALARM_UPDATE) {
VLOG("No need to set alarms. Skipping");
return;
@@ -214,7 +222,7 @@ void StatsPullerManagerImpl::updateAlarmLocked() {
return;
}
-void StatsPullerManagerImpl::SetStatsCompanionService(
+void StatsPullerManager::SetStatsCompanionService(
sp<IStatsCompanionService> statsCompanionService) {
AutoMutex _l(mLock);
sp<IStatsCompanionService> tmpForLock = mStatsCompanionService;
@@ -227,7 +235,7 @@ void StatsPullerManagerImpl::SetStatsCompanionService(
}
}
-void StatsPullerManagerImpl::RegisterReceiver(int tagId, wp<PullDataReceiver> receiver,
+void StatsPullerManager::RegisterReceiver(int tagId, wp<PullDataReceiver> receiver,
int64_t nextPullTimeNs, int64_t intervalNs) {
AutoMutex _l(mLock);
auto& receivers = mReceivers[tagId];
@@ -262,7 +270,7 @@ void StatsPullerManagerImpl::RegisterReceiver(int tagId, wp<PullDataReceiver> re
VLOG("Puller for tagId %d registered of %d", tagId, (int)receivers.size());
}
-void StatsPullerManagerImpl::UnRegisterReceiver(int tagId, wp<PullDataReceiver> receiver) {
+void StatsPullerManager::UnRegisterReceiver(int tagId, wp<PullDataReceiver> receiver) {
AutoMutex _l(mLock);
if (mReceivers.find(tagId) == mReceivers.end()) {
VLOG("Unknown pull code or no receivers: %d", tagId);
@@ -278,7 +286,7 @@ void StatsPullerManagerImpl::UnRegisterReceiver(int tagId, wp<PullDataReceiver>
}
}
-void StatsPullerManagerImpl::OnAlarmFired(const int64_t currentTimeNs) {
+void StatsPullerManager::OnAlarmFired(const int64_t currentTimeNs) {
AutoMutex _l(mLock);
int64_t minNextPullTimeNs = NO_ALARM_UPDATE;
@@ -331,7 +339,7 @@ void StatsPullerManagerImpl::OnAlarmFired(const int64_t currentTimeNs) {
updateAlarmLocked();
}
-int StatsPullerManagerImpl::ForceClearPullerCache() {
+int StatsPullerManager::ForceClearPullerCache() {
int totalCleared = 0;
for (const auto& pulledAtom : kAllPullAtomInfo) {
totalCleared += pulledAtom.second.puller->ForceClearCache();
@@ -339,7 +347,7 @@ int StatsPullerManagerImpl::ForceClearPullerCache() {
return totalCleared;
}
-int StatsPullerManagerImpl::ClearPullerCacheIfNecessary(int64_t timestampNs) {
+int StatsPullerManager::ClearPullerCacheIfNecessary(int64_t timestampNs) {
int totalCleared = 0;
for (const auto& pulledAtom : kAllPullAtomInfo) {
totalCleared += pulledAtom.second.puller->ClearCacheIfNecessary(timestampNs);
diff --git a/cmds/statsd/src/external/StatsPullerManager.h b/cmds/statsd/src/external/StatsPullerManager.h
index 50ffe17549c6..bbf5d9dc69db 100644
--- a/cmds/statsd/src/external/StatsPullerManager.h
+++ b/cmds/statsd/src/external/StatsPullerManager.h
@@ -16,54 +16,95 @@
#pragma once
-#include "StatsPullerManagerImpl.h"
+#include <android/os/IStatsCompanionService.h>
+#include <binder/IServiceManager.h>
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+#include <list>
+#include <string>
+#include <unordered_map>
+#include <vector>
+#include "PullDataReceiver.h"
+#include "StatsPuller.h"
+#include "logd/LogEvent.h"
namespace android {
namespace os {
namespace statsd {
-class StatsPullerManager {
- public:
- virtual ~StatsPullerManager() {}
+typedef struct {
+ // The field numbers of the fields that need to be summed when merging
+ // isolated uid with host uid.
+ std::vector<int> additiveFields;
+ // The field numbers of the fields that can't be merged when merging
+ // data belonging to an isolated uid and its host uid.
+ std::vector<int> nonAdditiveFields;
+ // How long should the puller wait before doing an actual pull again. Default
+ // 1 sec. Set this to 0 if this is handled elsewhere.
+ int64_t coolDownNs = 1 * NS_PER_SEC;
+ // The actual puller
+ sp<StatsPuller> puller;
+} PullAtomInfo;
+class StatsPullerManager : public virtual RefBase {
+public:
+ StatsPullerManager();
+
+ virtual ~StatsPullerManager() {
+ }
+
+ // Registers a receiver for tagId. It will be pulled at nextPullTimeNs
+ // and then every intervalNs thereafter.
virtual void RegisterReceiver(int tagId, wp<PullDataReceiver> receiver, int64_t nextPullTimeNs,
- int64_t intervalNs) {
- mPullerManager.RegisterReceiver(tagId, receiver, nextPullTimeNs, intervalNs);
- };
+ int64_t intervalNs);
- virtual void UnRegisterReceiver(int tagId, wp <PullDataReceiver> receiver) {
- mPullerManager.UnRegisterReceiver(tagId, receiver);
- };
+ // Stop listening on a tagId.
+ virtual void UnRegisterReceiver(int tagId, wp<PullDataReceiver> receiver);
// Verify if we know how to pull for this matcher
- bool PullerForMatcherExists(int tagId) {
- return mPullerManager.PullerForMatcherExists(tagId);
- }
+ bool PullerForMatcherExists(int tagId) const;
- void OnAlarmFired(const int64_t currentTimeNs) {
- mPullerManager.OnAlarmFired(currentTimeNs);
- }
+ void OnAlarmFired(const int64_t timeNs);
- virtual bool Pull(const int tagId, const int64_t timesNs,
- vector<std::shared_ptr<LogEvent>>* data) {
- return mPullerManager.Pull(tagId, timesNs, data);
- }
+ // Uses the respective puller to pull the data. The returned data will have
+ // elapsedTimeNs set to timeNs and wallClockTimeNs set to the current
+ // wall clock time.
+ virtual bool Pull(const int tagId, const int64_t timeNs,
+ vector<std::shared_ptr<LogEvent>>* data);
- int ForceClearPullerCache() {
- return mPullerManager.ForceClearPullerCache();
- }
+ // Clear pull data cache immediately.
+ int ForceClearPullerCache();
- void SetStatsCompanionService(sp<IStatsCompanionService> statsCompanionService) {
- mPullerManager.SetStatsCompanionService(statsCompanionService);
- }
+ // Clear pull data cache if it is beyond respective cool down time.
+ int ClearPullerCacheIfNecessary(int64_t timestampNs);
- int ClearPullerCacheIfNecessary(int64_t timestampNs) {
- return mPullerManager.ClearPullerCacheIfNecessary(timestampNs);
- }
+ void SetStatsCompanionService(sp<IStatsCompanionService> statsCompanionService);
+
+ const static std::map<int, PullAtomInfo> kAllPullAtomInfo;
+
+private:
+ sp<IStatsCompanionService> mStatsCompanionService = nullptr;
+
+ typedef struct {
+ int64_t nextPullTimeNs;
+ int64_t intervalNs;
+ wp<PullDataReceiver> receiver;
+ } ReceiverInfo;
+
+ // mapping from simple matcher tagId to receivers
+ std::map<int, std::list<ReceiverInfo>> mReceivers;
+
+ // locks for data receiver and StatsCompanionService changes
+ Mutex mLock;
+
+ void updateAlarmLocked();
+
+ int64_t mNextPullTimeNs;
- private:
- StatsPullerManagerImpl
- & mPullerManager = StatsPullerManagerImpl::GetInstance();
+ FRIEND_TEST(GaugeMetricE2eTest, TestRandomSamplePulledEvents);
+ FRIEND_TEST(GaugeMetricE2eTest, TestRandomSamplePulledEvent_LateAlarm);
+ FRIEND_TEST(ValueMetricE2eTest, TestPulledEvents);
+ FRIEND_TEST(ValueMetricE2eTest, TestPulledEvents_LateAlarm);
};
} // namespace statsd
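With the StatsPullerManagerImpl singleton folded into StatsPullerManager, callers now hold an sp<StatsPullerManager> and go through the pointer, as the mPullerManager changes in StatsService above show. A minimal sketch of the new call pattern (assumes the statsd headers from this change; tagId and timestamps are caller-supplied):

    #include <memory>
    #include <vector>

    #include "external/StatsPullerManager.h"
    #include "logd/LogEvent.h"

    using android::sp;
    using android::os::statsd::LogEvent;
    using android::os::statsd::StatsPullerManager;

    // One shared, injected manager instead of StatsPullerManagerImpl::GetInstance().
    static bool pullOnce(const sp<StatsPullerManager>& pullerManager, int tagId,
                         int64_t elapsedRealtimeNs) {
        std::vector<std::shared_ptr<LogEvent>> data;
        if (!pullerManager->Pull(tagId, elapsedRealtimeNs, &data)) {
            return false;
        }
        // data holds one LogEvent per pulled atom row.
        return !data.empty();
    }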
diff --git a/cmds/statsd/src/external/StatsPullerManagerImpl.h b/cmds/statsd/src/external/StatsPullerManagerImpl.h
deleted file mode 100644
index 56d04b41c5d5..000000000000
--- a/cmds/statsd/src/external/StatsPullerManagerImpl.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <android/os/IStatsCompanionService.h>
-#include <binder/IServiceManager.h>
-#include <utils/RefBase.h>
-#include <utils/threads.h>
-#include <string>
-#include <unordered_map>
-#include <vector>
-#include <list>
-#include "PullDataReceiver.h"
-#include "StatsPuller.h"
-#include "logd/LogEvent.h"
-
-namespace android {
-namespace os {
-namespace statsd {
-
-typedef struct {
- // The field numbers of the fields that need to be summed when merging
- // isolated uid with host uid.
- std::vector<int> additiveFields;
- // The field numbers of the fields that can't be merged when merging
- // data belong to isolated uid and host uid.
- std::vector<int> nonAdditiveFields;
- // How long should the puller wait before doing an actual pull again. Default
- // 1 sec. Set this to 0 if this is handled elsewhere.
- int64_t coolDownNs = 1 * NS_PER_SEC;
- // The actual puller
- sp<StatsPuller> puller;
-} PullAtomInfo;
-
-class StatsPullerManagerImpl : public virtual RefBase {
-public:
- static StatsPullerManagerImpl& GetInstance();
-
- void RegisterReceiver(int tagId, wp<PullDataReceiver> receiver, int64_t nextPullTimeNs,
- int64_t intervalNs);
-
- void UnRegisterReceiver(int tagId, wp<PullDataReceiver> receiver);
-
- // Verify if we know how to pull for this matcher
- bool PullerForMatcherExists(int tagId) const;
-
- void OnAlarmFired(const int64_t timeNs);
-
- bool Pull(const int tagId, const int64_t timeNs, vector<std::shared_ptr<LogEvent>>* data);
-
- int ForceClearPullerCache();
-
- int ClearPullerCacheIfNecessary(int64_t timestampNs);
-
- void SetStatsCompanionService(sp<IStatsCompanionService> statsCompanionService);
-
- const static std::map<int, PullAtomInfo> kAllPullAtomInfo;
-
- private:
- StatsPullerManagerImpl();
-
- sp<IStatsCompanionService> mStatsCompanionService = nullptr;
-
- typedef struct {
- int64_t nextPullTimeNs;
- int64_t intervalNs;
- wp<PullDataReceiver> receiver;
- } ReceiverInfo;
-
- // mapping from simple matcher tagId to receivers
- std::map<int, std::list<ReceiverInfo>> mReceivers;
-
- // locks for data receiver and StatsCompanionService changes
- Mutex mLock;
-
- void updateAlarmLocked();
-
- int64_t mNextPullTimeNs;
-
- FRIEND_TEST(GaugeMetricE2eTest, TestRandomSamplePulledEvents);
- FRIEND_TEST(GaugeMetricE2eTest, TestRandomSamplePulledEvent_LateAlarm);
- FRIEND_TEST(ValueMetricE2eTest, TestPulledEvents);
- FRIEND_TEST(ValueMetricE2eTest, TestPulledEvents_LateAlarm);
-};
-
-} // namespace statsd
-} // namespace os
-} // namespace android
diff --git a/cmds/statsd/src/external/puller_util.cpp b/cmds/statsd/src/external/puller_util.cpp
index 57fe10e51bfc..ea7fa972cb9c 100644
--- a/cmds/statsd/src/external/puller_util.cpp
+++ b/cmds/statsd/src/external/puller_util.cpp
@@ -17,7 +17,7 @@
#define DEBUG false // STOPSHIP if true
#include "Log.h"
-#include "StatsPullerManagerImpl.h"
+#include "StatsPullerManager.h"
#include "puller_util.h"
#include "statslog.h"
@@ -107,8 +107,8 @@ bool tryMerge(vector<shared_ptr<LogEvent>>& data, int child_pos, const vector<in
*/
void mergeIsolatedUidsToHostUid(vector<shared_ptr<LogEvent>>& data, const sp<UidMap>& uidMap,
int tagId) {
- if (StatsPullerManagerImpl::kAllPullAtomInfo.find(tagId) ==
- StatsPullerManagerImpl::kAllPullAtomInfo.end()) {
+ if (StatsPullerManager::kAllPullAtomInfo.find(tagId) ==
+ StatsPullerManager::kAllPullAtomInfo.end()) {
VLOG("Unknown pull atom id %d", tagId);
return;
}
@@ -121,9 +121,9 @@ void mergeIsolatedUidsToHostUid(vector<shared_ptr<LogEvent>>& data, const sp<Uid
uidField = it->second; // uidField is the field number in proto,
}
const vector<int>& additiveFields =
- StatsPullerManagerImpl::kAllPullAtomInfo.find(tagId)->second.additiveFields;
+ StatsPullerManager::kAllPullAtomInfo.find(tagId)->second.additiveFields;
const vector<int>& nonAdditiveFields =
- StatsPullerManagerImpl::kAllPullAtomInfo.find(tagId)->second.nonAdditiveFields;
+ StatsPullerManager::kAllPullAtomInfo.find(tagId)->second.nonAdditiveFields;
// map of host uid to their position in the original vector
map<int, vector<int>> hostPosition;
diff --git a/cmds/statsd/src/guardrail/StatsdStats.cpp b/cmds/statsd/src/guardrail/StatsdStats.cpp
index 764366fc420a..33f3917f05ad 100644
--- a/cmds/statsd/src/guardrail/StatsdStats.cpp
+++ b/cmds/statsd/src/guardrail/StatsdStats.cpp
@@ -50,7 +50,7 @@ const int FIELD_ID_ANOMALY_ALARM_STATS = 9;
// const int FIELD_ID_PULLED_ATOM_STATS = 10; // The proto is written in stats_log_util.cpp
const int FIELD_ID_LOGGER_ERROR_STATS = 11;
const int FIELD_ID_PERIODIC_ALARM_STATS = 12;
-const int FIELD_ID_LOG_LOSS_STATS = 14;
+// const int FIELD_ID_LOG_LOSS_STATS = 14;
const int FIELD_ID_SYSTEM_SERVER_RESTART = 15;
const int FIELD_ID_ATOM_STATS_TAG = 1;
@@ -100,10 +100,10 @@ const int FIELD_ID_UID_MAP_DROPPED_CHANGES = 3;
const int FIELD_ID_UID_MAP_DELETED_APPS = 4;
const std::map<int, std::pair<size_t, size_t>> StatsdStats::kAtomDimensionKeySizeLimitMap = {
+ {android::util::BINDER_CALLS, {6000, 10000}},
{android::util::CPU_TIME_PER_UID_FREQ, {6000, 10000}},
};
-// TODO: add stats for pulled atoms.
StatsdStats::StatsdStats() {
mPushedAtomStats.resize(android::util::kMaxPushedAtomId + 1);
mStartTimeSec = getWallClockSec();
@@ -180,12 +180,12 @@ void StatsdStats::noteConfigReset(const ConfigKey& key) {
noteConfigResetInternalLocked(key);
}
-void StatsdStats::noteLogLost(int64_t timestampNs) {
+void StatsdStats::noteLogLost(int32_t wallClockTimeSec, int32_t count) {
lock_guard<std::mutex> lock(mLock);
- if (mLogLossTimestampNs.size() == kMaxLoggerErrors) {
- mLogLossTimestampNs.pop_front();
+ if (mLogLossStats.size() == kMaxLoggerErrors) {
+ mLogLossStats.pop_front();
}
- mLogLossTimestampNs.push_back(timestampNs);
+ mLogLossStats.push_back(std::make_pair(wallClockTimeSec, count));
}
void StatsdStats::noteBroadcastSent(const ConfigKey& key) {
@@ -365,15 +365,6 @@ void StatsdStats::noteSystemServerRestart(int32_t timeSec) {
mSystemServerRestartSec.push_back(timeSec);
}
-void StatsdStats::noteLoggerError(int error) {
- lock_guard<std::mutex> lock(mLock);
- // grows strictly one at a time. so it won't > kMaxLoggerErrors
- if (mLoggerErrors.size() == kMaxLoggerErrors) {
- mLoggerErrors.pop_front();
- }
- mLoggerErrors.push_back(std::make_pair(getWallClockSec(), error));
-}
-
void StatsdStats::reset() {
lock_guard<std::mutex> lock(mLock);
resetInternalLocked();
@@ -386,9 +377,8 @@ void StatsdStats::resetInternalLocked() {
std::fill(mPushedAtomStats.begin(), mPushedAtomStats.end(), 0);
mAnomalyAlarmRegisteredStats = 0;
mPeriodicAlarmRegisteredStats = 0;
- mLoggerErrors.clear();
mSystemServerRestartSec.clear();
- mLogLossTimestampNs.clear();
+ mLogLossStats.clear();
for (auto& config : mConfigStats) {
config.second->broadcast_sent_time_sec.clear();
config.second->data_drop_time_sec.clear();
@@ -515,21 +505,14 @@ void StatsdStats::dumpStats(FILE* out) const {
mUidMapStats.bytes_used, mUidMapStats.changes, mUidMapStats.deleted_apps,
mUidMapStats.dropped_changes);
- for (const auto& error : mLoggerErrors) {
- time_t error_time = error.first;
- struct tm* error_tm = localtime(&error_time);
- char buffer[80];
- strftime(buffer, sizeof(buffer), "%Y-%m-%d %I:%M%p\n", error_tm);
- fprintf(out, "Logger error %d at %s\n", error.second, buffer);
- }
-
for (const auto& restart : mSystemServerRestartSec) {
fprintf(out, "System server restarts at %s(%lld)\n",
buildTimeString(restart).c_str(), (long long)restart);
}
- for (const auto& loss : mLogLossTimestampNs) {
- fprintf(out, "Log loss detected at %lld (elapsedRealtimeNs)\n", (long long)loss);
+ for (const auto& loss : mLogLossStats) {
+ fprintf(out, "Log loss: %lld (wall clock sec) - %d (count)\n", (long long)loss.first,
+ loss.second);
}
}
@@ -677,7 +660,10 @@ void StatsdStats::dumpStats(std::vector<uint8_t>* output, bool reset) {
proto.write(FIELD_TYPE_INT32 | FIELD_ID_UID_MAP_DELETED_APPS, mUidMapStats.deleted_apps);
proto.end(uidMapToken);
- for (const auto& error : mLoggerErrors) {
+ for (const auto& error : mLogLossStats) {
+ // The logger error stats are not used anymore since we moved away from logd.
+ // Temporarily use this field to log the log loss timestamp and count.
+ // TODO(b/80538532): Add a dedicated field in stats_log for this.
uint64_t token = proto.start(FIELD_TYPE_MESSAGE | FIELD_ID_LOGGER_ERROR_STATS |
FIELD_COUNT_REPEATED);
proto.write(FIELD_TYPE_INT32 | FIELD_ID_LOGGER_STATS_TIME, error.first);
@@ -685,11 +671,6 @@ void StatsdStats::dumpStats(std::vector<uint8_t>* output, bool reset) {
proto.end(token);
}
- for (const auto& loss : mLogLossTimestampNs) {
- proto.write(FIELD_TYPE_INT64 | FIELD_ID_LOG_LOSS_STATS | FIELD_COUNT_REPEATED,
- (long long)loss);
- }
-
for (const auto& restart : mSystemServerRestartSec) {
proto.write(FIELD_TYPE_INT32 | FIELD_ID_SYSTEM_SERVER_RESTART | FIELD_COUNT_REPEATED,
restart);
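noteLogLost now records a (wall clock second, count) pair rather than an elapsed-realtime timestamp, and dumpStats prints it as "Log loss: <sec> (wall clock sec) - <count> (count)". A sketch of how a caller that detects dropped events might report them, assuming getWallClockSec() (the wall-clock helper already used in this file) is available:

    #include "guardrail/StatsdStats.h"

    namespace android {
    namespace os {
    namespace statsd {

    // Report a detected gap of `lostCount` events at the current wall clock time.
    void reportLogLoss(int32_t lostCount) {
        StatsdStats::getInstance().noteLogLost(getWallClockSec(), lostCount);
    }

    }  // namespace statsd
    }  // namespace os
    }  // namespace android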
diff --git a/cmds/statsd/src/guardrail/StatsdStats.h b/cmds/statsd/src/guardrail/StatsdStats.h
index 74541d37b840..b5156dadade6 100644
--- a/cmds/statsd/src/guardrail/StatsdStats.h
+++ b/cmds/statsd/src/guardrail/StatsdStats.h
@@ -86,7 +86,6 @@ public:
static StatsdStats& getInstance();
~StatsdStats(){};
- // TODO: set different limit if the device is low ram.
const static int kDimensionKeySizeSoftLimit = 500;
const static int kDimensionKeySizeHardLimit = 800;
@@ -272,11 +271,6 @@ public:
// Notify pull request for an atom served from cached data
void notePullFromCache(int pullAtomId);
- /**
- * Records statsd met an error while reading from logd.
- */
- void noteLoggerError(int error);
-
/*
* Records when system server restarts.
*/
@@ -285,7 +279,7 @@ public:
/**
* Records statsd skipped an event.
*/
- void noteLogLost(int64_t timestamp);
+ void noteLogLost(int32_t wallClockTimeSec, int32_t count);
/**
* Reset the historical stats. Including all stats in icebox, and the tracked stats about
@@ -339,11 +333,8 @@ private:
// Maps PullAtomId to its stats. The size is capped by the puller atom counts.
std::map<int, PulledAtomStats> mPulledAtomStats;
- // Logd errors. Size capped by kMaxLoggerErrors.
- std::list<const std::pair<int, int>> mLoggerErrors;
-
- // Timestamps when we detect log loss after logd reconnect.
- std::list<int64_t> mLogLossTimestampNs;
+ // Timestamps when we detect log loss, and the number of logs lost.
+ std::list<std::pair<int32_t, int32_t>> mLogLossStats;
std::list<int32_t> mSystemServerRestartSec;
diff --git a/cmds/statsd/src/logd/LogEvent.cpp b/cmds/statsd/src/logd/LogEvent.cpp
index 4e4f146d27ac..73e6572db50f 100644
--- a/cmds/statsd/src/logd/LogEvent.cpp
+++ b/cmds/statsd/src/logd/LogEvent.cpp
@@ -18,6 +18,7 @@
#include "logd/LogEvent.h"
#include "stats_log_util.h"
+#include "statslog.h"
namespace android {
namespace os {
@@ -51,6 +52,52 @@ LogEvent::LogEvent(int32_t tagId, int64_t wallClockTimestampNs, int64_t elapsedT
}
}
+LogEvent::LogEvent(int32_t tagId, int64_t wallClockTimestampNs, int64_t elapsedTimestampNs,
+ int32_t uid,
+ const std::map<int32_t, int64_t>& int_map,
+ const std::map<int32_t, std::string>& string_map,
+ const std::map<int32_t, float>& float_map) {
+ mLogdTimestampNs = wallClockTimestampNs;
+ mElapsedTimestampNs = elapsedTimestampNs;
+ mTagId = android::util::KEY_VALUE_PAIRS_ATOM;
+ mLogUid = uid;
+
+ int pos[] = {1, 1, 1};
+
+ mValues.push_back(FieldValue(Field(mTagId, pos, 0 /* depth */), Value(uid)));
+ pos[0]++;
+ for (const auto& itr : int_map) {
+ pos[2] = 1;
+ mValues.push_back(FieldValue(Field(mTagId, pos, 2 /* depth */), Value(itr.first)));
+ pos[2] = 2;
+ mValues.push_back(FieldValue(Field(mTagId, pos, 2 /* depth */), Value(itr.second)));
+ mValues.back().mField.decorateLastPos(2);
+ pos[1]++;
+ }
+
+ for (const auto& itr : string_map) {
+ pos[2] = 1;
+ mValues.push_back(FieldValue(Field(mTagId, pos, 2 /* depth */), Value(itr.first)));
+ pos[2] = 3;
+ mValues.push_back(FieldValue(Field(mTagId, pos, 2 /* depth */), Value(itr.second)));
+ mValues.back().mField.decorateLastPos(2);
+ pos[1]++;
+ }
+
+ for (const auto& itr : float_map) {
+ pos[2] = 1;
+ mValues.push_back(FieldValue(Field(mTagId, pos, 2 /* depth */), Value(itr.first)));
+ pos[2] = 4;
+ mValues.push_back(FieldValue(Field(mTagId, pos, 2 /* depth */), Value(itr.second)));
+ mValues.back().mField.decorateLastPos(2);
+ pos[1]++;
+ }
+ if (!mValues.empty()) {
+ mValues.back().mField.decorateLastPos(1);
+ mValues.at(mValues.size() - 2).mField.decorateLastPos(1);
+ }
+}
+
LogEvent::LogEvent(int32_t tagId, int64_t timestampNs) {
mLogdTimestampNs = timestampNs;
mTagId = tagId;
@@ -273,7 +320,7 @@ void LogEvent::init(android_log_context context) {
}
int64_t LogEvent::GetLong(size_t key, status_t* err) const {
- // TODO: encapsulate the magical operations all in Field struct as a static function.
+ // TODO(b/110561208): encapsulate the magical operations in Field struct as static functions
int field = getSimpleField(key);
for (const auto& value : mValues) {
if (value.mField.getField() == field) {
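Usage sketch for the new KeyValuePairsAtom constructor (keys and values are made up; this is not part of the change): the maps carry the int64, string and float values keyed by int32 keys, and the constructor forces the tag to KEY_VALUE_PAIRS_ATOM regardless of the tagId argument.

    #include <cstdint>
    #include <map>
    #include <string>

    #include "logd/LogEvent.h"
    #include "statslog.h"

    using android::os::statsd::LogEvent;

    void logKeyValuePairsExample() {
        std::map<int32_t, int64_t> intMap = {{1, 100}};
        std::map<int32_t, std::string> stringMap = {{2, "state"}};
        std::map<int32_t, float> floatMap = {{3, 1.5f}};

        // uid 10001 becomes the first field; each pair becomes a nested
        // (key, value) entry, as built by the constructor above.
        LogEvent event(android::util::KEY_VALUE_PAIRS_ATOM,
                       /*wallClockTimestampNs=*/1000000000LL,
                       /*elapsedTimestampNs=*/2000000000LL,
                       /*uid=*/10001, intMap, stringMap, floatMap);
        (void)event;
    }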
diff --git a/cmds/statsd/src/logd/LogEvent.h b/cmds/statsd/src/logd/LogEvent.h
index 24d624d9d9be..9ed09dd12fbd 100644
--- a/cmds/statsd/src/logd/LogEvent.h
+++ b/cmds/statsd/src/logd/LogEvent.h
@@ -69,6 +69,15 @@ public:
// For testing. The timestamp is used as both elapsed real time and logd timestamp.
explicit LogEvent(int32_t tagId, int64_t timestampNs);
+ /**
+ * Constructs a KeyValuePairsAtom LogEvent from value maps.
+ */
+ explicit LogEvent(int32_t tagId, int64_t wallClockTimestampNs, int64_t elapsedTimestampNs,
+ int32_t uid,
+ const std::map<int32_t, int64_t>& int_map,
+ const std::map<int32_t, std::string>& string_map,
+ const std::map<int32_t, float>& float_map);
+
~LogEvent();
/**
diff --git a/cmds/statsd/src/logd/LogListener.cpp b/cmds/statsd/src/logd/LogListener.cpp
index 6ac7978bbac9..ddb26f9fe565 100644
--- a/cmds/statsd/src/logd/LogListener.cpp
+++ b/cmds/statsd/src/logd/LogListener.cpp
@@ -14,17 +14,7 @@
* limitations under the License.
*/
-#include "logd/LogReader.h"
-
-#include <log/log_read.h>
-
-#include <utils/Errors.h>
-
-#include <time.h>
-#include <unistd.h>
-
-using namespace android;
-using namespace std;
+#include "logd/LogListener.h"
namespace android {
namespace os {
diff --git a/cmds/statsd/src/logd/LogListener.h b/cmds/statsd/src/logd/LogListener.h
index f924040e3a7f..d8b06e9fab92 100644
--- a/cmds/statsd/src/logd/LogListener.h
+++ b/cmds/statsd/src/logd/LogListener.h
@@ -19,7 +19,6 @@
#include "logd/LogEvent.h"
#include <utils/RefBase.h>
-#include <vector>
namespace android {
namespace os {
@@ -33,7 +32,7 @@ public:
LogListener();
virtual ~LogListener();
- virtual void OnLogEvent(LogEvent* msg, bool reconnectionStarts) = 0;
+ virtual void OnLogEvent(LogEvent* msg) = 0;
};
} // namespace statsd
diff --git a/cmds/statsd/src/logd/LogReader.cpp b/cmds/statsd/src/logd/LogReader.cpp
deleted file mode 100644
index 26ae6a3e0e2e..000000000000
--- a/cmds/statsd/src/logd/LogReader.cpp
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "logd/LogReader.h"
-
-#include "guardrail/StatsdStats.h"
-
-#include <time.h>
-#include <unistd.h>
-#include <utils/Errors.h>
-
-using namespace android;
-using namespace std;
-
-namespace android {
-namespace os {
-namespace statsd {
-
-#define SNOOZE_INITIAL_MS 100
-#define SNOOZE_MAX_MS (10 * 60 * 1000) // Ten minutes
-
-LogReader::LogReader(const sp<LogListener>& listener) : mListener(listener) {
-}
-
-LogReader::~LogReader() {
-}
-
-void LogReader::Run() {
- int nextSnoozeMs = SNOOZE_INITIAL_MS;
-
- // In an ideal world, this outer loop will only ever run one iteration, but it
- // exists to handle crashes in logd. The inner loop inside connect_and_read()
- // reads from logd forever, but if that read fails, we fall out to the outer
- // loop, do the backoff (resetting the backoff timeout if we successfully read
- // something), and then try again.
- while (true) {
- // Connect and read
- int lineCount = connect_and_read();
-
- // Figure out how long to sleep.
- if (lineCount > 0) {
- // If we managed to read at least one line, reset the backoff
- nextSnoozeMs = SNOOZE_INITIAL_MS;
- } else {
- // Otherwise, expontial backoff
- nextSnoozeMs *= 1.5f;
- if (nextSnoozeMs > 10 * 60 * 1000) {
- // Don't wait for toooo long.
- nextSnoozeMs = SNOOZE_MAX_MS;
- }
- }
-
- // Sleep
- timespec ts;
- timespec rem;
- ts.tv_sec = nextSnoozeMs / 1000;
- ts.tv_nsec = (nextSnoozeMs % 1000) * 1000000L;
- while (nanosleep(&ts, &rem) == -1) {
- if (errno == EINTR) {
- ts = rem;
- }
- // other errors are basically impossible
- }
- }
-}
-
-int LogReader::connect_and_read() {
- int lineCount = 0;
- status_t err;
- logger_list* loggers;
- logger* eventLogger;
-
- // Prepare the logging context
- loggers = android_logger_list_alloc(ANDROID_LOG_RDONLY,
- /* don't stop after N lines */ 0,
- /* no pid restriction */ 0);
-
- // Open the buffer(s)
- eventLogger = android_logger_open(loggers, LOG_ID_STATS);
-
- // Read forever
- if (eventLogger) {
- log_msg msg;
- while (true) {
- // Read a message
- err = android_logger_list_read(loggers, &msg);
- // err = 0 - no content, unexpected connection drop or EOF.
- // err = +ive number - size of retrieved data from logger
- // err = -ive number, OS supplied error _except_ for -EAGAIN
- if (err <= 0) {
- StatsdStats::getInstance().noteLoggerError(err);
- fprintf(stderr, "logcat read failure: %s\n", strerror(err));
- break;
- }
-
- // Record that we read one (used above to know how to snooze).
- lineCount++;
-
- // Wrap it in a LogEvent object
- LogEvent event(msg);
-
- // Call the listener
- mListener->OnLogEvent(&event,
- lineCount == 1 /* indicate whether it's a new connection */);
- }
- }
-
- // Free the logger list and close the individual loggers
- android_logger_list_free(loggers);
-
- return lineCount;
-}
-
-} // namespace statsd
-} // namespace os
-} // namespace android
diff --git a/cmds/statsd/src/logd/LogReader.h b/cmds/statsd/src/logd/LogReader.h
deleted file mode 100644
index c51074c19d9a..000000000000
--- a/cmds/statsd/src/logd/LogReader.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LOGREADER_H
-#define LOGREADER_H
-
-#include "logd/LogListener.h"
-
-#include <utils/RefBase.h>
-
-#include <vector>
-
-namespace android {
-namespace os {
-namespace statsd {
-
-/**
- * Class to read logs from logd.
- */
-class LogReader : public virtual android::RefBase {
-public:
- /**
- * Construct the LogReader with the event listener. (Which is StatsService)
- */
- LogReader(const sp<LogListener>& listener);
-
- /**
- * Destructor.
- */
- virtual ~LogReader();
-
- /**
- * Run the main LogReader loop
- */
- void Run();
-
-private:
- /**
- * Who is going to get the events when they're read.
- */
- sp<LogListener> mListener;
-
- /**
- * Connect to a single instance of logd, and read until there's a read error.
- * Logd can crash, exit, be killed etc.
- *
- * Returns the number of lines that were read.
- */
- int connect_and_read();
-};
-
-} // namespace statsd
-} // namespace os
-} // namespace android
-
-#endif // LOGREADER_H
diff --git a/cmds/statsd/src/main.cpp b/cmds/statsd/src/main.cpp
index e8904c625325..9002f0773aaf 100644
--- a/cmds/statsd/src/main.cpp
+++ b/cmds/statsd/src/main.cpp
@@ -18,7 +18,6 @@
#include "Log.h"
#include "StatsService.h"
-#include "logd/LogReader.h"
#include "socket/StatsSocketListener.h"
#include <binder/IInterface.h>
@@ -29,6 +28,8 @@
#include <utils/Looper.h>
#include <utils/StrongPointer.h>
+#include <memory>
+
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
@@ -37,9 +38,6 @@
using namespace android;
using namespace android::os::statsd;
-const bool kUseLogd = false;
-const bool kUseStatsdSocket = true;
-
/**
* Thread function data.
*/
@@ -47,55 +45,6 @@ struct log_reader_thread_data {
sp<StatsService> service;
};
-/**
- * Thread func for where the log reader runs.
- */
-static void* log_reader_thread_func(void* cookie) {
- log_reader_thread_data* data = static_cast<log_reader_thread_data*>(cookie);
- sp<LogReader> reader = new LogReader(data->service);
-
- // Run the read loop. Never returns.
- reader->Run();
-
- ALOGW("statsd LogReader.Run() is not supposed to return.");
-
- delete data;
- return NULL;
-}
-
-/**
- * Creates and starts the thread to own the LogReader.
- */
-static status_t start_log_reader_thread(const sp<StatsService>& service) {
- status_t err;
- pthread_attr_t attr;
- pthread_t thread;
-
- // Thread data.
- log_reader_thread_data* data = new log_reader_thread_data();
- data->service = service;
-
- // Create the thread
- err = pthread_attr_init(&attr);
- if (err != NO_ERROR) {
- return err;
- }
- // TODO: Do we need to tweak thread priority?
- err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
- if (err != NO_ERROR) {
- pthread_attr_destroy(&attr);
- return err;
- }
- err = pthread_create(&thread, &attr, log_reader_thread_func, static_cast<void*>(data));
- if (err != NO_ERROR) {
- pthread_attr_destroy(&attr);
- return err;
- }
- pthread_attr_destroy(&attr);
-
- return NO_ERROR;
-}
-
int main(int /*argc*/, char** /*argv*/) {
// Set up the looper
sp<Looper> looper(Looper::prepare(0 /* opts */));
@@ -119,22 +68,11 @@ int main(int /*argc*/, char** /*argv*/) {
sp<StatsSocketListener> socketListener = new StatsSocketListener(service);
- if (kUseLogd) {
- ALOGI("using logd");
- // Start the log reader thread
- status_t err = start_log_reader_thread(service);
- if (err != NO_ERROR) {
- return 1;
- }
- }
-
- if (kUseStatsdSocket) {
ALOGI("using statsd socket");
// Backlog and /proc/sys/net/unix/max_dgram_qlen set to large value
if (socketListener->startListener(600)) {
exit(1);
}
- }
// Loop forever -- the reports run on this thread in a handler, and the
// binder calls remain responsive in their pool of one thread.
diff --git a/cmds/statsd/src/matchers/LogMatchingTracker.h b/cmds/statsd/src/matchers/LogMatchingTracker.h
index 4f30a047e256..88ab4e6f683a 100644
--- a/cmds/statsd/src/matchers/LogMatchingTracker.h
+++ b/cmds/statsd/src/matchers/LogMatchingTracker.h
@@ -86,8 +86,6 @@ protected:
// The collection of the event tag ids that this LogMatchingTracker cares. So we can quickly
// return kNotMatched when we receive an event with an id not in the list. This is especially
// useful when we have a complex CombinationLogMatcherTracker.
- // TODO: Consider use an array instead of stl set. In reality, the number of the tag ids a
- // LogMatchingTracker cares is only a few.
std::set<int> mAtomIds;
};
diff --git a/cmds/statsd/src/metrics/CountMetricProducer.cpp b/cmds/statsd/src/metrics/CountMetricProducer.cpp
index 43f53e057000..bd94800a327d 100644
--- a/cmds/statsd/src/metrics/CountMetricProducer.cpp
+++ b/cmds/statsd/src/metrics/CountMetricProducer.cpp
@@ -66,9 +66,8 @@ const int FIELD_ID_END_BUCKET_ELAPSED_MILLIS = 6;
CountMetricProducer::CountMetricProducer(const ConfigKey& key, const CountMetric& metric,
const int conditionIndex,
const sp<ConditionWizard>& wizard,
- const int64_t startTimeNs)
- : MetricProducer(metric.id(), key, startTimeNs, conditionIndex, wizard) {
- // TODO: evaluate initial conditions. and set mConditionMet.
+ const int64_t timeBaseNs, const int64_t startTimeNs)
+ : MetricProducer(metric.id(), key, timeBaseNs, conditionIndex, wizard) {
if (metric.has_bucket()) {
mBucketSizeNs =
TimeUnitToBucketSizeInMillisGuardrailed(key.GetUid(), metric.bucket()) * 1000000;
@@ -101,6 +100,10 @@ CountMetricProducer::CountMetricProducer(const ConfigKey& key, const CountMetric
mConditionSliced = (metric.links().size() > 0) || (mDimensionsInCondition.size() > 0);
+ flushIfNeededLocked(startTimeNs);
+ // Adjust start for partial bucket
+ mCurrentBucketStartTimeNs = startTimeNs;
+
VLOG("metric %lld created. bucket size %lld start_time: %lld", (long long)metric.id(),
(long long)mBucketSizeNs, (long long)mTimeBaseNs);
}
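The constructor change above follows a pattern repeated across the producers in this change: flush against the metric's start time, then pin mCurrentBucketStartTimeNs to it, so the first bucket is a partial one that still ends on a boundary aligned to timeBaseNs. A small self-contained sketch of that arithmetic, with illustrative names rather than the statsd implementation:

#include <cstdint>
#include <cstdio>

struct FirstBucket {
    int64_t startNs;
    int64_t endNs;
};

// A metric created mid-bucket gets a partial first bucket: it starts at the
// creation time but ends on the next boundary aligned to the time base, so
// every later bucket is full-sized.
FirstBucket firstBucket(int64_t timeBaseNs, int64_t bucketSizeNs, int64_t startTimeNs) {
    int64_t elapsedBuckets = (startTimeNs - timeBaseNs) / bucketSizeNs;
    int64_t alignedEnd = timeBaseNs + (elapsedBuckets + 1) * bucketSizeNs;
    return {startTimeNs, alignedEnd};  // endNs - startNs <= bucketSizeNs
}

int main() {
    FirstBucket b = firstBucket(/*timeBaseNs=*/0, /*bucketSizeNs=*/60000000000LL,
                                /*startTimeNs=*/90000000000LL);
    std::printf("partial first bucket: [%lld, %lld)\n",
                (long long)b.startNs, (long long)b.endNs);  // [90s, 120s)
    return 0;
}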
diff --git a/cmds/statsd/src/metrics/CountMetricProducer.h b/cmds/statsd/src/metrics/CountMetricProducer.h
index 139c0838fef0..39d4ae2f36a5 100644
--- a/cmds/statsd/src/metrics/CountMetricProducer.h
+++ b/cmds/statsd/src/metrics/CountMetricProducer.h
@@ -40,10 +40,9 @@ struct CountBucket {
class CountMetricProducer : public MetricProducer {
public:
- // TODO: Pass in the start time from MetricsManager, it should be consistent for all metrics.
CountMetricProducer(const ConfigKey& key, const CountMetric& countMetric,
const int conditionIndex, const sp<ConditionWizard>& wizard,
- const int64_t startTimeNs);
+ const int64_t timeBaseNs, const int64_t startTimeNs);
virtual ~CountMetricProducer();
@@ -80,7 +79,6 @@ private:
void flushCurrentBucketLocked(const int64_t& eventTimeNs) override;
- // TODO: Add a lock to mPastBuckets.
std::unordered_map<MetricDimensionKey, std::vector<CountBucket>> mPastBuckets;
// The current bucket (may be a partial bucket).
@@ -100,6 +98,7 @@ private:
FRIEND_TEST(CountMetricProducerTest, TestAnomalyDetectionUnSliced);
FRIEND_TEST(CountMetricProducerTest, TestEventWithAppUpgrade);
FRIEND_TEST(CountMetricProducerTest, TestEventWithAppUpgradeInNextBucket);
+ FRIEND_TEST(CountMetricProducerTest, TestFirstBucket);
};
} // namespace statsd
diff --git a/cmds/statsd/src/metrics/DurationMetricProducer.cpp b/cmds/statsd/src/metrics/DurationMetricProducer.cpp
index 62237bc04642..9d9e5be9e165 100644
--- a/cmds/statsd/src/metrics/DurationMetricProducer.cpp
+++ b/cmds/statsd/src/metrics/DurationMetricProducer.cpp
@@ -68,17 +68,14 @@ DurationMetricProducer::DurationMetricProducer(const ConfigKey& key, const Durat
const bool nesting,
const sp<ConditionWizard>& wizard,
const FieldMatcher& internalDimensions,
- const int64_t startTimeNs)
- : MetricProducer(metric.id(), key, startTimeNs, conditionIndex, wizard),
+ const int64_t timeBaseNs, const int64_t startTimeNs)
+ : MetricProducer(metric.id(), key, timeBaseNs, conditionIndex, wizard),
mAggregationType(metric.aggregation_type()),
mStartIndex(startIndex),
mStopIndex(stopIndex),
mStopAllIndex(stopAllIndex),
mNested(nesting),
mContainANYPositionInInternalDimensions(false) {
- // TODO: The following boiler plate code appears in all MetricProducers, but we can't abstract
- // them in the base class, because the proto generated CountMetric, and DurationMetric are
- // not related. Maybe we should add a template in the future??
if (metric.has_bucket()) {
mBucketSizeNs =
TimeUnitToBucketSizeInMillisGuardrailed(key.GetUid(), metric.bucket()) * 1000000;
@@ -131,6 +128,9 @@ DurationMetricProducer::DurationMetricProducer(const ConfigKey& key, const Durat
mMetric2ConditionLinks.begin()->conditionFields);
}
}
+ flushIfNeededLocked(startTimeNs);
+ // Adjust start for partial bucket
+ mCurrentBucketStartTimeNs = startTimeNs;
VLOG("metric %lld created. bucket size %lld start_time: %lld", (long long)metric.id(),
(long long)mBucketSizeNs, (long long)mTimeBaseNs);
}
@@ -434,8 +434,6 @@ void DurationMetricProducer::onConditionChangedLocked(const bool conditionMet,
VLOG("Metric %lld onConditionChanged", (long long)mMetricId);
mCondition = conditionMet;
flushIfNeededLocked(eventTime);
- // TODO: need to populate the condition change time from the event which triggers the condition
- // change, instead of using current time.
for (auto& whatIt : mCurrentSlicedDurationTrackerMap) {
for (auto& pair : whatIt.second) {
pair.second->onConditionChanged(conditionMet, eventTime);
diff --git a/cmds/statsd/src/metrics/DurationMetricProducer.h b/cmds/statsd/src/metrics/DurationMetricProducer.h
index 88e455a3f1a1..12addb8727f1 100644
--- a/cmds/statsd/src/metrics/DurationMetricProducer.h
+++ b/cmds/statsd/src/metrics/DurationMetricProducer.h
@@ -42,7 +42,7 @@ public:
const int conditionIndex, const size_t startIndex,
const size_t stopIndex, const size_t stopAllIndex, const bool nesting,
const sp<ConditionWizard>& wizard,
- const FieldMatcher& internalDimensions, const int64_t startTimeNs);
+ const FieldMatcher& internalDimensions, const int64_t timeBaseNs, const int64_t startTimeNs);
virtual ~DurationMetricProducer();
@@ -115,7 +115,6 @@ private:
ConditionState mUnSlicedPartCondition;
// Save the past buckets and we can clear when the StatsLogReport is dumped.
- // TODO: Add a lock to mPastBuckets.
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>> mPastBuckets;
// The duration trackers in the current bucket.
@@ -142,6 +141,7 @@ private:
FRIEND_TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgrade);
FRIEND_TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgradeInNextBucket);
FRIEND_TEST(WakelockDurationE2eTest, TestAggregatedPredicates);
+ FRIEND_TEST(DurationMetricTrackerTest, TestFirstBucket);
};
} // namespace statsd
diff --git a/cmds/statsd/src/metrics/EventMetricProducer.h b/cmds/statsd/src/metrics/EventMetricProducer.h
index 62d1105e0514..7f7aa3711255 100644
--- a/cmds/statsd/src/metrics/EventMetricProducer.h
+++ b/cmds/statsd/src/metrics/EventMetricProducer.h
@@ -33,7 +33,6 @@ namespace statsd {
class EventMetricProducer : public MetricProducer {
public:
- // TODO: Pass in the start time from MetricsManager, it should be consistent for all metrics.
EventMetricProducer(const ConfigKey& key, const EventMetric& eventMetric,
const int conditionIndex, const sp<ConditionWizard>& wizard,
const int64_t startTimeNs);
diff --git a/cmds/statsd/src/metrics/GaugeMetricProducer.cpp b/cmds/statsd/src/metrics/GaugeMetricProducer.cpp
index aabd3616e2fe..284c4511a16f 100644
--- a/cmds/statsd/src/metrics/GaugeMetricProducer.cpp
+++ b/cmds/statsd/src/metrics/GaugeMetricProducer.cpp
@@ -71,11 +71,15 @@ const int FIELD_ID_END_BUCKET_ELAPSED_MILLIS = 8;
GaugeMetricProducer::GaugeMetricProducer(const ConfigKey& key, const GaugeMetric& metric,
const int conditionIndex,
const sp<ConditionWizard>& wizard, const int pullTagId,
+ const int triggerAtomId, const int atomId,
const int64_t timeBaseNs, const int64_t startTimeNs,
- shared_ptr<StatsPullerManager> statsPullerManager)
+ const sp<StatsPullerManager>& pullerManager)
: MetricProducer(metric.id(), key, timeBaseNs, conditionIndex, wizard),
- mStatsPullerManager(statsPullerManager),
+ mPullerManager(pullerManager),
mPullTagId(pullTagId),
+ mTriggerAtomId(triggerAtomId),
+ mAtomId(atomId),
+ mIsPulled(pullTagId != -1),
mMinBucketSizeNs(metric.min_bucket_size_nanos()),
mDimensionSoftLimit(StatsdStats::kAtomDimensionKeySizeLimitMap.find(pullTagId) !=
StatsdStats::kAtomDimensionKeySizeLimitMap.end()
@@ -101,7 +105,6 @@ GaugeMetricProducer::GaugeMetricProducer(const ConfigKey& key, const GaugeMetric
translateFieldMatcher(metric.gauge_fields_filter().fields(), &mFieldMatchers);
}
- // TODO: use UidMap if uid->pkg_name is required
if (metric.has_dimensions_in_what()) {
translateFieldMatcher(metric.dimensions_in_what(), &mDimensionsInWhat);
mContainANYPositionInDimensionsInWhat = HasPositionANY(metric.dimensions_in_what());
@@ -126,9 +129,15 @@ GaugeMetricProducer::GaugeMetricProducer(const ConfigKey& key, const GaugeMetric
flushIfNeededLocked(startTimeNs);
// Kicks off the puller immediately.
- if (mPullTagId != -1 && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) {
- mStatsPullerManager->RegisterReceiver(
- mPullTagId, this, getCurrentBucketEndTimeNs(), mBucketSizeNs);
+ if (mIsPulled && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) {
+ mPullerManager->RegisterReceiver(mPullTagId, this, getCurrentBucketEndTimeNs(),
+ mBucketSizeNs);
+ }
+
+ // Adjust start for partial bucket
+ mCurrentBucketStartTimeNs = startTimeNs;
+ if (mIsPulled) {
+ pullLocked(startTimeNs);
}
VLOG("Gauge metric %lld created. bucket size %lld start_time: %lld sliced %d",
@@ -136,19 +145,10 @@ GaugeMetricProducer::GaugeMetricProducer(const ConfigKey& key, const GaugeMetric
mConditionSliced);
}
-// for testing
-GaugeMetricProducer::GaugeMetricProducer(const ConfigKey& key, const GaugeMetric& metric,
- const int conditionIndex,
- const sp<ConditionWizard>& wizard, const int pullTagId,
- const int64_t timeBaseNs, const int64_t startTimeNs)
- : GaugeMetricProducer(key, metric, conditionIndex, wizard, pullTagId, timeBaseNs, startTimeNs,
- make_shared<StatsPullerManager>()) {
-}
-
GaugeMetricProducer::~GaugeMetricProducer() {
VLOG("~GaugeMetricProducer() called");
- if (mPullTagId != -1 && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) {
- mStatsPullerManager->UnRegisterReceiver(mPullTagId, this);
+ if (mIsPulled && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) {
+ mPullerManager->UnRegisterReceiver(mPullTagId, this);
}
}
@@ -275,12 +275,12 @@ void GaugeMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
uint64_t atomsToken =
protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED |
FIELD_ID_ATOM);
- writeFieldValueTreeToStream(mTagId, *(atom.mFields), protoOutput);
+ writeFieldValueTreeToStream(mAtomId, *(atom.mFields), protoOutput);
protoOutput->end(atomsToken);
}
const bool truncateTimestamp =
android::util::AtomsInfo::kNotTruncatingTimestampAtomWhiteList.find(
- mTagId) ==
+ mAtomId) ==
android::util::AtomsInfo::kNotTruncatingTimestampAtomWhiteList.end();
for (const auto& atom : bucket.mGaugeAtoms) {
const int64_t elapsedTimestampNs = truncateTimestamp ?
@@ -308,7 +308,6 @@ void GaugeMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
protoOutput->end(protoToken);
mPastBuckets.clear();
- // TODO: Clear mDimensionKeyMap once the report is dumped.
}
void GaugeMetricProducer::pullLocked(const int64_t timestampNs) {
@@ -334,13 +333,11 @@ void GaugeMetricProducer::pullLocked(const int64_t timestampNs) {
if (!triggerPuller) {
return;
}
-
vector<std::shared_ptr<LogEvent>> allData;
- if (!mStatsPullerManager->Pull(mPullTagId, timestampNs, &allData)) {
- ALOGE("Gauge Stats puller failed for tag: %d", mPullTagId);
+ if (!mPullerManager->Pull(mPullTagId, timestampNs, &allData)) {
+ ALOGE("Gauge Stats puller failed for tag: %d at %lld", mPullTagId, (long long)timestampNs);
return;
}
-
for (const auto& data : allData) {
onMatchedLogEventLocked(0, *data);
}
@@ -351,8 +348,7 @@ void GaugeMetricProducer::onConditionChangedLocked(const bool conditionMet,
VLOG("GaugeMetric %lld onConditionChanged", (long long)mMetricId);
flushIfNeededLocked(eventTimeNs);
mCondition = conditionMet;
-
- if (mPullTagId != -1) {
+ if (mIsPulled) {
pullLocked(eventTimeNs);
} // else: Push mode. No need to proactively pull the gauge data.
}
@@ -365,7 +361,7 @@ void GaugeMetricProducer::onSlicedConditionMayChangeLocked(bool overallCondition
// If the condition is sliced, mCondition is true if any of the dimensions is true. And we will
// pull for every dimension.
mCondition = overallCondition;
- if (mPullTagId != -1) {
+ if (mIsPulled) {
pullLocked(eventTimeNs);
} // else: Push mode. No need to proactively pull the gauge data.
}
@@ -417,7 +413,6 @@ void GaugeMetricProducer::onMatchedLogEventInternalLocked(
return;
}
int64_t eventTimeNs = event.GetElapsedTimestampNs();
- mTagId = event.GetTagId();
if (eventTimeNs < mCurrentBucketStartTimeNs) {
VLOG("Gauge Skip event due to late arrival: %lld vs %lld", (long long)eventTimeNs,
(long long)mCurrentBucketStartTimeNs);
@@ -425,6 +420,11 @@ void GaugeMetricProducer::onMatchedLogEventInternalLocked(
}
flushIfNeededLocked(eventTimeNs);
+ if (mTriggerAtomId == event.GetTagId()) {
+ pullLocked(eventTimeNs);
+ return;
+ }
+
// When gauge metric wants to randomly sample the output atom, we just simply use the first
// gauge in the given bucket.
if (mCurrentSlicedBucket->find(eventKey) != mCurrentSlicedBucket->end() &&
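The new mTriggerAtomId path above short-circuits the matched-event handler: an event whose tag equals the trigger atom only kicks off a pull and is not itself recorded as gauge data. A minimal sketch of that dispatch, with illustrative names (std::function stands in for the puller manager):

#include <cstdio>
#include <functional>
#include <utility>

struct Event { int tagId; };

class GaugeSketch {
public:
    GaugeSketch(int triggerAtomId, std::function<void()> pull)
        : mTriggerAtomId(triggerAtomId), mPull(std::move(pull)) {}

    void onEvent(const Event& e) {
        if (e.tagId == mTriggerAtomId) {
            mPull();  // trigger atom: start a pull, do not record the trigger itself
            return;
        }
        std::printf("record gauge atom %d\n", e.tagId);
    }

private:
    const int mTriggerAtomId;
    std::function<void()> mPull;
};

int main() {
    GaugeSketch g(/*triggerAtomId=*/42, [] { std::printf("pulling gauge atom\n"); });
    g.onEvent({42});  // pulls
    g.onEvent({7});   // records
    return 0;
}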
diff --git a/cmds/statsd/src/metrics/GaugeMetricProducer.h b/cmds/statsd/src/metrics/GaugeMetricProducer.h
index c74f7927dfac..15be1d7c470b 100644
--- a/cmds/statsd/src/metrics/GaugeMetricProducer.h
+++ b/cmds/statsd/src/metrics/GaugeMetricProducer.h
@@ -33,7 +33,7 @@ namespace os {
namespace statsd {
struct GaugeAtom {
- GaugeAtom(std::shared_ptr<vector<FieldValue>> fields, int64_t elapsedTimeNs, int wallClockNs)
+ GaugeAtom(std::shared_ptr<vector<FieldValue>> fields, int64_t elapsedTimeNs, int64_t wallClockNs)
: mFields(fields), mElapsedTimestamps(elapsedTimeNs), mWallClockTimestampNs(wallClockNs) {
}
std::shared_ptr<vector<FieldValue>> mFields;
@@ -58,7 +58,9 @@ class GaugeMetricProducer : public virtual MetricProducer, public virtual PullDa
public:
GaugeMetricProducer(const ConfigKey& key, const GaugeMetric& gaugeMetric,
const int conditionIndex, const sp<ConditionWizard>& wizard,
- const int pullTagId, const int64_t timeBaseNs, const int64_t startTimeNs);
+ const int pullTagId, const int triggerAtomId, const int atomId,
+ const int64_t timeBaseNs, const int64_t startTimeNs,
+ const sp<StatsPullerManager>& pullerManager);
virtual ~GaugeMetricProducer();
@@ -76,7 +78,7 @@ public:
}
flushCurrentBucketLocked(eventTimeNs);
mCurrentBucketStartTimeNs = eventTimeNs;
- if (mPullTagId != -1) {
+ if (mIsPulled) {
pullLocked(eventTimeNs);
}
};
@@ -94,13 +96,6 @@ private:
android::util::ProtoOutputStream* protoOutput) override;
void clearPastBucketsLocked(const int64_t dumpTimeNs) override;
- // for testing
- GaugeMetricProducer(const ConfigKey& key, const GaugeMetric& gaugeMetric,
- const int conditionIndex, const sp<ConditionWizard>& wizard,
- const int pullTagId,
- const int64_t timeBaseNs, const int64_t startTimeNs,
- std::shared_ptr<StatsPullerManager> statsPullerManager);
-
// Internal interface to handle condition change.
void onConditionChangedLocked(const bool conditionMet, const int64_t eventTime) override;
@@ -121,14 +116,20 @@ private:
void pullLocked(const int64_t timestampNs);
- int mTagId;
-
- std::shared_ptr<StatsPullerManager> mStatsPullerManager;
+ sp<StatsPullerManager> mPullerManager;
// tagId for pulled data. -1 if this is not pulled
const int mPullTagId;
+ // tagId for atoms that trigger the pulling, if any
+ const int mTriggerAtomId;
+
+ // tagId for output atom
+ const int mAtomId;
+
+ // if this is a pulled metric
+ const bool mIsPulled;
+
// Save the past buckets and we can clear when the StatsLogReport is dumped.
- // TODO: Add a lock to mPastBuckets.
std::unordered_map<MetricDimensionKey, std::vector<GaugeBucket>> mPastBuckets;
// The current partial bucket.
@@ -166,12 +167,14 @@ private:
const size_t mGaugeAtomsPerDimensionLimit;
- FRIEND_TEST(GaugeMetricProducerTest, TestWithCondition);
- FRIEND_TEST(GaugeMetricProducerTest, TestWithSlicedCondition);
- FRIEND_TEST(GaugeMetricProducerTest, TestNoCondition);
+ FRIEND_TEST(GaugeMetricProducerTest, TestPulledEventsWithCondition);
+ FRIEND_TEST(GaugeMetricProducerTest, TestPulledEventsWithSlicedCondition);
+ FRIEND_TEST(GaugeMetricProducerTest, TestPulledEventsNoCondition);
FRIEND_TEST(GaugeMetricProducerTest, TestPushedEventsWithUpgrade);
FRIEND_TEST(GaugeMetricProducerTest, TestPulledWithUpgrade);
- FRIEND_TEST(GaugeMetricProducerTest, TestAnomalyDetection);
+ FRIEND_TEST(GaugeMetricProducerTest, TestPulledEventsAnomalyDetection);
+ FRIEND_TEST(GaugeMetricProducerTest, TestFirstBucket);
+ FRIEND_TEST(GaugeMetricProducerTest, TestPullOnTrigger);
};
} // namespace statsd
diff --git a/cmds/statsd/src/metrics/MetricsManager.cpp b/cmds/statsd/src/metrics/MetricsManager.cpp
index 4fac0e1a141b..0e5ef4d3e59a 100644
--- a/cmds/statsd/src/metrics/MetricsManager.cpp
+++ b/cmds/statsd/src/metrics/MetricsManager.cpp
@@ -56,10 +56,12 @@ const int FIELD_ID_ANNOTATIONS_INT32 = 2;
MetricsManager::MetricsManager(const ConfigKey& key, const StatsdConfig& config,
const int64_t timeBaseNs, const int64_t currentTimeNs,
- const sp<UidMap> &uidMap,
+ const sp<UidMap>& uidMap,
+ const sp<StatsPullerManager>& pullerManager,
const sp<AlarmMonitor>& anomalyAlarmMonitor,
const sp<AlarmMonitor>& periodicAlarmMonitor)
- : mConfigKey(key), mUidMap(uidMap),
+ : mConfigKey(key),
+ mUidMap(uidMap),
mTtlNs(config.has_ttl_in_seconds() ? config.ttl_in_seconds() * NS_PER_SEC : -1),
mTtlEndNs(-1),
mLastReportTimeNs(currentTimeNs),
@@ -67,12 +69,11 @@ MetricsManager::MetricsManager(const ConfigKey& key, const StatsdConfig& config,
// Init the ttl end timestamp.
refreshTtl(timeBaseNs);
- mConfigValid =
- initStatsdConfig(key, config, *uidMap, anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseNs, currentTimeNs, mTagIds, mAllAtomMatchers,
- mAllConditionTrackers, mAllMetricProducers, mAllAnomalyTrackers,
- mAllPeriodicAlarmTrackers, mConditionToMetricMap, mTrackerToMetricMap,
- mTrackerToConditionMap, mNoReportMetricIds);
+ mConfigValid = initStatsdConfig(
+ key, config, *uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
+ timeBaseNs, currentTimeNs, mTagIds, mAllAtomMatchers, mAllConditionTrackers,
+ mAllMetricProducers, mAllAnomalyTrackers, mAllPeriodicAlarmTrackers,
+ mConditionToMetricMap, mTrackerToMetricMap, mTrackerToConditionMap, mNoReportMetricIds);
mHashStringsInReport = config.hash_strings_in_metric_report();
@@ -237,7 +238,6 @@ void MetricsManager::onLogEvent(const LogEvent& event) {
if (event.GetTagId() == android::util::APP_BREADCRUMB_REPORTED) {
// Check that app breadcrumb reported fields are valid.
- // TODO: Find a way to make these checks easier to maintain.
status_t err = NO_ERROR;
// Uid is 3rd from last field and must match the caller's uid,
diff --git a/cmds/statsd/src/metrics/MetricsManager.h b/cmds/statsd/src/metrics/MetricsManager.h
index 6f4db48def86..dfbb69f1ab7c 100644
--- a/cmds/statsd/src/metrics/MetricsManager.h
+++ b/cmds/statsd/src/metrics/MetricsManager.h
@@ -16,6 +16,7 @@
#pragma once
+#include "external/StatsPullerManager.h"
#include "anomaly/AlarmMonitor.h"
#include "anomaly/AlarmTracker.h"
#include "anomaly/AnomalyTracker.h"
@@ -36,9 +37,10 @@ namespace statsd {
// A MetricsManager is responsible for managing metrics from one single config source.
class MetricsManager : public PackageInfoListener {
public:
- MetricsManager(const ConfigKey& configKey, const StatsdConfig& config,
- const int64_t timeBaseNs, const int64_t currentTimeNs,
- const sp<UidMap>& uidMap, const sp<AlarmMonitor>& anomalyAlarmMonitor,
+ MetricsManager(const ConfigKey& configKey, const StatsdConfig& config, const int64_t timeBaseNs,
+ const int64_t currentTimeNs, const sp<UidMap>& uidMap,
+ const sp<StatsPullerManager>& pullerManager,
+ const sp<AlarmMonitor>& anomalyAlarmMonitor,
const sp<AlarmMonitor>& periodicAlarmMonitor);
virtual ~MetricsManager();
diff --git a/cmds/statsd/src/metrics/ValueMetricProducer.cpp b/cmds/statsd/src/metrics/ValueMetricProducer.cpp
index 41e55cb27f5e..192a54b7e0a3 100644
--- a/cmds/statsd/src/metrics/ValueMetricProducer.cpp
+++ b/cmds/statsd/src/metrics/ValueMetricProducer.cpp
@@ -27,7 +27,7 @@
using android::util::FIELD_COUNT_REPEATED;
using android::util::FIELD_TYPE_BOOL;
-using android::util::FIELD_TYPE_FLOAT;
+using android::util::FIELD_TYPE_DOUBLE;
using android::util::FIELD_TYPE_INT32;
using android::util::FIELD_TYPE_INT64;
using android::util::FIELD_TYPE_MESSAGE;
@@ -64,7 +64,8 @@ const int FIELD_ID_BUCKET_INFO = 3;
const int FIELD_ID_DIMENSION_LEAF_IN_WHAT = 4;
const int FIELD_ID_DIMENSION_LEAF_IN_CONDITION = 5;
// for ValueBucketInfo
-const int FIELD_ID_VALUE = 3;
+const int FIELD_ID_VALUE_LONG = 7;
+const int FIELD_ID_VALUE_DOUBLE = 8;
const int FIELD_ID_BUCKET_NUM = 4;
const int FIELD_ID_START_BUCKET_ELAPSED_MILLIS = 5;
const int FIELD_ID_END_BUCKET_ELAPSED_MILLIS = 6;
@@ -73,12 +74,13 @@ const int FIELD_ID_END_BUCKET_ELAPSED_MILLIS = 6;
ValueMetricProducer::ValueMetricProducer(const ConfigKey& key, const ValueMetric& metric,
const int conditionIndex,
const sp<ConditionWizard>& wizard, const int pullTagId,
- const int64_t timeBaseNs, const int64_t startTimestampNs,
- shared_ptr<StatsPullerManager> statsPullerManager)
+ const int64_t timeBaseNs, const int64_t startTimeNs,
+ const sp<StatsPullerManager>& pullerManager)
: MetricProducer(metric.id(), key, timeBaseNs, conditionIndex, wizard),
+ mPullerManager(pullerManager),
mValueField(metric.value_field()),
- mStatsPullerManager(statsPullerManager),
mPullTagId(pullTagId),
+ mIsPulled(pullTagId != -1),
mMinBucketSizeNs(metric.min_bucket_size_nanos()),
mDimensionSoftLimit(StatsdStats::kAtomDimensionKeySizeLimitMap.find(pullTagId) !=
StatsdStats::kAtomDimensionKeySizeLimitMap.end()
@@ -88,8 +90,9 @@ ValueMetricProducer::ValueMetricProducer(const ConfigKey& key, const ValueMetric
StatsdStats::kAtomDimensionKeySizeLimitMap.end()
? StatsdStats::kAtomDimensionKeySizeLimitMap.at(pullTagId).second
: StatsdStats::kDimensionKeySizeHardLimit),
- mUseAbsoluteValueOnReset(metric.use_absolute_value_on_reset()) {
- // TODO: valuemetric for pushed events may need unlimited bucket length
+ mUseAbsoluteValueOnReset(metric.use_absolute_value_on_reset()),
+ mAggregationType(metric.aggregation_type()),
+ mValueType(metric.aggregation_type() == ValueMetric::AVG ? DOUBLE : LONG) {
int64_t bucketSizeMills = 0;
if (metric.has_bucket()) {
bucketSizeMills = TimeUnitToBucketSizeInMillisGuardrailed(key.GetUid(), metric.bucket());
@@ -124,30 +127,28 @@ ValueMetricProducer::ValueMetricProducer(const ConfigKey& key, const ValueMetric
mSliceByPositionALL = HasPositionALL(metric.dimensions_in_what()) ||
HasPositionALL(metric.dimensions_in_condition());
+ flushIfNeededLocked(startTimeNs);
// Kicks off the puller immediately.
- flushIfNeededLocked(startTimestampNs);
- if (mPullTagId != -1) {
- mStatsPullerManager->RegisterReceiver(
- mPullTagId, this, mCurrentBucketStartTimeNs + mBucketSizeNs, mBucketSizeNs);
+ if (mIsPulled) {
+ mPullerManager->RegisterReceiver(mPullTagId, this, getCurrentBucketEndTimeNs(),
+ mBucketSizeNs);
}
+ // TODO: Only do this for partial buckets like the first bucket. All other buckets should use
+ // flushIfNeeded to adjust start and end to bucket boundaries.
+ // Adjust start for partial bucket
+ mCurrentBucketStartTimeNs = startTimeNs;
+ if (mIsPulled) {
+ pullLocked(startTimeNs);
+ }
VLOG("value metric %lld created. bucket size %lld start_time: %lld",
(long long)metric.id(), (long long)mBucketSizeNs, (long long)mTimeBaseNs);
}
-// for testing
-ValueMetricProducer::ValueMetricProducer(const ConfigKey& key, const ValueMetric& metric,
- const int conditionIndex,
- const sp<ConditionWizard>& wizard, const int pullTagId,
- const int64_t timeBaseNs, const int64_t startTimeNs)
- : ValueMetricProducer(key, metric, conditionIndex, wizard, pullTagId, timeBaseNs, startTimeNs,
- make_shared<StatsPullerManager>()) {
-}
-
ValueMetricProducer::~ValueMetricProducer() {
VLOG("~ValueMetricProducer() called");
- if (mPullTagId != -1) {
- mStatsPullerManager->UnRegisterReceiver(mPullTagId, this);
+ if (mIsPulled) {
+ mPullerManager->UnRegisterReceiver(mPullTagId, this);
}
}
@@ -255,11 +256,15 @@ void ValueMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_BUCKET_NUM,
(long long)(getBucketNumFromEndTimeNs(bucket.mBucketEndNs)));
}
-
- protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_VALUE, (long long)bucket.mValue);
+ if (mValueType == LONG) {
+ protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_VALUE_LONG,
+ (long long)bucket.mValueLong);
+ } else {
+ protoOutput->write(FIELD_TYPE_DOUBLE | FIELD_ID_VALUE_DOUBLE, bucket.mValueDouble);
+ }
protoOutput->end(bucketInfoToken);
- VLOG("\t bucket [%lld - %lld] count: %lld", (long long)bucket.mBucketStartNs,
- (long long)bucket.mBucketEndNs, (long long)bucket.mValue);
+ VLOG("\t bucket [%lld - %lld] count: %lld, %.2f", (long long)bucket.mBucketStartNs,
+ (long long)bucket.mBucketEndNs, (long long)bucket.mValueLong, bucket.mValueDouble);
}
protoOutput->end(wrapperToken);
}
@@ -281,17 +286,20 @@ void ValueMetricProducer::onConditionChangedLocked(const bool condition,
flushIfNeededLocked(eventTimeNs);
- if (mPullTagId != -1) {
- vector<shared_ptr<LogEvent>> allData;
- if (mStatsPullerManager->Pull(mPullTagId, eventTimeNs, &allData)) {
- if (allData.size() == 0) {
- return;
- }
- for (const auto& data : allData) {
- onMatchedLogEventLocked(0, *data);
- }
+ if (mIsPulled) {
+ pullLocked(eventTimeNs);
+ }
+}
+
+void ValueMetricProducer::pullLocked(const int64_t timestampNs) {
+ vector<std::shared_ptr<LogEvent>> allData;
+ if (mPullerManager->Pull(mPullTagId, timestampNs, &allData)) {
+ if (allData.size() == 0) {
+ return;
+ }
+ for (const auto& data : allData) {
+ onMatchedLogEventLocked(0, *data);
}
- return;
}
}
@@ -308,12 +316,14 @@ void ValueMetricProducer::onDataPulled(const std::vector<std::shared_ptr<LogEven
int64_t eventTime = mTimeBaseNs +
((realEventTime - mTimeBaseNs) / mBucketSizeNs) * mBucketSizeNs;
+ // close the end of the bucket
mCondition = false;
for (const auto& data : allData) {
data->setElapsedTimestampNs(eventTime - 1);
onMatchedLogEventLocked(0, *data);
}
+ // start a new bucket
mCondition = true;
for (const auto& data : allData) {
data->setElapsedTimestampNs(eventTime);
@@ -331,10 +341,10 @@ void ValueMetricProducer::dumpStatesLocked(FILE* out, bool verbose) const {
(unsigned long)mCurrentSlicedBucket.size());
if (verbose) {
for (const auto& it : mCurrentSlicedBucket) {
- fprintf(out, "\t(what)%s\t(condition)%s (value)%lld\n",
- it.first.getDimensionKeyInWhat().toString().c_str(),
- it.first.getDimensionKeyInCondition().toString().c_str(),
- (unsigned long long)it.second.sum);
+ fprintf(out, "\t(what)%s\t(condition)%s (value)%s\n",
+ it.first.getDimensionKeyInWhat().toString().c_str(),
+ it.first.getDimensionKeyInCondition().toString().c_str(),
+ it.second.value.toString().c_str());
}
}
}
@@ -359,6 +369,27 @@ bool ValueMetricProducer::hitGuardRailLocked(const MetricDimensionKey& newKey) {
return false;
}
+const Value getDoubleOrLong(const Value& value) {
+ Value v;
+ switch (value.type) {
+ case INT:
+ v.setLong(value.int_value);
+ break;
+ case LONG:
+ v.setLong(value.long_value);
+ break;
+ case FLOAT:
+ v.setDouble(value.float_value);
+ break;
+ case DOUBLE:
+ v.setDouble(value.double_value);
+ break;
+ default:
+ break;
+ }
+ return v;
+}
+
void ValueMetricProducer::onMatchedLogEventInternalLocked(
const size_t matcherIndex, const MetricDimensionKey& eventKey,
const ConditionKey& conditionKey, bool condition,
@@ -377,19 +408,25 @@ void ValueMetricProducer::onMatchedLogEventInternalLocked(
}
Interval& interval = mCurrentSlicedBucket[eventKey];
- int error = 0;
- const int64_t value = event.GetLong(mField, &error);
- if (error < 0) {
+ if (mField > event.size()) {
+ VLOG("Failed to extract value field %d from atom %s. %d", mField, event.ToString().c_str(),
+ (int)event.size());
return;
}
+ Value value = getDoubleOrLong(event.getValues()[mField - 1].mValue);
- if (mPullTagId != -1) { // for pulled events
+ Value diff;
+ bool hasDiff = false;
+ if (mIsPulled) {
+ // Always require condition for pulled events. In the case of no condition, only pull
+ // on bucket boundaries, in which case we fake condition changes.
if (mCondition == true) {
if (!interval.startUpdated) {
interval.start = value;
interval.startUpdated = true;
} else {
- // skip it if there is already value recorded for the start
+ // Skip it if there is already a value recorded for the start. Happens when the puller
+ // takes too long to finish. In this case we take the previous value.
VLOG("Already recorded value for this dimension %s", eventKey.toString().c_str());
}
} else {
@@ -397,31 +434,55 @@ void ValueMetricProducer::onMatchedLogEventInternalLocked(
// If not, take absolute value or drop it, based on config.
if (interval.startUpdated) {
if (value >= interval.start) {
- interval.sum += (value - interval.start);
- interval.hasValue = true;
+ diff = (value - interval.start);
+ hasDiff = true;
} else {
if (mUseAbsoluteValueOnReset) {
- interval.sum += value;
- interval.hasValue = true;
+ diff = value;
+ hasDiff = true;
} else {
- VLOG("Dropping data for atom %d, prev: %lld, now: %lld", mPullTagId,
- (long long)interval.start, (long long)value);
+ VLOG("Dropping data for atom %d, prev: %s, now: %s", mPullTagId,
+ interval.start.toString().c_str(), value.toString().c_str());
}
}
interval.startUpdated = false;
} else {
- VLOG("No start for matching end %lld", (long long)value);
- interval.tainted += 1;
+ VLOG("No start for matching end %s", value.toString().c_str());
}
}
- } else { // for pushed events, only accumulate when condition is true
- if (mCondition == true || mConditionTrackerIndex < 0) {
- interval.sum += value;
+ } else {
+ // for pushed events, only aggregate when sliced condition is true
+ if (condition == true || mConditionTrackerIndex < 0) {
+ diff = value;
+ hasDiff = true;
+ }
+ }
+ if (hasDiff) {
+ if (interval.hasValue) {
+ switch (mAggregationType) {
+ case ValueMetric::SUM:
+ // for AVG, we add up and take average when flushing the bucket
+ case ValueMetric::AVG:
+ interval.value += diff;
+ break;
+ case ValueMetric::MIN:
+ interval.value = diff < interval.value ? diff : interval.value;
+ break;
+ case ValueMetric::MAX:
+ interval.value = diff > interval.value ? diff : interval.value;
+ break;
+ default:
+ break;
+ }
+ } else {
+ interval.value = diff;
interval.hasValue = true;
}
+ interval.sampleSize += 1;
}
- long wholeBucketVal = interval.sum;
+ // TODO: propagate proper values downstream when anomaly detection supports doubles
+ long wholeBucketVal = interval.value.long_value;
auto prev = mCurrentFullBucket.find(eventKey);
if (prev != mCurrentFullBucket.end()) {
wholeBucketVal += prev->second;
@@ -468,18 +529,15 @@ void ValueMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs) {
if (info.mBucketEndNs - mCurrentBucketStartTimeNs >= mMinBucketSizeNs) {
// The current bucket is large enough to keep.
- int tainted = 0;
for (const auto& slice : mCurrentSlicedBucket) {
- tainted += slice.second.tainted;
- tainted += slice.second.startUpdated;
if (slice.second.hasValue) {
- info.mValue = slice.second.sum;
+ info.mValueLong = slice.second.value.long_value;
+ info.mValueDouble = (double)slice.second.value.long_value / slice.second.sampleSize;
// it will auto create new vector of ValuebucketInfo if the key is not found.
auto& bucketList = mPastBuckets[slice.first];
bucketList.push_back(info);
}
}
- VLOG("%d tainted pairs in the bucket", tainted);
} else {
mSkippedBuckets.emplace_back(info.mBucketStartNs, info.mBucketEndNs);
}
@@ -488,7 +546,8 @@ void ValueMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs) {
// Accumulate partial buckets with current value and then send to anomaly tracker.
if (mCurrentFullBucket.size() > 0) {
for (const auto& slice : mCurrentSlicedBucket) {
- mCurrentFullBucket[slice.first] += slice.second.sum;
+ // TODO: fix this when anomaly can accept double values
+ mCurrentFullBucket[slice.first] += slice.second.value.long_value;
}
for (const auto& slice : mCurrentFullBucket) {
for (auto& tracker : mAnomalyTrackers) {
@@ -503,7 +562,9 @@ void ValueMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs) {
for (const auto& slice : mCurrentSlicedBucket) {
for (auto& tracker : mAnomalyTrackers) {
if (tracker != nullptr) {
- tracker->addPastBucket(slice.first, slice.second.sum, mCurrentBucketNum);
+ // TODO: fix this when anomaly can accept double values
+ tracker->addPastBucket(slice.first, slice.second.value.long_value,
+ mCurrentBucketNum);
}
}
}
@@ -511,7 +572,8 @@ void ValueMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs) {
} else {
// Accumulate partial bucket.
for (const auto& slice : mCurrentSlicedBucket) {
- mCurrentFullBucket[slice.first] += slice.second.sum;
+ // TODO: fix this when anomaly can accept double values
+ mCurrentFullBucket[slice.first] += slice.second.value.long_value;
}
}
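The aggregation logic introduced above boils down to: SUM accumulates a long, MIN/MAX keep a single long, and AVG accumulates the same running sum plus a sample count and divides only when the bucket is flushed, which is why AVG reports value_double while the others report value_long. A self-contained sketch under those assumptions, with illustrative names:

#include <algorithm>
#include <cstdint>
#include <cstdio>

enum class Agg { SUM, MIN, MAX, AVG };

struct Interval {
    int64_t value = 0;   // running aggregate for the current bucket
    int sampleSize = 0;  // number of samples folded in
    bool hasValue = false;
};

void accumulate(Interval& in, int64_t diff, Agg agg) {
    if (!in.hasValue) {
        in.value = diff;
        in.hasValue = true;
    } else {
        switch (agg) {
            case Agg::SUM:
            case Agg::AVG:  // AVG sums here and divides at flush time
                in.value += diff;
                break;
            case Agg::MIN:
                in.value = std::min(in.value, diff);
                break;
            case Agg::MAX:
                in.value = std::max(in.value, diff);
                break;
        }
    }
    in.sampleSize++;
}

int main() {
    Interval in;
    Agg agg = Agg::AVG;
    const int64_t samples[] = {10, 20, 30};
    for (int64_t v : samples) accumulate(in, v, agg);
    if (agg == Agg::AVG) {
        std::printf("value_double = %.2f\n", (double)in.value / in.sampleSize);  // 20.00
    } else {
        std::printf("value_long = %lld\n", (long long)in.value);
    }
    return 0;
}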
diff --git a/cmds/statsd/src/metrics/ValueMetricProducer.h b/cmds/statsd/src/metrics/ValueMetricProducer.h
index cb6b051cd484..b2f0b6ff4d78 100644
--- a/cmds/statsd/src/metrics/ValueMetricProducer.h
+++ b/cmds/statsd/src/metrics/ValueMetricProducer.h
@@ -23,6 +23,7 @@
#include "../condition/ConditionTracker.h"
#include "../external/PullDataReceiver.h"
#include "../external/StatsPullerManager.h"
+#include "../stats_log_util.h"
#include "MetricProducer.h"
#include "frameworks/base/cmds/statsd/src/statsd_config.pb.h"
@@ -33,17 +34,20 @@ namespace statsd {
struct ValueBucket {
int64_t mBucketStartNs;
int64_t mBucketEndNs;
- int64_t mValue;
+ int64_t mValueLong;
+ double mValueDouble;
};
class ValueMetricProducer : public virtual MetricProducer, public virtual PullDataReceiver {
public:
ValueMetricProducer(const ConfigKey& key, const ValueMetric& valueMetric,
const int conditionIndex, const sp<ConditionWizard>& wizard,
- const int pullTagId, const int64_t timeBaseNs, const int64_t startTimeNs);
+ const int pullTagId, const int64_t timeBaseNs, const int64_t startTimeNs,
+ const sp<StatsPullerManager>& pullerManager);
virtual ~ValueMetricProducer();
+ // Process data pulled on bucket boundary.
void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data) override;
// ValueMetric needs special logic if it's a pulled atom.
@@ -51,9 +55,9 @@ public:
const int64_t version) override {
std::lock_guard<std::mutex> lock(mMutex);
- if (mPullTagId != -1 && (mCondition == true || mConditionTrackerIndex < 0) ) {
+ if (mIsPulled && (mCondition == true || mConditionTrackerIndex < 0)) {
vector<shared_ptr<LogEvent>> allData;
- mStatsPullerManager->Pull(mPullTagId, eventTimeNs, &allData);
+ mPullerManager->Pull(mPullTagId, eventTimeNs, &allData);
if (allData.size() == 0) {
// This shouldn't happen since this valuemetric is not useful now.
}
@@ -112,33 +116,29 @@ private:
void dropDataLocked(const int64_t dropTimeNs) override;
- const FieldMatcher mValueField;
-
- std::shared_ptr<StatsPullerManager> mStatsPullerManager;
+ sp<StatsPullerManager> mPullerManager;
- // for testing
- ValueMetricProducer(const ConfigKey& key, const ValueMetric& valueMetric,
- const int conditionIndex, const sp<ConditionWizard>& wizard,
- const int pullTagId, const int64_t timeBaseNs, const int64_t startTimeNs,
- std::shared_ptr<StatsPullerManager> statsPullerManager);
+ const FieldMatcher mValueField;
// tagId for pulled data. -1 if this is not pulled
const int mPullTagId;
+ // if this is a pulled metric
+ const bool mIsPulled;
+
int mField;
// internal state of a bucket.
typedef struct {
// Pulled data always come in pair of <start, end>. This holds the value
- // for start. The diff (end - start) is added to sum.
- int64_t start;
+ // for start. The diff (end - start) is taken as the real value.
+ Value start;
// Whether the start data point is updated
bool startUpdated;
- // If end data point comes before the start, record this pair as tainted
- // and the value is not added to the running sum.
- int tainted;
- // Running sum of known pairs in this bucket
- int64_t sum;
+ // Current value, depending on the aggregation type.
+ Value value;
+ // Number of samples collected.
+ int sampleSize;
// If this dimension has any non-tainted value. If not, don't report the
// dimension.
bool hasValue;
@@ -149,7 +149,6 @@ private:
std::unordered_map<MetricDimensionKey, int64_t> mCurrentFullBucket;
// Save the past buckets and we can clear when the StatsLogReport is dumped.
- // TODO: Add a lock to mPastBuckets.
std::unordered_map<MetricDimensionKey, std::vector<ValueBucket>> mPastBuckets;
// Pairs of (elapsed start, elapsed end) denoting buckets that were skipped.
@@ -160,6 +159,8 @@ private:
// Util function to check whether the specified dimension hits the guardrail.
bool hitGuardRailLocked(const MetricDimensionKey& newKey);
+ void pullLocked(const int64_t timestampNs);
+
static const size_t kBucketSize = sizeof(ValueBucket{});
const size_t mDimensionSoftLimit;
@@ -168,7 +169,11 @@ private:
const bool mUseAbsoluteValueOnReset;
- FRIEND_TEST(ValueMetricProducerTest, TestNonDimensionalEvents);
+ const ValueMetric::AggregationType mAggregationType;
+
+ const Type mValueType;
+
+ FRIEND_TEST(ValueMetricProducerTest, TestPulledEventsNoCondition);
FRIEND_TEST(ValueMetricProducerTest, TestPulledEventsTakeAbsoluteValueOnReset);
FRIEND_TEST(ValueMetricProducerTest, TestPulledEventsTakeZeroOnReset);
FRIEND_TEST(ValueMetricProducerTest, TestEventsWithNonSlicedCondition);
@@ -182,6 +187,12 @@ private:
FRIEND_TEST(ValueMetricProducerTest, TestBucketBoundaryWithCondition);
FRIEND_TEST(ValueMetricProducerTest, TestBucketBoundaryWithCondition2);
FRIEND_TEST(ValueMetricProducerTest, TestBucketBoundaryWithCondition3);
+ FRIEND_TEST(ValueMetricProducerTest, TestPushedAggregateMin);
+ FRIEND_TEST(ValueMetricProducerTest, TestPushedAggregateMax);
+ FRIEND_TEST(ValueMetricProducerTest, TestPushedAggregateAvg);
+ FRIEND_TEST(ValueMetricProducerTest, TestPushedAggregateSum);
+ FRIEND_TEST(ValueMetricProducerTest, TestPushedAggregateSumSliced);
+ FRIEND_TEST(ValueMetricProducerTest, TestFirstBucket);
};
} // namespace statsd
diff --git a/cmds/statsd/src/metrics/duration_helper/DurationTracker.h b/cmds/statsd/src/metrics/duration_helper/DurationTracker.h
index 149b3189dfba..ccb1d4359e89 100644
--- a/cmds/statsd/src/metrics/duration_helper/DurationTracker.h
+++ b/cmds/statsd/src/metrics/duration_helper/DurationTracker.h
@@ -44,7 +44,6 @@ struct DurationInfo {
int64_t lastStartTime;
// existing duration in current bucket.
int64_t lastDuration;
- // TODO: Optimize the way we track sliced condition in duration metrics.
// cache the HashableDimensionKeys we need to query the condition for this duration event.
ConditionKey conditionKeys;
diff --git a/cmds/statsd/src/metrics/duration_helper/OringDurationTracker.cpp b/cmds/statsd/src/metrics/duration_helper/OringDurationTracker.cpp
index b833dfc79a22..956383a99eea 100644
--- a/cmds/statsd/src/metrics/duration_helper/OringDurationTracker.cpp
+++ b/cmds/statsd/src/metrics/duration_helper/OringDurationTracker.cpp
@@ -326,7 +326,6 @@ void OringDurationTracker::onConditionChanged(bool condition, const int64_t time
int64_t OringDurationTracker::predictAnomalyTimestampNs(
const DurationAnomalyTracker& anomalyTracker, const int64_t eventTimestampNs) const {
- // TODO: Unit-test this and see if it can be done more efficiently (e.g. use int32).
// The anomaly threshold.
const int64_t thresholdNs = anomalyTracker.getAnomalyThreshold();
diff --git a/cmds/statsd/src/metrics/metrics_manager_util.cpp b/cmds/statsd/src/metrics/metrics_manager_util.cpp
index 811a00e47ae5..75d6df95852d 100644
--- a/cmds/statsd/src/metrics/metrics_manager_util.cpp
+++ b/cmds/statsd/src/metrics/metrics_manager_util.cpp
@@ -82,6 +82,28 @@ bool handleMetricWithLogTrackers(const int64_t what, const int metricIndex,
return true;
}
+bool handlePullMetricTriggerWithLogTrackers(
+ const int64_t trigger, const int metricIndex,
+ const vector<sp<LogMatchingTracker>>& allAtomMatchers,
+ const unordered_map<int64_t, int>& logTrackerMap,
+ unordered_map<int, std::vector<int>>& trackerToMetricMap, int& logTrackerIndex) {
+ auto logTrackerIt = logTrackerMap.find(trigger);
+ if (logTrackerIt == logTrackerMap.end()) {
+ ALOGW("cannot find the AtomMatcher \"%lld\" in config", (long long)trigger);
+ return false;
+ }
+ if (allAtomMatchers[logTrackerIt->second]->getAtomIds().size() > 1) {
+ ALOGE("AtomMatcher \"%lld\" has more than one tag ids."
+ "Trigger can only be one atom type.",
+ (long long)trigger);
+ return false;
+ }
+ logTrackerIndex = logTrackerIt->second;
+ auto& metric_list = trackerToMetricMap[logTrackerIndex];
+ metric_list.push_back(metricIndex);
+ return true;
+}
+
bool handleMetricWithConditions(
const int64_t condition, const int metricIndex,
const unordered_map<int64_t, int>& conditionTrackerMap,
@@ -103,7 +125,6 @@ bool handleMetricWithConditions(
}
allConditionTrackers[condition_it->second]->setSliced(true);
allConditionTrackers[it->second]->setSliced(true);
- // TODO: We need to verify the link is valid.
}
conditionIndex = condition_it->second;
@@ -169,7 +190,6 @@ bool initLogTrackers(const StatsdConfig& config, const UidMap& uidMap,
bool isStateTracker(const SimplePredicate& simplePredicate, vector<Matcher>* primaryKeys) {
// 1. must not have "stop". must have "dimension"
if (!simplePredicate.has_stop() && simplePredicate.has_dimensions()) {
- // TODO: need to check the start atom matcher too.
auto it = android::util::AtomsInfo::kStateAtomsFieldOptions.find(
simplePredicate.dimensions().field());
// 2. must be based on a state atom.
@@ -262,9 +282,10 @@ bool initConditions(const ConfigKey& key, const StatsdConfig& config,
return true;
}
-bool initMetrics(const ConfigKey& key, const StatsdConfig& config,
- const int64_t timeBaseTimeNs, const int64_t currentTimeNs,
- UidMap& uidMap, const unordered_map<int64_t, int>& logTrackerMap,
+bool initMetrics(const ConfigKey& key, const StatsdConfig& config, const int64_t timeBaseTimeNs,
+ const int64_t currentTimeNs, UidMap& uidMap,
+ const sp<StatsPullerManager>& pullerManager,
+ const unordered_map<int64_t, int>& logTrackerMap,
const unordered_map<int64_t, int>& conditionTrackerMap,
const vector<sp<LogMatchingTracker>>& allAtomMatchers,
vector<sp<ConditionTracker>>& allConditionTrackers,
@@ -313,7 +334,7 @@ bool initMetrics(const ConfigKey& key, const StatsdConfig& config,
}
sp<MetricProducer> countProducer =
- new CountMetricProducer(key, metric, conditionIndex, wizard, timeBaseTimeNs);
+ new CountMetricProducer(key, metric, conditionIndex, wizard, timeBaseTimeNs, currentTimeNs);
allMetricProducers.push_back(countProducer);
}
@@ -383,7 +404,7 @@ bool initMetrics(const ConfigKey& key, const StatsdConfig& config,
sp<MetricProducer> durationMetric = new DurationMetricProducer(
key, metric, conditionIndex, trackerIndices[0], trackerIndices[1],
- trackerIndices[2], nesting, wizard, internalDimensions, timeBaseTimeNs);
+ trackerIndices[2], nesting, wizard, internalDimensions, timeBaseTimeNs, currentTimeNs);
allMetricProducers.push_back(durationMetric);
}
@@ -465,9 +486,9 @@ bool initMetrics(const ConfigKey& key, const StatsdConfig& config,
}
}
- sp<MetricProducer> valueProducer = new ValueMetricProducer(key, metric, conditionIndex,
- wizard, pullTagId,
- timeBaseTimeNs, currentTimeNs);
+ sp<MetricProducer> valueProducer =
+ new ValueMetricProducer(key, metric, conditionIndex, wizard, pullTagId,
+ timeBaseTimeNs, currentTimeNs, pullerManager);
allMetricProducers.push_back(valueProducer);
}
@@ -503,13 +524,29 @@ bool initMetrics(const ConfigKey& key, const StatsdConfig& config,
}
sp<LogMatchingTracker> atomMatcher = allAtomMatchers.at(trackerIndex);
- // If it is pulled atom, it should be simple matcher with one tagId.
+ // For GaugeMetric atom, it should be simple matcher with one tagId.
if (atomMatcher->getAtomIds().size() != 1) {
return false;
}
int atomTagId = *(atomMatcher->getAtomIds().begin());
int pullTagId = statsPullerManager.PullerForMatcherExists(atomTagId) ? atomTagId : -1;
+ int triggerTrackerIndex;
+ int triggerAtomId = -1;
+ if (pullTagId != -1 && metric.has_trigger_event()) {
+ // event_trigger should be used with ALL_CONDITION_CHANGES
+ if (metric.sampling_type() != GaugeMetric::ALL_CONDITION_CHANGES) {
+ return false;
+ }
+ if (!handlePullMetricTriggerWithLogTrackers(metric.trigger_event(), metricIndex,
+ allAtomMatchers, logTrackerMap,
+ trackerToMetricMap, triggerTrackerIndex)) {
+ return false;
+ }
+ sp<LogMatchingTracker> triggerAtomMatcher = allAtomMatchers.at(triggerTrackerIndex);
+ triggerAtomId = *(triggerAtomMatcher->getAtomIds().begin());
+ }
+
int conditionIndex = -1;
if (metric.has_condition()) {
bool good = handleMetricWithConditions(
@@ -526,7 +563,8 @@ bool initMetrics(const ConfigKey& key, const StatsdConfig& config,
}
sp<MetricProducer> gaugeProducer = new GaugeMetricProducer(
- key, metric, conditionIndex, wizard, pullTagId, timeBaseTimeNs, currentTimeNs);
+ key, metric, conditionIndex, wizard, pullTagId, triggerAtomId, atomTagId,
+ timeBaseTimeNs, currentTimeNs, pullerManager);
allMetricProducers.push_back(gaugeProducer);
}
for (int i = 0; i < config.no_report_metric_size(); ++i) {
@@ -645,10 +683,10 @@ bool initAlarms(const StatsdConfig& config, const ConfigKey& key,
}
bool initStatsdConfig(const ConfigKey& key, const StatsdConfig& config, UidMap& uidMap,
+ const sp<StatsPullerManager>& pullerManager,
const sp<AlarmMonitor>& anomalyAlarmMonitor,
- const sp<AlarmMonitor>& periodicAlarmMonitor,
- const int64_t timeBaseNs, const int64_t currentTimeNs,
- set<int>& allTagIds,
+ const sp<AlarmMonitor>& periodicAlarmMonitor, const int64_t timeBaseNs,
+ const int64_t currentTimeNs, set<int>& allTagIds,
vector<sp<LogMatchingTracker>>& allAtomMatchers,
vector<sp<ConditionTracker>>& allConditionTrackers,
vector<sp<MetricProducer>>& allMetricProducers,
@@ -674,9 +712,8 @@ bool initStatsdConfig(const ConfigKey& key, const StatsdConfig& config, UidMap&
return false;
}
- if (!initMetrics(key, config, timeBaseNs, currentTimeNs, uidMap,
- logTrackerMap, conditionTrackerMap,
- allAtomMatchers, allConditionTrackers, allMetricProducers,
+ if (!initMetrics(key, config, timeBaseNs, currentTimeNs, uidMap, pullerManager, logTrackerMap,
+ conditionTrackerMap, allAtomMatchers, allConditionTrackers, allMetricProducers,
conditionToMetricMap, trackerToMetricMap, metricProducerMap,
noReportMetricIds)) {
ALOGE("initMetricProducers failed");
diff --git a/cmds/statsd/src/metrics/metrics_manager_util.h b/cmds/statsd/src/metrics/metrics_manager_util.h
index d749bf43c9be..c6601493135f 100644
--- a/cmds/statsd/src/metrics/metrics_manager_util.h
+++ b/cmds/statsd/src/metrics/metrics_manager_util.h
@@ -23,7 +23,7 @@
#include "../anomaly/AlarmTracker.h"
#include "../condition/ConditionTracker.h"
-#include "../external/StatsPullerManagerImpl.h"
+#include "../external/StatsPullerManager.h"
#include "../matchers/LogMatchingTracker.h"
#include "../metrics/MetricProducer.h"
@@ -81,9 +81,8 @@ bool initConditions(const ConfigKey& key, const StatsdConfig& config,
// the list of MetricProducer index
// [trackerToMetricMap]: contains the mapping from log tracker to MetricProducer index.
bool initMetrics(
- const ConfigKey& key, const StatsdConfig& config,
- const int64_t timeBaseTimeNs, const int64_t currentTimeNs,
- UidMap& uidMap,
+ const ConfigKey& key, const StatsdConfig& config, const int64_t timeBaseTimeNs,
+ const int64_t currentTimeNs, UidMap& uidMap, const sp<StatsPullerManager>& pullerManager,
const std::unordered_map<int64_t, int>& logTrackerMap,
const std::unordered_map<int64_t, int>& conditionTrackerMap,
const std::unordered_map<int, std::vector<MetricConditionLink>>& eventConditionLinks,
@@ -97,10 +96,10 @@ bool initMetrics(
// Initialize MetricsManager from StatsdConfig.
// Parameters are the members of MetricsManager. See MetricsManager for declaration.
bool initStatsdConfig(const ConfigKey& key, const StatsdConfig& config, UidMap& uidMap,
+ const sp<StatsPullerManager>& pullerManager,
const sp<AlarmMonitor>& anomalyAlarmMonitor,
- const sp<AlarmMonitor>& periodicAlarmMonitor,
- const int64_t timeBaseNs, const int64_t currentTimeNs,
- std::set<int>& allTagIds,
+ const sp<AlarmMonitor>& periodicAlarmMonitor, const int64_t timeBaseNs,
+ const int64_t currentTimeNs, std::set<int>& allTagIds,
std::vector<sp<LogMatchingTracker>>& allAtomMatchers,
std::vector<sp<ConditionTracker>>& allConditionTrackers,
std::vector<sp<MetricProducer>>& allMetricProducers,
diff --git a/cmds/statsd/src/packages/UidMap.cpp b/cmds/statsd/src/packages/UidMap.cpp
index fff909c12441..be94725991af 100644
--- a/cmds/statsd/src/packages/UidMap.cpp
+++ b/cmds/statsd/src/packages/UidMap.cpp
@@ -272,7 +272,7 @@ void UidMap::assignIsolatedUid(int isolatedUid, int parentUid) {
mIsolatedUidMap[isolatedUid] = parentUid;
}
-void UidMap::removeIsolatedUid(int isolatedUid, int parentUid) {
+void UidMap::removeIsolatedUid(int isolatedUid) {
lock_guard<mutex> lock(mIsolatedMutex);
auto it = mIsolatedUidMap.find(isolatedUid);
diff --git a/cmds/statsd/src/packages/UidMap.h b/cmds/statsd/src/packages/UidMap.h
index 5e42cd18de32..91f203084388 100644
--- a/cmds/statsd/src/packages/UidMap.h
+++ b/cmds/statsd/src/packages/UidMap.h
@@ -119,7 +119,7 @@ public:
void OnConfigRemoved(const ConfigKey& key);
void assignIsolatedUid(int isolatedUid, int parentUid);
- void removeIsolatedUid(int isolatedUid, int parentUid);
+ void removeIsolatedUid(int isolatedUid);
// Returns the host uid if it exists. Otherwise, returns the same uid that was passed-in.
virtual int getHostUidOrSelf(int uid) const;
@@ -146,7 +146,6 @@ private:
void getListenerListCopyLocked(std::vector<wp<PackageInfoListener>>* output);
- // TODO: Use shared_mutex for improved read-locking if a library can be found in Android.
mutable mutex mMutex;
mutable mutex mIsolatedMutex;
diff --git a/cmds/statsd/src/perfetto/perfetto_config.proto b/cmds/statsd/src/perfetto/perfetto_config.proto
deleted file mode 100644
index 56d12f8d81d4..000000000000
--- a/cmds/statsd/src/perfetto/perfetto_config.proto
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-syntax = "proto2";
-
-package perfetto.protos;
-
-message DataSourceConfig {
- message FtraceConfig {
- repeated string event_names = 1;
- }
-
- optional string name = 1;
-
- optional uint32 target_buffer = 2;
-
- optional FtraceConfig ftrace_config = 100;
-}
-
-message TraceConfig {
- message BufferConfig {
- optional uint32 size_kb = 1;
-
- enum OptimizeFor {
- DEFAULT = 0;
- ONE_SHOT_READ = 1;
-
- }
- optional OptimizeFor optimize_for = 3;
-
- enum FillPolicy {
- UNSPECIFIED = 0;
- RING_BUFFER = 1;
- }
- optional FillPolicy fill_policy = 4;
- }
- repeated BufferConfig buffers = 1;
-
- message DataSource {
- optional protos.DataSourceConfig config = 1;
-
- repeated string producer_name_filter = 2;
- }
- repeated DataSource data_sources = 2;
-
- optional uint32 duration_ms = 3;
-}
diff --git a/cmds/statsd/src/socket/StatsSocketListener.cpp b/cmds/statsd/src/socket/StatsSocketListener.cpp
index 0392d6756292..9b0691b6092d 100755
--- a/cmds/statsd/src/socket/StatsSocketListener.cpp
+++ b/cmds/statsd/src/socket/StatsSocketListener.cpp
@@ -40,6 +40,7 @@ namespace os {
namespace statsd {
static const int kLogMsgHeaderSize = 28;
+static const int kLibLogTag = 1006;
StatsSocketListener::StatsSocketListener(const sp<LogListener>& listener)
: SocketListener(getLogSocket(), false /*start listen*/), mListener(listener) {
@@ -99,6 +100,23 @@ bool StatsSocketListener::onDataAvailable(SocketClient* cli) {
char* ptr = ((char*)buffer) + sizeof(android_log_header_t);
n -= sizeof(android_log_header_t);
+ // When a log fails to write to the statsd socket (e.g., due to EBUSY), a special message will
+ // be sent to statsd when the socket communication becomes available again.
+ // The format is android_log_event_int_t with a single integer in the payload indicating the
+ // number of logs that failed. (*FORMAT MUST BE IN SYNC WITH system/core/libstats*)
+ // Note that all normal stats logs are in the format of event_list, so there won't be confusion.
+ //
+ // TODO(b/80538532): In addition to log it in StatsdStats, we should properly reset the config.
+ if (n == sizeof(android_log_event_int_t)) {
+ android_log_event_int_t* int_event = reinterpret_cast<android_log_event_int_t*>(ptr);
+ if (int_event->header.tag == kLibLogTag && int_event->payload.type == EVENT_TYPE_INT) {
+ ALOGE("Found dropped events: %d", int_event->payload.data);
+ StatsdStats::getInstance().noteLogLost((int32_t)getWallClockSec(),
+ int_event->payload.data);
+ return true;
+ }
+ }
+
log_msg msg;
msg.entry.len = n;
@@ -111,7 +129,7 @@ bool StatsSocketListener::onDataAvailable(SocketClient* cli) {
LogEvent event(msg);
// Call the listener
- mListener->OnLogEvent(&event, false /*reconnected, N/A in statsd socket*/);
+ mListener->OnLogEvent(&event);
return true;
}
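The new branch above treats a payload of exactly sizeof(android_log_event_int_t) whose tag is 1006 and whose single payload element is EVENT_TYPE_INT as a "dropped logs" marker from liblog. The sketch below uses a hypothetical packed struct that mirrors that layout; the real definition, and the EVENT_TYPE_INT value assumed to be 0 here, come from liblog and must stay in sync with it:

#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr int32_t kLibLogTag = 1006;
constexpr uint8_t kEventTypeInt = 0;  // EVENT_TYPE_INT in liblog (assumed)

// Hypothetical stand-in for liblog's android_log_event_int_t.
struct __attribute__((packed)) IntEvent {
    int32_t tag;   // event tag
    uint8_t type;  // payload type
    int32_t data;  // number of dropped logs
};

// Returns the dropped-log count if the buffer is the marker, or -1 otherwise.
int parseDroppedLogMarker(const char* buf, size_t len) {
    if (len != sizeof(IntEvent)) return -1;
    IntEvent ev;
    std::memcpy(&ev, buf, sizeof(ev));
    if (ev.tag != kLibLogTag || ev.type != kEventTypeInt) return -1;
    return ev.data;
}

int main() {
    IntEvent marker{kLibLogTag, kEventTypeInt, 17};
    int dropped = parseDroppedLogMarker(reinterpret_cast<const char*>(&marker), sizeof(marker));
    std::printf("dropped logs: %d\n", dropped);  // 17
    return 0;
}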
diff --git a/cmds/statsd/src/stats_log.proto b/cmds/statsd/src/stats_log.proto
index 2fe17daf7542..db7e680f1d37 100644
--- a/cmds/statsd/src/stats_log.proto
+++ b/cmds/statsd/src/stats_log.proto
@@ -106,7 +106,13 @@ message ValueBucketInfo {
optional int64 end_bucket_elapsed_nanos = 2;
- optional int64 value = 3;
+ optional int64 value = 3 [deprecated = true];
+
+ oneof values {
+ int64 value_long = 7;
+
+ double value_double = 8;
+ }
optional int64 bucket_num = 4;
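With the change above, existing readers that look at the deprecated value field keep working, while new reports populate exactly one member of the values oneof. A minimal reader-side sketch, assuming the usual protobuf C++ codegen for this file (the include path is illustrative):

    #include "frameworks/base/cmds/statsd/src/stats_log.pb.h"  // assumed generated header

    using android::os::statsd::ValueBucketInfo;

    // Prefer the oneof; fall back to the deprecated int64 field for old reports.
    double GetBucketValue(const ValueBucketInfo& bucket) {
        switch (bucket.values_case()) {
            case ValueBucketInfo::kValueLong:
                return static_cast<double>(bucket.value_long());
            case ValueBucketInfo::kValueDouble:
                return bucket.value_double();
            default:
                return static_cast<double>(bucket.value());
        }
    }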
diff --git a/cmds/statsd/src/stats_util.h b/cmds/statsd/src/stats_util.h
index 5fcb16111f97..cfc411fdd25f 100644
--- a/cmds/statsd/src/stats_util.h
+++ b/cmds/statsd/src/stats_util.h
@@ -17,7 +17,6 @@
#pragma once
#include "HashableDimensionKey.h"
-#include "logd/LogReader.h"
#include <unordered_map>
diff --git a/cmds/statsd/src/statsd_config.proto b/cmds/statsd/src/statsd_config.proto
index cf5530000555..d19e247ae6c7 100644
--- a/cmds/statsd/src/statsd_config.proto
+++ b/cmds/statsd/src/statsd_config.proto
@@ -21,8 +21,6 @@ package android.os.statsd;
option java_package = "com.android.internal.os";
option java_outer_classname = "StatsdConfigProto";
-import "frameworks/base/cmds/statsd/src/perfetto/perfetto_config.proto";
-
enum Position {
POSITION_UNKNOWN = 0;
@@ -219,6 +217,8 @@ message GaugeMetric {
optional int64 what = 2;
+ optional int64 trigger_event = 12;
+
optional FieldFilter gauge_fields_filter = 3;
optional int64 condition = 4;
@@ -261,6 +261,9 @@ message ValueMetric {
enum AggregationType {
SUM = 1;
+ MIN = 2;
+ MAX = 3;
+ AVG = 4;
}
optional AggregationType aggregation_type = 8 [default = SUM];
@@ -300,7 +303,21 @@ message IncidentdDetails {
}
message PerfettoDetails {
- optional perfetto.protos.TraceConfig trace_config = 1;
+ // The |trace_config| field is a proto-encoded message of type
+ // perfetto.protos.TraceConfig defined in
+ // //external/perfetto/protos/perfetto/config/. On device,
+ // statsd doesn't need to deserialize the message as it's just
+ // passed binary-encoded to the perfetto cmdline client.
+ optional bytes trace_config = 1;
+}
+
+message PerfprofdDetails {
+ // The |perfprofd_config| field is a proto-encoded message of type
+ // android.perfprofd.ProfilingConfig defined in
+ // //system/extras/perfprofd/. On device, statsd doesn't need to
+ // deserialize the message as it's just passed binary-encoded to
+ // the perfprofd service.
+ optional bytes perfprofd_config = 1;
}
message BroadcastSubscriberDetails {
@@ -324,6 +341,7 @@ message Subscription {
IncidentdDetails incidentd_details = 4;
PerfettoDetails perfetto_details = 5;
BroadcastSubscriberDetails broadcast_subscriber_details = 6;
+ PerfprofdDetails perfprofd_details = 8;
}
optional float probability_of_informing = 7 [default = 1.1];
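Both new bytes fields follow the same pattern: the subscription author serializes the nested config up front, and statsd forwards the blob untouched to the perfetto or perfprofd client. A minimal producer-side sketch, assuming the generated C++ classes for statsd_config.proto and perfetto's TraceConfig (both include paths are illustrative):

    #include <string>

    #include "frameworks/base/cmds/statsd/src/statsd_config.pb.h"  // assumed path
    #include "perfetto/config/trace_config.pb.h"                    // assumed path

    // Serialize the perfetto TraceConfig into the opaque bytes field; statsd
    // never parses this payload, it only hands it to the perfetto cmdline client.
    android::os::statsd::PerfettoDetails MakePerfettoDetails(
            const perfetto::protos::TraceConfig& trace_config) {
        android::os::statsd::PerfettoDetails details;
        std::string encoded;
        trace_config.SerializeToString(&encoded);
        details.set_trace_config(encoded);
        return details;
    }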
diff --git a/cmds/statsd/src/storage/StorageManager.cpp b/cmds/statsd/src/storage/StorageManager.cpp
index 1f8181266b65..3ebc8a492d68 100644
--- a/cmds/statsd/src/storage/StorageManager.cpp
+++ b/cmds/statsd/src/storage/StorageManager.cpp
@@ -57,7 +57,7 @@ static void parseFileName(char* name, int64_t* result) {
}
// When index ends before hitting 3, file name is corrupted. We
// intentionally put -1 at index 0 to indicate the error to caller.
- // TODO: consider removing files with unexpected name format.
+ // TODO(b/110563137): consider removing files with unexpected name format.
if (index < 3) {
result[0] = -1;
}
diff --git a/cmds/statsd/tests/FieldValue_test.cpp b/cmds/statsd/tests/FieldValue_test.cpp
index c253bc19d641..a9305accb1be 100644
--- a/cmds/statsd/tests/FieldValue_test.cpp
+++ b/cmds/statsd/tests/FieldValue_test.cpp
@@ -312,7 +312,8 @@ TEST(AtomMatcherTest, TestSubscriberDimensionWrite) {
dim.addValue(FieldValue(field4, value4));
SubscriberReporter::getStatsDimensionsValue(dim);
- // TODO: can't test anything here because SubscriberReport class doesn't have any read api.
+ // TODO(b/110562792): can't test anything here because StatsDimensionsValue class doesn't
+ // have any read api.
}
TEST(AtomMatcherTest, TestWriteDimensionToProto) {
@@ -483,4 +484,4 @@ TEST(AtomMatcherTest, TestWriteAtomToProto) {
} // namespace android
#else
GTEST_LOG_(INFO) << "This test does nothing.\n";
-#endif \ No newline at end of file
+#endif
diff --git a/cmds/statsd/tests/LogEvent_test.cpp b/cmds/statsd/tests/LogEvent_test.cpp
index 2fcde29fbbdb..acfa151c6f95 100644
--- a/cmds/statsd/tests/LogEvent_test.cpp
+++ b/cmds/statsd/tests/LogEvent_test.cpp
@@ -158,6 +158,96 @@ TEST(LogEventTest, TestLogParsing2) {
EXPECT_EQ((float)1.1, item7.mValue.float_value);
}
+TEST(LogEventTest, TestKeyValuePairsAtomParsing) {
+ std::map<int32_t, int64_t> int_map;
+ std::map<int32_t, std::string> string_map;
+ std::map<int32_t, float> float_map;
+
+ int_map[11] = 123L;
+ int_map[22] = 345L;
+
+ string_map[1] = "test2";
+ string_map[2] = "test1";
+
+ float_map[111] = 2.2f;
+ float_map[222] = 1.1f;
+
+ LogEvent event1(83, 2000, 2001, 10001, int_map, string_map, float_map);
+ event1.init();
+
+ EXPECT_EQ(83, event1.GetTagId());
+ EXPECT_EQ((int64_t)2000, event1.GetLogdTimestampNs());
+ EXPECT_EQ((int64_t)2001, event1.GetElapsedTimestampNs());
+
+ const auto& items = event1.getValues();
+ EXPECT_EQ((size_t)13, items.size());
+
+ const FieldValue& item0 = event1.getValues()[0];
+ EXPECT_EQ(0x00010000, item0.mField.getField());
+ EXPECT_EQ(Type::INT, item0.mValue.getType());
+ EXPECT_EQ(10001, item0.mValue.int_value);
+
+ const FieldValue& item1 = event1.getValues()[1];
+ EXPECT_EQ(0x2020101, item1.mField.getField());
+ EXPECT_EQ(Type::INT, item1.mValue.getType());
+ EXPECT_EQ(11, item1.mValue.int_value);
+
+ const FieldValue& item2 = event1.getValues()[2];
+ EXPECT_EQ(0x2020182, item2.mField.getField());
+ EXPECT_EQ(Type::LONG, item2.mValue.getType());
+ EXPECT_EQ(123L, item2.mValue.long_value);
+
+ const FieldValue& item3 = event1.getValues()[3];
+ EXPECT_EQ(0x2020201, item3.mField.getField());
+ EXPECT_EQ(Type::INT, item3.mValue.getType());
+ EXPECT_EQ(22, item3.mValue.int_value);
+
+ const FieldValue& item4 = event1.getValues()[4];
+ EXPECT_EQ(0x2020282, item4.mField.getField());
+ EXPECT_EQ(Type::LONG, item4.mValue.getType());
+ EXPECT_EQ(345L, item4.mValue.long_value);
+
+ const FieldValue& item5 = event1.getValues()[5];
+ EXPECT_EQ(0x2020301, item5.mField.getField());
+ EXPECT_EQ(Type::INT, item5.mValue.getType());
+ EXPECT_EQ(1, item5.mValue.int_value);
+
+ const FieldValue& item6 = event1.getValues()[6];
+ EXPECT_EQ(0x2020383, item6.mField.getField());
+ EXPECT_EQ(Type::STRING, item6.mValue.getType());
+ EXPECT_EQ("test2", item6.mValue.str_value);
+
+ const FieldValue& item7 = event1.getValues()[7];
+ EXPECT_EQ(0x2020401, item7.mField.getField());
+ EXPECT_EQ(Type::INT, item7.mValue.getType());
+ EXPECT_EQ(2, item7.mValue.int_value);
+
+ const FieldValue& item8 = event1.getValues()[8];
+ EXPECT_EQ(0x2020483, item8.mField.getField());
+ EXPECT_EQ(Type::STRING, item8.mValue.getType());
+ EXPECT_EQ("test1", item8.mValue.str_value);
+
+ const FieldValue& item9 = event1.getValues()[9];
+ EXPECT_EQ(0x2020501, item9.mField.getField());
+ EXPECT_EQ(Type::INT, item9.mValue.getType());
+ EXPECT_EQ(111, item9.mValue.int_value);
+
+ const FieldValue& item10 = event1.getValues()[10];
+ EXPECT_EQ(0x2020584, item10.mField.getField());
+ EXPECT_EQ(Type::FLOAT, item10.mValue.getType());
+ EXPECT_EQ(2.2f, item10.mValue.float_value);
+
+ const FieldValue& item11 = event1.getValues()[11];
+ EXPECT_EQ(0x2028601, item11.mField.getField());
+ EXPECT_EQ(Type::INT, item11.mValue.getType());
+ EXPECT_EQ(222, item11.mValue.int_value);
+
+ const FieldValue& item12 = event1.getValues()[12];
+ EXPECT_EQ(0x2028684, item12.mField.getField());
+ EXPECT_EQ(Type::FLOAT, item12.mValue.getType());
+ EXPECT_EQ(1.1f, item12.mValue.float_value);
+}
+
} // namespace statsd
} // namespace os
diff --git a/cmds/statsd/tests/MetricsManager_test.cpp b/cmds/statsd/tests/MetricsManager_test.cpp
index 07378dbcce1a..8fbb58a956d5 100644
--- a/cmds/statsd/tests/MetricsManager_test.cpp
+++ b/cmds/statsd/tests/MetricsManager_test.cpp
@@ -39,8 +39,6 @@ using android::os::statsd::Predicate;
#ifdef __ANDROID__
-// TODO: ADD MORE TEST CASES.
-
const ConfigKey kConfigKey(0, 12345);
const long timeBaseSec = 1000;
@@ -271,6 +269,7 @@ StatsdConfig buildCirclePredicates() {
TEST(MetricsManagerTest, TestGoodConfig) {
UidMap uidMap;
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> periodicAlarmMonitor;
StatsdConfig config = buildGoodConfig();
@@ -285,13 +284,11 @@ TEST(MetricsManagerTest, TestGoodConfig) {
unordered_map<int, std::vector<int>> trackerToConditionMap;
std::set<int64_t> noReportMetricIds;
- EXPECT_TRUE(initStatsdConfig(kConfigKey, config, uidMap,
- anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseSec, timeBaseSec, allTagIds, allAtomMatchers,
- allConditionTrackers, allMetricProducers, allAnomalyTrackers,
- allAlarmTrackers,
- conditionToMetricMap, trackerToMetricMap, trackerToConditionMap,
- noReportMetricIds));
+ EXPECT_TRUE(initStatsdConfig(kConfigKey, config, uidMap, pullerManager, anomalyAlarmMonitor,
+ periodicAlarmMonitor, timeBaseSec, timeBaseSec, allTagIds,
+ allAtomMatchers, allConditionTrackers, allMetricProducers,
+ allAnomalyTrackers, allAlarmTrackers, conditionToMetricMap,
+ trackerToMetricMap, trackerToConditionMap, noReportMetricIds));
EXPECT_EQ(1u, allMetricProducers.size());
EXPECT_EQ(1u, allAnomalyTrackers.size());
EXPECT_EQ(1u, noReportMetricIds.size());
@@ -299,6 +296,7 @@ TEST(MetricsManagerTest, TestGoodConfig) {
TEST(MetricsManagerTest, TestDimensionMetricsWithMultiTags) {
UidMap uidMap;
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> periodicAlarmMonitor;
StatsdConfig config = buildDimensionMetricsWithMultiTags();
@@ -313,17 +311,16 @@ TEST(MetricsManagerTest, TestDimensionMetricsWithMultiTags) {
unordered_map<int, std::vector<int>> trackerToConditionMap;
std::set<int64_t> noReportMetricIds;
- EXPECT_FALSE(initStatsdConfig(kConfigKey, config, uidMap,
- anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseSec, timeBaseSec, allTagIds, allAtomMatchers,
- allConditionTrackers, allMetricProducers, allAnomalyTrackers,
- allAlarmTrackers,
- conditionToMetricMap, trackerToMetricMap, trackerToConditionMap,
- noReportMetricIds));
+ EXPECT_FALSE(initStatsdConfig(kConfigKey, config, uidMap, pullerManager, anomalyAlarmMonitor,
+ periodicAlarmMonitor, timeBaseSec, timeBaseSec, allTagIds,
+ allAtomMatchers, allConditionTrackers, allMetricProducers,
+ allAnomalyTrackers, allAlarmTrackers, conditionToMetricMap,
+ trackerToMetricMap, trackerToConditionMap, noReportMetricIds));
}
TEST(MetricsManagerTest, TestCircleLogMatcherDependency) {
UidMap uidMap;
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> periodicAlarmMonitor;
StatsdConfig config = buildCircleMatchers();
@@ -338,17 +335,16 @@ TEST(MetricsManagerTest, TestCircleLogMatcherDependency) {
unordered_map<int, std::vector<int>> trackerToConditionMap;
std::set<int64_t> noReportMetricIds;
- EXPECT_FALSE(initStatsdConfig(kConfigKey, config, uidMap,
- anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseSec, timeBaseSec, allTagIds, allAtomMatchers,
- allConditionTrackers, allMetricProducers, allAnomalyTrackers,
- allAlarmTrackers,
- conditionToMetricMap, trackerToMetricMap, trackerToConditionMap,
- noReportMetricIds));
+ EXPECT_FALSE(initStatsdConfig(kConfigKey, config, uidMap, pullerManager, anomalyAlarmMonitor,
+ periodicAlarmMonitor, timeBaseSec, timeBaseSec, allTagIds,
+ allAtomMatchers, allConditionTrackers, allMetricProducers,
+ allAnomalyTrackers, allAlarmTrackers, conditionToMetricMap,
+ trackerToMetricMap, trackerToConditionMap, noReportMetricIds));
}
TEST(MetricsManagerTest, TestMissingMatchers) {
UidMap uidMap;
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> periodicAlarmMonitor;
StatsdConfig config = buildMissingMatchers();
@@ -362,17 +358,16 @@ TEST(MetricsManagerTest, TestMissingMatchers) {
unordered_map<int, std::vector<int>> trackerToMetricMap;
unordered_map<int, std::vector<int>> trackerToConditionMap;
std::set<int64_t> noReportMetricIds;
- EXPECT_FALSE(initStatsdConfig(kConfigKey, config, uidMap,
- anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseSec, timeBaseSec, allTagIds, allAtomMatchers,
- allConditionTrackers, allMetricProducers, allAnomalyTrackers,
- allAlarmTrackers,
- conditionToMetricMap, trackerToMetricMap, trackerToConditionMap,
- noReportMetricIds));
+ EXPECT_FALSE(initStatsdConfig(kConfigKey, config, uidMap, pullerManager, anomalyAlarmMonitor,
+ periodicAlarmMonitor, timeBaseSec, timeBaseSec, allTagIds,
+ allAtomMatchers, allConditionTrackers, allMetricProducers,
+ allAnomalyTrackers, allAlarmTrackers, conditionToMetricMap,
+ trackerToMetricMap, trackerToConditionMap, noReportMetricIds));
}
TEST(MetricsManagerTest, TestMissingPredicate) {
UidMap uidMap;
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> periodicAlarmMonitor;
StatsdConfig config = buildMissingPredicate();
@@ -386,17 +381,16 @@ TEST(MetricsManagerTest, TestMissingPredicate) {
unordered_map<int, std::vector<int>> trackerToMetricMap;
unordered_map<int, std::vector<int>> trackerToConditionMap;
std::set<int64_t> noReportMetricIds;
- EXPECT_FALSE(initStatsdConfig(kConfigKey, config, uidMap,
- anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseSec, timeBaseSec, allTagIds, allAtomMatchers,
- allConditionTrackers, allMetricProducers, allAnomalyTrackers,
- allAlarmTrackers,
- conditionToMetricMap, trackerToMetricMap, trackerToConditionMap,
- noReportMetricIds));
+ EXPECT_FALSE(initStatsdConfig(kConfigKey, config, uidMap, pullerManager, anomalyAlarmMonitor,
+ periodicAlarmMonitor, timeBaseSec, timeBaseSec, allTagIds,
+ allAtomMatchers, allConditionTrackers, allMetricProducers,
+ allAnomalyTrackers, allAlarmTrackers, conditionToMetricMap,
+ trackerToMetricMap, trackerToConditionMap, noReportMetricIds));
}
TEST(MetricsManagerTest, TestCirclePredicateDependency) {
UidMap uidMap;
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> periodicAlarmMonitor;
StatsdConfig config = buildCirclePredicates();
@@ -411,17 +405,16 @@ TEST(MetricsManagerTest, TestCirclePredicateDependency) {
unordered_map<int, std::vector<int>> trackerToConditionMap;
std::set<int64_t> noReportMetricIds;
- EXPECT_FALSE(initStatsdConfig(kConfigKey, config, uidMap,
- anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseSec, timeBaseSec, allTagIds, allAtomMatchers,
- allConditionTrackers, allMetricProducers, allAnomalyTrackers,
- allAlarmTrackers,
- conditionToMetricMap, trackerToMetricMap, trackerToConditionMap,
- noReportMetricIds));
+ EXPECT_FALSE(initStatsdConfig(kConfigKey, config, uidMap, pullerManager, anomalyAlarmMonitor,
+ periodicAlarmMonitor, timeBaseSec, timeBaseSec, allTagIds,
+ allAtomMatchers, allConditionTrackers, allMetricProducers,
+ allAnomalyTrackers, allAlarmTrackers, conditionToMetricMap,
+ trackerToMetricMap, trackerToConditionMap, noReportMetricIds));
}
TEST(MetricsManagerTest, testAlertWithUnknownMetric) {
UidMap uidMap;
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> periodicAlarmMonitor;
StatsdConfig config = buildAlertWithUnknownMetric();
@@ -436,13 +429,11 @@ TEST(MetricsManagerTest, testAlertWithUnknownMetric) {
unordered_map<int, std::vector<int>> trackerToConditionMap;
std::set<int64_t> noReportMetricIds;
- EXPECT_FALSE(initStatsdConfig(kConfigKey, config, uidMap,
- anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseSec, timeBaseSec, allTagIds, allAtomMatchers,
- allConditionTrackers, allMetricProducers, allAnomalyTrackers,
- allAlarmTrackers,
- conditionToMetricMap, trackerToMetricMap, trackerToConditionMap,
- noReportMetricIds));
+ EXPECT_FALSE(initStatsdConfig(kConfigKey, config, uidMap, pullerManager, anomalyAlarmMonitor,
+ periodicAlarmMonitor, timeBaseSec, timeBaseSec, allTagIds,
+ allAtomMatchers, allConditionTrackers, allMetricProducers,
+ allAnomalyTrackers, allAlarmTrackers, conditionToMetricMap,
+ trackerToMetricMap, trackerToConditionMap, noReportMetricIds));
}
#else
diff --git a/cmds/statsd/tests/StatsLogProcessor_test.cpp b/cmds/statsd/tests/StatsLogProcessor_test.cpp
index 76f3d8181dee..b6f635c6a0cb 100644
--- a/cmds/statsd/tests/StatsLogProcessor_test.cpp
+++ b/cmds/statsd/tests/StatsLogProcessor_test.cpp
@@ -44,13 +44,13 @@ using android::util::ProtoOutputStream;
*/
class MockMetricsManager : public MetricsManager {
public:
- MockMetricsManager() : MetricsManager(
- ConfigKey(1, 12345), StatsdConfig(), 1000, 1000,
- new UidMap(),
- new AlarmMonitor(10, [](const sp<IStatsCompanionService>&, int64_t){},
- [](const sp<IStatsCompanionService>&){}),
- new AlarmMonitor(10, [](const sp<IStatsCompanionService>&, int64_t){},
- [](const sp<IStatsCompanionService>&){})) {
+ MockMetricsManager()
+ : MetricsManager(ConfigKey(1, 12345), StatsdConfig(), 1000, 1000, new UidMap(),
+ new StatsPullerManager(),
+ new AlarmMonitor(10, [](const sp<IStatsCompanionService>&, int64_t) {},
+ [](const sp<IStatsCompanionService>&) {}),
+ new AlarmMonitor(10, [](const sp<IStatsCompanionService>&, int64_t) {},
+ [](const sp<IStatsCompanionService>&) {})) {
}
MOCK_METHOD0(byteSize, size_t());
@@ -60,11 +60,12 @@ public:
TEST(StatsLogProcessorTest, TestRateLimitByteSize) {
sp<UidMap> m = new UidMap();
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> periodicAlarmMonitor;
// Construct the processor with a dummy sendBroadcast function that does nothing.
- StatsLogProcessor p(m, anomalyAlarmMonitor, periodicAlarmMonitor, 0,
- [](const ConfigKey& key) {return true;});
+ StatsLogProcessor p(m, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor, 0,
+ [](const ConfigKey& key) { return true; });
MockMetricsManager mockMetricsManager;
@@ -79,11 +80,15 @@ TEST(StatsLogProcessorTest, TestRateLimitByteSize) {
TEST(StatsLogProcessorTest, TestRateLimitBroadcast) {
sp<UidMap> m = new UidMap();
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> subscriberAlarmMonitor;
int broadcastCount = 0;
- StatsLogProcessor p(m, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
- [&broadcastCount](const ConfigKey& key) { broadcastCount++; return true;});
+ StatsLogProcessor p(m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
+ [&broadcastCount](const ConfigKey& key) {
+ broadcastCount++;
+ return true;
+ });
MockMetricsManager mockMetricsManager;
@@ -105,11 +110,15 @@ TEST(StatsLogProcessorTest, TestRateLimitBroadcast) {
TEST(StatsLogProcessorTest, TestDropWhenByteSizeTooLarge) {
sp<UidMap> m = new UidMap();
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> subscriberAlarmMonitor;
int broadcastCount = 0;
- StatsLogProcessor p(m, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
- [&broadcastCount](const ConfigKey& key) { broadcastCount++; return true;});
+ StatsLogProcessor p(m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
+ [&broadcastCount](const ConfigKey& key) {
+ broadcastCount++;
+ return true;
+ });
MockMetricsManager mockMetricsManager;
@@ -143,12 +152,16 @@ StatsdConfig MakeConfig(bool includeMetric) {
TEST(StatsLogProcessorTest, TestUidMapHasSnapshot) {
// Setup simple config key corresponding to empty config.
sp<UidMap> m = new UidMap();
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
m->updateMap(1, {1, 2}, {1, 2}, {String16("p1"), String16("p2")});
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> subscriberAlarmMonitor;
int broadcastCount = 0;
- StatsLogProcessor p(m, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
- [&broadcastCount](const ConfigKey& key) { broadcastCount++; return true;});
+ StatsLogProcessor p(m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
+ [&broadcastCount](const ConfigKey& key) {
+ broadcastCount++;
+ return true;
+ });
ConfigKey key(3, 4);
StatsdConfig config = MakeConfig(true);
p.OnConfigUpdated(0, key, config);
@@ -168,12 +181,16 @@ TEST(StatsLogProcessorTest, TestUidMapHasSnapshot) {
TEST(StatsLogProcessorTest, TestEmptyConfigHasNoUidMap) {
// Setup simple config key corresponding to empty config.
sp<UidMap> m = new UidMap();
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
m->updateMap(1, {1, 2}, {1, 2}, {String16("p1"), String16("p2")});
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> subscriberAlarmMonitor;
int broadcastCount = 0;
- StatsLogProcessor p(m, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
- [&broadcastCount](const ConfigKey& key) { broadcastCount++; return true;});
+ StatsLogProcessor p(m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
+ [&broadcastCount](const ConfigKey& key) {
+ broadcastCount++;
+ return true;
+ });
ConfigKey key(3, 4);
StatsdConfig config = MakeConfig(false);
p.OnConfigUpdated(0, key, config);
@@ -191,11 +208,15 @@ TEST(StatsLogProcessorTest, TestEmptyConfigHasNoUidMap) {
TEST(StatsLogProcessorTest, TestReportIncludesSubConfig) {
// Setup simple config key corresponding to empty config.
sp<UidMap> m = new UidMap();
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> subscriberAlarmMonitor;
int broadcastCount = 0;
- StatsLogProcessor p(m, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
- [&broadcastCount](const ConfigKey& key) { broadcastCount++; return true;});
+ StatsLogProcessor p(m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
+ [&broadcastCount](const ConfigKey& key) {
+ broadcastCount++;
+ return true;
+ });
ConfigKey key(3, 4);
StatsdConfig config;
auto annotation = config.add_annotation();
@@ -217,128 +238,6 @@ TEST(StatsLogProcessorTest, TestReportIncludesSubConfig) {
EXPECT_EQ(2, report.annotation(0).field_int32());
}
-TEST(StatsLogProcessorTest, TestOutOfOrderLogs) {
- // Setup simple config key corresponding to empty config.
- sp<UidMap> m = new UidMap();
- sp<AlarmMonitor> anomalyAlarmMonitor;
- sp<AlarmMonitor> subscriberAlarmMonitor;
- int broadcastCount = 0;
- StatsLogProcessor p(m, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
- [&broadcastCount](const ConfigKey& key) { broadcastCount++; return true;});
-
- LogEvent event1(0, 1 /*logd timestamp*/, 1001 /*elapsedRealtime*/);
- event1.init();
-
- LogEvent event2(0, 2, 1002);
- event2.init();
-
- LogEvent event3(0, 3, 1005);
- event3.init();
-
- LogEvent event4(0, 4, 1004);
- event4.init();
-
- // <----- Reconnection happens
-
- LogEvent event5(0, 5, 999);
- event5.init();
-
- LogEvent event6(0, 6, 2000);
- event6.init();
-
- // <----- Reconnection happens
-
- LogEvent event7(0, 7, 3000);
- event7.init();
-
- // first event ever
- p.OnLogEvent(&event1, true);
- EXPECT_EQ(1UL, p.mLogCount);
- EXPECT_EQ(1001LL, p.mLargestTimestampSeen);
- EXPECT_EQ(1001LL, p.mLastTimestampSeen);
-
- p.OnLogEvent(&event2, false);
- EXPECT_EQ(2UL, p.mLogCount);
- EXPECT_EQ(1002LL, p.mLargestTimestampSeen);
- EXPECT_EQ(1002LL, p.mLastTimestampSeen);
-
- p.OnLogEvent(&event3, false);
- EXPECT_EQ(3UL, p.mLogCount);
- EXPECT_EQ(1005LL, p.mLargestTimestampSeen);
- EXPECT_EQ(1005LL, p.mLastTimestampSeen);
-
- p.OnLogEvent(&event4, false);
- EXPECT_EQ(4UL, p.mLogCount);
- EXPECT_EQ(1005LL, p.mLargestTimestampSeen);
- EXPECT_EQ(1004LL, p.mLastTimestampSeen);
- EXPECT_FALSE(p.mInReconnection);
-
- // Reconnect happens, event1 out of buffer. Read event2
- p.OnLogEvent(&event2, true);
- EXPECT_EQ(4UL, p.mLogCount);
- EXPECT_EQ(1005LL, p.mLargestTimestampSeen);
- EXPECT_EQ(1004LL, p.mLastTimestampSeen);
- EXPECT_TRUE(p.mInReconnection);
-
- p.OnLogEvent(&event3, false);
- EXPECT_EQ(4UL, p.mLogCount);
- EXPECT_EQ(1005LL, p.mLargestTimestampSeen);
- EXPECT_EQ(1004LL, p.mLastTimestampSeen);
- EXPECT_TRUE(p.mInReconnection);
-
- p.OnLogEvent(&event4, false);
- EXPECT_EQ(4UL, p.mLogCount);
- EXPECT_EQ(1005LL, p.mLargestTimestampSeen);
- EXPECT_EQ(1004LL, p.mLastTimestampSeen);
- EXPECT_FALSE(p.mInReconnection);
-
- // Fresh event comes.
- p.OnLogEvent(&event5, false);
- EXPECT_EQ(5UL, p.mLogCount);
- EXPECT_EQ(1005LL, p.mLargestTimestampSeen);
- EXPECT_EQ(999LL, p.mLastTimestampSeen);
-
- p.OnLogEvent(&event6, false);
- EXPECT_EQ(6UL, p.mLogCount);
- EXPECT_EQ(2000LL, p.mLargestTimestampSeen);
- EXPECT_EQ(2000LL, p.mLastTimestampSeen);
-
- // Reconnect happens, read from event4
- p.OnLogEvent(&event4, true);
- EXPECT_EQ(6UL, p.mLogCount);
- EXPECT_EQ(2000LL, p.mLargestTimestampSeen);
- EXPECT_EQ(2000LL, p.mLastTimestampSeen);
- EXPECT_TRUE(p.mInReconnection);
-
- p.OnLogEvent(&event5, false);
- EXPECT_EQ(6UL, p.mLogCount);
- EXPECT_EQ(2000LL, p.mLargestTimestampSeen);
- EXPECT_EQ(2000LL, p.mLastTimestampSeen);
- EXPECT_TRUE(p.mInReconnection);
-
- // Before we get out of reconnection state, it reconnects again.
- p.OnLogEvent(&event5, true);
- EXPECT_EQ(6UL, p.mLogCount);
- EXPECT_EQ(2000LL, p.mLargestTimestampSeen);
- EXPECT_EQ(2000LL, p.mLastTimestampSeen);
- EXPECT_TRUE(p.mInReconnection);
-
- p.OnLogEvent(&event6, false);
- EXPECT_EQ(6UL, p.mLogCount);
- EXPECT_EQ(2000LL, p.mLargestTimestampSeen);
- EXPECT_EQ(2000LL, p.mLastTimestampSeen);
- EXPECT_FALSE(p.mInReconnection);
- EXPECT_EQ(0, p.mLogLossCount);
-
- // it reconnects again. All old events are gone. We lose CP.
- p.OnLogEvent(&event7, true);
- EXPECT_EQ(7UL, p.mLogCount);
- EXPECT_EQ(3000LL, p.mLargestTimestampSeen);
- EXPECT_EQ(3000LL, p.mLastTimestampSeen);
- EXPECT_EQ(1, p.mLogLossCount);
- EXPECT_FALSE(p.mInReconnection);
-}
-
#else
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif
diff --git a/cmds/statsd/tests/UidMap_test.cpp b/cmds/statsd/tests/UidMap_test.cpp
index e23131d7b45d..99082cc647f6 100644
--- a/cmds/statsd/tests/UidMap_test.cpp
+++ b/cmds/statsd/tests/UidMap_test.cpp
@@ -40,11 +40,12 @@ const string kApp2 = "app2.sharing.1";
TEST(UidMapTest, TestIsolatedUID) {
sp<UidMap> m = new UidMap();
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> subscriberAlarmMonitor;
// Construct the processor with a dummy sendBroadcast function that does nothing.
- StatsLogProcessor p(m, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
- [](const ConfigKey& key) {return true;});
+ StatsLogProcessor p(m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
+ [](const ConfigKey& key) { return true; });
LogEvent addEvent(android::util::ISOLATED_UID_CHANGED, 1);
addEvent.write(100); // parent UID
addEvent.write(101); // isolated UID
diff --git a/cmds/statsd/tests/anomaly/AnomalyTracker_test.cpp b/cmds/statsd/tests/anomaly/AnomalyTracker_test.cpp
index 218d52a5c046..79bed52f0202 100644
--- a/cmds/statsd/tests/anomaly/AnomalyTracker_test.cpp
+++ b/cmds/statsd/tests/anomaly/AnomalyTracker_test.cpp
@@ -305,10 +305,10 @@ TEST(AnomalyTrackerTest, TestSparseBuckets) {
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 2LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyC), 1LL);
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 16, bucket16, {keyB}, {keyA, keyC, keyD}));
- // TODO: after detectAnomaly fix: EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
+ EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 15L);
detectAndDeclareAnomalies(anomalyTracker, 16, bucket16, eventTimestamp2);
- // TODO: after detectAnomaly fix: EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
+ EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 15L);
checkRefractoryTimes(anomalyTracker, eventTimestamp2, refractoryPeriodSec,
{{keyA, -1}, {keyB, eventTimestamp2}, {keyC, -1}, {keyD, -1}, {keyE, -1}});
@@ -366,7 +366,7 @@ TEST(AnomalyTrackerTest, TestSparseBuckets) {
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyC), 1LL);
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 25, bucket25, {}, {keyA, keyB, keyC, keyD}));
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 24L);
- // TODO: after detectAnomaly fix: EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
+ EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
detectAndDeclareAnomalies(anomalyTracker, 25, bucket25, eventTimestamp5);
checkRefractoryTimes(anomalyTracker, eventTimestamp5, refractoryPeriodSec,
{{keyA, -1}, {keyB, eventTimestamp4}, {keyC, -1}, {keyD, -1}, {keyE, -1}});
@@ -374,14 +374,14 @@ TEST(AnomalyTrackerTest, TestSparseBuckets) {
// Add past bucket #25
anomalyTracker.addPastBucket(bucket25, 25);
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 25L);
- // TODO: after detectAnomaly fix: EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 1UL);
+ EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 1UL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyD), 1LL);
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 28, bucket28, {},
{keyA, keyB, keyC, keyD, keyE}));
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 27L);
- // TODO: after detectAnomaly fix: EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
+ EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
detectAndDeclareAnomalies(anomalyTracker, 28, bucket28, eventTimestamp6);
- // TODO: after detectAnomaly fix: EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
+ EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
checkRefractoryTimes(anomalyTracker, eventTimestamp6, refractoryPeriodSec,
{{keyA, -1}, {keyB, -1}, {keyC, -1}, {keyD, -1}, {keyE, -1}});
@@ -390,9 +390,9 @@ TEST(AnomalyTrackerTest, TestSparseBuckets) {
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 28, bucket28, {keyE},
{keyA, keyB, keyC, keyD}));
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 27L);
- // TODO: after detectAnomaly fix: EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
+ EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
detectAndDeclareAnomalies(anomalyTracker, 28, bucket28, eventTimestamp6 + 7);
- // TODO: after detectAnomaly fix: EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
+ EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
checkRefractoryTimes(anomalyTracker, eventTimestamp6, refractoryPeriodSec,
{{keyA, -1}, {keyB, -1}, {keyC, -1}, {keyD, -1}, {keyE, eventTimestamp6 + 7}});
}
diff --git a/cmds/statsd/tests/condition/CombinationConditionTracker_test.cpp b/cmds/statsd/tests/condition/CombinationConditionTracker_test.cpp
index 23d69267d1c0..6529d65a5825 100644
--- a/cmds/statsd/tests/condition/CombinationConditionTracker_test.cpp
+++ b/cmds/statsd/tests/condition/CombinationConditionTracker_test.cpp
@@ -40,6 +40,7 @@ TEST(ConditionTrackerTest, TestUnknownCondition) {
EXPECT_EQ(evaluateCombinationCondition(children, operation, conditionResults),
ConditionState::kUnknown);
}
+
TEST(ConditionTrackerTest, TestAndCondition) {
// Set up the matcher
LogicalOperation operation = LogicalOperation::AND;
@@ -103,6 +104,11 @@ TEST(ConditionTrackerTest, TestNotCondition) {
conditionResults.clear();
conditionResults.push_back(ConditionState::kFalse);
EXPECT_TRUE(evaluateCombinationCondition(children, operation, conditionResults));
+
+ children.clear();
+ conditionResults.clear();
+ EXPECT_EQ(evaluateCombinationCondition(children, operation, conditionResults),
+ ConditionState::kUnknown);
}
TEST(ConditionTrackerTest, TestNandCondition) {
diff --git a/cmds/statsd/tests/e2e/GaugeMetric_e2e_pull_test.cpp b/cmds/statsd/tests/e2e/GaugeMetric_e2e_pull_test.cpp
index eca5690de478..d98395e78467 100644
--- a/cmds/statsd/tests/e2e/GaugeMetric_e2e_pull_test.cpp
+++ b/cmds/statsd/tests/e2e/GaugeMetric_e2e_pull_test.cpp
@@ -66,7 +66,7 @@ TEST(GaugeMetricE2eTest, TestRandomSamplePulledEvents) {
baseTimeNs, configAddedTimeNs, config, cfgKey);
EXPECT_EQ(processor->mMetricsManagers.size(), 1u);
EXPECT_TRUE(processor->mMetricsManagers.begin()->second->isConfigValid());
- processor->mStatsPullerManager.ForceClearPullerCache();
+ processor->mPullerManager->ForceClearPullerCache();
int startBucketNum = processor->mMetricsManagers.begin()->second->
mAllMetricProducers[0]->getCurrentBucketNum();
@@ -74,12 +74,11 @@ TEST(GaugeMetricE2eTest, TestRandomSamplePulledEvents) {
// When creating the config, the gauge metric producer should register the alarm at the
// end of the current bucket.
- EXPECT_EQ((size_t)1, StatsPullerManagerImpl::GetInstance().mReceivers.size());
+ EXPECT_EQ((size_t)1, processor->mPullerManager->mReceivers.size());
EXPECT_EQ(bucketSizeNs,
- StatsPullerManagerImpl::GetInstance().mReceivers.begin()->
- second.front().intervalNs);
- int64_t& nextPullTimeNs = StatsPullerManagerImpl::GetInstance().mReceivers.begin()->
- second.front().nextPullTimeNs;
+ processor->mPullerManager->mReceivers.begin()->second.front().intervalNs);
+ int64_t& nextPullTimeNs =
+ processor->mPullerManager->mReceivers.begin()->second.front().nextPullTimeNs;
EXPECT_EQ(baseTimeNs + startBucketNum * bucketSizeNs + bucketSizeNs, nextPullTimeNs);
auto screenOffEvent = CreateScreenStateChangedEvent(android::view::DISPLAY_STATE_OFF,
@@ -212,7 +211,7 @@ TEST(GaugeMetricE2eTest, TestAllConditionChangesSamplePulledEvents) {
baseTimeNs, configAddedTimeNs, config, cfgKey);
EXPECT_EQ(processor->mMetricsManagers.size(), 1u);
EXPECT_TRUE(processor->mMetricsManagers.begin()->second->isConfigValid());
- processor->mStatsPullerManager.ForceClearPullerCache();
+ processor->mPullerManager->ForceClearPullerCache();
int startBucketNum = processor->mMetricsManagers.begin()->second->
mAllMetricProducers[0]->getCurrentBucketNum();
@@ -313,7 +312,7 @@ TEST(GaugeMetricE2eTest, TestRandomSamplePulledEvent_LateAlarm) {
baseTimeNs, configAddedTimeNs, config, cfgKey);
EXPECT_EQ(processor->mMetricsManagers.size(), 1u);
EXPECT_TRUE(processor->mMetricsManagers.begin()->second->isConfigValid());
- processor->mStatsPullerManager.ForceClearPullerCache();
+ processor->mPullerManager->ForceClearPullerCache();
int startBucketNum = processor->mMetricsManagers.begin()->second->
mAllMetricProducers[0]->getCurrentBucketNum();
@@ -321,12 +320,11 @@ TEST(GaugeMetricE2eTest, TestRandomSamplePulledEvent_LateAlarm) {
// When creating the config, the gauge metric producer should register the alarm at the
// end of the current bucket.
- EXPECT_EQ((size_t)1, StatsPullerManagerImpl::GetInstance().mReceivers.size());
+ EXPECT_EQ((size_t)1, processor->mPullerManager->mReceivers.size());
EXPECT_EQ(bucketSizeNs,
- StatsPullerManagerImpl::GetInstance().mReceivers.begin()->
- second.front().intervalNs);
- int64_t& nextPullTimeNs = StatsPullerManagerImpl::GetInstance().mReceivers.begin()->
- second.front().nextPullTimeNs;
+ processor->mPullerManager->mReceivers.begin()->second.front().intervalNs);
+ int64_t& nextPullTimeNs =
+ processor->mPullerManager->mReceivers.begin()->second.front().nextPullTimeNs;
EXPECT_EQ(baseTimeNs + startBucketNum * bucketSizeNs + bucketSizeNs, nextPullTimeNs);
auto screenOffEvent = CreateScreenStateChangedEvent(android::view::DISPLAY_STATE_OFF,
diff --git a/cmds/statsd/tests/e2e/MetricConditionLink_e2e_test.cpp b/cmds/statsd/tests/e2e/MetricConditionLink_e2e_test.cpp
index 11aaab00d88c..cc8894bdbca6 100644
--- a/cmds/statsd/tests/e2e/MetricConditionLink_e2e_test.cpp
+++ b/cmds/statsd/tests/e2e/MetricConditionLink_e2e_test.cpp
@@ -99,7 +99,6 @@ StatsdConfig CreateStatsdConfig() {
// If we want to test multiple data dumps, we must do it in separate tests, because in the e2e tests,
// we should use the real API which will clear the data after dump data is called.
-// TODO: better refactor the code so that the tests are not so verbose.
TEST(MetricConditionLinkE2eTest, TestMultiplePredicatesAndLinks1) {
auto config = CreateStatsdConfig();
uint64_t bucketStartTimeNs = 10000000000;
diff --git a/cmds/statsd/tests/e2e/ValueMetric_pull_e2e_test.cpp b/cmds/statsd/tests/e2e/ValueMetric_pull_e2e_test.cpp
index dd28d3611b4f..f2e8f58fe763 100644
--- a/cmds/statsd/tests/e2e/ValueMetric_pull_e2e_test.cpp
+++ b/cmds/statsd/tests/e2e/ValueMetric_pull_e2e_test.cpp
@@ -66,7 +66,7 @@ TEST(ValueMetricE2eTest, TestPulledEvents) {
baseTimeNs, configAddedTimeNs, config, cfgKey);
EXPECT_EQ(processor->mMetricsManagers.size(), 1u);
EXPECT_TRUE(processor->mMetricsManagers.begin()->second->isConfigValid());
- processor->mStatsPullerManager.ForceClearPullerCache();
+ processor->mPullerManager->ForceClearPullerCache();
int startBucketNum = processor->mMetricsManagers.begin()->second->
mAllMetricProducers[0]->getCurrentBucketNum();
@@ -74,12 +74,11 @@ TEST(ValueMetricE2eTest, TestPulledEvents) {
// When creating the config, the value metric producer should register the alarm at the
// end of the current bucket.
- EXPECT_EQ((size_t)1, StatsPullerManagerImpl::GetInstance().mReceivers.size());
+ EXPECT_EQ((size_t)1, processor->mPullerManager->mReceivers.size());
EXPECT_EQ(bucketSizeNs,
- StatsPullerManagerImpl::GetInstance().mReceivers.begin()->
- second.front().intervalNs);
- int64_t& expectedPullTimeNs = StatsPullerManagerImpl::GetInstance().mReceivers.begin()->
- second.front().nextPullTimeNs;
+ processor->mPullerManager->mReceivers.begin()->second.front().intervalNs);
+ int64_t& expectedPullTimeNs =
+ processor->mPullerManager->mReceivers.begin()->second.front().nextPullTimeNs;
EXPECT_EQ(baseTimeNs + startBucketNum * bucketSizeNs + bucketSizeNs, expectedPullTimeNs);
auto screenOffEvent = CreateScreenStateChangedEvent(android::view::DISPLAY_STATE_OFF,
@@ -142,23 +141,23 @@ TEST(ValueMetricE2eTest, TestPulledEvents) {
EXPECT_EQ(baseTimeNs + 2 * bucketSizeNs, data.bucket_info(0).start_bucket_elapsed_nanos());
EXPECT_EQ(baseTimeNs + 3 * bucketSizeNs, data.bucket_info(0).end_bucket_elapsed_nanos());
- EXPECT_TRUE(data.bucket_info(0).has_value());
+ EXPECT_TRUE(data.bucket_info(0).has_value_long());
EXPECT_EQ(baseTimeNs + 3 * bucketSizeNs, data.bucket_info(1).start_bucket_elapsed_nanos());
EXPECT_EQ(baseTimeNs + 4 * bucketSizeNs, data.bucket_info(1).end_bucket_elapsed_nanos());
- EXPECT_TRUE(data.bucket_info(1).has_value());
+ EXPECT_TRUE(data.bucket_info(1).has_value_long());
EXPECT_EQ(baseTimeNs + 4 * bucketSizeNs, data.bucket_info(2).start_bucket_elapsed_nanos());
EXPECT_EQ(baseTimeNs + 5 * bucketSizeNs, data.bucket_info(2).end_bucket_elapsed_nanos());
- EXPECT_TRUE(data.bucket_info(2).has_value());
+ EXPECT_TRUE(data.bucket_info(2).has_value_long());
EXPECT_EQ(baseTimeNs + 6 * bucketSizeNs, data.bucket_info(3).start_bucket_elapsed_nanos());
EXPECT_EQ(baseTimeNs + 7 * bucketSizeNs, data.bucket_info(3).end_bucket_elapsed_nanos());
- EXPECT_TRUE(data.bucket_info(3).has_value());
+ EXPECT_TRUE(data.bucket_info(3).has_value_long());
EXPECT_EQ(baseTimeNs + 7 * bucketSizeNs, data.bucket_info(4).start_bucket_elapsed_nanos());
EXPECT_EQ(baseTimeNs + 8 * bucketSizeNs, data.bucket_info(4).end_bucket_elapsed_nanos());
- EXPECT_TRUE(data.bucket_info(4).has_value());
+ EXPECT_TRUE(data.bucket_info(4).has_value_long());
}
TEST(ValueMetricE2eTest, TestPulledEvents_LateAlarm) {
@@ -173,7 +172,7 @@ TEST(ValueMetricE2eTest, TestPulledEvents_LateAlarm) {
baseTimeNs, configAddedTimeNs, config, cfgKey);
EXPECT_EQ(processor->mMetricsManagers.size(), 1u);
EXPECT_TRUE(processor->mMetricsManagers.begin()->second->isConfigValid());
- processor->mStatsPullerManager.ForceClearPullerCache();
+ processor->mPullerManager->ForceClearPullerCache();
int startBucketNum = processor->mMetricsManagers.begin()->second->
mAllMetricProducers[0]->getCurrentBucketNum();
@@ -181,12 +180,11 @@ TEST(ValueMetricE2eTest, TestPulledEvents_LateAlarm) {
// When creating the config, the value metric producer should register the alarm at the
// end of the current bucket.
- EXPECT_EQ((size_t)1, StatsPullerManagerImpl::GetInstance().mReceivers.size());
+ EXPECT_EQ((size_t)1, processor->mPullerManager->mReceivers.size());
EXPECT_EQ(bucketSizeNs,
- StatsPullerManagerImpl::GetInstance().mReceivers.begin()->
- second.front().intervalNs);
- int64_t& expectedPullTimeNs = StatsPullerManagerImpl::GetInstance().mReceivers.begin()->
- second.front().nextPullTimeNs;
+ processor->mPullerManager->mReceivers.begin()->second.front().intervalNs);
+ int64_t& expectedPullTimeNs =
+ processor->mPullerManager->mReceivers.begin()->second.front().nextPullTimeNs;
EXPECT_EQ(baseTimeNs + startBucketNum * bucketSizeNs + bucketSizeNs, expectedPullTimeNs);
// Screen off/on/off events.
@@ -250,15 +248,15 @@ TEST(ValueMetricE2eTest, TestPulledEvents_LateAlarm) {
EXPECT_EQ(baseTimeNs + 2 * bucketSizeNs, data.bucket_info(0).start_bucket_elapsed_nanos());
EXPECT_EQ(baseTimeNs + 3 * bucketSizeNs, data.bucket_info(0).end_bucket_elapsed_nanos());
- EXPECT_TRUE(data.bucket_info(0).has_value());
+ EXPECT_TRUE(data.bucket_info(0).has_value_long());
EXPECT_EQ(baseTimeNs + 8 * bucketSizeNs, data.bucket_info(1).start_bucket_elapsed_nanos());
EXPECT_EQ(baseTimeNs + 9 * bucketSizeNs, data.bucket_info(1).end_bucket_elapsed_nanos());
- EXPECT_TRUE(data.bucket_info(1).has_value());
+ EXPECT_TRUE(data.bucket_info(1).has_value_long());
EXPECT_EQ(baseTimeNs + 9 * bucketSizeNs, data.bucket_info(2).start_bucket_elapsed_nanos());
EXPECT_EQ(baseTimeNs + 10 * bucketSizeNs, data.bucket_info(2).end_bucket_elapsed_nanos());
- EXPECT_TRUE(data.bucket_info(2).has_value());
+ EXPECT_TRUE(data.bucket_info(2).has_value_long());
}
#else
diff --git a/cmds/statsd/tests/metrics/CountMetricProducer_test.cpp b/cmds/statsd/tests/metrics/CountMetricProducer_test.cpp
index 9a8919e98f6d..67c704eb87fd 100644
--- a/cmds/statsd/tests/metrics/CountMetricProducer_test.cpp
+++ b/cmds/statsd/tests/metrics/CountMetricProducer_test.cpp
@@ -37,6 +37,19 @@ namespace statsd {
const ConfigKey kConfigKey(0, 12345);
+TEST(CountMetricProducerTest, TestFirstBucket) {
+ CountMetric metric;
+ metric.set_id(1);
+ metric.set_bucket(ONE_MINUTE);
+ sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+
+ CountMetricProducer countProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
+ 5, 600 * NS_PER_SEC + NS_PER_SEC/2);
+ EXPECT_EQ(600500000000, countProducer.mCurrentBucketStartTimeNs);
+ EXPECT_EQ(10, countProducer.mCurrentBucketNum);
+ EXPECT_EQ(660000000005, countProducer.getCurrentBucketEndTimeNs());
+}
+
TEST(CountMetricProducerTest, TestNonDimensionalEvents) {
int64_t bucketStartTimeNs = 10000000000;
int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
@@ -56,8 +69,7 @@ TEST(CountMetricProducerTest, TestNonDimensionalEvents) {
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
CountMetricProducer countProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
- bucketStartTimeNs);
- countProducer.setBucketSize(60 * NS_PER_SEC);
+ bucketStartTimeNs, bucketStartTimeNs);
// 2 events in bucket 1.
countProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
@@ -119,8 +131,7 @@ TEST(CountMetricProducerTest, TestEventsWithNonSlicedCondition) {
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- CountMetricProducer countProducer(kConfigKey, metric, 1, wizard, bucketStartTimeNs);
- countProducer.setBucketSize(60 * NS_PER_SEC);
+ CountMetricProducer countProducer(kConfigKey, metric, 1, wizard, bucketStartTimeNs, bucketStartTimeNs);
countProducer.onConditionChanged(true, bucketStartTimeNs);
countProducer.onMatchedLogEvent(1 /*matcher index*/, event1);
@@ -181,8 +192,7 @@ TEST(CountMetricProducerTest, TestEventsWithSlicedCondition) {
EXPECT_CALL(*wizard, query(_, key2, _, _, _, _)).WillOnce(Return(ConditionState::kTrue));
CountMetricProducer countProducer(kConfigKey, metric, 1 /*condition tracker index*/, wizard,
- bucketStartTimeNs);
- countProducer.setBucketSize(60 * NS_PER_SEC);
+ bucketStartTimeNs, bucketStartTimeNs);
countProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
countProducer.flushIfNeededLocked(bucketStartTimeNs + 1);
@@ -221,8 +231,7 @@ TEST(CountMetricProducerTest, TestEventWithAppUpgrade) {
event1.init();
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
CountMetricProducer countProducer(kConfigKey, metric, -1 /* no condition */, wizard,
- bucketStartTimeNs);
- countProducer.setBucketSize(60 * NS_PER_SEC);
+ bucketStartTimeNs, bucketStartTimeNs);
sp<AnomalyTracker> anomalyTracker = countProducer.addAnomalyTracker(alert, alarmMonitor);
EXPECT_TRUE(anomalyTracker != nullptr);
@@ -280,8 +289,7 @@ TEST(CountMetricProducerTest, TestEventWithAppUpgradeInNextBucket) {
event1.init();
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
CountMetricProducer countProducer(kConfigKey, metric, -1 /* no condition */, wizard,
- bucketStartTimeNs);
- countProducer.setBucketSize(60 * NS_PER_SEC);
+ bucketStartTimeNs, bucketStartTimeNs);
// Bucket is not flushed yet.
countProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
@@ -337,8 +345,7 @@ TEST(CountMetricProducerTest, TestAnomalyDetectionUnSliced) {
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
CountMetricProducer countProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
- bucketStartTimeNs);
- countProducer.setBucketSize(60 * NS_PER_SEC);
+ bucketStartTimeNs, bucketStartTimeNs);
sp<AnomalyTracker> anomalyTracker = countProducer.addAnomalyTracker(alert, alarmMonitor);
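The TestFirstBucket cases added in this file (and the matching ones for DurationMetricProducer and GaugeMetricProducer below) all assert the same partial-first-bucket arithmetic: with a 5 ns time base and 60 s buckets, a metric created at 600.5 s sits in bucket 10, the current bucket start is the creation time itself, and the bucket ends at 5 + 11 * 60e9 = 660000000005 ns. A self-contained sketch of that arithmetic (helper names are illustrative, not the producers' internals):

    #include <cassert>
    #include <cstdint>

    constexpr int64_t kNsPerSec = 1000000000;

    int64_t CurrentBucketNum(int64_t timeBaseNs, int64_t bucketSizeNs, int64_t nowNs) {
        return (nowNs - timeBaseNs) / bucketSizeNs;
    }

    int64_t CurrentBucketEndNs(int64_t timeBaseNs, int64_t bucketSizeNs, int64_t nowNs) {
        return timeBaseNs + (CurrentBucketNum(timeBaseNs, bucketSizeNs, nowNs) + 1) * bucketSizeNs;
    }

    int main() {
        const int64_t timeBaseNs = 5;                              // statsd start time
        const int64_t bucketSizeNs = 60 * kNsPerSec;               // ONE_MINUTE buckets
        const int64_t startNs = 600 * kNsPerSec + kNsPerSec / 2;   // metric created at 600.5 s
        assert(CurrentBucketNum(timeBaseNs, bucketSizeNs, startNs) == 10);
        assert(CurrentBucketEndNs(timeBaseNs, bucketSizeNs, startNs) == 660000000005LL);
        return 0;
    }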
diff --git a/cmds/statsd/tests/metrics/DurationMetricProducer_test.cpp b/cmds/statsd/tests/metrics/DurationMetricProducer_test.cpp
index 7ef8c5bd6a1b..b54096441d3f 100644
--- a/cmds/statsd/tests/metrics/DurationMetricProducer_test.cpp
+++ b/cmds/statsd/tests/metrics/DurationMetricProducer_test.cpp
@@ -39,6 +39,23 @@ namespace statsd {
const ConfigKey kConfigKey(0, 12345);
+TEST(DurationMetricTrackerTest, TestFirstBucket) {
+ sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+ DurationMetric metric;
+ metric.set_id(1);
+ metric.set_bucket(ONE_MINUTE);
+ metric.set_aggregation_type(DurationMetric_AggregationType_SUM);
+
+ FieldMatcher dimensions;
+ DurationMetricProducer durationProducer(
+ kConfigKey, metric, -1 /*no condition*/, 1 /* start index */, 2 /* stop index */,
+ 3 /* stop_all index */, false /*nesting*/, wizard, dimensions, 5, 600 * NS_PER_SEC + NS_PER_SEC/2);
+
+ EXPECT_EQ(600500000000, durationProducer.mCurrentBucketStartTimeNs);
+ EXPECT_EQ(10, durationProducer.mCurrentBucketNum);
+ EXPECT_EQ(660000000005, durationProducer.getCurrentBucketEndTimeNs());
+}
+
TEST(DurationMetricTrackerTest, TestNoCondition) {
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
int64_t bucketStartTimeNs = 10000000000;
@@ -58,8 +75,7 @@ TEST(DurationMetricTrackerTest, TestNoCondition) {
FieldMatcher dimensions;
DurationMetricProducer durationProducer(
kConfigKey, metric, -1 /*no condition*/, 1 /* start index */, 2 /* stop index */,
- 3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs);
- durationProducer.setBucketSize(60 * NS_PER_SEC);
+ 3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs, bucketStartTimeNs);
durationProducer.onMatchedLogEvent(1 /* start index*/, event1);
durationProducer.onMatchedLogEvent(2 /* stop index*/, event2);
@@ -100,8 +116,7 @@ TEST(DurationMetricTrackerTest, TestNonSlicedCondition) {
FieldMatcher dimensions;
DurationMetricProducer durationProducer(
kConfigKey, metric, 0 /* condition index */, 1 /* start index */, 2 /* stop index */,
- 3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs);
- durationProducer.setBucketSize(60 * NS_PER_SEC);
+ 3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs, bucketStartTimeNs);
EXPECT_FALSE(durationProducer.mCondition);
EXPECT_FALSE(durationProducer.isConditionSliced());
@@ -151,8 +166,7 @@ TEST(DurationMetricTrackerTest, TestSumDurationWithUpgrade) {
FieldMatcher dimensions;
DurationMetricProducer durationProducer(
kConfigKey, metric, -1 /* no condition */, 1 /* start index */, 2 /* stop index */,
- 3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs);
- durationProducer.setBucketSize(60 * NS_PER_SEC);
+ 3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs, bucketStartTimeNs);
LogEvent start_event(tagId, startTimeNs);
start_event.init();
@@ -206,8 +220,7 @@ TEST(DurationMetricTrackerTest, TestSumDurationWithUpgradeInFollowingBucket) {
FieldMatcher dimensions;
DurationMetricProducer durationProducer(
kConfigKey, metric, -1 /* no condition */, 1 /* start index */, 2 /* stop index */,
- 3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs);
- durationProducer.setBucketSize(60 * NS_PER_SEC);
+ 3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs, bucketStartTimeNs);
LogEvent start_event(tagId, startTimeNs);
start_event.init();
@@ -261,8 +274,7 @@ TEST(DurationMetricTrackerTest, TestSumDurationAnomalyWithUpgrade) {
FieldMatcher dimensions;
DurationMetricProducer durationProducer(
kConfigKey, metric, -1 /* no condition */, 1 /* start index */, 2 /* stop index */,
- 3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs);
- durationProducer.setBucketSize(60 * NS_PER_SEC);
+ 3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs, bucketStartTimeNs);
sp<AnomalyTracker> anomalyTracker = durationProducer.addAnomalyTracker(alert, alarmMonitor);
EXPECT_TRUE(anomalyTracker != nullptr);
@@ -300,8 +312,7 @@ TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgrade) {
FieldMatcher dimensions;
DurationMetricProducer durationProducer(
kConfigKey, metric, -1 /* no condition */, 1 /* start index */, 2 /* stop index */,
- 3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs);
- durationProducer.setBucketSize(60 * NS_PER_SEC);
+ 3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs, bucketStartTimeNs);
LogEvent start_event(tagId, startTimeNs);
start_event.init();
@@ -348,8 +359,7 @@ TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgradeInNextBucket) {
FieldMatcher dimensions;
DurationMetricProducer durationProducer(
kConfigKey, metric, -1 /* no condition */, 1 /* start index */, 2 /* stop index */,
- 3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs);
- durationProducer.setBucketSize(60 * NS_PER_SEC);
+ 3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs, bucketStartTimeNs);
LogEvent start_event(tagId, startTimeNs);
start_event.init();
diff --git a/cmds/statsd/tests/metrics/EventMetricProducer_test.cpp b/cmds/statsd/tests/metrics/EventMetricProducer_test.cpp
index 3a1546641d45..d2fd95c818cf 100644
--- a/cmds/statsd/tests/metrics/EventMetricProducer_test.cpp
+++ b/cmds/statsd/tests/metrics/EventMetricProducer_test.cpp
@@ -54,8 +54,8 @@ TEST(EventMetricProducerTest, TestNoCondition) {
eventProducer.onMatchedLogEvent(1 /*matcher index*/, event1);
eventProducer.onMatchedLogEvent(1 /*matcher index*/, event2);
- // TODO: get the report and check the content after the ProtoOutputStream change is done.
- // eventProducer.onDumpReport();
+ // TODO(b/110561136): get the report and check the content after the ProtoOutputStream change
+ // is done. eventProducer.onDumpReport();
}
TEST(EventMetricProducerTest, TestEventsWithNonSlicedCondition) {
diff --git a/cmds/statsd/tests/metrics/GaugeMetricProducer_test.cpp b/cmds/statsd/tests/metrics/GaugeMetricProducer_test.cpp
index 698ce727e688..9471faa89547 100644
--- a/cmds/statsd/tests/metrics/GaugeMetricProducer_test.cpp
+++ b/cmds/statsd/tests/metrics/GaugeMetricProducer_test.cpp
@@ -47,7 +47,10 @@ const int64_t bucket3StartTimeNs = bucketStartTimeNs + 2 * bucketSizeNs;
const int64_t bucket4StartTimeNs = bucketStartTimeNs + 3 * bucketSizeNs;
const int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
-TEST(GaugeMetricProducerTest, TestNoCondition) {
+/*
+ * Tests that the first bucket works correctly
+ */
+TEST(GaugeMetricProducerTest, TestFirstBucket) {
GaugeMetric metric;
metric.set_id(metricId);
metric.set_bucket(ONE_MINUTE);
@@ -58,17 +61,48 @@ TEST(GaugeMetricProducerTest, TestNoCondition) {
gaugeFieldMatcher->add_child()->set_field(3);
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
- // TODO: pending refactor of StatsPullerManager
- // For now we still need this so that it doesn't do real pulling.
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ // statsd started long ago.
+ // The metric starts in the middle of the bucket.
+ GaugeMetricProducer gaugeProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
+ -1, -1, tagId, 5, 600 * NS_PER_SEC + NS_PER_SEC / 2,
+ pullerManager);
+
+ EXPECT_EQ(600500000000, gaugeProducer.mCurrentBucketStartTimeNs);
+ EXPECT_EQ(10, gaugeProducer.mCurrentBucketNum);
+ EXPECT_EQ(660000000005, gaugeProducer.getCurrentBucketEndTimeNs());
+}
+
+TEST(GaugeMetricProducerTest, TestPulledEventsNoCondition) {
+ GaugeMetric metric;
+ metric.set_id(metricId);
+ metric.set_bucket(ONE_MINUTE);
+ metric.mutable_gauge_fields_filter()->set_include_all(false);
+ auto gaugeFieldMatcher = metric.mutable_gauge_fields_filter()->mutable_fields();
+ gaugeFieldMatcher->set_field(tagId);
+ gaugeFieldMatcher->add_child()->set_field(1);
+ gaugeFieldMatcher->add_child()->set_field(3);
+
+ sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillOnce(Return());
+ EXPECT_CALL(*pullerManager, Pull(tagId, _, _))
+ .WillOnce(Invoke([](int tagId, int64_t timeNs,
+ vector<std::shared_ptr<LogEvent>>* data) {
+ data->clear();
+ shared_ptr<LogEvent> event = make_shared<LogEvent>(tagId, bucketStartTimeNs + 10);
+ event->write(3);
+ event->init();
+ data->push_back(event);
+ return true;
+ }));
GaugeMetricProducer gaugeProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
- tagId, bucketStartTimeNs, bucketStartTimeNs, pullerManager);
- gaugeProducer.setBucketSize(60 * NS_PER_SEC);
+ tagId, -1, tagId, bucketStartTimeNs, bucketStartTimeNs,
+ pullerManager);
vector<shared_ptr<LogEvent>> allData;
allData.clear();
@@ -86,11 +120,12 @@ TEST(GaugeMetricProducerTest, TestNoCondition) {
EXPECT_EQ(10, it->mValue.int_value);
it++;
EXPECT_EQ(11, it->mValue.int_value);
- EXPECT_EQ(0UL, gaugeProducer.mPastBuckets.size());
+ EXPECT_EQ(1UL, gaugeProducer.mPastBuckets.size());
+ EXPECT_EQ(3, gaugeProducer.mPastBuckets.begin()->second.back().mGaugeAtoms
+ .front().mFields->begin()->mValue.int_value);
allData.clear();
- std::shared_ptr<LogEvent> event2 =
- std::make_shared<LogEvent>(tagId, bucket3StartTimeNs + 10);
+ std::shared_ptr<LogEvent> event2 = std::make_shared<LogEvent>(tagId, bucket3StartTimeNs + 10);
event2->write(24);
event2->write("some value");
event2->write(25);
@@ -106,7 +141,7 @@ TEST(GaugeMetricProducerTest, TestNoCondition) {
EXPECT_EQ(25, it->mValue.int_value);
// One dimension.
EXPECT_EQ(1UL, gaugeProducer.mPastBuckets.size());
- EXPECT_EQ(1UL, gaugeProducer.mPastBuckets.begin()->second.size());
+ EXPECT_EQ(2UL, gaugeProducer.mPastBuckets.begin()->second.size());
it = gaugeProducer.mPastBuckets.begin()->second.back().mGaugeAtoms.front().mFields->begin();
EXPECT_EQ(INT, it->mValue.getType());
EXPECT_EQ(10L, it->mValue.int_value);
@@ -118,7 +153,7 @@ TEST(GaugeMetricProducerTest, TestNoCondition) {
EXPECT_EQ(0UL, gaugeProducer.mCurrentSlicedBucket->size());
// One dimension.
EXPECT_EQ(1UL, gaugeProducer.mPastBuckets.size());
- EXPECT_EQ(2UL, gaugeProducer.mPastBuckets.begin()->second.size());
+ EXPECT_EQ(3UL, gaugeProducer.mPastBuckets.begin()->second.size());
it = gaugeProducer.mPastBuckets.begin()->second.back().mGaugeAtoms.front().mFields->begin();
EXPECT_EQ(INT, it->mValue.getType());
EXPECT_EQ(24L, it->mValue.int_value);
@@ -140,13 +175,11 @@ TEST(GaugeMetricProducerTest, TestPushedEventsWithUpgrade) {
alert.set_trigger_if_sum_gt(25);
alert.set_num_buckets(100);
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
GaugeMetricProducer gaugeProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
- -1 /* -1 means no pulling */, bucketStartTimeNs,
+ -1 /* -1 means no pulling */, -1, tagId, bucketStartTimeNs,
bucketStartTimeNs, pullerManager);
- gaugeProducer.setBucketSize(60 * NS_PER_SEC);
sp<AnomalyTracker> anomalyTracker = gaugeProducer.addAnomalyTracker(alert, alarmMonitor);
EXPECT_TRUE(anomalyTracker != nullptr);
@@ -211,11 +244,11 @@ TEST(GaugeMetricProducerTest, TestPulledWithUpgrade) {
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, Pull(tagId, _, _))
+ .WillOnce(Return(false))
.WillOnce(Invoke([](int tagId, int64_t timeNs,
vector<std::shared_ptr<LogEvent>>* data) {
data->clear();
@@ -228,8 +261,8 @@ TEST(GaugeMetricProducerTest, TestPulledWithUpgrade) {
}));
GaugeMetricProducer gaugeProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
- tagId, bucketStartTimeNs, bucketStartTimeNs, pullerManager);
- gaugeProducer.setBucketSize(60 * NS_PER_SEC);
+ tagId, -1, tagId, bucketStartTimeNs, bucketStartTimeNs,
+ pullerManager);
vector<shared_ptr<LogEvent>> allData;
shared_ptr<LogEvent> event = make_shared<LogEvent>(tagId, bucketStartTimeNs + 1);
@@ -269,7 +302,7 @@ TEST(GaugeMetricProducerTest, TestPulledWithUpgrade) {
->mValue.int_value);
}
-TEST(GaugeMetricProducerTest, TestWithCondition) {
+TEST(GaugeMetricProducerTest, TestPulledEventsWithCondition) {
GaugeMetric metric;
metric.set_id(metricId);
metric.set_bucket(ONE_MINUTE);
@@ -280,8 +313,7 @@ TEST(GaugeMetricProducerTest, TestWithCondition) {
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, Pull(tagId, _, _))
@@ -296,9 +328,8 @@ TEST(GaugeMetricProducerTest, TestWithCondition) {
return true;
}));
- GaugeMetricProducer gaugeProducer(kConfigKey, metric, 1, wizard, tagId,
+ GaugeMetricProducer gaugeProducer(kConfigKey, metric, 1, wizard, tagId, -1, tagId,
bucketStartTimeNs, bucketStartTimeNs, pullerManager);
- gaugeProducer.setBucketSize(60 * NS_PER_SEC);
gaugeProducer.onConditionChanged(true, bucketStartTimeNs + 8);
EXPECT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
@@ -340,7 +371,7 @@ TEST(GaugeMetricProducerTest, TestWithCondition) {
->mValue.int_value);
}
-TEST(GaugeMetricProducerTest, TestWithSlicedCondition) {
+TEST(GaugeMetricProducerTest, TestPulledEventsWithSlicedCondition) {
const int conditionTag = 65;
GaugeMetric metric;
metric.set_id(1111111);
@@ -372,8 +403,7 @@ TEST(GaugeMetricProducerTest, TestWithSlicedCondition) {
return ConditionState::kTrue;
}));
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, Pull(tagId, _, _))
@@ -388,9 +418,8 @@ TEST(GaugeMetricProducerTest, TestWithSlicedCondition) {
return true;
}));
- GaugeMetricProducer gaugeProducer(kConfigKey, metric, 1, wizard, tagId, bucketStartTimeNs,
- bucketStartTimeNs, pullerManager);
- gaugeProducer.setBucketSize(60 * NS_PER_SEC);
+ GaugeMetricProducer gaugeProducer(kConfigKey, metric, 1, wizard, tagId, -1, tagId,
+ bucketStartTimeNs, bucketStartTimeNs, pullerManager);
gaugeProducer.onSlicedConditionMayChange(true, bucketStartTimeNs + 8);
@@ -417,14 +446,14 @@ TEST(GaugeMetricProducerTest, TestWithSlicedCondition) {
EXPECT_EQ(1UL, gaugeProducer.mPastBuckets.size());
}
-TEST(GaugeMetricProducerTest, TestAnomalyDetection) {
+TEST(GaugeMetricProducerTest, TestPulledEventsAnomalyDetection) {
sp<AlarmMonitor> alarmMonitor;
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillOnce(Return());
+ EXPECT_CALL(*pullerManager, Pull(tagId, _, _)).WillOnce(Return(false));
GaugeMetric metric;
metric.set_id(metricId);
@@ -433,8 +462,8 @@ TEST(GaugeMetricProducerTest, TestAnomalyDetection) {
gaugeFieldMatcher->set_field(tagId);
gaugeFieldMatcher->add_child()->set_field(2);
GaugeMetricProducer gaugeProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
- tagId, bucketStartTimeNs, bucketStartTimeNs, pullerManager);
- gaugeProducer.setBucketSize(60 * NS_PER_SEC);
+ tagId, -1, tagId, bucketStartTimeNs, bucketStartTimeNs,
+ pullerManager);
Alert alert;
alert.set_id(101);
@@ -472,7 +501,7 @@ TEST(GaugeMetricProducerTest, TestAnomalyDetection) {
.mFields->begin()
->mValue.int_value);
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_METRIC_DIMENSION_KEY),
- std::ceil(1.0 * event2->GetElapsedTimestampNs() / NS_PER_SEC) + refPeriodSec);
+ std::ceil(1.0 * event2->GetElapsedTimestampNs() / NS_PER_SEC) + refPeriodSec);
std::shared_ptr<LogEvent> event3 =
std::make_shared<LogEvent>(tagId, bucketStartTimeNs + 2 * bucketSizeNs + 10);
@@ -487,7 +516,7 @@ TEST(GaugeMetricProducerTest, TestAnomalyDetection) {
.mFields->begin()
->mValue.int_value);
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_METRIC_DIMENSION_KEY),
- std::ceil(1.0 * event2->GetElapsedTimestampNs() / NS_PER_SEC + refPeriodSec));
+ std::ceil(1.0 * event2->GetElapsedTimestampNs() / NS_PER_SEC + refPeriodSec));
// The event4 does not have the gauge field. Thus the current bucket value is 0.
std::shared_ptr<LogEvent> event4 =
@@ -499,6 +528,83 @@ TEST(GaugeMetricProducerTest, TestAnomalyDetection) {
EXPECT_TRUE(gaugeProducer.mCurrentSlicedBucket->begin()->second.front().mFields->empty());
}
+TEST(GaugeMetricProducerTest, TestPullOnTrigger) {
+ GaugeMetric metric;
+ metric.set_id(metricId);
+ metric.set_bucket(ONE_MINUTE);
+ metric.set_sampling_type(GaugeMetric::ALL_CONDITION_CHANGES);
+ metric.mutable_gauge_fields_filter()->set_include_all(false);
+ auto gaugeFieldMatcher = metric.mutable_gauge_fields_filter()->mutable_fields();
+ gaugeFieldMatcher->set_field(tagId);
+ gaugeFieldMatcher->add_child()->set_field(1);
+
+ sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+ EXPECT_CALL(*pullerManager, Pull(tagId, _, _))
+ .WillOnce(Invoke([](int tagId, int64_t timeNs,
+ vector<std::shared_ptr<LogEvent>>* data) {
+ data->clear();
+ shared_ptr<LogEvent> event = make_shared<LogEvent>(tagId, bucketStartTimeNs + 3);
+ event->write(3);
+ event->init();
+ data->push_back(event);
+ return true;
+ }))
+ .WillOnce(Invoke([](int tagId, int64_t timeNs,
+ vector<std::shared_ptr<LogEvent>>* data) {
+ data->clear();
+ shared_ptr<LogEvent> event = make_shared<LogEvent>(tagId, bucketStartTimeNs + 10);
+ event->write(4);
+ event->init();
+ data->push_back(event);
+ return true;
+ }))
+ .WillOnce(Invoke([](int tagId, int64_t timeNs,
+ vector<std::shared_ptr<LogEvent>>* data) {
+ data->clear();
+ shared_ptr<LogEvent> event = make_shared<LogEvent>(tagId, bucketStartTimeNs + 20);
+ event->write(5);
+ event->init();
+ data->push_back(event);
+ return true;
+ }));
+
+ int triggerId = 5;
+ GaugeMetricProducer gaugeProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
+ tagId, triggerId, tagId, bucketStartTimeNs, bucketStartTimeNs,
+ pullerManager);
+
+ vector<shared_ptr<LogEvent>> allData;
+ allData.clear();
+
+ EXPECT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
+ LogEvent trigger(triggerId, bucketStartTimeNs + 10);
+ trigger.init();
+ gaugeProducer.onMatchedLogEvent(1 /*log matcher index*/, trigger);
+ EXPECT_EQ(2UL, gaugeProducer.mCurrentSlicedBucket->begin()->second.size());
+ trigger.setElapsedTimestampNs(bucketStartTimeNs + 20);
+ gaugeProducer.onMatchedLogEvent(1 /*log matcher index*/, trigger);
+ EXPECT_EQ(3UL, gaugeProducer.mCurrentSlicedBucket->begin()->second.size());
+
+ allData.clear();
+ shared_ptr<LogEvent> event = make_shared<LogEvent>(tagId, bucket2StartTimeNs + 1);
+ event->write(10);
+ event->init();
+ allData.push_back(event);
+
+ gaugeProducer.onDataPulled(allData);
+ EXPECT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
+ auto it = gaugeProducer.mCurrentSlicedBucket->begin()->second.front().mFields->begin();
+ EXPECT_EQ(INT, it->mValue.getType());
+ EXPECT_EQ(10, it->mValue.int_value);
+ EXPECT_EQ(1UL, gaugeProducer.mPastBuckets.size());
+ EXPECT_EQ(3UL, gaugeProducer.mPastBuckets.begin()->second.back().mGaugeAtoms.size());
+ EXPECT_EQ(3, gaugeProducer.mPastBuckets.begin()->second.back().mGaugeAtoms[0].mFields->begin()->mValue.int_value);
+ EXPECT_EQ(4, gaugeProducer.mPastBuckets.begin()->second.back().mGaugeAtoms[1].mFields->begin()->mValue.int_value);
+ EXPECT_EQ(5, gaugeProducer.mPastBuckets.begin()->second.back().mGaugeAtoms[2].mFields->begin()->mValue.int_value);
+}
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp b/cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp
index e3a8a553acc9..57aab971eaaa 100644
--- a/cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp
+++ b/cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp
@@ -50,9 +50,32 @@ const int64_t bucket6StartTimeNs = bucketStartTimeNs + 5 * bucketSizeNs;
const int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
/*
+ * Tests that the first bucket works correctly
+ */
+TEST(ValueMetricProducerTest, TestFirstBucket) {
+ ValueMetric metric;
+ metric.set_id(metricId);
+ metric.set_bucket(ONE_MINUTE);
+ metric.mutable_value_field()->set_field(tagId);
+ metric.mutable_value_field()->add_child()->set_field(2);
+
+ sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+
+ // statsd started long ago.
+ // The metric starts in the middle of the bucket
+ ValueMetricProducer valueProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
+ -1, 5, 600 * NS_PER_SEC + NS_PER_SEC/2, pullerManager);
+
+ EXPECT_EQ(600500000000, valueProducer.mCurrentBucketStartTimeNs);
+ EXPECT_EQ(10, valueProducer.mCurrentBucketNum);
+ EXPECT_EQ(660000000005, valueProducer.getCurrentBucketEndTimeNs());
+}
+
+/*
* Tests pulled atoms with no conditions
*/
-TEST(ValueMetricProducerTest, TestNonDimensionalEvents) {
+TEST(ValueMetricProducerTest, TestPulledEventsNoCondition) {
ValueMetric metric;
metric.set_id(metricId);
metric.set_bucket(ONE_MINUTE);
@@ -60,16 +83,23 @@ TEST(ValueMetricProducerTest, TestNonDimensionalEvents) {
metric.mutable_value_field()->add_child()->set_field(2);
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- // TODO: pending refactor of StatsPullerManager
- // For now we still need this so that it doesn't do real pulling.
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillOnce(Return());
+ EXPECT_CALL(*pullerManager, Pull(tagId, _, _))
+ .WillOnce(Invoke([](int tagId, int64_t timeNs,
+ vector<std::shared_ptr<LogEvent>>* data) {
+ data->clear();
+ shared_ptr<LogEvent> event = make_shared<LogEvent>(tagId, bucketStartTimeNs + 10);
+ event->write(tagId);
+ event->write(3);
+ event->init();
+ data->push_back(event);
+ return true;
+ }));
ValueMetricProducer valueProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
tagId, bucketStartTimeNs, bucketStartTimeNs, pullerManager);
- valueProducer.setBucketSize(60 * NS_PER_SEC);
vector<shared_ptr<LogEvent>> allData;
allData.clear();
@@ -83,14 +113,13 @@ TEST(ValueMetricProducerTest, TestNonDimensionalEvents) {
// has one slice
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
ValueMetricProducer::Interval curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- valueProducer.setBucketSize(60 * NS_PER_SEC);
- // startUpdated:true tainted:0 sum:0 start:11
+ // startUpdated:true sum:0 start:11
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(0, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
- EXPECT_EQ(11, curInterval.start);
- EXPECT_EQ(0UL, valueProducer.mPastBuckets.size());
+ EXPECT_EQ(false, curInterval.hasValue);
+ EXPECT_EQ(11, curInterval.start.long_value);
+ EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
+ EXPECT_EQ(8, valueProducer.mPastBuckets.begin()->second.back().mValueLong);
allData.clear();
event = make_shared<LogEvent>(tagId, bucket3StartTimeNs + 1);
@@ -102,13 +131,12 @@ TEST(ValueMetricProducerTest, TestNonDimensionalEvents) {
// has one slice
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- // tartUpdated:false tainted:0 sum:12
+ // startUpdated:false sum:12
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(0, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
- EXPECT_EQ(1UL, valueProducer.mPastBuckets.begin()->second.size());
- EXPECT_EQ(12, valueProducer.mPastBuckets.begin()->second.back().mValue);
+ EXPECT_EQ(2UL, valueProducer.mPastBuckets.begin()->second.size());
+ EXPECT_EQ(12, valueProducer.mPastBuckets.begin()->second.back().mValueLong);
allData.clear();
event = make_shared<LogEvent>(tagId, bucket4StartTimeNs + 1);
@@ -119,13 +147,12 @@ TEST(ValueMetricProducerTest, TestNonDimensionalEvents) {
valueProducer.onDataPulled(allData);
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- // startUpdated:false tainted:0 sum:12
+ // startUpdated:false sum:12
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(0, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
- EXPECT_EQ(2UL, valueProducer.mPastBuckets.begin()->second.size());
- EXPECT_EQ(13, valueProducer.mPastBuckets.begin()->second.back().mValue);
+ EXPECT_EQ(3UL, valueProducer.mPastBuckets.begin()->second.size());
+ EXPECT_EQ(13, valueProducer.mPastBuckets.begin()->second.back().mValueLong);
}
/*
@@ -140,14 +167,13 @@ TEST(ValueMetricProducerTest, TestPulledEventsTakeAbsoluteValueOnReset) {
metric.set_use_absolute_value_on_reset(true);
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillOnce(Return());
+ EXPECT_CALL(*pullerManager, Pull(tagId, _, _)).WillOnce(Return(false));
ValueMetricProducer valueProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
tagId, bucketStartTimeNs, bucketStartTimeNs, pullerManager);
- valueProducer.setBucketSize(60 * NS_PER_SEC);
vector<shared_ptr<LogEvent>> allData;
allData.clear();
@@ -161,12 +187,10 @@ TEST(ValueMetricProducerTest, TestPulledEventsTakeAbsoluteValueOnReset) {
// has one slice
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
ValueMetricProducer::Interval curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- valueProducer.setBucketSize(60 * NS_PER_SEC);
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(0, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
- EXPECT_EQ(11, curInterval.start);
+ EXPECT_EQ(false, curInterval.hasValue);
+ EXPECT_EQ(11, curInterval.start.long_value);
EXPECT_EQ(0UL, valueProducer.mPastBuckets.size());
allData.clear();
@@ -180,11 +204,10 @@ TEST(ValueMetricProducerTest, TestPulledEventsTakeAbsoluteValueOnReset) {
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(0, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
EXPECT_EQ(1UL, valueProducer.mPastBuckets.begin()->second.size());
- EXPECT_EQ(10, valueProducer.mPastBuckets.begin()->second.back().mValue);
+ EXPECT_EQ(10, valueProducer.mPastBuckets.begin()->second.back().mValueLong);
allData.clear();
event = make_shared<LogEvent>(tagId, bucket4StartTimeNs + 1);
@@ -196,11 +219,10 @@ TEST(ValueMetricProducerTest, TestPulledEventsTakeAbsoluteValueOnReset) {
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(0, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
EXPECT_EQ(2UL, valueProducer.mPastBuckets.begin()->second.size());
- EXPECT_EQ(26, valueProducer.mPastBuckets.begin()->second.back().mValue);
+ EXPECT_EQ(26, valueProducer.mPastBuckets.begin()->second.back().mValueLong);
}
/*
@@ -214,14 +236,13 @@ TEST(ValueMetricProducerTest, TestPulledEventsTakeZeroOnReset) {
metric.mutable_value_field()->add_child()->set_field(2);
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillOnce(Return());
+ EXPECT_CALL(*pullerManager, Pull(tagId, _, _)).WillOnce(Return(false));
ValueMetricProducer valueProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
tagId, bucketStartTimeNs, bucketStartTimeNs, pullerManager);
- valueProducer.setBucketSize(60 * NS_PER_SEC);
vector<shared_ptr<LogEvent>> allData;
allData.clear();
@@ -235,12 +256,10 @@ TEST(ValueMetricProducerTest, TestPulledEventsTakeZeroOnReset) {
// has one slice
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
ValueMetricProducer::Interval curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- valueProducer.setBucketSize(60 * NS_PER_SEC);
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(0, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
- EXPECT_EQ(11, curInterval.start);
+ EXPECT_EQ(false, curInterval.hasValue);
+ EXPECT_EQ(11, curInterval.start.long_value);
EXPECT_EQ(0UL, valueProducer.mPastBuckets.size());
allData.clear();
@@ -254,8 +273,7 @@ TEST(ValueMetricProducerTest, TestPulledEventsTakeZeroOnReset) {
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(0, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(0UL, valueProducer.mPastBuckets.size());
allData.clear();
@@ -268,11 +286,10 @@ TEST(ValueMetricProducerTest, TestPulledEventsTakeZeroOnReset) {
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(0, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
EXPECT_EQ(1UL, valueProducer.mPastBuckets.begin()->second.size());
- EXPECT_EQ(26, valueProducer.mPastBuckets.begin()->second.back().mValue);
+ EXPECT_EQ(26, valueProducer.mPastBuckets.begin()->second.back().mValueLong);
}
/*
@@ -287,12 +304,22 @@ TEST(ValueMetricProducerTest, TestEventsWithNonSlicedCondition) {
metric.set_condition(StringToId("SCREEN_ON"));
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillRepeatedly(Return());
EXPECT_CALL(*pullerManager, Pull(tagId, _, _))
+ // should not take effect
+ .WillOnce(Invoke([](int tagId, int64_t timeNs,
+ vector<std::shared_ptr<LogEvent>>* data) {
+ data->clear();
+ shared_ptr<LogEvent> event = make_shared<LogEvent>(tagId, bucketStartTimeNs + 10);
+ event->write(tagId);
+ event->write(3);
+ event->init();
+ data->push_back(event);
+ return true;
+ }))
.WillOnce(Invoke([](int tagId, int64_t timeNs,
vector<std::shared_ptr<LogEvent>>* data) {
data->clear();
@@ -316,17 +343,15 @@ TEST(ValueMetricProducerTest, TestEventsWithNonSlicedCondition) {
ValueMetricProducer valueProducer(kConfigKey, metric, 1, wizard, tagId, bucketStartTimeNs,
bucketStartTimeNs, pullerManager);
- valueProducer.setBucketSize(60 * NS_PER_SEC);
valueProducer.onConditionChanged(true, bucketStartTimeNs + 8);
// has one slice
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
ValueMetricProducer::Interval curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- // startUpdated:false tainted:0 sum:0 start:100
- EXPECT_EQ(100, curInterval.start);
+ // startUpdated:false sum:0 start:100
+ EXPECT_EQ(100, curInterval.start.long_value);
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(0, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(0UL, valueProducer.mPastBuckets.size());
vector<shared_ptr<LogEvent>> allData;
@@ -341,19 +366,19 @@ TEST(ValueMetricProducerTest, TestEventsWithNonSlicedCondition) {
// has one slice
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- // startUpdated:false tainted:0 sum:0 start:110
- EXPECT_EQ(110, curInterval.start);
+ // startUpdated:false sum:0 start:110
+ EXPECT_EQ(110, curInterval.start.long_value);
EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
EXPECT_EQ(1UL, valueProducer.mPastBuckets.begin()->second.size());
- EXPECT_EQ(10, valueProducer.mPastBuckets.begin()->second.back().mValue);
+ EXPECT_EQ(10, valueProducer.mPastBuckets.begin()->second.back().mValueLong);
valueProducer.onConditionChanged(false, bucket2StartTimeNs + 1);
// has one slice
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- // startUpdated:false tainted:0 sum:0 start:110
- EXPECT_EQ(10, curInterval.sum);
+ // startUpdated:false sum:0 start:110
+ EXPECT_EQ(10, curInterval.value.long_value);
EXPECT_EQ(false, curInterval.startUpdated);
}
@@ -365,11 +390,9 @@ TEST(ValueMetricProducerTest, TestPushedEventsWithUpgrade) {
metric.mutable_value_field()->add_child()->set_field(2);
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
ValueMetricProducer valueProducer(kConfigKey, metric, -1, wizard, -1, bucketStartTimeNs,
bucketStartTimeNs, pullerManager);
- valueProducer.setBucketSize(60 * NS_PER_SEC);
shared_ptr<LogEvent> event1 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 10);
event1->write(1);
@@ -408,11 +431,11 @@ TEST(ValueMetricProducerTest, TestPulledValueWithUpgrade) {
metric.mutable_value_field()->add_child()->set_field(2);
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, Pull(tagId, _, _))
+ .WillOnce(Return(false))
.WillOnce(Invoke([](int tagId, int64_t timeNs,
vector<std::shared_ptr<LogEvent>>* data) {
data->clear();
@@ -425,7 +448,6 @@ TEST(ValueMetricProducerTest, TestPulledValueWithUpgrade) {
}));
ValueMetricProducer valueProducer(kConfigKey, metric, -1, wizard, tagId, bucketStartTimeNs,
bucketStartTimeNs, pullerManager);
- valueProducer.setBucketSize(60 * NS_PER_SEC);
vector<shared_ptr<LogEvent>> allData;
allData.clear();
@@ -441,7 +463,7 @@ TEST(ValueMetricProducerTest, TestPulledValueWithUpgrade) {
valueProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
EXPECT_EQ(1UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
EXPECT_EQ(eventUpgradeTimeNs, valueProducer.mCurrentBucketStartTimeNs);
- EXPECT_EQ(20L, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mValue);
+ EXPECT_EQ(20L, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mValueLong);
allData.clear();
event = make_shared<LogEvent>(tagId, bucket2StartTimeNs + 1);
@@ -452,7 +474,7 @@ TEST(ValueMetricProducerTest, TestPulledValueWithUpgrade) {
valueProducer.onDataPulled(allData);
EXPECT_EQ(2UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
EXPECT_EQ(bucket2StartTimeNs, valueProducer.mCurrentBucketStartTimeNs);
- EXPECT_EQ(30L, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mValue);
+ EXPECT_EQ(30L, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mValueLong);
}
TEST(ValueMetricProducerTest, TestPulledValueWithUpgradeWhileConditionFalse) {
@@ -464,11 +486,11 @@ TEST(ValueMetricProducerTest, TestPulledValueWithUpgradeWhileConditionFalse) {
metric.set_condition(StringToId("SCREEN_ON"));
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, Pull(tagId, _, _))
+ .WillOnce(Return(false))
.WillOnce(Invoke([](int tagId, int64_t timeNs,
vector<std::shared_ptr<LogEvent>>* data) {
data->clear();
@@ -491,7 +513,6 @@ TEST(ValueMetricProducerTest, TestPulledValueWithUpgradeWhileConditionFalse) {
}));
ValueMetricProducer valueProducer(kConfigKey, metric, 1, wizard, tagId, bucketStartTimeNs,
bucketStartTimeNs, pullerManager);
- valueProducer.setBucketSize(60 * NS_PER_SEC);
valueProducer.onConditionChanged(true, bucketStartTimeNs + 1);
valueProducer.onConditionChanged(false, bucket2StartTimeNs-100);
@@ -502,7 +523,7 @@ TEST(ValueMetricProducerTest, TestPulledValueWithUpgradeWhileConditionFalse) {
EXPECT_EQ(bucket2StartTimeNs-50, valueProducer.mCurrentBucketStartTimeNs);
EXPECT_EQ(1UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
EXPECT_EQ(bucketStartTimeNs, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketStartNs);
- EXPECT_EQ(20L, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mValue);
+ EXPECT_EQ(20L, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mValueLong);
EXPECT_FALSE(valueProducer.mCondition);
}
@@ -514,12 +535,10 @@ TEST(ValueMetricProducerTest, TestPushedEventsWithoutCondition) {
metric.mutable_value_field()->add_child()->set_field(2);
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
ValueMetricProducer valueProducer(kConfigKey, metric, -1, wizard, -1, bucketStartTimeNs,
bucketStartTimeNs, pullerManager);
- valueProducer.setBucketSize(60 * NS_PER_SEC);
shared_ptr<LogEvent> event1 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 10);
event1->write(1);
@@ -533,19 +552,20 @@ TEST(ValueMetricProducerTest, TestPushedEventsWithoutCondition) {
// has one slice
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
ValueMetricProducer::Interval curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- EXPECT_EQ(10, curInterval.sum);
+ EXPECT_EQ(10, curInterval.value.long_value);
+ EXPECT_EQ(true, curInterval.hasValue);
valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event2);
// has one slice
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- EXPECT_EQ(30, curInterval.sum);
+ EXPECT_EQ(30, curInterval.value.long_value);
valueProducer.flushIfNeededLocked(bucket3StartTimeNs);
EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
EXPECT_EQ(1UL, valueProducer.mPastBuckets.begin()->second.size());
- EXPECT_EQ(30, valueProducer.mPastBuckets.begin()->second.back().mValue);
+ EXPECT_EQ(30, valueProducer.mPastBuckets.begin()->second.back().mValueLong);
}
TEST(ValueMetricProducerTest, TestPushedEventsWithCondition) {
@@ -556,12 +576,10 @@ TEST(ValueMetricProducerTest, TestPushedEventsWithCondition) {
metric.mutable_value_field()->add_child()->set_field(2);
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
ValueMetricProducer valueProducer(kConfigKey, metric, 1, wizard, -1, bucketStartTimeNs,
bucketStartTimeNs, pullerManager);
- valueProducer.setBucketSize(60 * NS_PER_SEC);
shared_ptr<LogEvent> event1 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 10);
event1->write(1);
@@ -583,7 +601,7 @@ TEST(ValueMetricProducerTest, TestPushedEventsWithCondition) {
// has one slice
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- EXPECT_EQ(20, curInterval.sum);
+ EXPECT_EQ(20, curInterval.value.long_value);
shared_ptr<LogEvent> event3 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 30);
event3->write(1);
@@ -594,7 +612,7 @@ TEST(ValueMetricProducerTest, TestPushedEventsWithCondition) {
// has one slice
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- EXPECT_EQ(50, curInterval.sum);
+ EXPECT_EQ(50, curInterval.value.long_value);
valueProducer.onConditionChangedLocked(false, bucketStartTimeNs + 35);
shared_ptr<LogEvent> event4 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 40);
@@ -606,12 +624,12 @@ TEST(ValueMetricProducerTest, TestPushedEventsWithCondition) {
// has one slice
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- EXPECT_EQ(50, curInterval.sum);
+ EXPECT_EQ(50, curInterval.value.long_value);
valueProducer.flushIfNeededLocked(bucket3StartTimeNs);
EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
EXPECT_EQ(1UL, valueProducer.mPastBuckets.begin()->second.size());
- EXPECT_EQ(50, valueProducer.mPastBuckets.begin()->second.back().mValue);
+ EXPECT_EQ(50, valueProducer.mPastBuckets.begin()->second.back().mValueLong);
}
TEST(ValueMetricProducerTest, TestAnomalyDetection) {
@@ -631,9 +649,10 @@ TEST(ValueMetricProducerTest, TestAnomalyDetection) {
metric.mutable_value_field()->add_child()->set_field(2);
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
ValueMetricProducer valueProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
- -1 /*not pulled*/, bucketStartTimeNs, bucketStartTimeNs);
- valueProducer.setBucketSize(60 * NS_PER_SEC);
+ -1 /*not pulled*/, bucketStartTimeNs, bucketStartTimeNs,
+ pullerManager);
sp<AnomalyTracker> anomalyTracker = valueProducer.addAnomalyTracker(alert, alarmMonitor);
@@ -705,14 +724,13 @@ TEST(ValueMetricProducerTest, TestBucketBoundaryNoCondition) {
metric.mutable_value_field()->add_child()->set_field(2);
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillOnce(Return());
+ EXPECT_CALL(*pullerManager, Pull(tagId, _, _)).WillOnce(Return(false));
ValueMetricProducer valueProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
tagId, bucketStartTimeNs, bucketStartTimeNs, pullerManager);
- valueProducer.setBucketSize(60 * NS_PER_SEC);
vector<shared_ptr<LogEvent>> allData;
// pull 1
@@ -728,11 +746,10 @@ TEST(ValueMetricProducerTest, TestBucketBoundaryNoCondition) {
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
ValueMetricProducer::Interval curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- // startUpdated:true tainted:0 sum:0 start:11
+ // startUpdated:true sum:0 start:11
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(0, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
- EXPECT_EQ(11, curInterval.start);
+ EXPECT_EQ(false, curInterval.hasValue);
+ EXPECT_EQ(11, curInterval.start.long_value);
EXPECT_EQ(0UL, valueProducer.mPastBuckets.size());
// pull 2 at correct time
@@ -746,13 +763,12 @@ TEST(ValueMetricProducerTest, TestBucketBoundaryNoCondition) {
// has one slice
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- // tartUpdated:false tainted:0 sum:12
+ // startUpdated:false sum:12
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(0, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
EXPECT_EQ(1UL, valueProducer.mPastBuckets.begin()->second.size());
- EXPECT_EQ(12, valueProducer.mPastBuckets.begin()->second.back().mValue);
+ EXPECT_EQ(12, valueProducer.mPastBuckets.begin()->second.back().mValueLong);
// pull 3 come late.
// The previous bucket gets closed with error. (Has start value 23, no ending)
@@ -767,14 +783,13 @@ TEST(ValueMetricProducerTest, TestBucketBoundaryNoCondition) {
valueProducer.onDataPulled(allData);
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- // startUpdated:false tainted:0 sum:12
+ // startUpdated:false sum:12
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(0, curInterval.tainted);
- EXPECT_EQ(36, curInterval.start);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(36, curInterval.start.long_value);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
EXPECT_EQ(1UL, valueProducer.mPastBuckets.begin()->second.size());
- EXPECT_EQ(12, valueProducer.mPastBuckets.begin()->second.back().mValue);
+ EXPECT_EQ(12, valueProducer.mPastBuckets.begin()->second.back().mValueLong);
}
/*
@@ -790,12 +805,12 @@ TEST(ValueMetricProducerTest, TestBucketBoundaryWithCondition) {
metric.set_condition(StringToId("SCREEN_ON"));
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillRepeatedly(Return());
EXPECT_CALL(*pullerManager, Pull(tagId, _, _))
+ .WillOnce(Return(false))
// condition becomes true
.WillOnce(Invoke([](int tagId, int64_t timeNs,
vector<std::shared_ptr<LogEvent>>* data) {
@@ -821,25 +836,22 @@ TEST(ValueMetricProducerTest, TestBucketBoundaryWithCondition) {
ValueMetricProducer valueProducer(kConfigKey, metric, 1, wizard, tagId, bucketStartTimeNs,
bucketStartTimeNs, pullerManager);
- valueProducer.setBucketSize(60 * NS_PER_SEC);
valueProducer.onConditionChanged(true, bucketStartTimeNs + 8);
// has one slice
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
ValueMetricProducer::Interval curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- // startUpdated:false tainted:0 sum:0 start:100
- EXPECT_EQ(100, curInterval.start);
+ // startUpdated:false sum:0 start:100
+ EXPECT_EQ(100, curInterval.start.long_value);
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(0, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(0UL, valueProducer.mPastBuckets.size());
// pull on bucket boundary come late, condition change happens before it
valueProducer.onConditionChanged(false, bucket2StartTimeNs + 1);
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
EXPECT_EQ(false, curInterval.startUpdated);
- EXPECT_EQ(1, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(0UL, valueProducer.mPastBuckets.size());
// Now the alarm is delivered.
@@ -855,8 +867,7 @@ TEST(ValueMetricProducerTest, TestBucketBoundaryWithCondition) {
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
EXPECT_EQ(false, curInterval.startUpdated);
- EXPECT_EQ(1, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(0UL, valueProducer.mPastBuckets.size());
}
@@ -873,12 +884,12 @@ TEST(ValueMetricProducerTest, TestBucketBoundaryWithCondition2) {
metric.set_condition(StringToId("SCREEN_ON"));
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _, _)).WillRepeatedly(Return());
EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillRepeatedly(Return());
EXPECT_CALL(*pullerManager, Pull(tagId, _, _))
+ .WillOnce(Return(false))
// condition becomes true
.WillOnce(Invoke([](int tagId, int64_t timeNs,
vector<std::shared_ptr<LogEvent>>* data) {
@@ -915,34 +926,30 @@ TEST(ValueMetricProducerTest, TestBucketBoundaryWithCondition2) {
ValueMetricProducer valueProducer(kConfigKey, metric, 1, wizard, tagId, bucketStartTimeNs,
bucketStartTimeNs, pullerManager);
- valueProducer.setBucketSize(60 * NS_PER_SEC);
valueProducer.onConditionChanged(true, bucketStartTimeNs + 8);
// has one slice
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
ValueMetricProducer::Interval curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- // startUpdated:false tainted:0 sum:0 start:100
- EXPECT_EQ(100, curInterval.start);
+ // startUpdated:false sum:0 start:100
+ EXPECT_EQ(100, curInterval.start.long_value);
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(0, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(0UL, valueProducer.mPastBuckets.size());
// pull on bucket boundary come late, condition change happens before it
valueProducer.onConditionChanged(false, bucket2StartTimeNs + 1);
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
EXPECT_EQ(false, curInterval.startUpdated);
- EXPECT_EQ(1, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(0UL, valueProducer.mPastBuckets.size());
// condition changed to true again, before the pull alarm is delivered
valueProducer.onConditionChanged(true, bucket2StartTimeNs + 25);
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(130, curInterval.start);
- EXPECT_EQ(1, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(130, curInterval.start.long_value);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(0UL, valueProducer.mPastBuckets.size());
// Now the alarm is delivered, but it is considered late, it has no effect
@@ -957,9 +964,8 @@ TEST(ValueMetricProducerTest, TestBucketBoundaryWithCondition2) {
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(130, curInterval.start);
- EXPECT_EQ(1, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(130, curInterval.start.long_value);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(0UL, valueProducer.mPastBuckets.size());
}
@@ -976,12 +982,12 @@ TEST(ValueMetricProducerTest, TestBucketBoundaryWithCondition3) {
metric.set_condition(StringToId("SCREEN_ON"));
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
- shared_ptr<MockStatsPullerManager> pullerManager =
- make_shared<StrictMock<MockStatsPullerManager>>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _, _)).WillOnce(Return());
EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillRepeatedly(Return());
EXPECT_CALL(*pullerManager, Pull(tagId, _, _))
+ .WillOnce(Return(false))
// condition becomes true
.WillOnce(Invoke([](int tagId, int64_t timeNs,
vector<std::shared_ptr<LogEvent>>* data) {
@@ -1007,17 +1013,15 @@ TEST(ValueMetricProducerTest, TestBucketBoundaryWithCondition3) {
ValueMetricProducer valueProducer(kConfigKey, metric, 1, wizard, tagId, bucketStartTimeNs,
bucketStartTimeNs, pullerManager);
- valueProducer.setBucketSize(60 * NS_PER_SEC);
valueProducer.onConditionChanged(true, bucketStartTimeNs + 8);
// has one slice
EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
ValueMetricProducer::Interval curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
- // startUpdated:false tainted:0 sum:0 start:100
- EXPECT_EQ(100, curInterval.start);
+ // startUpdated:false sum:0 start:100
+ EXPECT_EQ(100, curInterval.start.long_value);
EXPECT_EQ(true, curInterval.startUpdated);
- EXPECT_EQ(0, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(0UL, valueProducer.mPastBuckets.size());
// pull on bucket boundary come late, condition change happens before it.
@@ -1025,8 +1029,7 @@ TEST(ValueMetricProducerTest, TestBucketBoundaryWithCondition3) {
valueProducer.onConditionChanged(false, bucket2StartTimeNs + 1);
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
EXPECT_EQ(false, curInterval.startUpdated);
- EXPECT_EQ(1, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(0UL, valueProducer.mPastBuckets.size());
// Alarm is delivered in time, but the pull is very slow, and pullers are called in order,
@@ -1042,11 +1045,241 @@ TEST(ValueMetricProducerTest, TestBucketBoundaryWithCondition3) {
curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
EXPECT_EQ(false, curInterval.startUpdated);
- EXPECT_EQ(1, curInterval.tainted);
- EXPECT_EQ(0, curInterval.sum);
+ EXPECT_EQ(false, curInterval.hasValue);
EXPECT_EQ(0UL, valueProducer.mPastBuckets.size());
}
+TEST(ValueMetricProducerTest, TestPushedAggregateMin) {
+ ValueMetric metric;
+ metric.set_id(metricId);
+ metric.set_bucket(ONE_MINUTE);
+ metric.mutable_value_field()->set_field(tagId);
+ metric.mutable_value_field()->add_child()->set_field(2);
+ metric.set_aggregation_type(ValueMetric::MIN);
+
+ sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+
+ ValueMetricProducer valueProducer(kConfigKey, metric, -1, wizard, -1, bucketStartTimeNs,
+ bucketStartTimeNs, pullerManager);
+
+ shared_ptr<LogEvent> event1 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 10);
+ event1->write(1);
+ event1->write(10);
+ event1->init();
+ shared_ptr<LogEvent> event2 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 20);
+ event2->write(1);
+ event2->write(20);
+ event2->init();
+ valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event1);
+ // has one slice
+ EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
+ ValueMetricProducer::Interval curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
+ EXPECT_EQ(10, curInterval.value.long_value);
+ EXPECT_EQ(true, curInterval.hasValue);
+
+ valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event2);
+
+ // has one slice
+ EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
+ curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
+ EXPECT_EQ(10, curInterval.value.long_value);
+
+ valueProducer.flushIfNeededLocked(bucket3StartTimeNs);
+ EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
+ EXPECT_EQ(1UL, valueProducer.mPastBuckets.begin()->second.size());
+ EXPECT_EQ(10, valueProducer.mPastBuckets.begin()->second.back().mValueLong);
+}
+
+TEST(ValueMetricProducerTest, TestPushedAggregateMax) {
+ ValueMetric metric;
+ metric.set_id(metricId);
+ metric.set_bucket(ONE_MINUTE);
+ metric.mutable_value_field()->set_field(tagId);
+ metric.mutable_value_field()->add_child()->set_field(2);
+ metric.set_aggregation_type(ValueMetric::MAX);
+
+ sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+
+ ValueMetricProducer valueProducer(kConfigKey, metric, -1, wizard, -1, bucketStartTimeNs,
+ bucketStartTimeNs, pullerManager);
+
+ shared_ptr<LogEvent> event1 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 10);
+ event1->write(1);
+ event1->write(10);
+ event1->init();
+ shared_ptr<LogEvent> event2 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 20);
+ event2->write(1);
+ event2->write(20);
+ event2->init();
+ valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event1);
+ // has one slice
+ EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
+ ValueMetricProducer::Interval curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
+ EXPECT_EQ(10, curInterval.value.long_value);
+ EXPECT_EQ(true, curInterval.hasValue);
+
+ valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event2);
+
+ // has one slice
+ EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
+ curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
+ EXPECT_EQ(20, curInterval.value.long_value);
+
+ valueProducer.flushIfNeededLocked(bucket3StartTimeNs);
+ EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
+ EXPECT_EQ(1UL, valueProducer.mPastBuckets.begin()->second.size());
+ EXPECT_EQ(20, valueProducer.mPastBuckets.begin()->second.back().mValueLong);
+}
+
+TEST(ValueMetricProducerTest, TestPushedAggregateAvg) {
+ ValueMetric metric;
+ metric.set_id(metricId);
+ metric.set_bucket(ONE_MINUTE);
+ metric.mutable_value_field()->set_field(tagId);
+ metric.mutable_value_field()->add_child()->set_field(2);
+ metric.set_aggregation_type(ValueMetric::AVG);
+
+ sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+
+ ValueMetricProducer valueProducer(kConfigKey, metric, -1, wizard, -1, bucketStartTimeNs,
+ bucketStartTimeNs, pullerManager);
+
+ shared_ptr<LogEvent> event1 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 10);
+ event1->write(1);
+ event1->write(10);
+ event1->init();
+ shared_ptr<LogEvent> event2 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 20);
+ event2->write(1);
+ event2->write(15);
+ event2->init();
+ valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event1);
+ // has one slice
+ EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
+ ValueMetricProducer::Interval curInterval;
+ curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
+ EXPECT_EQ(10, curInterval.value.long_value);
+ EXPECT_EQ(true, curInterval.hasValue);
+ EXPECT_EQ(1, curInterval.sampleSize);
+
+ valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event2);
+
+ // has one slice
+ EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
+ curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
+ EXPECT_EQ(25, curInterval.value.long_value);
+ EXPECT_EQ(2, curInterval.sampleSize);
+
+ valueProducer.flushIfNeededLocked(bucket3StartTimeNs);
+ EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
+ EXPECT_EQ(1UL, valueProducer.mPastBuckets.begin()->second.size());
+ EXPECT_EQ(12.5, valueProducer.mPastBuckets.begin()->second.back().mValueDouble);
+}
+
+TEST(ValueMetricProducerTest, TestPushedAggregateSum) {
+ ValueMetric metric;
+ metric.set_id(metricId);
+ metric.set_bucket(ONE_MINUTE);
+ metric.mutable_value_field()->set_field(tagId);
+ metric.mutable_value_field()->add_child()->set_field(2);
+ metric.set_aggregation_type(ValueMetric::SUM);
+
+ sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+
+ ValueMetricProducer valueProducer(kConfigKey, metric, -1, wizard, -1, bucketStartTimeNs,
+ bucketStartTimeNs, pullerManager);
+
+ shared_ptr<LogEvent> event1 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 10);
+ event1->write(1);
+ event1->write(10);
+ event1->init();
+ shared_ptr<LogEvent> event2 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 20);
+ event2->write(1);
+ event2->write(15);
+ event2->init();
+ valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event1);
+ // has one slice
+ EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
+ ValueMetricProducer::Interval curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
+ EXPECT_EQ(10, curInterval.value.long_value);
+ EXPECT_EQ(true, curInterval.hasValue);
+
+ valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event2);
+
+ // has one slice
+ EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
+ curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
+ EXPECT_EQ(25, curInterval.value.long_value);
+
+ valueProducer.flushIfNeededLocked(bucket3StartTimeNs);
+ EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
+ EXPECT_EQ(1UL, valueProducer.mPastBuckets.begin()->second.size());
+ EXPECT_EQ(25, valueProducer.mPastBuckets.begin()->second.back().mValueLong);
+}
+
+TEST(ValueMetricProducerTest, TestPushedAggregateSumSliced) {
+ string slicedConditionName = "UID";
+ const int conditionTagId = 2;
+ ValueMetric metric;
+ metric.set_id(metricId);
+ metric.set_bucket(ONE_MINUTE);
+ metric.mutable_value_field()->set_field(tagId);
+ metric.mutable_value_field()->add_child()->set_field(1);
+ metric.set_aggregation_type(ValueMetric::SUM);
+
+ metric.set_condition(StringToId(slicedConditionName));
+ MetricConditionLink* link = metric.add_links();
+ link->set_condition(StringToId(slicedConditionName));
+ buildSimpleAtomFieldMatcher(tagId, 2, link->mutable_fields_in_what());
+ buildSimpleAtomFieldMatcher(conditionTagId, 2, link->mutable_fields_in_condition());
+
+ LogEvent event1(tagId, bucketStartTimeNs + 10);
+ event1.write(10); // value
+ event1.write("111"); // uid
+ event1.init();
+ ConditionKey key1;
+ key1[StringToId(slicedConditionName)] =
+ {getMockedDimensionKey(conditionTagId, 2, "111")};
+
+ LogEvent event2(tagId, bucketStartTimeNs + 20);
+ event2.write(15);
+ event2.write("222");
+ event2.init();
+ ConditionKey key2;
+ key2[StringToId(slicedConditionName)] =
+ {getMockedDimensionKey(conditionTagId, 2, "222")};
+
+ sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+ EXPECT_CALL(*wizard, query(_, key1, _, _, _, _)).WillOnce(Return(ConditionState::kFalse));
+ EXPECT_CALL(*wizard, query(_, key2, _, _, _, _)).WillOnce(Return(ConditionState::kTrue));
+
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+
+ ValueMetricProducer valueProducer(kConfigKey, metric, 1, wizard, -1, bucketStartTimeNs,
+ bucketStartTimeNs, pullerManager);
+
+ valueProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
+
+ EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
+ ValueMetricProducer::Interval curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
+ EXPECT_EQ(false, curInterval.hasValue);
+
+ valueProducer.onMatchedLogEvent(1 /*log matcher index*/, event2);
+
+ // has one slice
+ EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
+ curInterval = valueProducer.mCurrentSlicedBucket.begin()->second;
+ EXPECT_EQ(15, curInterval.value.long_value);
+
+ valueProducer.flushIfNeededLocked(bucket3StartTimeNs);
+ EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
+ EXPECT_EQ(1UL, valueProducer.mPastBuckets.begin()->second.size());
+ EXPECT_EQ(15, valueProducer.mPastBuckets.begin()->second.back().mValueLong);
+}
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/cmds/statsd/tests/statsd_test_util.cpp b/cmds/statsd/tests/statsd_test_util.cpp
index e0c98cb9735b..b8b1a1db2c12 100644
--- a/cmds/statsd/tests/statsd_test_util.cpp
+++ b/cmds/statsd/tests/statsd_test_util.cpp
@@ -452,14 +452,16 @@ std::unique_ptr<LogEvent> CreateIsolatedUidChangedEvent(
sp<StatsLogProcessor> CreateStatsLogProcessor(const int64_t timeBaseNs, const int64_t currentTimeNs,
const StatsdConfig& config, const ConfigKey& key) {
sp<UidMap> uidMap = new UidMap();
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
sp<AlarmMonitor> anomalyAlarmMonitor =
new AlarmMonitor(1, [](const sp<IStatsCompanionService>&, int64_t){},
[](const sp<IStatsCompanionService>&){});
sp<AlarmMonitor> periodicAlarmMonitor =
new AlarmMonitor(1, [](const sp<IStatsCompanionService>&, int64_t){},
[](const sp<IStatsCompanionService>&){});
- sp<StatsLogProcessor> processor = new StatsLogProcessor(
- uidMap, anomalyAlarmMonitor, periodicAlarmMonitor, timeBaseNs, [](const ConfigKey&){return true;});
+ sp<StatsLogProcessor> processor =
+ new StatsLogProcessor(uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
+ timeBaseNs, [](const ConfigKey&) { return true; });
processor->OnConfigUpdated(currentTimeNs, key, config);
return processor;
}
diff --git a/cmds/uiautomator/library/testrunner-src/com/android/uiautomator/core/ShellUiAutomatorBridge.java b/cmds/uiautomator/library/testrunner-src/com/android/uiautomator/core/ShellUiAutomatorBridge.java
index 653851546d01..950a258d123d 100644
--- a/cmds/uiautomator/library/testrunner-src/com/android/uiautomator/core/ShellUiAutomatorBridge.java
+++ b/cmds/uiautomator/library/testrunner-src/com/android/uiautomator/core/ShellUiAutomatorBridge.java
@@ -62,7 +62,7 @@ public class ShellUiAutomatorBridge extends UiAutomatorBridge {
IBinder token = new Binder();
try {
ContentProviderHolder holder = activityManager.getContentProviderExternal(
- providerName, UserHandle.USER_SYSTEM, token);
+ providerName, UserHandle.USER_SYSTEM, token, "*uiautomator*");
if (holder == null) {
throw new IllegalStateException("Could not find provider: " + providerName);
}