/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License
 */

package com.android.server.job;

import static android.net.NetworkCapabilities.NET_CAPABILITY_TEMPORARILY_NOT_METERED;
import static android.net.NetworkCapabilities.TRANSPORT_TEST;

import static com.android.server.job.JobSchedulerService.sElapsedRealtimeClock;
import static com.android.server.job.JobSchedulerService.sSystemClock;

import android.annotation.NonNull;
import android.annotation.Nullable;
import android.app.job.JobInfo;
import android.app.job.JobWorkItem;
import android.content.ComponentName;
import android.content.Context;
import android.net.NetworkRequest;
import android.os.Environment;
import android.os.Handler;
import android.os.PersistableBundle;
import android.os.Process;
import android.os.SystemClock;
import android.text.TextUtils;
import android.text.format.DateUtils;
import android.util.ArraySet;
import android.util.AtomicFile;
import android.util.Pair;
import android.util.Slog;
import android.util.SparseArray;
import android.util.SparseBooleanArray;
import android.util.SystemConfigFileCommitEventLogger;
import android.util.Xml;

import com.android.internal.annotations.GuardedBy;
import com.android.internal.annotations.VisibleForTesting;
import com.android.internal.util.ArrayUtils;
import com.android.internal.util.BitUtils;
import com.android.modules.expresslog.Histogram;
import com.android.modules.utils.TypedXmlPullParser;
import com.android.modules.utils.TypedXmlSerializer;
import com.android.server.AppSchedulingModuleThread;
import com.android.server.IoThread;
import com.android.server.job.JobSchedulerInternal.JobStorePersistStats;
import com.android.server.job.controllers.JobStatus;

import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import org.xmlpull.v1.XmlSerializer;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.StringJoiner;
import java.util.concurrent.CountDownLatch;
import java.util.function.Consumer;
import java.util.function.Predicate;

/**
 * Maintains the master list of jobs that the job scheduler is tracking. These jobs are compared by
 * reference, so none of the functions in this class should make a copy.
 * Also handles read/write of persisted jobs.
 *
 * Note on locking:
 * All callers to this class must <strong>lock on the class object they are calling</strong>.
 * This is important b/c {@link com.android.server.job.JobStore.WriteJobsMapToDiskRunnable}
 * and {@link com.android.server.job.JobStore.ReadJobMapFromDiskRunnable} lock on that
 * object.
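 *
 * <p>Illustrative caller pattern for the locking rule above (a sketch, assuming the caller
 * shares the scheduler lock that {@link JobSchedulerService} passes into this class;
 * {@code jobStore} and {@code jobStatus} are hypothetical locals):
 * <pre>{@code
 * synchronized (mLock) {
 *     jobStore.add(jobStatus);
 * }
 * }</pre>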
 *
 * Test:
 * atest $ANDROID_BUILD_TOP/frameworks/base/services/tests/servicestests/src/com/android/server/job/JobStoreTest.java
 */
public final class JobStore {
    private static final String TAG = "JobStore";
    private static final boolean DEBUG = JobSchedulerService.DEBUG;

    /** Threshold to adjust how often we want to write to the db. */
    private static final long JOB_PERSIST_DELAY = 2000L;
    private static final long SCHEDULED_JOB_HIGH_WATER_MARK_PERIOD_MS = 30 * 60_000L;
    @VisibleForTesting
    static final String JOB_FILE_SPLIT_PREFIX = "jobs_";
    private static final int ALL_UIDS = -1;
    @VisibleForTesting
    static final int INVALID_UID = -2;

    final Object mLock;
    final Object mWriteScheduleLock; // used solely for invariants around write scheduling
    final JobSet mJobSet; // per-caller-uid and per-source-uid tracking
    final Context mContext;

    // Bookkeeping around incorrect boot-time system clock
    private final long mXmlTimestamp;
    private boolean mRtcGood;

    @GuardedBy("mWriteScheduleLock")
    private boolean mWriteScheduled;

    @GuardedBy("mWriteScheduleLock")
    private boolean mWriteInProgress;

    @GuardedBy("mWriteScheduleLock")
    private boolean mSplitFileMigrationNeeded;

    private static final Object sSingletonLock = new Object();
    private final SystemConfigFileCommitEventLogger mEventLogger;
    private final AtomicFile mJobsFile;
    private final File mJobFileDirectory;
    private final SparseBooleanArray mPendingJobWriteUids = new SparseBooleanArray();
    /** Handler backed by IoThread for writing to disk. */
    private final Handler mIoHandler = IoThread.getHandler();
    private static JobStore sSingleton;

    private boolean mUseSplitFiles = JobSchedulerService.Constants.DEFAULT_PERSIST_IN_SPLIT_FILES;

    private JobStorePersistStats mPersistInfo = new JobStorePersistStats();

    /**
     * Separately updated value of the JobSet size to avoid recalculating it frequently for logging
     * purposes. Continue to use {@link JobSet#size()} for the up-to-date and accurate value.
     */
    private int mCurrentJobSetSize = 0;
    private int mScheduledJob30MinHighWaterMark = 0;
    private static final Histogram sScheduledJob30MinHighWaterMarkLogger = new Histogram(
            "job_scheduler.value_hist_scheduled_job_30_min_high_water_mark",
            new Histogram.ScaledRangeOptions(15, 1, 99, 1.5f));
    private final Runnable mScheduledJobHighWaterMarkLoggingRunnable = new Runnable() {
        @Override
        public void run() {
            AppSchedulingModuleThread.getHandler().removeCallbacks(this);
            synchronized (mLock) {
                sScheduledJob30MinHighWaterMarkLogger.logSample(mScheduledJob30MinHighWaterMark);
                mScheduledJob30MinHighWaterMark = mJobSet.size();
            }
            // The count doesn't need to be logged at exact times. Logging based on system uptime
            // should be fine.
            AppSchedulingModuleThread.getHandler()
                    .postDelayed(this, SCHEDULED_JOB_HIGH_WATER_MARK_PERIOD_MS);
        }
    };
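
    // Sampling sketch for the runnable above (an illustrative timeline, not additional logic):
    //   t = 0         log the previous window's peak, then mark = mJobSet.size()
    //   0 < t < 30m   mark = max(mark, current size) via maybeUpdateHighWaterMark()
    //   t = 30m       logSample(mark), reset the mark, and re-post for the next window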

    /** Used by the {@link JobSchedulerService} to instantiate the JobStore. */
    static JobStore get(JobSchedulerService jobManagerService) {
        synchronized (sSingletonLock) {
            if (sSingleton == null) {
                sSingleton = new JobStore(jobManagerService.getContext(),
                        jobManagerService.getLock(), Environment.getDataDirectory());
            }
            return sSingleton;
        }
    }

    /**
     * @return A freshly initialized job store object, with no loaded jobs.
     */
    @VisibleForTesting
    public static JobStore initAndGetForTesting(Context context, File dataDir) {
        JobStore jobStoreUnderTest = new JobStore(context, new Object(), dataDir);
        jobStoreUnderTest.init();
        jobStoreUnderTest.clearForTesting();
        return jobStoreUnderTest;
    }

    /**
     * Construct the instance of the job store. This results in a blocking read from disk.
     */
    private JobStore(Context context, Object lock, File dataDir) {
        mLock = lock;
        mWriteScheduleLock = new Object();
        mContext = context;

        File systemDir = new File(dataDir, "system");
        mJobFileDirectory = new File(systemDir, "job");
        mJobFileDirectory.mkdirs();
        mEventLogger = new SystemConfigFileCommitEventLogger("jobs");
        mJobsFile = createJobFile(new File(mJobFileDirectory, "jobs.xml"));

        mJobSet = new JobSet();

        // If the current RTC is earlier than the timestamp on our persisted jobs file,
        // we suspect that the RTC is uninitialized and so we cannot draw conclusions
        // about persisted job scheduling.
        //
        // Note that if the persisted jobs file does not exist, we proceed with the
        // assumption that the RTC is good. This is less work and is safe: if the
        // clock updates to sanity then we'll be saving the persisted jobs file in that
        // correct state, which is normal; or we'll wind up writing the jobs file with
        // an incorrect historical timestamp. That's fine; at worst we'll reboot with
        // a *correct* timestamp, see a bunch of overdue jobs, and run them; then
        // settle into normal operation.
        mXmlTimestamp = mJobsFile.exists()
                ? mJobsFile.getLastModifiedTime() : mJobFileDirectory.lastModified();
        mRtcGood = (sSystemClock.millis() > mXmlTimestamp);

        AppSchedulingModuleThread.getHandler().postDelayed(
                mScheduledJobHighWaterMarkLoggingRunnable, SCHEDULED_JOB_HIGH_WATER_MARK_PERIOD_MS);
    }

    private void init() {
        readJobMapFromDisk(mJobSet, mRtcGood);
    }

    void initAsync(CountDownLatch completionLatch) {
        mIoHandler.post(new ReadJobMapFromDiskRunnable(mJobSet, mRtcGood, completionLatch));
    }

    private AtomicFile createJobFile(String baseName) {
        return createJobFile(new File(mJobFileDirectory, baseName + ".xml"));
    }

    private AtomicFile createJobFile(File file) {
        return new AtomicFile(file, mEventLogger);
    }

    public boolean jobTimesInflatedValid() {
        return mRtcGood;
    }

    public boolean clockNowValidToInflate(long now) {
        return now >= mXmlTimestamp;
    }
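
    // Illustrative check (hypothetical caller): persisted UTC job bounds are only trusted once
    // the wall clock has advanced past the persisted file's timestamp, e.g.
    //     if (jobStore.clockNowValidToInflate(sSystemClock.millis())) { /* re-inflate bounds */ }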

    /**
     * Runs any necessary work asynchronously. If this is called after
     * {@link #initAsync(CountDownLatch)}, this ensures the given work runs after
     * the JobStore is initialized.
     */
    void runWorkAsync(@NonNull Runnable r) {
        mIoHandler.post(r);
    }

    /**
     * Find all the jobs that were affected by RTC clock uncertainty at boot time. Returns
     * parallel lists of the existing JobStatus objects and of new, equivalent JobStatus instances
     * with now-corrected time bounds.
     */
    public void getRtcCorrectedJobsLocked(final ArrayList<JobStatus> toAdd,
            final ArrayList<JobStatus> toRemove) {
        final long elapsedNow = sElapsedRealtimeClock.millis();

        // Find the jobs that need to be fixed up, collecting them for post-iteration
        // replacement with their new versions
        forEachJob(job -> {
            final Pair<Long, Long> utcTimes = job.getPersistedUtcTimes();
            if (utcTimes != null) {
                Pair<Long, Long> elapsedRuntimes =
                        convertRtcBoundsToElapsed(utcTimes, elapsedNow);
                JobStatus newJob = new JobStatus(job,
                        elapsedRuntimes.first, elapsedRuntimes.second,
                        0, 0, job.getLastSuccessfulRunTime(), job.getLastFailedRunTime(),
                        job.getCumulativeExecutionTimeMs());
                newJob.prepareLocked();
                toAdd.add(newJob);
                toRemove.add(job);
            }
        });
    }

    /**
     * Add a job to the master list, persisting it if necessary.
     * Similar jobs to the new job will not be removed.
     *
     * @param jobStatus Job to add.
     */
    public void add(JobStatus jobStatus) {
        if (mJobSet.add(jobStatus)) {
            mCurrentJobSetSize++;
            maybeUpdateHighWaterMark();
        }
        if (jobStatus.isPersisted()) {
            mPendingJobWriteUids.put(jobStatus.getUid(), true);
            maybeWriteStatusToDiskAsync();
        }
        if (DEBUG) {
            Slog.d(TAG, "Added job status to store: " + jobStatus);
        }
    }

    /**
     * The same as above but does not schedule writing. This makes perf benchmarks more stable.
     */
    @VisibleForTesting
    public void addForTesting(JobStatus jobStatus) {
        if (mJobSet.add(jobStatus)) {
            mCurrentJobSetSize++;
            maybeUpdateHighWaterMark();
        }
        if (jobStatus.isPersisted()) {
            mPendingJobWriteUids.put(jobStatus.getUid(), true);
        }
    }

    boolean containsJob(JobStatus jobStatus) {
        return mJobSet.contains(jobStatus);
    }

    public int size() {
        return mJobSet.size();
    }

    public JobStorePersistStats getPersistStats() {
        return mPersistInfo;
    }

    public int countJobsForUid(int uid) {
        return mJobSet.countJobsForUid(uid);
    }
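
    // Write-batching sketch (names from this file): add(), remove(), touchJob(), and clear()
    // mark the affected UID dirty in mPendingJobWriteUids (clear() uses ALL_UIDS), and
    // maybeWriteStatusToDiskAsync() coalesces them all behind a single delayed run of
    // mWriteRunnable (JOB_PERSIST_DELAY = 2000 ms).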

    /**
     * Remove the provided job. Will also delete the job if it was persisted.
     * @param removeFromPersisted If true, the job will be removed from the persisted job list
     *                            immediately (if it was persisted).
     * @return Whether or not the job existed to be removed.
     */
    public boolean remove(JobStatus jobStatus, boolean removeFromPersisted) {
        boolean removed = mJobSet.remove(jobStatus);
        if (!removed) {
            if (DEBUG) {
                Slog.d(TAG, "Couldn't remove job: didn't exist: " + jobStatus);
            }
            return false;
        }
        mCurrentJobSetSize--;
        if (removeFromPersisted && jobStatus.isPersisted()) {
            mPendingJobWriteUids.put(jobStatus.getUid(), true);
            maybeWriteStatusToDiskAsync();
        }
        return removed;
    }

    /**
     * Like {@link #remove(JobStatus, boolean)}, but doesn't schedule a disk write.
     */
    @VisibleForTesting
    public void removeForTesting(JobStatus jobStatus) {
        if (mJobSet.remove(jobStatus)) {
            mCurrentJobSetSize--;
        }
        if (jobStatus.isPersisted()) {
            mPendingJobWriteUids.put(jobStatus.getUid(), true);
        }
    }

    /**
     * Remove the jobs of users not specified in the keepUserIds.
     * @param keepUserIds Array of User IDs whose jobs should be kept and not removed.
     */
    public void removeJobsOfUnlistedUsers(int[] keepUserIds) {
        mJobSet.removeJobsOfUnlistedUsers(keepUserIds);
        mCurrentJobSetSize = mJobSet.size();
    }

    /** Note a change in the specified JobStatus that necessitates writing job state to disk. */
    void touchJob(@NonNull JobStatus jobStatus) {
        if (!jobStatus.isPersisted()) {
            return;
        }
        mPendingJobWriteUids.put(jobStatus.getUid(), true);
        maybeWriteStatusToDiskAsync();
    }

    @VisibleForTesting
    public void clear() {
        mJobSet.clear();
        mPendingJobWriteUids.put(ALL_UIDS, true);
        mCurrentJobSetSize = 0;
        maybeWriteStatusToDiskAsync();
    }

    /**
     * The same as above but does not schedule writing. This makes perf benchmarks more stable.
     */
    @VisibleForTesting
    public void clearForTesting() {
        mJobSet.clear();
        mPendingJobWriteUids.put(ALL_UIDS, true);
        mCurrentJobSetSize = 0;
    }

    void setUseSplitFiles(boolean useSplitFiles) {
        synchronized (mLock) {
            if (mUseSplitFiles != useSplitFiles) {
                mUseSplitFiles = useSplitFiles;
                migrateJobFilesAsync();
            }
        }
    }

    /**
     * The same as above but does not schedule writing. This makes perf benchmarks more stable.
     */
    @VisibleForTesting
    public void setUseSplitFilesForTesting(boolean useSplitFiles) {
        final boolean changed;
        synchronized (mLock) {
            changed = mUseSplitFiles != useSplitFiles;
            if (changed) {
                mUseSplitFiles = useSplitFiles;
                mPendingJobWriteUids.put(ALL_UIDS, true);
            }
        }
        if (changed) {
            synchronized (mWriteScheduleLock) {
                mSplitFileMigrationNeeded = true;
            }
        }
    }

    /**
     * @param sourceUid Uid of the source app.
     * @return A list of all the jobs scheduled for the source app. Never null.
     */
    @NonNull
    public ArraySet<JobStatus> getJobsBySourceUid(int sourceUid) {
        return mJobSet.getJobsBySourceUid(sourceUid);
    }

    public void getJobsBySourceUid(int sourceUid, @NonNull Set<JobStatus> insertInto) {
        mJobSet.getJobsBySourceUid(sourceUid, insertInto);
    }
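
    // Split-file layout sketch: with mUseSplitFiles enabled, each UID's persisted jobs live in
    // their own file, <dataDir>/system/job/jobs_<uid>.xml (see JOB_FILE_SPLIT_PREFIX); otherwise
    // everything shares the single legacy jobs.xml.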

    /**
     * @param uid Uid of the requesting app.
     * @return All JobStatus objects for a given uid from the master list. Never null.
     */
    @NonNull
    public ArraySet<JobStatus> getJobsByUid(int uid) {
        return mJobSet.getJobsByUid(uid);
    }

    public void getJobsByUid(int uid, @NonNull Set<JobStatus> insertInto) {
        mJobSet.getJobsByUid(uid, insertInto);
    }

    /**
     * @param uid Uid of the requesting app.
     * @param jobId Job id, specified at schedule-time.
     * @return the JobStatus that matches the provided uId and jobId, or null if none found.
     */
    @Nullable
    public JobStatus getJobByUidAndJobId(int uid, @Nullable String namespace, int jobId) {
        return mJobSet.get(uid, namespace, jobId);
    }

    /**
     * Iterate over the set of all jobs, invoking the supplied functor on each. This is for
     * customers who need to examine each job; we'd much rather not have to generate
     * transient unified collections for them to iterate over and then discard, or creating
     * iterators every time a client needs to perform a sweep.
     */
    public void forEachJob(Consumer<JobStatus> functor) {
        mJobSet.forEachJob(null, functor);
    }

    public void forEachJob(@Nullable Predicate<JobStatus> filterPredicate,
            Consumer<JobStatus> functor) {
        mJobSet.forEachJob(filterPredicate, functor);
    }

    public void forEachJob(int uid, Consumer<JobStatus> functor) {
        mJobSet.forEachJob(uid, functor);
    }

    public void forEachJobForSourceUid(int sourceUid, Consumer<JobStatus> functor) {
        mJobSet.forEachJobForSourceUid(sourceUid, functor);
    }

    private void maybeUpdateHighWaterMark() {
        if (mScheduledJob30MinHighWaterMark < mCurrentJobSetSize) {
            mScheduledJob30MinHighWaterMark = mCurrentJobSetSize;
        }
    }

    /** Version of the db schema. */
    private static final int JOBS_FILE_VERSION = 1;
    /**
     * For legacy reasons, this tag is used to encapsulate the entire job list.
     */
    private static final String XML_TAG_JOB_INFO = "job-info";
    /**
     * For legacy reasons, this tag represents a single {@link JobStatus} object.
     */
    private static final String XML_TAG_JOB = "job";
    /** Tag corresponds to constraints this job needs. */
    private static final String XML_TAG_PARAMS_CONSTRAINTS = "constraints";
    /** Tag corresponds to execution parameters. */
    private static final String XML_TAG_PERIODIC = "periodic";
    private static final String XML_TAG_ONEOFF = "one-off";
    private static final String XML_TAG_EXTRAS = "extras";
    private static final String XML_TAG_JOB_WORK_ITEM = "job-work-item";

    private void migrateJobFilesAsync() {
        synchronized (mLock) {
            mPendingJobWriteUids.put(ALL_UIDS, true);
        }
        synchronized (mWriteScheduleLock) {
            mSplitFileMigrationNeeded = true;
            maybeWriteStatusToDiskAsync();
        }
    }
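
    // On-disk shape sketch built from the tags above (abridged; the real attribute lists are
    // longer, see writeJobsMapImpl() and addAttributesToJobTag() below):
    // <job-info version="1">
    //   <job jobid=".." package=".." class=".." uid="..">
    //     <constraints .../>
    //     <periodic period=".." flex=".."/>   (or <one-off delay=".." deadline=".."/>)
    //     <extras .../>
    //     <job-work-item .../>
    //   </job>
    // </job-info>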

    /**
     * Every time the state changes we write all the jobs in one swath, instead of trying to
     * track incremental changes.
     */
    private void maybeWriteStatusToDiskAsync() {
        synchronized (mWriteScheduleLock) {
            if (!mWriteScheduled) {
                if (DEBUG) {
                    Slog.v(TAG, "Scheduling persist of jobs to disk.");
                }
                mIoHandler.postDelayed(mWriteRunnable, JOB_PERSIST_DELAY);
                mWriteScheduled = true;
            }
        }
    }

    @VisibleForTesting
    public void readJobMapFromDisk(JobSet jobSet, boolean rtcGood) {
        new ReadJobMapFromDiskRunnable(jobSet, rtcGood).run();
    }

    /** Write persisted JobStore state to disk synchronously. Should only be used for testing. */
    @VisibleForTesting
    public void writeStatusToDiskForTesting() {
        synchronized (mWriteScheduleLock) {
            if (mWriteScheduled) {
                throw new IllegalStateException("An asynchronous write is already scheduled.");
            }

            mWriteScheduled = true;
            mWriteRunnable.run();
        }
    }

    /**
     * Wait for any pending write to the persistent store to clear
     * @param maxWaitMillis Maximum time from present to wait
     * @return {@code true} if I/O cleared as expected, {@code false} if the wait
     *     timed out before the pending write completed.
     */
    @VisibleForTesting
    public boolean waitForWriteToCompleteForTesting(long maxWaitMillis) {
        final long start = SystemClock.uptimeMillis();
        final long end = start + maxWaitMillis;
        synchronized (mWriteScheduleLock) {
            while (mWriteScheduled || mWriteInProgress) {
                final long now = SystemClock.uptimeMillis();
                if (now >= end) {
                    // still not done and we've hit the end; failure
                    return false;
                }
                try {
                    mWriteScheduleLock.wait(now - start + maxWaitMillis);
                } catch (InterruptedException e) {
                    // Spurious; keep waiting
                    break;
                }
            }
        }
        return true;
    }

    /**
     * Returns a single string representation of the contents of the specified intArray.
     * If the intArray is [1, 2, 4] as the input, the return result will be the string "1,2,4".
     */
    @VisibleForTesting
    static String intArrayToString(int[] values) {
        final StringJoiner sj = new StringJoiner(",");
        for (final int value : values) {
            sj.add(String.valueOf(value));
        }
        return sj.toString();
    }
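
    // Round-trip example for the two helpers: intArrayToString(new int[]{1, 2, 4}) -> "1,2,4",
    // and stringToIntArray("1,2,4") -> [1, 2, 4]; an empty string maps to an empty array.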

    /**
     * Converts a string containing a comma-separated list of decimal representations
     * of ints into an array of int. If the string is not correctly formatted,
     * or if any value doesn't fit into an int, NumberFormatException is thrown.
     */
    @VisibleForTesting
    static int[] stringToIntArray(String str) {
        if (TextUtils.isEmpty(str)) return new int[0];
        final String[] arr = str.split(",");
        final int[] values = new int[arr.length];
        for (int i = 0; i < arr.length; i++) {
            values[i] = Integer.parseInt(arr[i]);
        }
        return values;
    }

    @VisibleForTesting
    static int extractUidFromJobFileName(@NonNull File file) {
        final String fileName = file.getName();
        if (fileName.startsWith(JOB_FILE_SPLIT_PREFIX)) {
            try {
                final int subEnd = fileName.length() - 4; // -4 for ".xml"
                final int uid = Integer.parseInt(
                        fileName.substring(JOB_FILE_SPLIT_PREFIX.length(), subEnd));
                if (uid < 0) {
                    return INVALID_UID;
                }
                return uid;
            } catch (Exception e) {
                Slog.e(TAG, "Unexpected file name format", e);
            }
        }
        return INVALID_UID;
    }

    /**
     * Runnable that writes {@link #mJobSet} out to xml.
     * NOTE: This Runnable locks on mLock
     */
    private final Runnable mWriteRunnable = new Runnable() {
        private final SparseArray<AtomicFile> mJobFiles = new SparseArray<>();
        private final CopyConsumer mPersistedJobCopier = new CopyConsumer();

        class CopyConsumer implements Consumer<JobStatus> {
            private final SparseArray<List<JobStatus>> mJobStoreCopy = new SparseArray<>();
            private boolean mCopyAllJobs;

            private void prepare() {
                mCopyAllJobs = !mUseSplitFiles || mPendingJobWriteUids.get(ALL_UIDS);
                if (mUseSplitFiles) {
                    // Put the set of changed UIDs in the copy list so that we update each file,
                    // especially if we've dropped all jobs for that UID.
                    if (mPendingJobWriteUids.get(ALL_UIDS)) {
                        // ALL_UIDS is only used when we switch file splitting policy or for tests,
                        // so going through the file list here shouldn't be
                        // a large performance hit on user devices.

                        final File[] files;
                        try {
                            files = mJobFileDirectory.listFiles();
                        } catch (SecurityException e) {
                            Slog.wtf(TAG, "Not allowed to read job file directory", e);
                            return;
                        }
                        if (files == null) {
                            Slog.wtfStack(TAG, "Couldn't get job file list");
                        } else {
                            for (File file : files) {
                                final int uid = extractUidFromJobFileName(file);
                                if (uid != INVALID_UID) {
                                    mJobStoreCopy.put(uid, new ArrayList<>());
                                }
                            }
                        }
                    } else {
                        for (int i = 0; i < mPendingJobWriteUids.size(); ++i) {
                            mJobStoreCopy.put(mPendingJobWriteUids.keyAt(i), new ArrayList<>());
                        }
                    }
                } else {
                    // Single file mode.
                    // Put the catchall UID in the copy list so that we update the single file,
                    // especially if we've dropped all persisted jobs.
                    mJobStoreCopy.put(ALL_UIDS, new ArrayList<>());
                }
            }

            @Override
            public void accept(JobStatus jobStatus) {
                final int uid = mUseSplitFiles ? jobStatus.getUid() : ALL_UIDS;
                if (jobStatus.isPersisted() && (mCopyAllJobs || mPendingJobWriteUids.get(uid))) {
                    List<JobStatus> uidJobList = mJobStoreCopy.get(uid);
                    if (uidJobList == null) {
                        uidJobList = new ArrayList<>();
                        mJobStoreCopy.put(uid, uidJobList);
                    }
                    uidJobList.add(new JobStatus(jobStatus));
                }
            }

            private void reset() {
                mJobStoreCopy.clear();
            }
        }
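
        // Copy-map sketch: after prepare() plus one forEachJob() pass through accept(),
        // mJobStoreCopy maps each dirty uid -> cloned persisted JobStatus list in split-file
        // mode, or collapses everything under the single ALL_UIDS (-1) key in single-file mode.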

        @Override
        public void run() {
            final long startElapsed = sElapsedRealtimeClock.millis();
            // Intentionally allow new scheduling of a write operation *before* we clone
            // the job set. If we reset it to false after cloning, there's a window in
            // which no new write will be scheduled but mLock is not held, i.e. a new
            // job might appear and fail to be recognized as needing a persist. The
            // potential cost is one redundant write of an identical set of jobs in the
            // rare case of that specific race, but by doing it this way we avoid quite
            // a bit of lock contention.
            synchronized (mWriteScheduleLock) {
                mWriteScheduled = false;
                if (mWriteInProgress) {
                    // Another runnable is currently writing. Postpone this new write task.
                    maybeWriteStatusToDiskAsync();
                    return;
                }
                mWriteInProgress = true;
            }
            final boolean useSplitFiles;
            synchronized (mLock) {
                // Clone the jobs so we can release the lock before writing.
                useSplitFiles = mUseSplitFiles;
                mPersistedJobCopier.prepare();
                mJobSet.forEachJob(null, mPersistedJobCopier);
                mPendingJobWriteUids.clear();
            }
            mPersistInfo.countAllJobsSaved = 0;
            mPersistInfo.countSystemServerJobsSaved = 0;
            mPersistInfo.countSystemSyncManagerJobsSaved = 0;
            for (int i = mPersistedJobCopier.mJobStoreCopy.size() - 1; i >= 0; --i) {
                AtomicFile file;
                if (useSplitFiles) {
                    final int uid = mPersistedJobCopier.mJobStoreCopy.keyAt(i);
                    file = mJobFiles.get(uid);
                    if (file == null) {
                        file = createJobFile(JOB_FILE_SPLIT_PREFIX + uid);
                        mJobFiles.put(uid, file);
                    }
                } else {
                    file = mJobsFile;
                }
                if (DEBUG) {
                    Slog.d(TAG, "Writing for " + mPersistedJobCopier.mJobStoreCopy.keyAt(i)
                            + " to " + file.getBaseFile().getName() + ": "
                            + mPersistedJobCopier.mJobStoreCopy.valueAt(i).size() + " jobs");
                }
                writeJobsMapImpl(file, mPersistedJobCopier.mJobStoreCopy.valueAt(i));
            }
            if (DEBUG) {
                Slog.v(TAG, "Finished writing, took " + (sElapsedRealtimeClock.millis()
                        - startElapsed) + "ms");
            }
            mPersistedJobCopier.reset();
            if (!useSplitFiles) {
                mJobFiles.clear();
            }
            // Update the last modified time of the directory to aid in RTC time verification
            // (see the JobStore constructor).
            mJobFileDirectory.setLastModified(sSystemClock.millis());
            synchronized (mWriteScheduleLock) {
                if (mSplitFileMigrationNeeded) {
                    final File[] files = mJobFileDirectory.listFiles();
                    for (File file : files) {
                        if (useSplitFiles) {
                            if (!file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
                                // Delete the now unused file so there's no confusion in the future.
                                file.delete();
                            }
                        } else if (file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
                            // Delete the now unused file so there's no confusion in the future.
                            file.delete();
                        }
                    }
                }
                mWriteInProgress = false;
                mWriteScheduleLock.notifyAll();
            }
        }

        private void writeJobsMapImpl(@NonNull AtomicFile file, @NonNull List<JobStatus> jobList) {
            int numJobs = 0;
            int numSystemJobs = 0;
            int numSyncJobs = 0;
            mEventLogger.setStartTime(SystemClock.uptimeMillis());
            try (FileOutputStream fos = file.startWrite()) {
                TypedXmlSerializer out = Xml.resolveSerializer(fos);
                out.startDocument(null, true);
                out.setFeature("http://xmlpull.org/v1/doc/features.html#indent-output", true);

                out.startTag(null, XML_TAG_JOB_INFO);
                out.attribute(null, "version", Integer.toString(JOBS_FILE_VERSION));
                for (int i = 0; i < jobList.size(); i++) {
                    JobStatus jobStatus = jobList.get(i);
                    if (DEBUG) {
                        Slog.d(TAG, "Saving job " + jobStatus.getJobId());
                    }
                    out.startTag(null, XML_TAG_JOB);
                    addAttributesToJobTag(out, jobStatus);
                    writeConstraintsToXml(out, jobStatus);
                    writeExecutionCriteriaToXml(out, jobStatus);
                    writeBundleToXml(jobStatus.getJob().getExtras(), out);
                    writeJobWorkItemsToXml(out, jobStatus);
                    out.endTag(null, XML_TAG_JOB);

                    numJobs++;
                    if (jobStatus.getUid() == Process.SYSTEM_UID) {
                        numSystemJobs++;
                        if (isSyncJob(jobStatus)) {
                            numSyncJobs++;
                        }
                    }
                }
                out.endTag(null, XML_TAG_JOB_INFO);
                out.endDocument();

                file.finishWrite(fos);
            } catch (IOException e) {
                if (DEBUG) {
                    Slog.v(TAG, "Error writing out job data.", e);
                }
            } catch (XmlPullParserException e) {
                if (DEBUG) {
                    Slog.d(TAG, "Error persisting bundle.", e);
                }
            } finally {
                mPersistInfo.countAllJobsSaved += numJobs;
                mPersistInfo.countSystemServerJobsSaved += numSystemJobs;
                mPersistInfo.countSystemSyncManagerJobsSaved += numSyncJobs;
            }
        }

        /**
         * Write out a tag with data comprising the required fields and bias of this job and
         * its client.
         */
        private void addAttributesToJobTag(TypedXmlSerializer out, JobStatus jobStatus)
                throws IOException {
            out.attribute(null, "jobid", Integer.toString(jobStatus.getJobId()));
            out.attribute(null, "package", jobStatus.getServiceComponent().getPackageName());
            out.attribute(null, "class", jobStatus.getServiceComponent().getClassName());
            if (jobStatus.getSourcePackageName() != null) {
                out.attribute(null, "sourcePackageName", jobStatus.getSourcePackageName());
            }
            if (jobStatus.getNamespace() != null) {
                out.attribute(null, "namespace", jobStatus.getNamespace());
            }
            if (jobStatus.getSourceTag() != null) {
                out.attribute(null, "sourceTag", jobStatus.getSourceTag());
            }
            out.attribute(null, "sourceUserId", String.valueOf(jobStatus.getSourceUserId()));
            out.attribute(null, "uid", Integer.toString(jobStatus.getUid()));
            out.attribute(null, "bias", String.valueOf(jobStatus.getBias()));
            out.attribute(null, "priority", String.valueOf(jobStatus.getJob().getPriority()));
            out.attribute(null, "flags", String.valueOf(jobStatus.getFlags()));
            if (jobStatus.getInternalFlags() != 0) {
                out.attribute(null, "internalFlags", String.valueOf(jobStatus.getInternalFlags()));
            }

            out.attribute(null, "lastSuccessfulRunTime",
                    String.valueOf(jobStatus.getLastSuccessfulRunTime()));
            out.attribute(null, "lastFailedRunTime",
                    String.valueOf(jobStatus.getLastFailedRunTime()));

            out.attributeLong(null, "cumulativeExecutionTime",
                    jobStatus.getCumulativeExecutionTimeMs());
        }

        private void writeBundleToXml(PersistableBundle extras, XmlSerializer out)
                throws IOException, XmlPullParserException {
            out.startTag(null, XML_TAG_EXTRAS);
            PersistableBundle extrasCopy = deepCopyBundle(extras, 10);
            extrasCopy.saveToXml(out);
            out.endTag(null, XML_TAG_EXTRAS);
        }

        private PersistableBundle deepCopyBundle(PersistableBundle bundle, int maxDepth) {
            if (maxDepth <= 0) {
                return null;
            }
            PersistableBundle copy = (PersistableBundle) bundle.clone();
            Set<String> keySet = bundle.keySet();
            for (String key : keySet) {
                Object o = copy.get(key);
                if (o instanceof PersistableBundle) {
                    PersistableBundle bCopy = deepCopyBundle((PersistableBundle) o, maxDepth - 1);
                    copy.putPersistableBundle(key, bCopy);
                }
            }
            return copy;
        }
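
        // Depth-limit example: deepCopyBundle(extras, 10) clones nested PersistableBundles up to
        // ten levels deep; a bundle nested beyond that is replaced with null rather than copied,
        // which bounds both the recursion and the size of the persisted <extras> tag.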

        /**
         * Write out a tag with data identifying this job's constraints. If the constraint isn't
         * here it doesn't apply.
         * TODO: b/183455312 Update this code to use proper serialization for NetworkRequest,
         *       because currently store is not including everything (like, UIDs, bandwidth,
         *       signal strength etc. are lost).
         */
        private void writeConstraintsToXml(TypedXmlSerializer out, JobStatus jobStatus)
                throws IOException {
            out.startTag(null, XML_TAG_PARAMS_CONSTRAINTS);
            final JobInfo job = jobStatus.getJob();
            if (jobStatus.hasConnectivityConstraint()) {
                final NetworkRequest network = jobStatus.getJob().getRequiredNetwork();
                out.attribute(null, "net-capabilities-csv", intArrayToString(
                        network.getCapabilities()));
                out.attribute(null, "net-forbidden-capabilities-csv", intArrayToString(
                        network.getForbiddenCapabilities()));
                out.attribute(null, "net-transport-types-csv", intArrayToString(
                        network.getTransportTypes()));
                if (job.getEstimatedNetworkDownloadBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "estimated-download-bytes",
                            job.getEstimatedNetworkDownloadBytes());
                }
                if (job.getEstimatedNetworkUploadBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "estimated-upload-bytes",
                            job.getEstimatedNetworkUploadBytes());
                }
                if (job.getMinimumNetworkChunkBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "minimum-network-chunk-bytes",
                            job.getMinimumNetworkChunkBytes());
                }
            }
            if (job.isRequireDeviceIdle()) {
                out.attribute(null, "idle", Boolean.toString(true));
            }
            if (job.isRequireCharging()) {
                out.attribute(null, "charging", Boolean.toString(true));
            }
            if (job.isRequireBatteryNotLow()) {
                out.attribute(null, "battery-not-low", Boolean.toString(true));
            }
            if (job.isRequireStorageNotLow()) {
                out.attribute(null, "storage-not-low", Boolean.toString(true));
            }
            if (job.isPreferBatteryNotLow()) {
                out.attributeBoolean(null, "prefer-battery-not-low", true);
            }
            if (job.isPreferCharging()) {
                out.attributeBoolean(null, "prefer-charging", true);
            }
            if (job.isPreferDeviceIdle()) {
                out.attributeBoolean(null, "prefer-idle", true);
            }
            out.endTag(null, XML_TAG_PARAMS_CONSTRAINTS);
        }

        private void writeExecutionCriteriaToXml(XmlSerializer out, JobStatus jobStatus)
                throws IOException {
            final JobInfo job = jobStatus.getJob();
            if (jobStatus.getJob().isPeriodic()) {
                out.startTag(null, XML_TAG_PERIODIC);
                out.attribute(null, "period", Long.toString(job.getIntervalMillis()));
                out.attribute(null, "flex", Long.toString(job.getFlexMillis()));
            } else {
                out.startTag(null, XML_TAG_ONEOFF);
            }

            // If we still have the persisted times, we need to record those directly because
            // we haven't yet been able to calculate the usual elapsed-timebase bounds
            // correctly due to wall-clock uncertainty.
            Pair<Long, Long> utcJobTimes = jobStatus.getPersistedUtcTimes();
            if (DEBUG && utcJobTimes != null) {
                Slog.i(TAG, "storing original UTC timestamps for " + jobStatus);
            }

            final long nowRTC = sSystemClock.millis();
            final long nowElapsed = sElapsedRealtimeClock.millis();
            if (jobStatus.hasDeadlineConstraint()) {
                // Wall clock deadline.
                final long deadlineWallclock = (utcJobTimes == null)
                        ? nowRTC + (jobStatus.getLatestRunTimeElapsed() - nowElapsed)
                        : utcJobTimes.second;
                out.attribute(null, "deadline", Long.toString(deadlineWallclock));
            }
            if (jobStatus.hasTimingDelayConstraint()) {
                final long delayWallclock = (utcJobTimes == null)
                        ? nowRTC + (jobStatus.getEarliestRunTime() - nowElapsed)
                        : utcJobTimes.first;
                out.attribute(null, "delay", Long.toString(delayWallclock));
            }

            // Only write out back-off policy if it differs from the default.
            // This also helps the case where the job is idle -> these aren't allowed to specify
            // back-off.
            if (jobStatus.getJob().getInitialBackoffMillis()
                    != JobInfo.DEFAULT_INITIAL_BACKOFF_MILLIS
                    || jobStatus.getJob().getBackoffPolicy() != JobInfo.DEFAULT_BACKOFF_POLICY) {
                out.attribute(null, "backoff-policy", Integer.toString(job.getBackoffPolicy()));
                out.attribute(null, "initial-backoff", Long.toString(job.getInitialBackoffMillis()));
            }
            if (job.isPeriodic()) {
                out.endTag(null, XML_TAG_PERIODIC);
            } else {
                out.endTag(null, XML_TAG_ONEOFF);
            }
        }

        private void writeJobWorkItemsToXml(@NonNull TypedXmlSerializer out,
                @NonNull JobStatus jobStatus) throws IOException, XmlPullParserException {
            // Write executing first since they're technically at the front of the queue.
            writeJobWorkItemListToXml(out, jobStatus.executingWork);
            writeJobWorkItemListToXml(out, jobStatus.pendingWork);
        }

        private void writeJobWorkItemListToXml(@NonNull TypedXmlSerializer out,
                @Nullable List<JobWorkItem> jobWorkItems)
                throws IOException, XmlPullParserException {
            if (jobWorkItems == null) {
                return;
            }
            // Write the items in list order to maintain the enqueue order.
            final int size = jobWorkItems.size();
            for (int i = 0; i < size; ++i) {
                final JobWorkItem item = jobWorkItems.get(i);
                if (item.getGrants() != null) {
                    // We currently don't allow persisting jobs when grants are involved.
                    // TODO(256618122): allow persisting JobWorkItems with grant flags
                    continue;
                }
                if (item.getIntent() != null) {
                    // Intent.saveToXml() doesn't persist everything, so we shouldn't attempt to
                    // persist these JobWorkItems at all.
                    Slog.wtf(TAG, "Encountered JobWorkItem with Intent in persisting list");
                    continue;
                }
                out.startTag(null, XML_TAG_JOB_WORK_ITEM);
                out.attributeInt(null, "delivery-count", item.getDeliveryCount());
                if (item.getEstimatedNetworkDownloadBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "estimated-download-bytes",
                            item.getEstimatedNetworkDownloadBytes());
                }
                if (item.getEstimatedNetworkUploadBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "estimated-upload-bytes",
                            item.getEstimatedNetworkUploadBytes());
                }
                if (item.getMinimumNetworkChunkBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "minimum-network-chunk-bytes",
                            item.getMinimumNetworkChunkBytes());
                }
                writeBundleToXml(item.getExtras(), out);
                out.endTag(null, XML_TAG_JOB_WORK_ITEM);
            }
        }
    };

    /**
     * Translate the supplied RTC times to the elapsed timebase, with clamping appropriate
     * to interpreting them as a job's delay + deadline times for alarm-setting purposes.
     * @param rtcTimes a Pair<Long, Long> in which {@code first} is the "delay" earliest
     *     allowable runtime for the job, and {@code second} is the "deadline" time at which
     *     the job becomes overdue.
     */
    private static Pair<Long, Long> convertRtcBoundsToElapsed(Pair<Long, Long> rtcTimes,
            long nowElapsed) {
        final long nowWallclock = sSystemClock.millis();
        final long earliest = (rtcTimes.first > JobStatus.NO_EARLIEST_RUNTIME)
                ? nowElapsed + Math.max(rtcTimes.first - nowWallclock, 0)
                : JobStatus.NO_EARLIEST_RUNTIME;
        final long latest = (rtcTimes.second < JobStatus.NO_LATEST_RUNTIME)
                ? nowElapsed + Math.max(rtcTimes.second - nowWallclock, 0)
                : JobStatus.NO_LATEST_RUNTIME;
        return Pair.create(earliest, latest);
    }
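
    // Worked example with illustrative numbers: nowWallclock = 1_000_000, nowElapsed = 50_000,
    // rtcTimes = (1_030_000, 1_090_000) -> (80_000, 140_000) in the elapsed timebase; an RTC
    // bound already in the past clamps to nowElapsed instead of going negative.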

    private static boolean isSyncJob(JobStatus status) {
        return com.android.server.content.SyncJobService.class.getName()
                .equals(status.getServiceComponent().getClassName());
    }

    /**
     * Runnable that reads list of persisted job from xml. This is run once at start up, so doesn't
     * need to go through {@link JobStore#add(com.android.server.job.controllers.JobStatus)}.
     */
    private final class ReadJobMapFromDiskRunnable implements Runnable {
        private final JobSet jobSet;
        private final boolean rtcGood;
        private final CountDownLatch mCompletionLatch;

        /**
         * @param jobSet Reference to the (empty) set of JobStatus objects that back the JobStore,
         *               so that after disk read we can populate it directly.
         */
        ReadJobMapFromDiskRunnable(JobSet jobSet, boolean rtcIsGood) {
            this(jobSet, rtcIsGood, null);
        }

        ReadJobMapFromDiskRunnable(JobSet jobSet, boolean rtcIsGood,
                @Nullable CountDownLatch completionLatch) {
            this.jobSet = jobSet;
            this.rtcGood = rtcIsGood;
            this.mCompletionLatch = completionLatch;
        }

        @Override
        public void run() {
            if (!mJobFileDirectory.isDirectory()) {
                Slog.wtf(TAG, "jobs directory isn't a directory O.O");
                mJobFileDirectory.mkdirs();
                return;
            }

            int numJobs = 0;
            int numSystemJobs = 0;
            int numSyncJobs = 0;
            List<JobStatus> jobs;
            final File[] files;
            try {
                files = mJobFileDirectory.listFiles();
            } catch (SecurityException e) {
                Slog.wtf(TAG, "Not allowed to read job file directory", e);
                return;
            }
            if (files == null) {
                Slog.wtfStack(TAG, "Couldn't get job file list");
                return;
            }
            boolean needFileMigration = false;
            long nowElapsed = sElapsedRealtimeClock.millis();
            int numDuplicates = 0;
            synchronized (mLock) {
                for (File file : files) {
                    final AtomicFile aFile = createJobFile(file);
                    try (FileInputStream fis = aFile.openRead()) {
                        jobs = readJobMapImpl(fis, rtcGood, nowElapsed);
                        if (jobs != null) {
                            for (int i = 0; i < jobs.size(); i++) {
                                JobStatus js = jobs.get(i);
                                final JobStatus existingJob = this.jobSet.get(
                                        js.getUid(), js.getNamespace(), js.getJobId());
                                if (existingJob != null) {
                                    numDuplicates++;
                                    // Jobs are meant to have unique uid-namespace-jobId
                                    // combinations, but we've somehow read multiple jobs with the
                                    // combination. Drop the latter one since keeping both will
                                    // result in other issues.
                                    continue;
                                }
                                js.prepareLocked();
                                js.enqueueTime = nowElapsed;
                                this.jobSet.add(js);

                                numJobs++;
                                if (js.getUid() == Process.SYSTEM_UID) {
                                    numSystemJobs++;
                                    if (isSyncJob(js)) {
                                        numSyncJobs++;
                                    }
                                }
                            }
                        }
                    } catch (FileNotFoundException e) {
                        // mJobFileDirectory.listFiles() gave us this file...why can't we find it???
                        Slog.e(TAG, "Could not find jobs file: " + file.getName());
                    } catch (XmlPullParserException | IOException e) {
                        Slog.wtf(TAG, "Error in " + file.getName(), e);
                    } catch (Exception e) {
                        // Crashing at this point would result in a boot loop, so live with a
                        // generic Exception for system stability's sake.
                        Slog.wtf(TAG, "Unexpected exception", e);
                    }
                    if (mUseSplitFiles) {
                        if (!file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
                            // We're supposed to be using the split file architecture, but we still
                            // have the old job file around. Fully migrate and remove the old file.
                            needFileMigration = true;
                        }
                    } else if (file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
                        // We're supposed to be using the legacy single file architecture,
                        // but we still have some job split files around. Fully migrate
                        // and remove the split files.
                        needFileMigration = true;
                    }
                }
                if (mPersistInfo.countAllJobsLoaded < 0) { // Only set them once.
                    mPersistInfo.countAllJobsLoaded = numJobs;
                    mPersistInfo.countSystemServerJobsLoaded = numSystemJobs;
                    mPersistInfo.countSystemSyncManagerJobsLoaded = numSyncJobs;
                }
            }
            Slog.i(TAG, "Read " + numJobs + " jobs");
            if (needFileMigration) {
                migrateJobFilesAsync();
            }

            if (numDuplicates > 0) {
                Slog.wtf(TAG, "Encountered " + numDuplicates + " duplicate persisted jobs");
            }

            // Log the count immediately after loading from boot.
            mCurrentJobSetSize = numJobs;
            mScheduledJob30MinHighWaterMark = mCurrentJobSetSize;
            mScheduledJobHighWaterMarkLoggingRunnable.run();

            if (mCompletionLatch != null) {
                mCompletionLatch.countDown();
            }
        }

        private List<JobStatus> readJobMapImpl(InputStream fis, boolean rtcIsGood, long nowElapsed)
                throws XmlPullParserException, IOException {
            TypedXmlPullParser parser = Xml.resolvePullParser(fis);

            int eventType = parser.getEventType();
            while (eventType != XmlPullParser.START_TAG
                    && eventType != XmlPullParser.END_DOCUMENT) {
                eventType = parser.next();
                Slog.d(TAG, "Start tag: " + parser.getName());
            }
            if (eventType == XmlPullParser.END_DOCUMENT) {
                if (DEBUG) {
                    Slog.d(TAG, "No persisted jobs.");
                }
                return null;
            }

            String tagName = parser.getName();
            if (XML_TAG_JOB_INFO.equals(tagName)) {
                final List<JobStatus> jobs = new ArrayList<JobStatus>();
                final int version = parser.getAttributeInt(null, "version");
                // Read in version info.
                if (version > JOBS_FILE_VERSION || version < 0) {
                    Slog.d(TAG, "Invalid version number, aborting jobs file read.");
                    return null;
                }

                eventType = parser.next();
                do {
                    // Read each <job/>
                    if (eventType == XmlPullParser.START_TAG) {
                        tagName = parser.getName();
                        // Start reading job.
                        if (XML_TAG_JOB.equals(tagName)) {
                            JobStatus persistedJob =
                                    restoreJobFromXml(rtcIsGood, parser, version, nowElapsed);
                            if (persistedJob != null) {
                                if (DEBUG) {
                                    Slog.d(TAG, "Read out " + persistedJob);
                                }
                                jobs.add(persistedJob);
                            } else {
                                Slog.d(TAG, "Error reading job from file.");
                            }
                        }
                    }
                    eventType = parser.next();
                } while (eventType != XmlPullParser.END_DOCUMENT);
                return jobs;
            }
            return null;
        }

        /**
         * @param parser Xml parser at the beginning of a "<job/>" tag. The next "parser.next()"
         *               call will take the parser into the body of the job tag.
         * @return Newly instantiated job holding all the information we just read out of the
         *         xml tag.
         */
        private JobStatus restoreJobFromXml(boolean rtcIsGood, TypedXmlPullParser parser,
                int schemaVersion, long nowElapsed) throws XmlPullParserException, IOException {
            JobInfo.Builder jobBuilder;
            int uid, sourceUserId;
            long lastSuccessfulRunTime;
            long lastFailedRunTime;
            long cumulativeExecutionTime;
            int internalFlags = 0;

            // Read out job identifier attributes and bias.
            try {
                jobBuilder = buildBuilderFromXml(parser);
                jobBuilder.setPersisted(true);
                uid = Integer.parseInt(parser.getAttributeValue(null, "uid"));

                String val;
                if (schemaVersion == 0) {
                    val = parser.getAttributeValue(null, "priority");
                    if (val != null) {
                        jobBuilder.setBias(Integer.parseInt(val));
                    }
                } else if (schemaVersion >= 1) {
                    val = parser.getAttributeValue(null, "bias");
                    if (val != null) {
                        jobBuilder.setBias(Integer.parseInt(val));
                    }
                    val = parser.getAttributeValue(null, "priority");
                    if (val != null) {
                        jobBuilder.setPriority(Integer.parseInt(val));
                    }
                }
                val = parser.getAttributeValue(null, "flags");
                if (val != null) {
                    jobBuilder.setFlags(Integer.parseInt(val));
                }
                val = parser.getAttributeValue(null, "internalFlags");
                if (val != null) {
                    internalFlags = Integer.parseInt(val);
                }
                val = parser.getAttributeValue(null, "sourceUserId");
                sourceUserId = val == null ? -1 : Integer.parseInt(val);

                val = parser.getAttributeValue(null, "lastSuccessfulRunTime");
                lastSuccessfulRunTime = val == null ? 0 : Long.parseLong(val);

                val = parser.getAttributeValue(null, "lastFailedRunTime");
                lastFailedRunTime = val == null ? 0 : Long.parseLong(val);

                cumulativeExecutionTime =
                        parser.getAttributeLong(null, "cumulativeExecutionTime", 0);
            } catch (NumberFormatException e) {
                Slog.e(TAG, "Error parsing job's required fields, skipping");
                return null;
            }

            String sourcePackageName = parser.getAttributeValue(null, "sourcePackageName");
            final String namespace = parser.getAttributeValue(null, "namespace");
            final String sourceTag = parser.getAttributeValue(null, "sourceTag");

            int eventType;
            // Read out constraints tag.
            do {
                eventType = parser.next();
            } while (eventType == XmlPullParser.TEXT); // Push through to next START_TAG.

            if (!(eventType == XmlPullParser.START_TAG
                    && XML_TAG_PARAMS_CONSTRAINTS.equals(parser.getName()))) {
                // Expecting a <constraints> start tag.
                return null;
            }
            try {
                buildConstraintsFromXml(jobBuilder, parser);
            } catch (NumberFormatException e) {
                Slog.d(TAG, "Error reading constraints, skipping.");
                return null;
            } catch (XmlPullParserException e) {
                Slog.d(TAG, "Error Parser Exception.", e);
                return null;
            } catch (IOException e) {
                Slog.d(TAG, "Error I/O Exception.", e);
                return null;
            } catch (IllegalArgumentException e) {
                Slog.e(TAG, "Constraints contained invalid data", e);
                return null;
            }

            parser.next(); // Consume </constraints>

            // Read out execution parameters tag.
            do {
                eventType = parser.next();
            } while (eventType == XmlPullParser.TEXT);
            if (eventType != XmlPullParser.START_TAG) {
                return null;
            }

            // Tuple of (earliest runtime, latest runtime) in UTC.
            final Pair<Long, Long> rtcRuntimes = buildRtcExecutionTimesFromXml(parser);

            Pair<Long, Long> elapsedRuntimes = convertRtcBoundsToElapsed(rtcRuntimes, nowElapsed);

            if (XML_TAG_PERIODIC.equals(parser.getName())) {
                try {
                    String val = parser.getAttributeValue(null, "period");
                    final long periodMillis = Long.parseLong(val);
                    val = parser.getAttributeValue(null, "flex");
                    final long flexMillis = (val != null) ? Long.valueOf(val) : periodMillis;
                    jobBuilder.setPeriodic(periodMillis, flexMillis);
                    // As a sanity check, cap the recreated run time to be no later than flex+period
                    // from now. This is the latest the periodic could be pushed out. This could
                    // happen if the periodic ran early (at flex time before period), and then the
                    // device rebooted.
                    if (elapsedRuntimes.second > nowElapsed + periodMillis + flexMillis) {
                        final long clampedLateRuntimeElapsed = nowElapsed + flexMillis
                                + periodMillis;
                        final long clampedEarlyRuntimeElapsed = clampedLateRuntimeElapsed
                                - flexMillis;
                        Slog.w(TAG,
                                String.format("Periodic job for uid='%d' persisted run-time is"
                                                + " too big [%s, %s]. Clamping to [%s,%s]",
                                        uid,
                                        DateUtils.formatElapsedTime(elapsedRuntimes.first / 1000),
                                        DateUtils.formatElapsedTime(elapsedRuntimes.second / 1000),
                                        DateUtils.formatElapsedTime(
                                                clampedEarlyRuntimeElapsed / 1000),
                                        DateUtils.formatElapsedTime(
                                                clampedLateRuntimeElapsed / 1000))
                        );
                        elapsedRuntimes =
                                Pair.create(clampedEarlyRuntimeElapsed, clampedLateRuntimeElapsed);
                    }
                } catch (NumberFormatException e) {
                    Slog.d(TAG, "Error reading periodic execution criteria, skipping.");
                    return null;
                }
            } else if (XML_TAG_ONEOFF.equals(parser.getName())) {
                try {
                    if (elapsedRuntimes.first != JobStatus.NO_EARLIEST_RUNTIME) {
                        jobBuilder.setMinimumLatency(elapsedRuntimes.first - nowElapsed);
                    }
                    if (elapsedRuntimes.second != JobStatus.NO_LATEST_RUNTIME) {
                        jobBuilder.setOverrideDeadline(
                                elapsedRuntimes.second - nowElapsed);
                    }
                } catch (NumberFormatException e) {
                    Slog.d(TAG, "Error reading job execution criteria, skipping.");
                    return null;
                }
            } else {
                if (DEBUG) {
                    Slog.d(TAG, "Invalid parameter tag, skipping - " + parser.getName());
                }
                // Expecting a parameters start tag.
                return null;
            }
            maybeBuildBackoffPolicyFromXml(jobBuilder, parser);

            parser.nextTag(); // Consume parameters end tag.

            // Read out extras Bundle.
            do {
                eventType = parser.next();
            } while (eventType == XmlPullParser.TEXT);
            if (!(eventType == XmlPullParser.START_TAG
                    && XML_TAG_EXTRAS.equals(parser.getName()))) {
                if (DEBUG) {
                    Slog.d(TAG, "Error reading extras, skipping.");
                }
                return null;
            }

            final PersistableBundle extras;
            try {
                extras = PersistableBundle.restoreFromXml(parser);
                jobBuilder.setExtras(extras);
            } catch (IllegalArgumentException e) {
                Slog.e(TAG, "Persisted extras contained invalid data", e);
                return null;
            }
            eventType = parser.nextTag(); // Consume </extras>

            List<JobWorkItem> jobWorkItems = null;
            if (eventType == XmlPullParser.START_TAG
                    && XML_TAG_JOB_WORK_ITEM.equals(parser.getName())) {
                jobWorkItems = readJobWorkItemsFromXml(parser);
            }

            final JobInfo builtJob;
            try {
                // Don't perform prefetch-deadline check here. Apps targeting S- shouldn't have
                // any prefetch-with-deadline jobs accidentally dropped. It's not worth doing
                // target SDK version checks here for apps targeting T+. There's no way for an
                // app to keep a perpetually scheduled prefetch job with a deadline. Prefetch jobs
                // with a deadline would run and then any newly scheduled prefetch jobs wouldn't
                // have a deadline. If a job is rescheduled (via jobFinished(true) or onStopJob()'s
                // return value), the deadline is dropped. Periodic jobs require all constraints
                // to be met, so there's no issue with their deadlines.
                // The same logic applies for other target SDK-based validation checks.
                builtJob = jobBuilder.build(false, false);
            } catch (Exception e) {
                Slog.w(TAG, "Unable to build job from XML, ignoring: " + jobBuilder.summarize(), e);
                return null;
            }

            // Migrate sync jobs forward from earlier, incomplete representation
            if ("android".equals(sourcePackageName)
                    && extras != null
                    && extras.getBoolean("SyncManagerJob", false)) {
                sourcePackageName = extras.getString("owningPackage", sourcePackageName);
                if (DEBUG) {
                    Slog.i(TAG, "Fixing up sync job source package name from 'android' to '"
                            + sourcePackageName + "'");
                }
            }

            // And now we're done
            final int appBucket = JobSchedulerService.standbyBucketForPackage(sourcePackageName,
                    sourceUserId, nowElapsed);
            JobStatus js = new JobStatus(
                    builtJob, uid, sourcePackageName, sourceUserId,
                    appBucket, namespace, sourceTag,
                    elapsedRuntimes.first, elapsedRuntimes.second,
                    lastSuccessfulRunTime, lastFailedRunTime, cumulativeExecutionTime,
                    (rtcIsGood) ? null : rtcRuntimes, internalFlags, /* dynamicConstraints */ 0);
            if (jobWorkItems != null) {
                for (int i = 0; i < jobWorkItems.size(); ++i) {
                    js.enqueueWorkLocked(jobWorkItems.get(i));
                }
            }
            return js;
        }

        private JobInfo.Builder buildBuilderFromXml(TypedXmlPullParser parser)
                throws XmlPullParserException {
            // Pull out required fields from <job> attributes.
            int jobId = parser.getAttributeInt(null, "jobid");
            String packageName = parser.getAttributeValue(null, "package");
            String className = parser.getAttributeValue(null, "class");
            ComponentName cname = new ComponentName(packageName, className);

            return new JobInfo.Builder(jobId, cname);
        }

        /**
         * In S, there has been a change in format to make the code more robust and more
         * maintainable.
         * If the capabilities are bits 4, 14, 15, the format in R stores them as one packed
         * long string, netCapabilitiesLong = "49168", in the old XML file attribute
         * "net-capabilities". The format in S is the int array string, netCapabilitiesIntArray
         * = "4,14,15", in the new XML file attribute "net-capabilities-csv".
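         * (Worked arithmetic for that example: 2^4 + 2^14 + 2^15 = 16 + 16384 + 32768 = 49168.)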
        /**
         * In S, the persisted network-constraint format changed to make the code more robust
         * and more maintainable.
         * In R, capabilities were stored as a packed bitmask: if the capabilities are bits
         * 4, 14, and 15, the value is the long string netCapabilitiesLong = '49168', read
         * from the old XML attribute "net-capabilities".
         * In S, they are stored as an int-array string, netCapabilitiesIntArray = '4,14,15',
         * read from the new XML attribute "net-capabilities-csv".
         * For backward compatibility, the old format is still supported when reading old XML,
         * but in order to avoid issues with OEM-defined flags, the accepted capabilities and
         * transports are limited to those (maxNetCapabilityInR & maxTransportInR) defined in R.
         */
        private void buildConstraintsFromXml(JobInfo.Builder jobBuilder, TypedXmlPullParser parser)
                throws XmlPullParserException, IOException {
            String val;
            String netCapabilitiesLong = null;
            String netForbiddenCapabilitiesLong = null;
            String netTransportTypesLong = null;

            final String netCapabilitiesIntArray = parser.getAttributeValue(
                    null, "net-capabilities-csv");
            final String netForbiddenCapabilitiesIntArray = parser.getAttributeValue(
                    null, "net-forbidden-capabilities-csv");
            final String netTransportTypesIntArray = parser.getAttributeValue(
                    null, "net-transport-types-csv");
            if (netCapabilitiesIntArray == null || netTransportTypesIntArray == null) {
                netCapabilitiesLong = parser.getAttributeValue(null, "net-capabilities");
                netForbiddenCapabilitiesLong = parser.getAttributeValue(
                        null, "net-unwanted-capabilities");
                netTransportTypesLong = parser.getAttributeValue(null, "net-transport-types");
            }

            if ((netCapabilitiesIntArray != null) && (netTransportTypesIntArray != null)) {
                // S+ format. No capability or transport validation since the values should be in
                // line with what's defined in the Connectivity mainline module.
                final NetworkRequest.Builder builder = new NetworkRequest.Builder()
                        .clearCapabilities();

                for (int capability : stringToIntArray(netCapabilitiesIntArray)) {
                    builder.addCapability(capability);
                }

                for (int forbiddenCapability : stringToIntArray(netForbiddenCapabilitiesIntArray)) {
                    builder.addForbiddenCapability(forbiddenCapability);
                }

                for (int transport : stringToIntArray(netTransportTypesIntArray)) {
                    builder.addTransportType(transport);
                }
                jobBuilder
                        .setRequiredNetwork(builder.build())
                        .setEstimatedNetworkBytes(
                                parser.getAttributeLong(null,
                                        "estimated-download-bytes", JobInfo.NETWORK_BYTES_UNKNOWN),
                                parser.getAttributeLong(null,
                                        "estimated-upload-bytes", JobInfo.NETWORK_BYTES_UNKNOWN))
                        .setMinimumNetworkChunkBytes(
                                parser.getAttributeLong(null,
                                        "minimum-network-chunk-bytes",
                                        JobInfo.NETWORK_BYTES_UNKNOWN));
            } else if (netCapabilitiesLong != null && netTransportTypesLong != null) {
                // Format used on R- builds. Drop any unexpected capabilities and transports.
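                // e.g. the R-era bitmask "49168" from the javadoc above (bits 4, 14, and 15 set)
                // unpacks to the capability ints {4, 14, 15}; anything above the R-defined
                // maxima below is silently dropped.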
                final NetworkRequest.Builder builder = new NetworkRequest.Builder()
                        .clearCapabilities();
                final int maxNetCapabilityInR = NET_CAPABILITY_TEMPORARILY_NOT_METERED;
                // We're okay throwing NFE here; caught by caller
                for (int capability : BitUtils.unpackBits(Long.parseLong(
                        netCapabilitiesLong))) {
                    if (capability <= maxNetCapabilityInR) {
                        builder.addCapability(capability);
                    }
                }
                for (int forbiddenCapability : BitUtils.unpackBits(Long.parseLong(
                        netForbiddenCapabilitiesLong))) {
                    if (forbiddenCapability <= maxNetCapabilityInR) {
                        builder.addForbiddenCapability(forbiddenCapability);
                    }
                }

                final int maxTransportInR = TRANSPORT_TEST;
                for (int transport : BitUtils.unpackBits(Long.parseLong(
                        netTransportTypesLong))) {
                    if (transport <= maxTransportInR) {
                        builder.addTransportType(transport);
                    }
                }
                jobBuilder.setRequiredNetwork(builder.build());
                // Estimated bytes weren't persisted on R- builds, so no point querying for the
                // attributes here.
            } else {
                // Read legacy values.
                val = parser.getAttributeValue(null, "connectivity");
                if (val != null) {
                    jobBuilder.setRequiredNetworkType(JobInfo.NETWORK_TYPE_ANY);
                }
                val = parser.getAttributeValue(null, "metered");
                if (val != null) {
                    jobBuilder.setRequiredNetworkType(JobInfo.NETWORK_TYPE_METERED);
                }
                val = parser.getAttributeValue(null, "unmetered");
                if (val != null) {
                    jobBuilder.setRequiredNetworkType(JobInfo.NETWORK_TYPE_UNMETERED);
                }
                val = parser.getAttributeValue(null, "not-roaming");
                if (val != null) {
                    jobBuilder.setRequiredNetworkType(JobInfo.NETWORK_TYPE_NOT_ROAMING);
                }
            }

            val = parser.getAttributeValue(null, "idle");
            if (val != null) {
                jobBuilder.setRequiresDeviceIdle(true);
            }
            val = parser.getAttributeValue(null, "charging");
            if (val != null) {
                jobBuilder.setRequiresCharging(true);
            }
            val = parser.getAttributeValue(null, "battery-not-low");
            if (val != null) {
                jobBuilder.setRequiresBatteryNotLow(true);
            }
            val = parser.getAttributeValue(null, "storage-not-low");
            if (val != null) {
                jobBuilder.setRequiresStorageNotLow(true);
            }

            jobBuilder.setPrefersBatteryNotLow(
                    parser.getAttributeBoolean(null, "prefer-battery-not-low", false));
            jobBuilder.setPrefersCharging(
                    parser.getAttributeBoolean(null, "prefer-charging", false));
            jobBuilder.setPrefersDeviceIdle(
                    parser.getAttributeBoolean(null, "prefer-idle", false));
        }

        /**
         * Builds the back-off policy out of the params tag. These attributes may not exist,
         * depending on whether a back-off policy was set when the job was first scheduled.
         */
        private void maybeBuildBackoffPolicyFromXml(JobInfo.Builder jobBuilder,
                XmlPullParser parser) {
            String val = parser.getAttributeValue(null, "initial-backoff");
            if (val != null) {
                long initialBackoff = Long.parseLong(val);
                val = parser.getAttributeValue(null, "backoff-policy");
                int backoffPolicy = Integer.parseInt(val); // Will throw NFE which we catch higher up.
                jobBuilder.setBackoffCriteria(initialBackoff, backoffPolicy);
            }
        }
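        // Illustrative sketch (attribute values hypothetical): initial-backoff="30000" with
        // backoff-policy="1" (JobInfo.BACKOFF_POLICY_EXPONENTIAL) restores a job whose retry
        // delay starts at 30 seconds and grows exponentially with each failure;
        // backoff-policy="0" (JobInfo.BACKOFF_POLICY_LINEAR) would grow it linearly instead.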
        /**
         * Extract a job's earliest/latest run time data from XML. These are returned in
         * unadjusted UTC wall clock time, because we do not yet know whether the system
         * clock is reliable for purposes of calculating deltas from 'now'.
         *
         * @param parser the parser from which the "delay" and "deadline" attributes are read
         * @return A Pair of timestamps in UTC wall-clock time. The first is the earliest
         * time at which the job is to become runnable, and the second is the deadline at
         * which it becomes overdue to execute.
         */
        private Pair<Long, Long> buildRtcExecutionTimesFromXml(TypedXmlPullParser parser) {
            // Pull out execution time data.
            final long earliestRunTimeRtc =
                    parser.getAttributeLong(null, "delay", JobStatus.NO_EARLIEST_RUNTIME);
            final long latestRunTimeRtc =
                    parser.getAttributeLong(null, "deadline", JobStatus.NO_LATEST_RUNTIME);
            return Pair.create(earliestRunTimeRtc, latestRunTimeRtc);
        }

        @NonNull
        private List<JobWorkItem> readJobWorkItemsFromXml(TypedXmlPullParser parser)
                throws IOException, XmlPullParserException {
            List<JobWorkItem> jobWorkItems = new ArrayList<>();

            for (int eventType = parser.getEventType(); eventType != XmlPullParser.END_DOCUMENT;
                    eventType = parser.next()) {
                final String tagName = parser.getName();
                if (!XML_TAG_JOB_WORK_ITEM.equals(tagName)) {
                    // We're no longer operating with work items.
                    break;
                }
                try {
                    JobWorkItem jwi = readJobWorkItemFromXml(parser);
                    if (jwi != null) {
                        jobWorkItems.add(jwi);
                    }
                } catch (Exception e) {
                    // If there's an issue with one JobWorkItem, drop only that one item, not the
                    // whole job.
                    Slog.e(TAG, "Problem with persisted JobWorkItem", e);
                }
            }

            return jobWorkItems;
        }

        @Nullable
        private JobWorkItem readJobWorkItemFromXml(TypedXmlPullParser parser)
                throws IOException, XmlPullParserException {
            JobWorkItem.Builder jwiBuilder = new JobWorkItem.Builder();

            jwiBuilder
                    .setDeliveryCount(parser.getAttributeInt(null, "delivery-count"))
                    .setEstimatedNetworkBytes(
                            parser.getAttributeLong(null,
                                    "estimated-download-bytes", JobInfo.NETWORK_BYTES_UNKNOWN),
                            parser.getAttributeLong(null,
                                    "estimated-upload-bytes", JobInfo.NETWORK_BYTES_UNKNOWN))
                    .setMinimumNetworkChunkBytes(parser.getAttributeLong(null,
                            "minimum-network-chunk-bytes", JobInfo.NETWORK_BYTES_UNKNOWN));
            parser.next();
            try {
                final PersistableBundle extras = PersistableBundle.restoreFromXml(parser);
                jwiBuilder.setExtras(extras);
            } catch (IllegalArgumentException e) {
                Slog.e(TAG, "Persisted extras contained invalid data", e);
                return null;
            }

            try {
                return jwiBuilder.build();
            } catch (Exception e) {
                Slog.e(TAG, "Invalid JobWorkItem", e);
                return null;
            }
        }
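        // Illustrative sketch (attribute names from the reads above; the element tag is the
        // value of XML_TAG_JOB_WORK_ITEM, and all values here are hypothetical):
        //   <job-work-item delivery-count="1" estimated-download-bytes="1048576"
        //           estimated-upload-bytes="-1" minimum-network-chunk-bytes="-1">
        //       <extras ... />
        //   </job-work-item>
        // where -1 is JobInfo.NETWORK_BYTES_UNKNOWN, the default applied when an estimate
        // was never persisted.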
    }

    /** Set of all tracked jobs. */
    @VisibleForTesting
    public static final class JobSet {
        @VisibleForTesting // Key is the getUid() originator of the jobs in each sheaf
        final SparseArray<ArraySet<JobStatus>> mJobs;

        @VisibleForTesting // Same data but with the key as getSourceUid() of the jobs in each sheaf
        final SparseArray<ArraySet<JobStatus>> mJobsPerSourceUid;

        public JobSet() {
            mJobs = new SparseArray<ArraySet<JobStatus>>();
            mJobsPerSourceUid = new SparseArray<>();
        }

        public ArraySet<JobStatus> getJobsByUid(int uid) {
            ArraySet<JobStatus> matchingJobs = new ArraySet<>();
            getJobsByUid(uid, matchingJobs);
            return matchingJobs;
        }

        public void getJobsByUid(int uid, Set<JobStatus> insertInto) {
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            if (jobs != null) {
                insertInto.addAll(jobs);
            }
        }

        @NonNull
        public ArraySet<JobStatus> getJobsBySourceUid(int sourceUid) {
            final ArraySet<JobStatus> result = new ArraySet<>();
            getJobsBySourceUid(sourceUid, result);
            return result;
        }

        public void getJobsBySourceUid(int sourceUid, Set<JobStatus> insertInto) {
            final ArraySet<JobStatus> jobs = mJobsPerSourceUid.get(sourceUid);
            if (jobs != null) {
                insertInto.addAll(jobs);
            }
        }

        public boolean add(JobStatus job) {
            final int uid = job.getUid();
            final int sourceUid = job.getSourceUid();
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            if (jobs == null) {
                jobs = new ArraySet<JobStatus>();
                mJobs.put(uid, jobs);
            }
            ArraySet<JobStatus> jobsForSourceUid = mJobsPerSourceUid.get(sourceUid);
            if (jobsForSourceUid == null) {
                jobsForSourceUid = new ArraySet<>();
                mJobsPerSourceUid.put(sourceUid, jobsForSourceUid);
            }
            final boolean added = jobs.add(job);
            final boolean addedInSource = jobsForSourceUid.add(job);
            if (added != addedInSource) {
                Slog.wtf(TAG, "mJobs and mJobsPerSourceUid mismatch; caller=" + added
                        + " source=" + addedInSource);
            }
            return added || addedInSource;
        }

        public boolean remove(JobStatus job) {
            final int uid = job.getUid();
            final ArraySet<JobStatus> jobs = mJobs.get(uid);
            final int sourceUid = job.getSourceUid();
            final ArraySet<JobStatus> jobsForSourceUid = mJobsPerSourceUid.get(sourceUid);
            final boolean didRemove = jobs != null && jobs.remove(job);
            final boolean sourceRemove = jobsForSourceUid != null && jobsForSourceUid.remove(job);
            if (didRemove != sourceRemove) {
                Slog.wtf(TAG, "Job presence mismatch; caller=" + didRemove
                        + " source=" + sourceRemove);
            }
            if (didRemove || sourceRemove) {
                // No more jobs for this uid? Let the now-empty set objects be GC'd.
                if (jobs != null && jobs.size() == 0) {
                    mJobs.remove(uid);
                }
                if (jobsForSourceUid != null && jobsForSourceUid.size() == 0) {
                    mJobsPerSourceUid.remove(sourceUid);
                }
                return true;
            }
            return false;
        }
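        // Usage sketch (hypothetical uids): a job scheduled by uid 10001 on behalf of uid 10002
        // is indexed in both maps by add() -- under 10001 in mJobs and under 10002 in
        // mJobsPerSourceUid -- so getJobsByUid(10001) and getJobsBySourceUid(10002) both
        // return it, and remove() deletes it from both.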
        /**
         * Removes the jobs of all users not included in the keepUserIds array of user ids.
         * This will remove jobs scheduled *by* and *for* any unlisted users.
         */
        public void removeJobsOfUnlistedUsers(final int[] keepUserIds) {
            final Predicate<JobStatus> noSourceUser =
                    job -> !ArrayUtils.contains(keepUserIds, job.getSourceUserId());
            final Predicate<JobStatus> noCallingUser =
                    job -> !ArrayUtils.contains(keepUserIds, job.getUserId());
            removeAll(noSourceUser.or(noCallingUser));
        }

        private void removeAll(Predicate<JobStatus> predicate) {
            for (int jobSetIndex = mJobs.size() - 1; jobSetIndex >= 0; jobSetIndex--) {
                final ArraySet<JobStatus> jobs = mJobs.valueAt(jobSetIndex);
                jobs.removeIf(predicate);
                if (jobs.size() == 0) {
                    mJobs.removeAt(jobSetIndex);
                }
            }
            for (int jobSetIndex = mJobsPerSourceUid.size() - 1; jobSetIndex >= 0; jobSetIndex--) {
                final ArraySet<JobStatus> jobs = mJobsPerSourceUid.valueAt(jobSetIndex);
                jobs.removeIf(predicate);
                if (jobs.size() == 0) {
                    mJobsPerSourceUid.removeAt(jobSetIndex);
                }
            }
        }

        public boolean contains(JobStatus job) {
            final int uid = job.getUid();
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            return jobs != null && jobs.contains(job);
        }

        public JobStatus get(int uid, @Nullable String namespace, int jobId) {
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            if (jobs != null) {
                for (int i = jobs.size() - 1; i >= 0; i--) {
                    JobStatus job = jobs.valueAt(i);
                    if (job.getJobId() == jobId && Objects.equals(namespace, job.getNamespace())) {
                        return job;
                    }
                }
            }
            return null;
        }

        // Inefficient; use only for testing.
        public List<JobStatus> getAllJobs() {
            ArrayList<JobStatus> allJobs = new ArrayList<JobStatus>(size());
            for (int i = mJobs.size() - 1; i >= 0; i--) {
                ArraySet<JobStatus> jobs = mJobs.valueAt(i);
                if (jobs != null) {
                    // Use an indexed loop over the ArraySet so we don't instantiate its
                    // optional collection-class iterator or go through a temporary array
                    // from toArray().
                    for (int j = jobs.size() - 1; j >= 0; j--) {
                        allJobs.add(jobs.valueAt(j));
                    }
                }
            }
            return allJobs;
        }

        public void clear() {
            mJobs.clear();
            mJobsPerSourceUid.clear();
        }

        public int size() {
            int total = 0;
            for (int i = mJobs.size() - 1; i >= 0; i--) {
                total += mJobs.valueAt(i).size();
            }
            return total;
        }

        // We only want to count the jobs that this uid has scheduled on its own
        // behalf, not those that the app has scheduled on someone else's behalf.
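        // For example (hypothetical uids): a job with getUid() == getSourceUid() == 10001 is
        // counted here, whereas a job scheduled by 10001 with getSourceUid() == 10002 is not.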
        public int countJobsForUid(int uid) {
            int total = 0;
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            if (jobs != null) {
                for (int i = jobs.size() - 1; i >= 0; i--) {
                    JobStatus job = jobs.valueAt(i);
                    if (job.getUid() == job.getSourceUid()) {
                        total++;
                    }
                }
            }
            return total;
        }

        public void forEachJob(@Nullable Predicate<JobStatus> filterPredicate,
                @NonNull Consumer<JobStatus> functor) {
            for (int uidIndex = mJobs.size() - 1; uidIndex >= 0; uidIndex--) {
                ArraySet<JobStatus> jobs = mJobs.valueAt(uidIndex);
                if (jobs != null) {
                    for (int i = jobs.size() - 1; i >= 0; i--) {
                        final JobStatus jobStatus = jobs.valueAt(i);
                        if ((filterPredicate == null) || filterPredicate.test(jobStatus)) {
                            functor.accept(jobStatus);
                        }
                    }
                }
            }
        }

        public void forEachJob(int callingUid, Consumer<JobStatus> functor) {
            ArraySet<JobStatus> jobs = mJobs.get(callingUid);
            if (jobs != null) {
                for (int i = jobs.size() - 1; i >= 0; i--) {
                    functor.accept(jobs.valueAt(i));
                }
            }
        }

        public void forEachJobForSourceUid(int sourceUid, Consumer<JobStatus> functor) {
            final ArraySet<JobStatus> jobs = mJobsPerSourceUid.get(sourceUid);
            if (jobs != null) {
                for (int i = jobs.size() - 1; i >= 0; i--) {
                    functor.accept(jobs.valueAt(i));
                }
            }
        }
    }
}