1 成员变量
/** Master list of jobs. */
final JobStore mJobs;
/** Tracking the standby bucket state of each app */
final StandbyTracker mStandbyTracker;
/**
 * Track Services that have currently active or pending jobs. The index is provided by
 * {@link JobStatus#getServiceToken()}
 */
final List<JobServiceContext> mActiveServices = new ArrayList<>();
/** List of controllers that will notify this service of updates to jobs. */
final List<StateController> mControllers;
/**
 * Queue of pending jobs. The JobServiceContext class will receive jobs from this list
 * when ready to execute them.
 */
final ArrayList<JobStatus> mPendingJobs = new ArrayList<>();
/** Need directly for receiving thermal events */
private IThermalService mThermalService;
final JobHandler mHandler;
/**
 * Whether to use heartbeats or rolling window for quota management. True will use
 * heartbeats, false will use a rolling window.
 */
public boolean USE_HEARTBEATS = DEFAULT_USE_HEARTBEATS; // defaults to false (DEFAULT_USE_HEARTBEATS)
// Max job counts for screen on / off, for each memory trim level.
// Each MaxJobCounts appears to take (total, key, max-bg, key, min-bg, key) triplets,
// judging by the config-key names — confirm against the MaxJobCounts constructor.
final MaxJobCountsPerMemoryTrimLevel MAX_JOB_COUNTS_SCREEN_ON =
        new MaxJobCountsPerMemoryTrimLevel(
                new MaxJobCounts(
                        8, "max_job_total_on_normal",
                        6, "max_job_max_bg_on_normal",
                        2, "max_job_min_bg_on_normal"),
                new MaxJobCounts(
                        8, "max_job_total_on_moderate",
                        4, "max_job_max_bg_on_moderate",
                        2, "max_job_min_bg_on_moderate"),
                new MaxJobCounts(
                        5, "max_job_total_on_low",
                        1, "max_job_max_bg_on_low",
                        1, "max_job_min_bg_on_low"),
                new MaxJobCounts(
                        5, "max_job_total_on_critical",
                        1, "max_job_max_bg_on_critical",
                        1, "max_job_min_bg_on_critical"));
// Screen-off limits are slightly more generous at the "normal"/"moderate" trim levels
// (10 vs 8 total) than the screen-on limits above.
final MaxJobCountsPerMemoryTrimLevel MAX_JOB_COUNTS_SCREEN_OFF =
        new MaxJobCountsPerMemoryTrimLevel(
                new MaxJobCounts(
                        10, "max_job_total_off_normal",
                        6, "max_job_max_bg_off_normal",
                        2, "max_job_min_bg_off_normal"),
                new MaxJobCounts(
                        10, "max_job_total_off_moderate",
                        4, "max_job_max_bg_off_moderate",
                        2, "max_job_min_bg_off_moderate"),
                new MaxJobCounts(
                        5, "max_job_total_off_low",
                        1, "max_job_max_bg_off_low",
                        1, "max_job_min_bg_off_low"),
                new MaxJobCounts(
                        5, "max_job_total_off_critical",
                        1, "max_job_max_bg_off_critical",
                        1, "max_job_min_bg_off_critical"));
2 mJobs = JobStore.initAndGet(this);
//读取/data/system/job/jobs.xml中的persist的job任务
<job jobid="111007999" package="com.google.android.apps.walletnfcrel" class="com.google.android.libraries.notifications.entrypoints.scheduled.ScheduledTaskService" sourcePackageName="com.google.android.apps.walletnfcrel" sourceUserId="0" uid="10089" priority="0" flags="0" lastSuccessfulRunTime="1594621477078" lastFailedRunTime="1593691857483">
<constraints net-capabilities="94208" net-unwanted-capabilities="0" net-transport-types="0" />
<periodic period="86400000" flex="86400000" deadline="1596011419776" delay="1596011419776" />
<extras>
<string name="com.google.android.libraries.notifications.INTENT_EXTRA_TASK_HANDLER">PERIODIC_TASK</string>
</extras>
</job>
<job jobid="3" package="com.google.android.apps.turbo" class="com.google.android.apps.turbo.nudges.battery.LoggingPermissionsJobService" sourcePackageName="com.google.android.apps.turbo" sourceUserId="0" uid="10115" priority="0" flags="0" lastSuccessfulRunTime="1596024208322" lastFailedRunTime="0">
<constraints idle="true" charging="true" />
<periodic period="86400000" flex="86400000" deadline="1596184219771" delay="1596097819771" />
<extras />
</job>
<job jobid="108" package="com.google.android.apps.turbo" class="com.google.android.libraries.smartbattery.brightness.library.UpdateOfflineModelJob" sourcePackageName="com.google.android.apps.turbo" sourceUserId="0" uid="10115" priority="0" flags="0" lastSuccessfulRunTime="0" lastFailedRunTime="0">
<constraints charging="true" />
<one-off delay="1596541727614" />
<extras />
</job>
constraints: 满足条件
periodic:job的周期
3 schedule
-> scheduleAsPackage
-> 判断isAppStartModeDisabled,确认是否允许后台执行.
-> 根据jobinfo创建jobStatus
-> startTrackingJobLocked
// If the job is immediately ready to run, then we can just immediately
// put it in the pending list and try to schedule it. This is especially
// important for jobs with a 0 deadline constraint, since they will happen a fair
// amount, we want to handle them as quickly as possible, and semantically we want to
// make sure we have started holding the wake lock for the job before returning to
// the caller.
// If the job is not yet ready to run, there is nothing more to do -- we are
// now just waiting for one of its controllers to change state and schedule
// the job appropriately.
-> isReadyToBeExecutedLocked
-> maybeRunPendingJobsLocked
or -> evaluateControllerStatesLocked
3.1 判断是否可以执行job
/**
* Criteria for moving a job into the pending queue:
* - It's ready.
* - It's not pending.
* - It's not already running on a JSC.
 * - The device temperature is not too high (when thermally throttled, jobs that
 *   request the network or have low priority are not run).
* - The user that requested the job is running.
* - The job's standby bucket has come due to be runnable.
* - The component is enabled and runnable.
*/
private boolean isReadyToBeExecutedLocked(JobStatus job)
3.2 决定是否实际需要执行
/**
* ** Reconcile jobs in the pending queue against available execution contexts.**
* A controller can force a job into the pending queue even if it's already running, but
* here is where we decide whether to actually execute it.
*/
void maybeRunPendingJobsLocked() {
/**
* Takes jobs from pending queue and runs them on available contexts.
* If no contexts are available, preempts lower priority jobs to
* run higher priority ones.
* Lock on mJobs before calling this function.
*/
assignJobsToContextsLocked//核心逻辑
3.3 assignJobsToContextsLocked
/**
 * Reconciles the pending-job queue against the fixed pool of execution slots
 * (JobServiceContexts): counts what is already running, then for each pending job
 * either claims a free slot (subject to the concurrency limits) or preempts a
 * lower-priority running job from the same UID, and finally applies the computed
 * slot assignments (preempting / starting jobs as needed).
 */
private void assignJobsToContextsInternalLocked() {
    if (DEBUG) {
        Slog.d(TAG, printPendingQueueLocked());
    }
    final JobPackageTracker tracker = mService.mJobPackageTracker;
    final List<JobStatus> pendingJobs = mService.mPendingJobs; // jobs waiting to run
    final List<JobServiceContext> activeServices = mService.mActiveServices; // execution slots
    final List<StateController> controllers = mService.mControllers; // state controllers
    updateMaxCountsLocked(); // refresh mMaxJobCounts for the current device state
    // To avoid GC churn, we recycle the arrays.
    JobStatus[] contextIdToJobMap = mRecycledAssignContextIdToJobMap; // parallel to activeServices
    boolean[] slotChanged = mRecycledSlotChanged;
    int[] preferredUidForContext = mRecycledPreferredUidForContext;
    // Initialize the work variables and also count running jobs.
    mJobCountTracker.reset(
            mMaxJobCounts.getMaxTotal(),
            mMaxJobCounts.getMaxBg(),
            mMaxJobCounts.getMinBg());
    for (int i=0; i<MAX_JOB_CONTEXTS_COUNT; i++) {
        final JobServiceContext js = mService.mActiveServices.get(i);
        final JobStatus status = js.getRunningJobLocked();
        // Record each slot's current job (possibly null); only non-null entries
        // count toward the running totals.
        if ((contextIdToJobMap[i] = status) != null) {
            mJobCountTracker.incrementRunningJobCount(isFgJob(status));
        }
        slotChanged[i] = false;
        preferredUidForContext[i] = js.getPreferredUid();
    }
    if (DEBUG) {
        Slog.d(TAG, printContextIdToJobMap(contextIdToJobMap, "running jobs initial"));
    }
    // Next, update the job priorities, and also count the pending FG / BG jobs.
    for (int i = 0; i < pendingJobs.size(); i++) {
        final JobStatus pending = pendingJobs.get(i);
        // If job is already running, go to next job.
        int jobRunningContext = findJobContextIdFromMap(pending, contextIdToJobMap);
        if (jobRunningContext != -1) {
            continue;
        }
        // Evaluate priority only for pending jobs that aren't already running,
        // and cache it for the assignment pass below.
        final int priority = mService.evaluateJobPriorityLocked(pending);
        pending.lastEvaluatedPriority = priority;
        mJobCountTracker.incrementPendingJobCount(isFgJob(pending));
    }
    mJobCountTracker.onCountDone();
    // Assignment pass: pick a slot for each pending job, or skip it this round.
    for (int i = 0; i < pendingJobs.size(); i++) {
        final JobStatus nextPending = pendingJobs.get(i);
        // Unfortunately we need to repeat this relatively expensive check.
        int jobRunningContext = findJobContextIdFromMap(nextPending, contextIdToJobMap);
        if (jobRunningContext != -1) {
            continue;
        }
        // Whether this job counts as "foreground" for the concurrency limits
        // (per isFgJob(); the notes say priority >= TOP_APP — confirm).
        final boolean isPendingFg = isFgJob(nextPending);
        // Find an available slot for nextPending. The context should be available OR
        // it should have lowest priority among all running jobs
        // (sharing the same Uid as nextPending)
        int minPriorityForPreemption = Integer.MAX_VALUE;
        int selectedContextId = -1;
        boolean startingJob = false;
        for (int j=0; j<MAX_JOB_CONTEXTS_COUNT; j++) {
            JobStatus job = contextIdToJobMap[j]; // job currently assigned to slot j, or null
            int preferredUid = preferredUidForContext[j]; // UID with a reserved claim on slot j
            if (job == null) { // slot j is free
                final boolean preferredUidOkay = (preferredUid == nextPending.getUid())
                        || (preferredUid == JobServiceContext.NO_PREFERRED_UID);
                if (preferredUidOkay && mJobCountTracker.canJobStart(isPendingFg)) {
                    // This slot is free, and we haven't yet hit the limit on
                    // concurrent jobs... we can just throw the job in to here.
                    selectedContextId = j;
                    startingJob = true;
                    break;
                }
                // No job on this context, but nextPending can't run here because
                // the context has a preferred Uid or we have reached the limit on
                // concurrent jobs.
                continue;
            }
            // Preemption is only considered within the same UID.
            if (job.getUid() != nextPending.getUid()) {
                continue;
            }
            // Re-evaluate the running job's priority; only jobs strictly lower
            // than the pending job's cached priority are preemption candidates.
            final int jobPriority = mService.evaluateJobPriorityLocked(job);
            if (jobPriority >= nextPending.lastEvaluatedPriority) {
                continue;
            }
            // TODO lastEvaluatedPriority should be evaluateJobPriorityLocked. (double check it)
            if (minPriorityForPreemption > nextPending.lastEvaluatedPriority) {
                minPriorityForPreemption = nextPending.lastEvaluatedPriority;
                selectedContextId = j;
                // In this case, we're just going to preempt a low priority job, we're not
                // actually starting a job, so don't set startingJob.
            }
        }
        if (selectedContextId != -1) {
            contextIdToJobMap[selectedContextId] = nextPending;
            slotChanged[selectedContextId] = true;
        }
        if (startingJob) {
            // Increase the counters when we're going to start a job.
            mJobCountTracker.onStartingNewJob(isPendingFg);
        }
    }
    if (DEBUG) {
        Slog.d(TAG, printContextIdToJobMap(contextIdToJobMap, "running jobs final"));
    }
    mJobCountTracker.logStatus();
    tracker.noteConcurrency(mJobCountTracker.getTotalRunningJobCountToNote(),
            mJobCountTracker.getFgRunningJobCountToNote());
    // Apply the computed assignments to the actual JobServiceContexts.
    for (int i=0; i<MAX_JOB_CONTEXTS_COUNT; i++) {
        boolean preservePreferredUid = false;
        if (slotChanged[i]) { // this slot's assignment changed above
            JobStatus js = activeServices.get(i).getRunningJobLocked();
            if (js != null) {
                // Slot still has a running job: it was marked for preemption.
                if (DEBUG) {
                    Slog.d(TAG, "preempting job: "
                            + activeServices.get(i).getRunningJobLocked());
                }
                // preferredUid will be set to uid of currently running job.
                activeServices.get(i).preemptExecutingJobLocked();
                preservePreferredUid = true;
            } else {
                // Slot is empty: start the newly assigned job.
                final JobStatus pendingJob = contextIdToJobMap[i];
                if (DEBUG) {
                    Slog.d(TAG, "About to run job on context "
                            + i + ", job: " + pendingJob);
                }
                // Give every controller a chance to prepare before execution.
                for (int ic=0; ic<controllers.size(); ic++) {
                    controllers.get(ic).prepareForExecutionLocked(pendingJob);
                }
                // Hand the job to the context for execution.
                if (!activeServices.get(i).executeRunnableJob(pendingJob)) {
                    Slog.d(TAG, "Error executing " + pendingJob);
                }
                // Started (or attempted): it is no longer pending.
                if (pendingJobs.remove(pendingJob)) {
                    tracker.noteNonpending(pendingJob);
                }
            }
        }
        // Slots not involved in a preemption release any UID reservation.
        if (!preservePreferredUid) {
            activeServices.get(i).clearPreferredUid();
        }
    }
}