JobSchedulerService

1 Member variables

    /** Master list of jobs. */
    final JobStore mJobs;  //job list
    /** Tracking the standby bucket state of each app */
    final StandbyTracker mStandbyTracker;
    /**
     * Track Services that have currently active or pending jobs. The index is provided by
     * {@link JobStatus#getServiceToken()}
     */
    final List<JobServiceContext> mActiveServices = new ArrayList<>();
    /** List of controllers that will notify this service of updates to jobs. */
    final List<StateController> mControllers;
    /**
     * Queue of pending jobs. The JobServiceContext class will receive jobs from this list
     * when ready to execute them.
     */
    final ArrayList<JobStatus> mPendingJobs = new ArrayList<>();
    /** Need directly for receiving thermal events */
    private IThermalService mThermalService;

    final JobHandler mHandler;
    /**
     * Whether to use heartbeats or rolling window for quota management. True will use
     * heartbeats, false will use a rolling window.
     */
     public boolean USE_HEARTBEATS = DEFAULT_USE_HEARTBEATS; // defaults to false
     // Max job counts for screen on / off, for each memory trim level.
     final MaxJobCountsPerMemoryTrimLevel MAX_JOB_COUNTS_SCREEN_ON =
                new MaxJobCountsPerMemoryTrimLevel(
                        new MaxJobCounts(
                                8, "max_job_total_on_normal",
                                6, "max_job_max_bg_on_normal",
                                2, "max_job_min_bg_on_normal"),
                        new MaxJobCounts(
                                8, "max_job_total_on_moderate",
                                4, "max_job_max_bg_on_moderate",
                                2, "max_job_min_bg_on_moderate"),
                        new MaxJobCounts(
                                5, "max_job_total_on_low",
                                1, "max_job_max_bg_on_low",
                                1, "max_job_min_bg_on_low"),
                        new MaxJobCounts(
                                5, "max_job_total_on_critical",
                                1, "max_job_max_bg_on_critical",
                                1, "max_job_min_bg_on_critical"));

    final MaxJobCountsPerMemoryTrimLevel MAX_JOB_COUNTS_SCREEN_OFF =
                new MaxJobCountsPerMemoryTrimLevel(
                        new MaxJobCounts(
                                10, "max_job_total_off_normal",
                                6, "max_job_max_bg_off_normal",
                                2, "max_job_min_bg_off_normal"),
                        new MaxJobCounts(
                                10, "max_job_total_off_moderate",
                                4, "max_job_max_bg_off_moderate",
                                2, "max_job_min_bg_off_moderate"),
                        new MaxJobCounts(
                                5, "max_job_total_off_low",
                                1, "max_job_max_bg_off_low",
                                1, "max_job_min_bg_off_low"),
                        new MaxJobCounts(
                                5, "max_job_total_off_critical",
                                1, "max_job_max_bg_off_critical",
                                1, "max_job_min_bg_off_critical"));

2 mJobs = JobStore.initAndGet(this);

// Reads the persisted jobs from /data/system/job/jobs.xml, for example:

<job jobid="111007999" package="com.google.android.apps.walletnfcrel" class="com.google.android.libraries.notifications.entrypoints.scheduled.ScheduledTaskService" sourcePackageName="com.google.android.apps.walletnfcrel" sourceUserId="0" uid="10089" priority="0" flags="0" lastSuccessfulRunTime="1594621477078" lastFailedRunTime="1593691857483">
    <constraints net-capabilities="94208" net-unwanted-capabilities="0" net-transport-types="0" />
    <periodic period="86400000" flex="86400000" deadline="1596011419776" delay="1596011419776" />
    <extras>
        <string name="com.google.android.libraries.notifications.INTENT_EXTRA_TASK_HANDLER">PERIODIC_TASK</string>
    </extras>
</job>

<job jobid="3" package="com.google.android.apps.turbo" class="com.google.android.apps.turbo.nudges.battery.LoggingPermissionsJobService" sourcePackageName="com.google.android.apps.turbo" sourceUserId="0" uid="10115" priority="0" flags="0" lastSuccessfulRunTime="1596024208322" lastFailedRunTime="0">
    <constraints idle="true" charging="true" />
    <periodic period="86400000" flex="86400000" deadline="1596184219771" delay="1596097819771" />
    <extras />
</job>

 <job jobid="108" package="com.google.android.apps.turbo" class="com.google.android.libraries.smartbattery.brightness.library.UpdateOfflineModelJob" sourcePackageName="com.google.android.apps.turbo" sourceUserId="0" uid="10115" priority="0" flags="0" lastSuccessfulRunTime="0" lastFailedRunTime="0">
    <constraints charging="true" />
    <one-off delay="1596541727614" />
    <extras />
</job>

constraints: the conditions that must be satisfied before the job can run
periodic: the job's repeat period (and flex window)
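
For reference, a persisted entry like the first one above is what the public API writes when an app schedules a periodic job with setPersisted(true). A minimal client-side sketch (job id, component, and period mirror the XML; error handling omitted):

    import android.app.job.JobInfo;
    import android.app.job.JobScheduler;
    import android.content.ComponentName;
    import android.content.Context;

    static void schedulePersistedJob(Context context) {
        JobInfo job = new JobInfo.Builder(111007999,
                new ComponentName("com.google.android.apps.walletnfcrel",
                        "com.google.android.libraries.notifications.entrypoints.scheduled.ScheduledTaskService"))
                .setRequiredNetworkType(JobInfo.NETWORK_TYPE_ANY)  // serialized as net-capabilities
                .setPeriodic(86400000L, 86400000L)                 // period / flex from the XML, in ms
                .setPersisted(true)                                // written to jobs.xml; needs RECEIVE_BOOT_COMPLETED
                .build();
        JobScheduler js = (JobScheduler)
                context.getSystemService(Context.JOB_SCHEDULER_SERVICE);
        js.schedule(job);
    }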

3 schedule

-> scheduleAsPackage
        -> check isAppStartModeDisabled to confirm whether background execution is allowed
        -> create a JobStatus from the JobInfo
        -> startTrackingJobLocked
            // If the job is immediately ready to run, then we can just immediately
            // put it in the pending list and try to schedule it.  This is especially
            // important for jobs with a 0 deadline constraint, since they will happen a fair
            // amount, we want to handle them as quickly as possible, and semantically we want to
            // make sure we have started holding the wake lock for the job before returning to
            // the caller.
            // If the job is not yet ready to run, there is nothing more to do -- we are
            // now just waiting for one of its controllers to change state and schedule
            // the job appropriately.
        -> isReadyToBeExecutedLocked
            -> maybeRunPendingJobsLocked 
         or -> evaluateControllerStatesLocked
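
Condensed into code, the path looks roughly like this. This is a sketch assuming AOSP Q names (createFromJobInfo, addOrderedItem, mEnqueueTimeComparator); the real method also handles work enqueueing, quotas, and pending-count limits:

    int scheduleAsPackage(JobInfo job, int uid, int userId, String tag) {
        synchronized (mLock) {
            // 1. Refuse if the app may not start from the background.
            if (mActivityManagerInternal.isAppStartModeDisabled(
                    uid, job.getService().getPackageName())) {
                return JobScheduler.RESULT_FAILURE;
            }
            // 2. Wrap the immutable JobInfo in a JobStatus carrying runtime state.
            JobStatus jobStatus = JobStatus.createFromJobInfo(job, uid, null, userId, tag);
            // 3. Let the controllers start tracking its constraints.
            startTrackingJobLocked(jobStatus, null);
            // 4. If already runnable, queue it and kick the pending queue now;
            //    otherwise wait for a controller to report a state change.
            if (isReadyToBeExecutedLocked(jobStatus)) {
                mJobPackageTracker.notePending(jobStatus);
                addOrderedItem(mPendingJobs, jobStatus, mEnqueueTimeComparator);
                maybeRunPendingJobsLocked();
            } else {
                evaluateControllerStatesLocked(jobStatus);
            }
        }
        return JobScheduler.RESULT_SUCCESS;
    }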

3.1 Deciding whether a job can be executed

    /**
     * Criteria for moving a job into the pending queue:
     *      - It's ready.
     *      - It's not pending.
     *      - It's not already running on a JSC.
     *      - The device is not thermally throttled (when it is, jobs that request
     *        the network and are below foreground priority are not run).
     *      - The user that requested the job is running.
     *      - The job's standby bucket has come due to be runnable.
     *      - The component is enabled and runnable.
     */
    private boolean isReadyToBeExecutedLocked(JobStatus job)
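
A sketch of those checks in order, assuming the AOSP Q helpers (isCurrentlyActiveLocked, areUsersStartedLocked, mThermalConstraint) and inlining the thermal test; the real method also logs and handles a few more corner cases:

    private boolean isReadyToBeExecutedLocked(JobStatus job) {
        if (!job.isReady()) return false;                // constraints not yet satisfied
        if (mPendingJobs.contains(job)) return false;    // already in the pending queue
        if (isCurrentlyActiveLocked(job)) return false;  // already running on a JSC
        // Thermal throttling: skip network-requesting jobs below foreground priority.
        if (mThermalConstraint && job.hasConnectivityConstraint()
                && evaluateJobPriorityLocked(job) < JobInfo.PRIORITY_FOREGROUND_APP) {
            return false;
        }
        if (!areUsersStartedLocked(job)) return false;   // requesting user not running
        // Standby-bucket and component-enabled checks elided; see the AOSP source.
        return true;
    }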

3.2 Deciding whether execution is actually needed

    /**
     * Reconcile jobs in the pending queue against available execution contexts.
     * A controller can force a job into the pending queue even if it's already running, but
     * here is where we decide whether to actually execute it.
     */
    void maybeRunPendingJobsLocked() {
        /**
         * Takes jobs from pending queue and runs them on available contexts.
         * If no contexts are available, preempts lower priority jobs to
         * run higher priority ones.
         * Lock on mJobs before calling this function.
         */
        assignJobsToContextsLocked();  // core logic
    }

3.3 assignJobsToContextsLocked

private void assignJobsToContextsInternalLocked() {
        if (DEBUG) {
            Slog.d(TAG, printPendingQueueLocked());
        }

        final JobPackageTracker tracker = mService.mJobPackageTracker;
        final List<JobStatus> pendingJobs = mService.mPendingJobs;  // the pending jobs
        final List<JobServiceContext> activeServices = mService.mActiveServices;  // the active JobServiceContexts
        final List<StateController> controllers = mService.mControllers;  // the state controllers

        updateMaxCountsLocked();  // refresh the max job counts for the current screen/memory state

        // To avoid GC churn, we recycle the arrays.
        JobStatus[] contextIdToJobMap = mRecycledAssignContextIdToJobMap;  // indexed by slot in mActiveServices
        boolean[] slotChanged = mRecycledSlotChanged;
        int[] preferredUidForContext = mRecycledPreferredUidForContext;


        // Initialize the work variables and also count running jobs.
        mJobCountTracker.reset(
                mMaxJobCounts.getMaxTotal(),
                mMaxJobCounts.getMaxBg(),
                mMaxJobCounts.getMinBg());

        for (int i=0; i<MAX_JOB_CONTEXTS_COUNT; i++) {
            final JobServiceContext js = mService.mActiveServices.get(i);
            final JobStatus status = js.getRunningJobLocked();  // the currently running JobStatus, or null

            if ((contextIdToJobMap[i] = status) != null) {  // record the running JobStatus (possibly null) in contextIdToJobMap
                mJobCountTracker.incrementRunningJobCount(isFgJob(status));
            }

            slotChanged[i] = false;
            preferredUidForContext[i] = js.getPreferredUid();
        }
        if (DEBUG) {
            Slog.d(TAG, printContextIdToJobMap(contextIdToJobMap, "running jobs initial"));
        }

        // Next, update the job priorities, and also count the pending FG / BG jobs.
        for (int i = 0; i < pendingJobs.size(); i++) {
            final JobStatus pending = pendingJobs.get(i);

            // If job is already running, go to next job.
            int jobRunningContext = findJobContextIdFromMap(pending, contextIdToJobMap);
            if (jobRunningContext != -1) {
                continue;
            }

            final int priority = mService.evaluateJobPriorityLocked(pending);  // evaluate the priority of each pending job that isn't already running
            pending.lastEvaluatedPriority = priority;  // cache it for the assignment pass below

            mJobCountTracker.incrementPendingJobCount(isFgJob(pending));
        }

        mJobCountTracker.onCountDone();

        for (int i = 0; i < pendingJobs.size(); i++) {
            final JobStatus nextPending = pendingJobs.get(i);  // the next pending job

            // Unfortunately we need to repeat this relatively expensive check.
            int jobRunningContext = findJobContextIdFromMap(nextPending, contextIdToJobMap);
            if (jobRunningContext != -1) {
                continue;
            }

            final boolean isPendingFg = isFgJob(nextPending);  // foreground job? (evaluated priority >= PRIORITY_TOP_APP)

            // Find an available slot for nextPending. The context should be available OR
            // it should have lowest priority among all running jobs
            // (sharing the same Uid as nextPending)
            int minPriorityForPreemption = Integer.MAX_VALUE;
            int selectedContextId = -1;
            boolean startingJob = false;
            for (int j=0; j<MAX_JOB_CONTEXTS_COUNT; j++) {
                JobStatus job = contextIdToJobMap[j];  // the job currently assigned to this slot
                int preferredUid = preferredUidForContext[j];  // this slot's preferred uid
                if (job == null) {  // no job is running on this JobServiceContext
                    final boolean preferredUidOkay = (preferredUid == nextPending.getUid())
                            || (preferredUid == JobServiceContext.NO_PREFERRED_UID);

                    if (preferredUidOkay && mJobCountTracker.canJobStart(isPendingFg)) {
                        // This slot is free, and we haven't yet hit the limit on
                        // concurrent jobs...  we can just throw the job in to here.
                        selectedContextId = j;
                        startingJob = true;
                        break;
                    }
                    // No job on this context, but nextPending can't run here because
                    // the context has a preferred Uid or we have reached the limit on
                    // concurrent jobs.
                    continue;
                }
                if (job.getUid() != nextPending.getUid()) {
                    continue;
                }

                final int jobPriority = mService.evaluateJobPriorityLocked(job);  // evaluate the running job's priority
                if (jobPriority >= nextPending.lastEvaluatedPriority) {
                    continue;
                }

                // TODO lastEvaluatedPriority should be evaluateJobPriorityLocked. (double check it)
                if (minPriorityForPreemption > nextPending.lastEvaluatedPriority) {
                    minPriorityForPreemption = nextPending.lastEvaluatedPriority;
                    selectedContextId = j;
                    // In this case, we're just going to preempt a low priority job, we're not
                    // actually starting a job, so don't set startingJob.
                }
            }
            if (selectedContextId != -1) {
                contextIdToJobMap[selectedContextId] = nextPending;
                slotChanged[selectedContextId] = true;
            }
            if (startingJob) {
                // Increase the counters when we're going to start a job.
                mJobCountTracker.onStartingNewJob(isPendingFg);
            }
        }
        if (DEBUG) {
            Slog.d(TAG, printContextIdToJobMap(contextIdToJobMap, "running jobs final"));
        }

        mJobCountTracker.logStatus();

        tracker.noteConcurrency(mJobCountTracker.getTotalRunningJobCountToNote(),
                mJobCountTracker.getFgRunningJobCountToNote());

        for (int i=0; i<MAX_JOB_CONTEXTS_COUNT; i++) {  // walk the slots again to apply the new assignments
            boolean preservePreferredUid = false;
            if (slotChanged[i]) {  // did this slot's assignment change?
                JobStatus js = activeServices.get(i).getRunningJobLocked();  // the job currently running in this slot
                if (js != null) {
                    if (DEBUG) {
                        Slog.d(TAG, "preempting job: "
                                + activeServices.get(i).getRunningJobLocked());
                    }
                    // preferredUid will be set to uid of currently running job.
                    activeServices.get(i).preemptExecutingJobLocked();  // preempt the running job
                    preservePreferredUid = true;
                } else {
                    final JobStatus pendingJob = contextIdToJobMap[i];
                    if (DEBUG) {
                        Slog.d(TAG, "About to run job on context "
                                + i + ", job: " + pendingJob);
                    }
                    for (int ic=0; ic<controllers.size(); ic++) {
                        controllers.get(ic).prepareForExecutionLocked(pendingJob);  // let each controller prepare the job for execution
                    }
                    if (!activeServices.get(i).executeRunnableJob(pendingJob)) {  // hand the job to this context to execute
                        Slog.d(TAG, "Error executing " + pendingJob);
                    }
                    if (pendingJobs.remove(pendingJob)) {  // remove it from the pending queue
                        tracker.noteNonpending(pendingJob);
                    }
                }
            }
            if (!preservePreferredUid) {
                activeServices.get(i).clearPreferredUid();
            }
        }
    }
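
For context, the two helpers the passes above lean on, condensed from their AOSP Q behavior:

    // Returns the slot index whose assigned job matches this uid/jobId, or -1.
    static int findJobContextIdFromMap(JobStatus jobStatus, JobStatus[] map) {
        for (int i = 0; i < map.length; i++) {
            if (map[i] != null && map[i].matches(jobStatus.getUid(), jobStatus.getJobId())) {
                return i;
            }
        }
        return -1;
    }

    // "Foreground" for concurrency accounting: evaluated priority reached TOP_APP.
    private boolean isFgJob(JobStatus job) {
        return job.lastEvaluatedPriority >= JobInfo.PRIORITY_TOP_APP;
    }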