iOS Internals 26: Exploring the GCD Source Code (Part 1)

The source explored here is libdispatch-1173.60.1.

Creating Queues

We start from dispatch_queue_create.
1. dispatch_queue_create

  • Search for dispatch_queue_create
    The source is found in queue.c.

    It actually calls _dispatch_lane_create_with_target, passing DISPATCH_TARGET_QUEUE_DEFAULT by default.
  • Click into _dispatch_lane_create_with_target
DISPATCH_NOINLINE
static dispatch_queue_t
_dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa,
        dispatch_queue_t tq, bool legacy)
{
    // Build a dqai (attr info) from the attribute dqa
    dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa);


    // Normalize the fields in dqai based on a few conditions
    dispatch_qos_t qos = dqai.dqai_qos;
        // Resolve dqai_qos
#if !HAVE_PTHREAD_WORKQUEUE_QOS
    if (qos == DISPATCH_QOS_USER_INTERACTIVE) {
        dqai.dqai_qos = qos = DISPATCH_QOS_USER_INITIATED;
    }
    if (qos == DISPATCH_QOS_MAINTENANCE) {
        dqai.dqai_qos = qos = DISPATCH_QOS_BACKGROUND;
    }
#endif // !HAVE_PTHREAD_WORKQUEUE_QOS
        // Resolve dqai_overcommit
    _dispatch_queue_attr_overcommit_t overcommit = dqai.dqai_overcommit;
    if (overcommit != _dispatch_queue_attr_overcommit_unspecified && tq) {
        if (tq->do_targetq) {
            DISPATCH_CLIENT_CRASH(tq, "Cannot specify both overcommit and "
                    "a non-global target queue");
        }
    }

    if (tq && dx_type(tq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) {
        // Handle discrepancies between attr and target queue, attributes win
        if (overcommit == _dispatch_queue_attr_overcommit_unspecified) {
            if (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) {
                overcommit = _dispatch_queue_attr_overcommit_enabled;
            } else {
                overcommit = _dispatch_queue_attr_overcommit_disabled;
            }
        }
        if (qos == DISPATCH_QOS_UNSPECIFIED) {
            qos = _dispatch_priority_qos(tq->dq_priority);
        }
        tq = NULL;
    } else if (tq && !tq->do_targetq) {
        // target is a pthread or runloop root queue, setting QoS or overcommit
        // is disallowed
        if (overcommit != _dispatch_queue_attr_overcommit_unspecified) {
            DISPATCH_CLIENT_CRASH(tq, "Cannot specify an overcommit attribute "
                    "and use this kind of target queue");
        }
    } else {
        if (overcommit == _dispatch_queue_attr_overcommit_unspecified) {
            // Serial queues default to overcommit!
            overcommit = dqai.dqai_concurrent ?
                    _dispatch_queue_attr_overcommit_disabled :
                    _dispatch_queue_attr_overcommit_enabled;
        }
    }
    if (!tq) {
        tq = _dispatch_get_root_queue(
                qos == DISPATCH_QOS_UNSPECIFIED ? DISPATCH_QOS_DEFAULT : qos,
                overcommit == _dispatch_queue_attr_overcommit_enabled)->_as_dq;
        if (unlikely(!tq)) {
            DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute");
        }
    }
    
        // Prepare to initialize the queue
        // legacy is passed as true when called from dispatch_queue_create
    if (legacy) {
        // if any of these attributes is specified, use non legacy classes
        if (dqai.dqai_inactive || dqai.dqai_autorelease_frequency) {
            legacy = false;
        }
    }

    // vtable is the address of OS_dispatch_queue_concurrent_class or OS_dispatch_queue_serial_class
    const void *vtable;
    dispatch_queue_flags_t dqf = legacy ? DQF_MUTABLE : 0;
    if (dqai.dqai_concurrent) {
        vtable = DISPATCH_VTABLE(queue_concurrent);
    } else {
        vtable = DISPATCH_VTABLE(queue_serial);
    }
    // Handle dqf
    switch (dqai.dqai_autorelease_frequency) {
    case DISPATCH_AUTORELEASE_FREQUENCY_NEVER:
        dqf |= DQF_AUTORELEASE_NEVER;
        break;
    case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM:
        dqf |= DQF_AUTORELEASE_ALWAYS;
        break;
    }
    if (label) {
        const char *tmp = _dispatch_strdup_if_mutable(label);
        if (tmp != label) {
            dqf |= DQF_LABEL_NEEDS_FREE;
            label = tmp;
        }
    }
     // Allocate the queue object
    dispatch_lane_t dq = _dispatch_object_alloc(vtable,
            sizeof(struct dispatch_lane_s)); // alloc
    // Initialize the queue
    // Passing width 1 makes it a serial queue
    _dispatch_queue_init(dq, dqf, dqai.dqai_concurrent ?
            DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER |
            (dqai.dqai_inactive ? DISPATCH_QUEUE_INACTIVE : 0)); // init
    // Assign the label
    dq->dq_label = label;
    // Priority handling
    dq->dq_priority = _dispatch_priority_make((dispatch_qos_t)dqai.dqai_qos,
            dqai.dqai_relpri);
    if (overcommit == _dispatch_queue_attr_overcommit_enabled) {
        dq->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
    }
    if (!dqai.dqai_inactive) {
        _dispatch_queue_priority_inherit_from_target(dq, tq);
        _dispatch_lane_inherit_wlh_from_target(dq, tq);
    }
    _dispatch_retain(tq);
    dq->do_targetq = tq;
    _dispatch_object_debug(dq, "%s", __func__);
    return _dispatch_trace_queue_create(dq)._dq;
}
  • Step into _dispatch_object_alloc
void *
_dispatch_object_alloc(const void *vtable, size_t size)
{
#if OS_OBJECT_HAVE_OBJC1
    const struct dispatch_object_vtable_s *_vtable = vtable;
    dispatch_object_t dou;
    dou._os_obj = _os_object_alloc_realized(_vtable->_os_obj_objc_isa, size);
    dou._do->do_vtable = vtable;
    return dou._do;
#else
    return _os_object_alloc_realized(vtable, size);
#endif
}
  • It then goes into _os_object_alloc_realized
inline _os_object_t
_os_object_alloc_realized(const void *cls, size_t size)
{
      // Allocate a block of memory and point obj at it
    _os_object_t obj;
    dispatch_assert(size >= sizeof(struct _os_object_s));
    while (unlikely(!(obj = calloc(1u, size)))) {
        _dispatch_temporary_resource_shortage();
    }
      // Point obj's isa to the class cls
    obj->os_obj_isa = cls;
    return obj;
}

As we can see, a queue is itself an object. The dispatch_queue_attr_t attr argument determines, via the name-concatenating macro DISPATCH_VTABLE, which class address is used (OS_dispatch_queue_concurrent_class or OS_dispatch_queue_serial_class), and dqai.dqai_concurrent — initialized from attr — decides whether the width passed to _dispatch_queue_init is DISPATCH_QUEUE_WIDTH_MAX or 1. That width is what makes a queue concurrent or serial.

#define DISPATCH_QUEUE_WIDTH_FULL           0x1000ull
#define DISPATCH_QUEUE_WIDTH_POOL (DISPATCH_QUEUE_WIDTH_FULL - 1)
#define DISPATCH_QUEUE_WIDTH_MAX  (DISPATCH_QUEUE_WIDTH_FULL - 2)
(lldb) p 0x1000ull → 4096, so DISPATCH_QUEUE_WIDTH_MAX is 4094
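
As a quick user-level sanity check, here is a minimal sketch (the labels and function name below are made up for illustration): the attr argument alone decides which width reaches _dispatch_queue_init.

#include <dispatch/dispatch.h>

dispatch_queue_t serial_q;
dispatch_queue_t concurrent_q;

void make_queues(void) {
    // attr == DISPATCH_QUEUE_SERIAL (i.e. NULL) → dqai_concurrent is false → width 1
    serial_q = dispatch_queue_create("com.example.serial", DISPATCH_QUEUE_SERIAL);
    // attr == DISPATCH_QUEUE_CONCURRENT → width DISPATCH_QUEUE_WIDTH_MAX (4094)
    concurrent_q = dispatch_queue_create("com.example.concurrent", DISPATCH_QUEUE_CONCURRENT);
}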

2. dispatch_get_global_queue (the global queues)

  • Search for dispatch_get_global_queue
dispatch_queue_global_t
dispatch_get_global_queue(long priority, unsigned long flags)
{
    dispatch_assert(countof(_dispatch_root_queues) ==
            DISPATCH_ROOT_QUEUE_COUNT);

    if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) {
        return DISPATCH_BAD_INPUT;
    }
    dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority);
#if !HAVE_PTHREAD_WORKQUEUE_QOS
    if (qos == QOS_CLASS_MAINTENANCE) {
        qos = DISPATCH_QOS_BACKGROUND;
    } else if (qos == QOS_CLASS_USER_INTERACTIVE) {
        qos = DISPATCH_QOS_USER_INITIATED;
    }
#endif
    if (qos == DISPATCH_QOS_UNSPECIFIED) {
        return DISPATCH_BAD_INPUT;
    }                                               // DISPATCH_QUEUE_OVERCOMMIT = 0x2ull = 2
    return _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT);
}
  • Search for _dispatch_get_root_queue
DISPATCH_ALWAYS_INLINE DISPATCH_CONST
static inline dispatch_queue_global_t
_dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit)
{
    if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) {
        DISPATCH_CLIENT_CRASH(qos, "Corrupted priority");
    }
    return &_dispatch_root_queues[2 * (qos - 1) + overcommit];
}

It simply pulls an element out of the global root-queue array. In other words, the global queues are created ahead of time by the system and stored in _dispatch_root_queues; when we ask for one, the matching element is handed back to us.
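
A small usage sketch (the function name is made up) tracing the index math above — assuming, as in this source drop, that DISPATCH_QUEUE_PRIORITY_DEFAULT maps to DISPATCH_QOS_DEFAULT (4):

#include <dispatch/dispatch.h>

void get_default_global_queue(void) {
    // _dispatch_get_root_queue uses 2 * (qos - 1) + overcommit, so with
    // qos == DISPATCH_QOS_DEFAULT (4) and overcommit == 0 this should return
    // _dispatch_root_queues[6].
    dispatch_queue_t q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    (void)q;
}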

3. dispatch_get_main_queue (the main queue)

  • The main queue already exists by the time libdispatch_init() has run
#define _dispatch_get_default_queue(overcommit) \
        _dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS + \
                !!(overcommit)]._as_dq

The macro above, _dispatch_get_default_queue, fetches the default root queue directly from the _dispatch_root_queues array by index; the main queue itself is the statically defined global _dispatch_main_q, which dispatch_get_main_queue simply returns.

  • Search for dispatch_get_main_queue
#define DISPATCH_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object))  
// Bridge-casts object to the given type

dispatch_queue_main_t
dispatch_get_main_queue(void)
{
    // Return the main queue directly
    return DISPATCH_GLOBAL_OBJECT(dispatch_queue_main_t, _dispatch_main_q);
}
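
A quick check from user code (a minimal sketch; the function name is made up): dispatch_get_main_queue always hands back the same global object, whose label is "com.apple.main-thread".

#include <dispatch/dispatch.h>
#include <stdio.h>

void print_main_queue_label(void) {
    dispatch_queue_t main_q = dispatch_get_main_queue();
    printf("%s\n", dispatch_queue_get_label(main_q)); // com.apple.main-thread
}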

Synchronous and Asynchronous Functions

1. How the asynchronous function works
  • Search for dispatch_async(dis — the first parameter is a dispatch_queue_t
void
dispatch_async(dispatch_queue_t dq, dispatch_block_t work)
{
    dispatch_continuation_t dc = _dispatch_continuation_alloc();
    uintptr_t dc_flags = DC_FLAG_CONSUME;
    dispatch_qos_t qos;

    // Task wrapper: accept, save, in functional style
    // Save the block
    qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags);
    _dispatch_continuation_async(dq, dc, qos, dc->dc_flags);
}

_dispatch_continuation_init wraps the block into the continuation dc and returns a qos.

  • Search for _dispatch_continuation_init(dis
static inline dispatch_qos_t
_dispatch_continuation_init(dispatch_continuation_t dc,
        dispatch_queue_class_t dqu, dispatch_block_t work,
        dispatch_block_flags_t flags, uintptr_t dc_flags)
{
    // Copy the incoming work block
    void *ctxt = _dispatch_Block_copy(work);

    dc_flags |= DC_FLAG_BLOCK | DC_FLAG_ALLOCATED;
    // unlikely() marks this branch as improbable
    if (unlikely(_dispatch_block_has_private_data(work))) {
        dc->dc_flags = dc_flags;
        dc->dc_ctxt = ctxt;
        // will initialize all fields but requires dc_flags & dc_ctxt to be set
        return _dispatch_continuation_init_slow(dc, dqu, flags);
    }
     // Convert work into a dispatch_function_t func
    dispatch_function_t func = _dispatch_Block_invoke(work);
    if (dc_flags & DC_FLAG_CONSUME) {
        // _dispatch_call_block_and_release will, when invoked later, run the block and then release it
        func = _dispatch_call_block_and_release;
    }
    // Store ctxt and func into dc and return a dispatch_qos_t
    return _dispatch_continuation_init_f(dc, dqu, ctxt, func, flags, dc_flags);
}
  • Search for _dispatch_continuation_init_f(dis
static inline dispatch_qos_t
_dispatch_continuation_init_f(dispatch_continuation_t dc,
        dispatch_queue_class_t dqu, void *ctxt, dispatch_function_t f,
        dispatch_block_flags_t flags, uintptr_t dc_flags)
{     
    // Save func, ctxt, etc. into dc
    pthread_priority_t pp = 0;
    dc->dc_flags = dc_flags | DC_FLAG_ALLOCATED;
    dc->dc_func = f;
    dc->dc_ctxt = ctxt;
    // in this context DISPATCH_BLOCK_HAS_PRIORITY means that the priority
    // should not be propagated, only taken from the handler if it has one
    if (!(flags & DISPATCH_BLOCK_HAS_PRIORITY)) {
        pp = _dispatch_priority_propagate();
    }
    // Set dc->dc_voucher
    _dispatch_continuation_voucher_set(dc, flags);
    // Return a dispatch_qos_t
    return _dispatch_continuation_priority_set(dc, dqu, pp, flags);
}
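
Stripped of the details, what these init functions build is just a function pointer plus a context pointer stored in the continuation. A minimal sketch of that idea (the types and names below are made up, not libdispatch's real ones):

typedef void (*work_func_t)(void *ctxt);

struct my_continuation {            // stands in for dispatch_continuation_s
    work_func_t func;               // ≈ dc_func
    void       *ctxt;               // ≈ dc_ctxt (the copied block)
};

static void my_continuation_invoke(struct my_continuation *c) {
    c->func(c->ctxt);               // the same shape the callout takes later on
}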

These are all functions that save the task. Next, let's analyze _dispatch_continuation_async.

  • Search for _dispatch_continuation_async(dis
static inline void
_dispatch_continuation_async(dispatch_queue_class_t dqu,
        dispatch_continuation_t dc, dispatch_qos_t qos, uintptr_t dc_flags)
{
#if DISPATCH_INTROSPECTION
    if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) {
        // Introspection/trace record
        _dispatch_trace_item_push(dqu, dc);
    }
#else
    (void)dc_flags;
#endif
    return dx_push(dqu._dq, dc, qos);
}

The key call is dx_push.
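
For reference, dx_push belongs to the same family of vtable macros as dx_invoke (shown later). Paraphrased from this source drop — treat the exact expansions as approximate — they look like this:

#define dx_vtable(x)        (&(x)->do_vtable->_os_obj_vtable)
#define dx_push(x, y, z)    dx_vtable(x)->dq_push(x, y, z)
#define dx_wakeup(x, y, z)  dx_vtable(x)->dq_wakeup(x, y, z)
#define dx_invoke(x, y, z)  dx_vtable(x)->do_invoke(x, y, z)

So dx_push simply jumps to whatever dq_push function the queue's vtable provides.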

  • Search for dx_push — it is found among the macro definitions


    What we care about is: what does dq_push do?

  • Search for dq_push


    Here we see many DISPATCH_VTABLE_SUBCLASS_INSTANCE entries. They are all vtable instances, and the vtable is determined by the queue type.

    Normally what we pass in is a concurrent queue, DISPATCH_QUEUE_CONCURRENT.

For a concurrent lane, dq_push actually resolves to _dispatch_lane_concurrent_push.

  • Search for _dispatch_lane_concurrent_push
void
_dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou,
        dispatch_qos_t qos)
{
    // <rdar://problem/24738102&24743140> reserving non barrier width
    // doesn't fail if only the ENQUEUED bit is set (unlike its barrier
    // width equivalent), so we have to check that this thread hasn't
    // enqueued anything ahead of this call or we can break ordering
    if (dq->dq_items_tail == NULL &&
            !_dispatch_object_is_waiter(dou) &&
            !_dispatch_object_is_barrier(dou) &&
            _dispatch_queue_try_acquire_async(dq)) {
        // Redirect to dq->do_targetq (handled recursively)
        return _dispatch_continuation_redirect_push(dq, dou, qos);
    }
        // Key call
    _dispatch_lane_push(dq, dou, qos);
}

There is a recursive redirect step in the middle. The queue passed in here is a DISPATCH_QUEUE_CONCURRENT queue; when it was created with the default target (DISPATCH_TARGET_QUEUE_DEFAULT), its do_targetq was set to a root queue, so the second round of the recursion processes that target queue — much like class loading recursively processing the superclass and the metaclass.

  • The _dispatch_continuation_redirect_push method
DISPATCH_NOINLINE
static void
_dispatch_continuation_redirect_push(dispatch_lane_t dl,
        dispatch_object_t dou, dispatch_qos_t qos)
{
    if (likely(!_dispatch_object_is_redirection(dou))) {
        
        dou._dc = _dispatch_async_redirect_wrap(dl, dou);
    } else if (!dou._dc->dc_ctxt) {
    
        dou._dc->dc_ctxt = (void *)
        (uintptr_t)_dispatch_queue_autorelease_frequency(dl);
    }

    dispatch_queue_t dq = dl->do_targetq;
    if (!qos) qos = _dispatch_priority_qos(dq->dq_priority);
    dx_push(dq, dou, qos);
}
  • Its target is DISPATCH_TARGET_QUEUE_DEFAULT, and we did not see that type when searching dq_push. The source does have a queue_pthread_root entry, so we set symbolic breakpoints to check whether _dispatch_root_queue_push gets called after _dispatch_continuation_redirect_push.

  • Add two symbolic breakpoints: _dispatch_continuation_redirect_push and _dispatch_root_queue_push
    _dispatch_continuation_redirect_push is hit first

_dispatch_root_queue_push is hit afterwards

So when an async task is submitted, _dispatch_root_queue_push really is called afterwards; we guess it does the low-level setup work.
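
For reference, the same symbolic breakpoints can also be set from the lldb console (equivalent to Xcode's symbolic-breakpoint UI):

(lldb) breakpoint set --name _dispatch_continuation_redirect_push
(lldb) breakpoint set --name _dispatch_root_queue_push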

  • Look at the _dispatch_root_queue_push source
DISPATCH_NOINLINE
void
_dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou,
        dispatch_qos_t qos)
{
#if DISPATCH_USE_KEVENT_WORKQUEUE
    dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
    if (unlikely(ddi && ddi->ddi_can_stash)) {
        dispatch_object_t old_dou = ddi->ddi_stashed_dou;
        dispatch_priority_t rq_overcommit;
        rq_overcommit = rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;

        if (likely(!old_dou._do || rq_overcommit)) {
            dispatch_queue_global_t old_rq = ddi->ddi_stashed_rq;
            dispatch_qos_t old_qos = ddi->ddi_stashed_qos;
            ddi->ddi_stashed_rq = rq;
            ddi->ddi_stashed_dou = dou;
            ddi->ddi_stashed_qos = qos;
            _dispatch_debug("deferring item %p, rq %p, qos %d",
                    dou._do, rq, qos);
            if (rq_overcommit) {
                ddi->ddi_can_stash = false;
            }
            if (likely(!old_dou._do)) {
                return;
            }
            // push the previously stashed item
            qos = old_qos;
            rq = old_rq;
            dou = old_dou;
        }
    }
#endif
// The code above is just stash/assignment bookkeeping
#if HAVE_PTHREAD_WORKQUEUE_QOS
    if (_dispatch_root_queue_push_needs_override(rq, qos)) {
        return _dispatch_root_queue_push_override(rq, dou, qos);
    }
#else
    (void)qos;
#endif
    _dispatch_root_queue_push_inline(rq, dou, dou, 1);
}

Here there are mainly two candidate calls — _dispatch_root_queue_push_override and _dispatch_root_queue_push_inline. As before, we add two symbolic breakpoints to see which one runs.

From the breakpoint hit we can see that _dispatch_root_queue_push_override is the one executed.

  • Look at _dispatch_root_queue_push_override
DISPATCH_NOINLINE
static void
_dispatch_root_queue_push_override(dispatch_queue_global_t orig_rq,
        dispatch_object_t dou, dispatch_qos_t qos)
{
    bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
    dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, overcommit);
    dispatch_continuation_t dc = dou._dc;

    if (_dispatch_object_is_redirection(dc)) {
        // no double-wrap is needed, _dispatch_async_redirect_invoke will do
        // the right thing
        dc->dc_func = (void *)orig_rq;
    } else {
        dc = _dispatch_continuation_alloc();
        dc->do_vtable = DC_VTABLE(OVERRIDE_OWNING);
        dc->dc_ctxt = dc;
        dc->dc_other = orig_rq;
        dc->dc_data = dou._do;
        dc->dc_priority = DISPATCH_NO_PRIORITY;
        dc->dc_voucher = DISPATCH_NO_VOUCHER;
    }
    _dispatch_root_queue_push_inline(rq, dc, dc, 1);
}

We find that it still ends up calling _dispatch_root_queue_push_inline.

  • Look at _dispatch_root_queue_push_inline
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_root_queue_push_inline(dispatch_queue_global_t dq,
        dispatch_object_t _head, dispatch_object_t _tail, int n)
{
    struct dispatch_object_s *hd = _head._do, *tl = _tail._do;
    if (unlikely(os_mpsc_push_list(os_mpsc(dq, dq_items), hd, tl, do_next))) {
        return _dispatch_root_queue_poke(dq, n, 0);
    }
}


Then _dispatch_root_queue_poke is executed.

  • Look at _dispatch_root_queue_poke
DISPATCH_NOINLINE
void
_dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor)
{
    if (!_dispatch_queue_class_probe(dq)) {
        return;
    }
#if !DISPATCH_USE_INTERNAL_WORKQUEUE
#if DISPATCH_USE_PTHREAD_POOL
    if (likely(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE))
#endif
    {
        if (unlikely(!os_atomic_cmpxchg2o(dq, dgq_pending, 0, n, relaxed))) {
            _dispatch_root_queue_debug("worker thread request still pending "
                    "for global queue: %p", dq);
            return;
        }
    }
#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE
    return _dispatch_root_queue_poke_slow(dq, n, floor);
}

  • Look at _dispatch_root_queue_poke_slow
DISPATCH_NOINLINE
static void
_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor)
{
    int remaining = n;
    int r = ENOSYS;
       // Key call
    _dispatch_root_queues_init();
    _dispatch_debug_root_queue(dq, __func__);
    _dispatch_trace_runtime_event(worker_request, dq, (uint64_t)n);

#if !DISPATCH_USE_INTERNAL_WORKQUEUE
#if DISPATCH_USE_PTHREAD_ROOT_QUEUES
    if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)
#endif
    { // For GLOBAL_ROOT_TYPE, request threads from the workqueue
        _dispatch_root_queue_debug("requesting new worker thread for global "
                "queue: %p", dq);
        r = _pthread_workqueue_addthreads(remaining,
                _dispatch_priority_to_pp_prefer_fallback(dq->dq_priority));
        (void)dispatch_assume_zero(r);
        return;
    }
#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE
#if DISPATCH_USE_PTHREAD_POOL
    // Work out how many threads still need to be created
    dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt;
    if (likely(pqc->dpq_thread_mediator.do_vtable)) {
        while (dispatch_semaphore_signal(&pqc->dpq_thread_mediator)) {
            _dispatch_root_queue_debug("signaled sleeping worker for "
                    "global queue: %p", dq);
            if (!--remaining) {
                return;
            }
        }
    }

    bool overcommit = dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
    if (overcommit) {
        os_atomic_add2o(dq, dgq_pending, remaining, relaxed);
    } else {
        if (!os_atomic_cmpxchg2o(dq, dgq_pending, 0, remaining, relaxed)) {
            _dispatch_root_queue_debug("worker thread request still pending for "
                    "global queue: %p", dq);
            return;
        }
    }

    int can_request, t_count;
    // seq_cst with atomic store to tail <rdar://problem/16932833>
    t_count = os_atomic_load2o(dq, dgq_thread_pool_size, ordered);
    do {
        can_request = t_count < floor ? 0 : t_count - floor;
        if (remaining > can_request) {
            _dispatch_root_queue_debug("pthread pool reducing request from %d to %d",
                    remaining, can_request);
            os_atomic_sub2o(dq, dgq_pending, remaining - can_request, relaxed);
            remaining = can_request;
        }
        if (remaining == 0) {
            _dispatch_root_queue_debug("pthread pool is full for root queue: "
                    "%p", dq);
            return;
        }
    } while (!os_atomic_cmpxchgvw2o(dq, dgq_thread_pool_size, t_count,
            t_count - remaining, &t_count, acquire));

#if !defined(_WIN32)
    pthread_attr_t *attr = &pqc->dpq_thread_attr;
    pthread_t tid, *pthr = &tid;
#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES
    if (unlikely(dq == &_dispatch_mgr_root_queue)) {
        pthr = _dispatch_mgr_root_queue_init();
    }
#endif
    do { // Create threads according to remaining
        _dispatch_retain(dq); // released in _dispatch_worker_thread
        while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) {
            if (r != EAGAIN) {
                (void)dispatch_assume_zero(r);
            }
            _dispatch_temporary_resource_shortage();
        }
    } while (--remaining);
#else // defined(_WIN32)
#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES
    if (unlikely(dq == &_dispatch_mgr_root_queue)) {
        _dispatch_mgr_root_queue_init();
    }
#endif
    do {
        _dispatch_retain(dq); // released in _dispatch_worker_thread
#if DISPATCH_DEBUG
        unsigned dwStackSize = 0;
#else
        unsigned dwStackSize = 64 * 1024;
#endif
        uintptr_t hThread = 0;
        while (!(hThread = _beginthreadex(NULL, dwStackSize, _dispatch_worker_thread_thunk, dq, STACK_SIZE_PARAM_IS_A_RESERVATION, NULL))) {
            if (errno != EAGAIN) {
                (void)dispatch_assume(hThread);
            }
            _dispatch_temporary_resource_shortage();
        }
        if (_dispatch_mgr_sched.prio > _dispatch_mgr_sched.default_prio) {
            (void)dispatch_assume_zero(SetThreadPriority((HANDLE)hThread, _dispatch_mgr_sched.prio) == TRUE);
        }
        CloseHandle((HANDLE)hThread);
    } while (--remaining);
#endif // defined(_WIN32)
#else
    (void)floor;
#endif // DISPATCH_USE_PTHREAD_POOL
}

Here we can see where threads actually get created (pthread_create for the pthread pool, or _pthread_workqueue_addthreads for global root queues).

  • Look at _dispatch_root_queues_init
static inline void
_dispatch_root_queues_init(void)
{ // A low-level "once": runs exactly one time
    dispatch_once_f(&_dispatch_root_queues_pred, NULL,
            _dispatch_root_queues_init_once);
}

The dispatch_once_f call runs _dispatch_root_queues_init_once exactly once.
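
dispatch_once_f is the function-pointer flavour of dispatch_once. A minimal usage sketch of the same pattern (the names below are made up for illustration):

#include <dispatch/dispatch.h>
#include <stdio.h>

static dispatch_once_t my_pred;            // must have static/global lifetime

static void my_init_once(void *ctxt) {
    printf("runs exactly once, ctxt = %p\n", ctxt);
}

void my_setup(void) {
    dispatch_once_f(&my_pred, NULL, my_init_once);
}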

  • Look at _dispatch_root_queues_init_once

    The _dispatch_worker_thread2 callback is registered here; when a task is executed, control comes back in through the _dispatch_worker_thread2 callback.
  • Search for _dispatch_lane_push
void
_dispatch_lane_push(dispatch_lane_t dq, dispatch_object_t dou,
        dispatch_qos_t qos)
{
    dispatch_wakeup_flags_t flags = 0;
    struct dispatch_object_s *prev;
     // Check whether the pushed object is a waiter
    if (unlikely(_dispatch_object_is_waiter(dou))) {
        return _dispatch_lane_push_waiter(dq, dou._dsc, qos);
    }

    dispatch_assert(!_dispatch_object_is_global(dq));
    qos = _dispatch_queue_push_qos(dq, qos);

    prev = os_mpsc_push_update_tail(os_mpsc(dq, dq_items), dou._do, do_next);
    if (unlikely(os_mpsc_push_was_empty(prev))) {
        _dispatch_retain_2_unsafe(dq);
        flags = DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY;
    } else if (unlikely(_dispatch_queue_need_override(dq, qos))) {
        
        _dispatch_retain_2_unsafe(dq);
        flags = DISPATCH_WAKEUP_CONSUME_2;
    }
    os_mpsc_push_update_prev(os_mpsc(dq, dq_items), prev, dou._do, do_next);
    if (flags) {
        return dx_wakeup(dq, qos, flags);
    }
}

The main thing it does is call dx_wakeup.

  • Search for dx_wakeup

  • Search for dq_wakeup

  • Search for _dispatch_lane_wakeup

DISPATCH_NOINLINE
void
_dispatch_lane_wakeup(dispatch_lane_class_t dqu, dispatch_qos_t qos,
        dispatch_wakeup_flags_t flags)
{
    dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;

    if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
        return _dispatch_lane_barrier_complete(dqu, qos, flags);
    }
    if (_dispatch_queue_class_probe(dqu)) {
        target = DISPATCH_QUEUE_WAKEUP_TARGET;
    }
    return _dispatch_queue_wakeup(dqu, qos, flags, target);
}

The job of the wakeup method is to wake the queue up.

So when is the block passed to the asynchronous call actually invoked?
We set a breakpoint inside the block and inspect the stack with bt:


The call order is as follows:

  1. libsystem_pthread.dylib start_wqthread + 15
  2. libsystem_pthread.dylib _pthread_wqthread + 220
  3. libdispatch.dylib _dispatch_worker_thread2 + 135
  4. libdispatch.dylib _dispatch_root_queue_drain + 351
  5. libdispatch.dylib _dispatch_async_redirect_invoke + 779
  6. libdispatch.dylib _dispatch_continuation_pop + 557
  7. libdispatch.dylib _dispatch_client_callout + 8
  8. libdispatch.dylib _dispatch_call_block_and_release + 12
  9. -[ViewController viewDidLoad]_block_invoke(.block_descriptor=0x000000010a7dd048) at ViewController.m:25:9
  • Search for _dispatch_worker_thread2

  • Search for _dispatch_root_queue_drain


    However, in this function we do not see _dispatch_async_redirect_invoke being called directly; it creates a dispatch_invoke_context_s variable dic and hands each item to _dispatch_continuation_pop_inline.

  • Search for and step into _dispatch_continuation_pop_inline

static inline void
_dispatch_continuation_pop_inline(dispatch_object_t dou,
        dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
        dispatch_queue_class_t dqu)
{      
    // Fetch the pthread-root-queue observer hooks (if any)
    dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
            _dispatch_get_pthread_root_queue_observer_hooks();
    if (observer_hooks) observer_hooks->queue_will_execute(dqu._dq);
    flags &= _DISPATCH_INVOKE_PROPAGATE_MASK;
    if (_dispatch_object_has_vtable(dou)) {
        dx_invoke(dou._dq, dic, flags);
    } else {
        _dispatch_continuation_invoke_inline(dou, flags, dqu);
    }
    if (observer_hooks) observer_hooks->queue_did_execute(dqu._dq);
}
  • Search for dx_invoke
#define dx_invoke(x, y, z) dx_vtable(x)->do_invoke(x, y, z)
  • Search for do_invoke — the argument passed in at the start is a dispatch_queue_global_t

    (For the global root queue vtable, do_invoke is _dispatch_object_no_invoke.)

  • Search for _dispatch_object_no_invoke

#define dx_type(x) dx_vtable(x)->do_type
#define DISPATCH_INTERNAL_CRASH(c, x) do { \
        _dispatch_set_crash_log_cause_and_message((c), \
                "BUG IN LIBDISPATCH: " x); \
        _dispatch_hardware_crash(); \
    } while (0)

DISPATCH_NOINLINE
static void
_dispatch_object_no_invoke(dispatch_object_t dou,
        DISPATCH_UNUSED dispatch_invoke_context_t dic,
        DISPATCH_UNUSED dispatch_invoke_flags_t flags)
{
    DISPATCH_INTERNAL_CRASH(dx_type(dou._do), "do_invoke called");
}
  • Search for DISPATCH_QUEUE_GLOBAL_ROOT_TYPE
    DISPATCH_QUEUE_GLOBAL_ROOT_TYPE     = DISPATCH_OBJECT_SUBTYPE(3, LANE) |
            _DISPATCH_QUEUE_ROOT_TYPEFLAG | _DISPATCH_NO_CONTEXT_TYPEFLAG

We find that this function does nothing but set a "do_invoke called" crash message — clearly not where our block runs. So when exactly is the _dispatch_async_redirect_invoke we saw on the stack called?
Let's step through with symbolic breakpoints.

  • Start with a symbolic breakpoint on _dispatch_continuation_pop_inline


    Running the app shows that this breakpoint is never hit (it is a static inline function, so the symbol is inlined away).

  • Add a symbolic breakpoint directly on _dispatch_async_redirect_invoke


    This breakpoint is indeed hit.

  • Look at the _dispatch_async_redirect_invoke source

It calls _dispatch_continuation_pop.

  • Look at _dispatch_continuation_pop
void
_dispatch_continuation_pop(dispatch_object_t dou, dispatch_invoke_context_t dic,
        dispatch_invoke_flags_t flags, dispatch_queue_class_t dqu)
{
    _dispatch_continuation_pop_inline(dou, dic, flags, dqu._dq);
}
  • Look at _dispatch_continuation_pop_inline
DISPATCH_ALWAYS_INLINE_NDEBUG
static inline void
_dispatch_continuation_pop_inline(dispatch_object_t dou,
        dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
        dispatch_queue_class_t dqu)
{
    dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
            _dispatch_get_pthread_root_queue_observer_hooks();
    if (observer_hooks) observer_hooks->queue_will_execute(dqu._dq);
    flags &= _DISPATCH_INVOKE_PROPAGATE_MASK;
    if (_dispatch_object_has_vtable(dou)) {
        dx_invoke(dou._dq, dic, flags);
    } else {
        _dispatch_continuation_invoke_inline(dou, flags, dqu);
    }
    if (observer_hooks) observer_hooks->queue_did_execute(dqu._dq);
}

We are back in _dispatch_continuation_pop_inline, but this time the if falls through to _dispatch_continuation_invoke_inline.

  • Look at _dispatch_continuation_invoke_inline
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_invoke_inline(dispatch_object_t dou,
        dispatch_invoke_flags_t flags, dispatch_queue_class_t dqu)
{
    dispatch_continuation_t dc = dou._dc, dc1;
    dispatch_invoke_with_autoreleasepool(flags, {
        uintptr_t dc_flags = dc->dc_flags;
        
        _dispatch_continuation_voucher_adopt(dc, dc_flags);
        if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) {
            _dispatch_trace_item_pop(dqu, dou);
        }
        if (dc_flags & DC_FLAG_CONSUME) {
            dc1 = _dispatch_continuation_free_cacheonly(dc);
        } else {
            dc1 = NULL;
        }
        if (unlikely(dc_flags & DC_FLAG_GROUP_ASYNC)) {
            _dispatch_continuation_with_group_invoke(dc);
        } else {
            // Invoke the saved function/block
            _dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
            _dispatch_trace_item_complete(dc);
        }
        if (unlikely(dc1)) {
            _dispatch_continuation_free_to_cache_limit(dc1);
        }
    });
    _dispatch_perfmon_workitem_inc();
}

At this point, the block that was saved when dispatch_async was called is finally invoked.
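
At its core, _dispatch_client_callout is little more than calling the saved function with the saved context. A minimal sketch of the idea (my_client_callout is a made-up name; the real function also does unwind/exception bookkeeping around the call):

#include <dispatch/dispatch.h>

static void my_client_callout(void *ctxt, dispatch_function_t f) {
    f(ctxt); // for a block submitted with dispatch_async, f is
             // _dispatch_call_block_and_release and ctxt is the copied block
}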

2. How the synchronous function works
  • Search for dispatch_sync(dis
DISPATCH_NOINLINE
void
dispatch_sync(dispatch_queue_t dq, dispatch_block_t work)
{
    uintptr_t dc_flags = DC_FLAG_BLOCK;
    if (unlikely(_dispatch_block_has_private_data(work))) {
        // Handle blocks that carry private data
        return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
    }
    // Key call
    _dispatch_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
}
  • Search for _dispatch_sync_f(dis
DISPATCH_NOINLINE
static void
_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
        uintptr_t dc_flags)
{
    _dispatch_sync_f_inline(dq, ctxt, func, dc_flags);
}
  • Search for _dispatch_sync_f_inline(dis
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt,
        dispatch_function_t func, uintptr_t dc_flags)
{
    // Serial queues (width == 1) take this path
    if (likely(dq->dq_width == 1)) {
        return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags);
    }
    
    if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
        DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
    }

    dispatch_lane_t dl = upcast(dq)._dl;
    // A concurrent queue that cannot reserve non-barrier width falls into
    // the slow path _dispatch_sync_f_slow (deadlock is also detected there)
    if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
        return _dispatch_sync_f_slow(dl, ctxt, func, 0, dl, dc_flags);
    }

    if (unlikely(dq->do_targetq->do_targetq)) {
        return _dispatch_sync_recurse(dl, ctxt, func, dc_flags);
    }
    // From here the handling is the same as for a serial queue
    _dispatch_introspection_sync_begin(dl);
    _dispatch_sync_invoke_and_complete(dl, ctxt, func DISPATCH_TRACE_ARG(
            _dispatch_trace_item_sync_push_pop(dq, ctxt, func, dc_flags)));
}

A serial queue takes the barrier path, _dispatch_barrier_sync_f.
Below we look at two things:

  1. _dispatch_barrier_sync_f
  2. Deadlock

1. _dispatch_barrier_sync_f

  • Search for _dispatch_barrier_sync_f(dis
DISPATCH_NOINLINE
static void
_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt,
        dispatch_function_t func, uintptr_t dc_flags)
{
    _dispatch_barrier_sync_f_inline(dq, ctxt, func, dc_flags);
}
  • Search for _dispatch_barrier_sync_f_inline(dis
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt,
        dispatch_function_t func, uintptr_t dc_flags)
{
    // Get the thread's tid (every thread has one)
    dispatch_tid tid = _dispatch_tid_self();
   
    if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
        DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
    }

    dispatch_lane_t dl = upcast(dq)._dl;
     // Try to take the barrier lock for this queue on the current thread
    if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
              // Slow path — this is where deadlock is detected
        return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
                DC_FLAG_BARRIER | dc_flags);
    }

    if (unlikely(dl->do_targetq->do_targetq)) {
        return _dispatch_sync_recurse(dl, ctxt, func,
                DC_FLAG_BARRIER | dc_flags);
    }
    // The normal case ends up here
    _dispatch_introspection_sync_begin(dl);
    _dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func
            DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
                    dq, ctxt, func, dc_flags | DC_FLAG_BARRIER)));
}
  • Search for _dispatch_introspection_sync_begin
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_introspection_sync_begin(dispatch_queue_class_t dq)
{
    if (!_dispatch_introspection.debug_queue_inversions) return;
    // Record queue ordering (for queue-inversion debugging)
    _dispatch_introspection_order_record(dq._dq);
}
  • Search for _dispatch_introspection_order_record
void
_dispatch_introspection_order_record(dispatch_queue_t top_q)
{
    dispatch_queue_t bottom_q = _dispatch_queue_get_current();
    dispatch_queue_order_entry_t e, it;
    const int pcs_skip = 1, pcs_n_max = 128;
    void *pcs[pcs_n_max];
    int pcs_n;

    if (!bottom_q || !bottom_q->do_targetq || !top_q->do_targetq) {
        return;
    }

    dispatch_queue_t top_tq = _dispatch_queue_bottom_target_queue(top_q);
    dispatch_queue_t bottom_tq = _dispatch_queue_bottom_target_queue(bottom_q);
    dispatch_queue_introspection_context_t ttqic = top_tq->do_finalizer;
    dispatch_queue_introspection_context_t btqic = bottom_tq->do_finalizer;

    _dispatch_unfair_lock_lock(&ttqic->dqic_order_top_head_lock);
    LIST_FOREACH(it, &ttqic->dqic_order_top_head, dqoe_order_top_list) {
        if (it->dqoe_bottom_tq == bottom_tq) {
            // that dispatch_sync() is known and validated
            // move on
            _dispatch_unfair_lock_unlock(&ttqic->dqic_order_top_head_lock);
            return;
        }
    }
    _dispatch_unfair_lock_unlock(&ttqic->dqic_order_top_head_lock);

    _dispatch_introspection_order_check(NULL, top_q, top_tq, bottom_q, bottom_tq);
    pcs_n = MAX(backtrace(pcs, pcs_n_max) - pcs_skip, 0);

    bool copy_top_label = false, copy_bottom_label = false;
    size_t size = sizeof(struct dispatch_queue_order_entry_s)
            + (size_t)pcs_n * sizeof(void *);

    if (_dispatch_queue_label_needs_free(top_q)) {
        size += strlen(top_q->dq_label) + 1;
        copy_top_label = true;
    }
    if (_dispatch_queue_label_needs_free(bottom_q)) {
        size += strlen(bottom_q->dq_label) + 1;
        copy_bottom_label = true;
    }

    e = _dispatch_calloc(1, size);
    e->dqoe_top_tq = top_tq;
    e->dqoe_bottom_tq = bottom_tq;
    e->dqoe_pcs_n = pcs_n;
    memcpy(e->dqoe_pcs, pcs + pcs_skip, (size_t)pcs_n * sizeof(void *));
    // and then lay out the names of the queues at the end
    char *p = (char *)(e->dqoe_pcs + pcs_n);
    if (copy_top_label) {
        e->dqoe_top_label = strcpy(p, top_q->dq_label);
        p += strlen(p) + 1;
    } else {
        e->dqoe_top_label = top_q->dq_label ?: "";
    }
    if (copy_bottom_label) {
        e->dqoe_bottom_label = strcpy(p, bottom_q->dq_label);
    } else {
        e->dqoe_bottom_label = bottom_q->dq_label ?: "";
    }

    _dispatch_unfair_lock_lock(&ttqic->dqic_order_top_head_lock);
    LIST_FOREACH(it, &ttqic->dqic_order_top_head, dqoe_order_top_list) {
        if (unlikely(it->dqoe_bottom_tq == bottom_tq)) {
            // someone else validated it at the same time
            // go away quickly
            _dispatch_unfair_lock_unlock(&ttqic->dqic_order_top_head_lock);
            free(e);
            return;
        }
    }
    LIST_INSERT_HEAD(&ttqic->dqic_order_top_head, e, dqoe_order_top_list);
    _dispatch_unfair_lock_unlock(&ttqic->dqic_order_top_head_lock);

    _dispatch_unfair_lock_lock(&btqic->dqic_order_bottom_head_lock);
    LIST_INSERT_HEAD(&btqic->dqic_order_bottom_head, e, dqoe_order_bottom_list);
    _dispatch_unfair_lock_unlock(&btqic->dqic_order_bottom_head_lock);
}

This just does some data handling and preparatory bookkeeping.

  • Search for _dispatch_lane_barrier_sync_invoke_and_complete
DISPATCH_NOINLINE
static void
_dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq,
        void *ctxt, dispatch_function_t func DISPATCH_TRACE_ARG(void *dc))
{
    // Invoke the function
    _dispatch_sync_function_invoke_inline(dq, ctxt, func);
    // Trace record
    _dispatch_trace_item_complete(dc);
    if (unlikely(dq->dq_items_tail || dq->dq_width > 1)) {
        return _dispatch_lane_barrier_complete(dq, 0, 0);
    }

    const uint64_t fail_unlock_mask = DISPATCH_QUEUE_SUSPEND_BITS_MASK |
            DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_DIRTY |
            DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_SYNC_TRANSFER |
            DISPATCH_QUEUE_RECEIVED_SYNC_WAIT;
    uint64_t old_state, new_state;

    // When the task finishes, release the drain lock — dispatch_sync blocks the calling thread until then
    os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
        new_state  = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED;
        new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
        new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
        if (unlikely(old_state & fail_unlock_mask)) {
            os_atomic_rmw_loop_give_up({
                return _dispatch_lane_barrier_complete(dq, 0, 0);
            });
        }
    });
    if (_dq_state_is_base_wlh(old_state)) {
        _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq);
    }
}
  • Step into _dispatch_sync_function_invoke_inline
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_sync_function_invoke_inline(dispatch_queue_class_t dq, void *ctxt,
        dispatch_function_t func)
{
    dispatch_thread_frame_s dtf;
    // Push a thread frame
    _dispatch_thread_frame_push(&dtf, dq);
    // Execute func
    _dispatch_client_callout(ctxt, func);
    _dispatch_perfmon_workitem_inc();
    // Pop the thread frame when done
    _dispatch_thread_frame_pop(&dtf);
}
  • Search for _dispatch_queue_try_acquire_barrier_sync
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_try_acquire_barrier_sync(dispatch_queue_class_t dq, uint32_t tid)
{
    return _dispatch_queue_try_acquire_barrier_sync_and_suspend(dq._dl, tid, 0);
}
  • Search for _dispatch_queue_try_acquire_barrier_sync_and_suspend

    At the bottom it takes exclusive ownership of the queue and gives up running anything else until the task is done — this is the root cause of the synchronous behaviour (see the short example below).
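
A minimal sketch of that blocking behaviour from user code (the label and function name are made up; it assumes we are not already running on q):

#include <dispatch/dispatch.h>
#include <stdio.h>

void sync_blocks_the_caller(void) {
    dispatch_queue_t q = dispatch_queue_create("com.example.serial", DISPATCH_QUEUE_SERIAL);
    printf("1\n");
    dispatch_sync(q, ^{
        printf("2\n"); // dispatch_sync does not return until this has run
    });
    printf("3\n");     // always printed after "2"
}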

2. Deadlock: _dispatch_sync_f_slow

  • Search for _dispatch_sync_f_slow
DISPATCH_NOINLINE
static void
_dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
        dispatch_function_t func, uintptr_t top_dc_flags,
        dispatch_queue_class_t dqu, uintptr_t dc_flags)
{
    dispatch_queue_t top_dq = top_dqu._dq;
    dispatch_queue_t dq = dqu._dq;
    if (unlikely(!dq->do_targetq)) {
        return _dispatch_sync_function_invoke(dq, ctxt, func);
    }

    pthread_priority_t pp = _dispatch_get_priority();
    // Wrap the incoming task into a sync context
    struct dispatch_sync_context_s dsc = {
        .dc_flags    = DC_FLAG_SYNC_WAITER | dc_flags,
        .dc_func     = _dispatch_async_and_wait_invoke,
        .dc_ctxt     = &dsc,
        .dc_other    = top_dq,
        .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG,
        .dc_voucher  = _voucher_get(),
        .dsc_func    = func,
        .dsc_ctxt    = ctxt,
        .dsc_waiter  = _dispatch_tid_self(),
    };
    // Push the task onto the queue
    _dispatch_trace_item_push(top_dq, &dsc);
    // Compare the waiter's tid with the tid currently draining the queue; if they match, we would be waiting on ourselves — deadlock
    __DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq);

    if (dsc.dsc_func == NULL) {
        // dsc_func being cleared means that the block ran on another thread ie.
        // case (2) as listed in _dispatch_async_and_wait_f_slow.
        dispatch_queue_t stop_dq = dsc.dc_other;
        return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags);
    }

    _dispatch_introspection_sync_begin(top_dq);
    _dispatch_trace_item_pop(top_dq, &dsc);
    _dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func,top_dc_flags
            DISPATCH_TRACE_ARG(&dsc));
}
  • Search for __DISPATCH_WAIT_FOR_QUEUE__
    (Its key step checks _dq_state_drain_locked_by(dq_state, dsc->dsc_waiter) and crashes when that returns true.)
  • Search for _dq_state_drain_locked_by
  • Search for _dispatch_lock_is_locked_by
#define DLOCK_OWNER_MASK            ((dispatch_lock)0xfffffffc)

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_tid tid)
{
    // Check whether lock_value is owned by the thread with this tid
    return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
}

When the tids are the same, the XOR result is 0, and 0 ANDed with anything is still 0, so the function returns true. The check inside __DISPATCH_WAIT_FOR_QUEUE__ then fires and the program crashes — that is the deadlock.
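
A minimal repro of that path (the label and function name are made up): submitting a dispatch_sync onto the serial queue the current thread is already draining makes the waiter tid equal to the owner tid.

#include <dispatch/dispatch.h>
#include <stdio.h>

void deadlock_demo(void) {
    dispatch_queue_t serial = dispatch_queue_create("com.example.serial", DISPATCH_QUEUE_SERIAL);
    dispatch_async(serial, ^{
        // This thread is now draining `serial`; the drain lock records its tid.
        dispatch_sync(serial, ^{          // waiter tid == owner tid
            printf("never reached\n");    // __DISPATCH_WAIT_FOR_QUEUE__ crashes first
        });
    });
}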
