/linux/net/ipv6/

protocol.c
    30  return !cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol],   in inet6_add_protocol()
    39  ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol],    in inet6_del_protocol()
    54  return !cmpxchg((const struct net_offload **)&inet6_offloads[protocol],    in inet6_add_offload()
    63  ret = (cmpxchg((const struct net_offload **)&inet6_offloads[protocol],     in inet6_del_offload()
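
These hits are the handler-registration idiom: a table slot is claimed only if
it is still NULL, so a racing second registration fails instead of silently
overwriting the first handler. A minimal sketch of the add side, with
illustrative names rather than the kernel's:

    static const struct my_protocol *my_protos[MAX_PROTOS];  /* hypothetical table */

    int my_add_protocol(const struct my_protocol *prot, unsigned char num)
    {
            /* cmpxchg() returns the prior value; NULL means we won the slot */
            return !cmpxchg((const struct my_protocol **)&my_protos[num],
                            NULL, prot) ? 0 : -1;
    }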

ip6_icmp.c
    18  return (cmpxchg((ip6_icmp_send_t **)&ip6_icmp_send, NULL, fn) == NULL) ?   in inet6_register_icmp_sender()
    27  ret = (cmpxchg((ip6_icmp_send_t **)&ip6_icmp_send, fn, NULL) == fn) ?      in inet6_unregister_icmp_sender()
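
The unregister side is the mirror image: the hook is cleared only if it still
holds the function being removed, so unregistering a handler that was never
installed fails cleanly. A sketch with hypothetical names (real unregister
paths typically also wait for in-flight callers, e.g. via RCU, before the
handler may be freed):

    typedef void my_send_fn(int code);
    static my_send_fn *my_send_hook;        /* hypothetical hook pointer */

    int my_unregister_sender(my_send_fn *fn)
    {
            /* only succeeds if fn is the currently installed handler */
            return (cmpxchg(&my_send_hook, fn, NULL) == fn) ? 0 : -1;
    }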

/linux/net/ipv4/

protocol.c
    34  return !cmpxchg((const struct net_protocol **)&inet_protos[protocol],    in inet_add_protocol()
    41  return !cmpxchg((const struct net_offload **)&inet_offloads[protocol],   in inet_add_offload()
    50  ret = (cmpxchg((const struct net_protocol **)&inet_protos[protocol],     in inet_del_protocol()
    63  ret = (cmpxchg((const struct net_offload **)&inet_offloads[protocol],    in inet_del_offload()

gre_demux.c
    36  return (cmpxchg((const struct gre_protocol **)&gre_proto[version], NULL, proto) == NULL) ?   in gre_add_protocol()
    48  ret = (cmpxchg((const struct gre_protocol **)&gre_proto[version], proto, NULL) == proto) ?   in gre_del_protocol()

/linux/kernel/

task_work.c
    45  } while (cmpxchg(&task->task_works, head, work) != head);   in task_work_add()
    93  else if (cmpxchg(pprev, work, work->next) == work)          in task_work_cancel_match()
   150  } while (cmpxchg(&task->task_works, work, head) != work);   in task_work_run()
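
task_work_add() pushes onto a lock-free singly linked list: re-read the head,
link the new node in front of it, and retry the cmpxchg until no other CPU
changed the head in between. A simplified sketch of that loop (the real
function also checks for an exit sentinel on each pass):

    struct callback_head *head;

    do {
            head = READ_ONCE(task->task_works);
            work->next = head;              /* link in front of the current head */
    } while (cmpxchg(&task->task_works, head, work) != head);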

/linux/tools/include/asm-generic/

atomic-gcc.h
    64  #define cmpxchg(ptr, oldval, newval) \           (macro)
    69  return cmpxchg(&(v)->counter, oldval, newval);   in atomic_cmpxchg()
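
In the tools headers, cmpxchg() is not an arch-specific primitive but a thin
macro over a compiler builtin. A sketch of how such a definition can look
(the builtin shown is GCC's type-generic compare-and-swap, which, like the
kernel macro, returns the value previously held in *ptr):

    #define cmpxchg(ptr, oldval, newval) \
            __sync_val_compare_and_swap(ptr, oldval, newval)

    static inline int atomic_cmpxchg(atomic_t *v, int oldval, int newval)
    {
            return cmpxchg(&(v)->counter, oldval, newval);
    }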

/linux/drivers/gpu/drm/

drm_lock.c
    75  prev = cmpxchg(lock, old, new);                  in drm_lock_take()
   118  prev = cmpxchg(lock, old, new);                  in drm_lock_transfer()
   141  prev = cmpxchg(lock, old, new);                  in drm_legacy_lock_free()
   319  prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);   in drm_legacy_idlelock_release()
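
All four sites share the classic read/compute/retry shape: sample the lock
word, derive the desired new value from it, and loop until the cmpxchg
observes the same value that was sampled. A generic sketch, where
compute_new_word() is a hypothetical stand-in for the per-call-site logic:

    unsigned int old, new, prev;

    do {
            old = *lock;
            new = compute_new_word(old, context);   /* hypothetical helper */
            prev = cmpxchg(lock, old, new);
    } while (prev != old);                          /* raced: retry with a fresh value */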

/linux/lib/

llist.c
    33  } while (cmpxchg(&head->first, first, new_first) != first);   in llist_add_batch()
    63  entry = cmpxchg(&head->first, old_entry, next);               in llist_del_first()
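
llist_del_first() pops the head by swinging head->first to the second node.
A simplified sketch of the shape; note that because cmpxchg() compares only
values, concurrent deleters could hit the ABA problem, which is why the llist
API requires deletions from the same list to be serialized:

    struct llist_node *first;

    do {
            first = READ_ONCE(head->first);
            if (!first)
                    return NULL;            /* list is empty */
    } while (cmpxchg(&head->first, first, first->next) != first);

    return first;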

errseq.c
    95  cur = cmpxchg(eseq, old, new);   in errseq_set()
   201  cmpxchg(eseq, old, new);         in errseq_check_and_advance()
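
errseq_set() folds an error code into a shared sequence/error word, retrying
if another writer updated it first. A heavily simplified sketch of that loop
(merge_error() is a hypothetical stand-in; the real function also manages a
"seen" flag and a sequence counter, and tolerates a racing writer that stored
the same value):

    u32 old, new, cur;

    for (;;) {
            old = READ_ONCE(*eseq);
            new = merge_error(old, err);    /* hypothetical: fold err into the word */
            cur = cmpxchg(eseq, old, new);
            if (cur == old)
                    break;                  /* our update landed */
    }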

/linux/net/rxrpc/

call_event.c
   343  cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);         in rxrpc_process_call()
   350  cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);    in rxrpc_process_call()
   357  cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);   in rxrpc_process_call()
   366  cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);        in rxrpc_process_call()
   373  cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);      in rxrpc_process_call()
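
Here cmpxchg() disarms a soft timer without clobbering a concurrent re-arm:
the deadline is pushed far into the future only if it still holds the value
that was sampled when the timer was seen to expire. A sketch of the pattern,
using the field names from the listing above:

    unsigned long t = READ_ONCE(call->ack_at);

    if (time_after_eq(now, t)) {
            /* expired: disarm, unless another CPU re-armed it meanwhile */
            cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
            /* ... handle the ACK event ... */
    }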

/linux/arch/ia64/include/uapi/asm/

cmpxchg.h
   133  #define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))   (macro)
   136  #define cmpxchg_local cmpxchg
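
On ia64 the generic cmpxchg() name maps to the acquire-ordered variant, as
the macro above shows. In C11 terms that is roughly a compare-exchange whose
ordering is memory_order_acquire; a userspace sketch of the analogy, not
kernel code:

    #include <stdatomic.h>

    static inline int cas_acquire(atomic_int *p, int expected, int desired)
    {
            atomic_compare_exchange_strong_explicit(p, &expected, desired,
                                                    memory_order_acquire,
                                                    memory_order_acquire);
            return expected;    /* the observed value, like cmpxchg()'s return */
    }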

/linux/drivers/accessibility/speakup/

selection.c
    71  if (cmpxchg(&speakup_sel_work.tty, NULL, tty)) {     in speakup_set_selection()
   124  if (cmpxchg(&speakup_paste_work.tty, NULL, tty)) {   in speakup_paste_selection()

/linux/arch/sh/kernel/cpu/sh2/

smp-j2.c
    28  while (cmpxchg(pmsg, messages, 0) != messages);         in j2_ipi_interrupt_handler()
   118  while (cmpxchg(pmsg, old, old|(1U<<message)) != old);   in j2_send_ipi()

/linux/Documentation/locking/

rt-mutex.rst
    42  without waiters. The optimized fastpath operations require cmpxchg
    66  with ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
    72  To prevent a cmpxchg of the owner releasing the lock, we need to
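
The fastpath this document describes is a single cmpxchg on the owner field
when there are no waiters. An illustrative sketch only; the real rt_mutex
code additionally encodes a "has waiters" bit in the owner word, handles
priority inheritance, and falls back to a slowpath under ->wait_lock:

    static inline bool rt_mutex_try_fastlock(struct rt_mutex *lock)
    {
            /* unowned -> owned by current, in one atomic step */
            return cmpxchg(&lock->owner, NULL, current) == NULL;
    }

    static inline bool rt_mutex_try_fastunlock(struct rt_mutex *lock)
    {
            /* only succeeds if current still owns the lock */
            return cmpxchg(&lock->owner, current, NULL) == current;
    }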

/linux/drivers/dma-buf/

dma-fence-array.c
    36  cmpxchg(&array->base.error, PENDING_ERROR, error);   in dma_fence_array_set_pending_error()
    42  cmpxchg(&array->base.error, PENDING_ERROR, 0);       in dma_fence_array_clear_pending_error()

/linux/kernel/trace/

trace_recursion_record.c
    78  old = cmpxchg(&recursed_functions[index].ip, 0, ip);   in ftrace_record_recursion()
   112  cmpxchg(&recursed_functions[index].ip, ip, 0);         in ftrace_record_recursion()

/linux/fs/nfs/

nfs3acl.c
    24  if (cmpxchg(p, ACL_NOT_CACHED, sentinel) != ACL_NOT_CACHED) {   in nfs3_prepare_get_acl()
    35  if (cmpxchg(p, sentinel, acl) != sentinel)                      in nfs3_complete_get_acl()
    44  cmpxchg(p, sentinel, ACL_NOT_CACHED);                           in nfs3_abort_get_acl()
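
These three functions implement a cmpxchg-based handoff through a sentinel:
the cache slot moves from "not cached" to a sentinel while one task fetches
the ACL, then from the sentinel to the result (or back, on failure), so
concurrent fetchers never overwrite each other. A condensed sketch with a
hypothetical fetch helper:

    if (cmpxchg(p, ACL_NOT_CACHED, sentinel) != ACL_NOT_CACHED)
            return;                         /* cached, or another task is fetching */

    acl = fetch_acl_from_server();          /* hypothetical helper */
    if (IS_ERR(acl))
            cmpxchg(p, sentinel, ACL_NOT_CACHED);   /* abort: release our claim */
    else
            cmpxchg(p, sentinel, acl);              /* publish the result */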

/linux/Documentation/virt/kvm/

locking.rst
    66  On fast page fault path, we will use cmpxchg to atomically set the spte W
    69  changing these bits can be detected by cmpxchg.
    76  is not changed during cmpxchg. This is a ABA problem, for example, below case
   108  | if (cmpxchg(spte, old_spte, old_spte+W) |
   119  kvm_vcpu_gfn_to_pfn_atomic, before the cmpxchg. After the pinning:
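
The ABA hazard this document walks through: cmpxchg() compares only values,
so if the spte changes from A to B and back to A between the read and the
cmpxchg, the cmpxchg still succeeds even though checks made against the old
mapping may no longer hold. A sketch of the hazard, where WRITABLE stands in
for the document's W bit (the document's fix is to pin the page, via
kvm_vcpu_gfn_to_pfn_atomic, before the cmpxchg so the old value cannot be
recycled):

    old_spte = READ_ONCE(*spte);
    /* ... another CPU zaps the spte and installs an identical value,
     * which may now refer to a different physical page ... */
    if (cmpxchg(spte, old_spte, old_spte | WRITABLE) == old_spte) {
            /* succeeded, yet the earlier checks were against a stale mapping */
    }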

/linux/arch/nios2/include/asm/

Kbuild
    2  generic-y += cmpxchg.h

/linux/arch/h8300/include/asm/

Kbuild
    3  generic-y += cmpxchg.h

/linux/arch/nds32/include/asm/

Kbuild
    3  generic-y += cmpxchg.h

/linux/arch/microblaze/include/asm/

Kbuild
    3  generic-y += cmpxchg.h

/linux/tools/arch/x86/include/asm/

atomic.h
    70  return cmpxchg(&v->counter, old, new);   in atomic_cmpxchg()

/linux/arch/s390/include/asm/

percpu.h
    38  prev__ = cmpxchg(ptr__, old__, new__);   \
   141  ret__ = cmpxchg(ptr__, oval, nval);      \

/linux/include/linux/

bitops.h
   317  } while (cmpxchg(ptr, old__, new__) != old__);   \
   333  cmpxchg(ptr, old__, new__) != old__);            \
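
Both bitops.h hits sit inside macro bodies built around the same
read-modify-write loop: read the word, compute a masked update, and retry
until the cmpxchg confirms no concurrent writer intervened. A sketch of such
a helper, modeled on the kernel's set_mask_bits() but with a hypothetical
name:

    #define update_mask_bits(ptr, mask, bits)                       \
    ({                                                              \
            typeof(*(ptr)) old__, new__;                            \
                                                                    \
            do {                                                    \
                    old__ = READ_ONCE(*(ptr));                      \
                    new__ = (old__ & ~(mask)) | (bits);             \
            } while (cmpxchg(ptr, old__, new__) != old__);          \
            new__;                                                  \
    })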