
Searched refs:cmpxchg (Results 1 – 25 of 3550) sorted by relevance

/linux/net/ipv6/
protocol.c:30  return !cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol], in inet6_add_protocol()
protocol.c:39  ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol], in inet6_del_protocol()
protocol.c:54  return !cmpxchg((const struct net_offload **)&inet6_offloads[protocol], in inet6_add_offload()
protocol.c:63  ret = (cmpxchg((const struct net_offload **)&inet6_offloads[protocol], in inet6_del_offload()
ip6_icmp.c:18  return (cmpxchg((ip6_icmp_send_t **)&ip6_icmp_send, NULL, fn) == NULL) ? in inet6_register_icmp_sender()
ip6_icmp.c:27  ret = (cmpxchg((ip6_icmp_send_t **)&ip6_icmp_send, fn, NULL) == fn) ? in inet6_unregister_icmp_sender()
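
The ipv6 protocol/offload hits above, and the ipv4 and gre_demux.c hits just below, all use the same idiom: a handler slot is claimed by cmpxchg'ing it from NULL to the new handler and released by cmpxchg'ing it back to NULL, so registration fails cleanly if the slot is already taken. A minimal userspace sketch of that pattern, assuming a hypothetical handler type and table, with cmpxchg emulated via the GCC builtin (in the spirit of tools/include/asm-generic/atomic-gcc.h further down):

/*
 * Sketch only: mirrors the claim/release pattern of inet6_add_protocol()
 * and friends with a hypothetical handler type and table.  The cmpxchg()
 * here is a userspace stand-in built on the GCC builtin.
 */
#include <stddef.h>
#include <stdio.h>

#define cmpxchg(ptr, old, new) __sync_val_compare_and_swap((ptr), (old), (new))

struct proto_handler { const char *name; };

#define MAX_PROTO 256
static struct proto_handler *proto_table[MAX_PROTO];

/* Claim the slot only if it is still empty: 0 on success, -1 if taken. */
static int proto_add(struct proto_handler *h, unsigned char protocol)
{
        return cmpxchg(&proto_table[protocol], NULL, h) == NULL ? 0 : -1;
}

/* Release the slot only if it still holds this handler. */
static int proto_del(struct proto_handler *h, unsigned char protocol)
{
        return cmpxchg(&proto_table[protocol], h, NULL) == h ? 0 : -1;
}

int main(void)
{
        struct proto_handler icmp = { "icmp" };

        printf("add: %d\n", proto_add(&icmp, 58));  /* 0: slot was free  */
        printf("add: %d\n", proto_add(&icmp, 58));  /* -1: already taken */
        printf("del: %d\n", proto_del(&icmp, 58));  /* 0: we owned it    */
        return 0;
}
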
/linux/net/ipv4/
protocol.c:34  return !cmpxchg((const struct net_protocol **)&inet_protos[protocol], in inet_add_protocol()
protocol.c:41  return !cmpxchg((const struct net_offload **)&inet_offloads[protocol], in inet_add_offload()
protocol.c:50  ret = (cmpxchg((const struct net_protocol **)&inet_protos[protocol], in inet_del_protocol()
protocol.c:63  ret = (cmpxchg((const struct net_offload **)&inet_offloads[protocol], in inet_del_offload()
gre_demux.c:36  return (cmpxchg((const struct gre_protocol **)&gre_proto[version], NULL, proto) == NULL) ? in gre_add_protocol()
gre_demux.c:48  ret = (cmpxchg((const struct gre_protocol **)&gre_proto[version], proto, NULL) == proto) ? in gre_del_protocol()
/linux/kernel/
task_work.c:45  } while (cmpxchg(&task->task_works, head, work) != head); in task_work_add()
task_work.c:93  else if (cmpxchg(pprev, work, work->next) == work) in task_work_cancel_match()
task_work.c:150  } while (cmpxchg(&task->task_works, work, head) != work); in task_work_run()
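
task_work.c, and the llist.c hits further down, show the other recurring shape: a do/while retry loop that re-reads the list head, links the new entry, and loops until cmpxchg observes the value that was read. A hedged sketch of a lock-free push/pop in that style (node layout and names are illustrative only; like llist_del_first(), the pop side assumes a single consumer to sidestep ABA):

/*
 * Sketch of the cmpxchg retry loop used by task_work_add() and
 * llist_add_batch(); node layout and names are illustrative.
 */
#include <stddef.h>
#include <stdio.h>

#define cmpxchg(ptr, old, new) __sync_val_compare_and_swap((ptr), (old), (new))

struct node {
        struct node *next;
        int val;
};

static struct node *head;

static void push(struct node *n)
{
        struct node *first;

        do {
                first = head;        /* snapshot the current head        */
                n->next = first;     /* link the new node in front of it */
        } while (cmpxchg(&head, first, n) != first);  /* retry if head moved */
}

/* Like llist_del_first(), this assumes a single consumer; otherwise the
 * read of first->next is ABA-prone. */
static struct node *pop(void)
{
        struct node *first;

        do {
                first = head;
                if (!first)
                        return NULL;
        } while (cmpxchg(&head, first, first->next) != first);

        return first;
}

int main(void)
{
        struct node a = { .val = 1 }, b = { .val = 2 };

        push(&a);
        push(&b);
        struct node *x = pop();
        struct node *y = pop();
        printf("%d %d\n", x->val, y->val);  /* "2 1": LIFO order */
        return 0;
}
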
/linux/tools/include/asm-generic/
atomic-gcc.h:64  #define cmpxchg(ptr, oldval, newval) \   [macro]
atomic-gcc.h:69  return cmpxchg(&(v)->counter, oldval, newval); in atomic_cmpxchg()
/linux/drivers/gpu/drm/
drm_lock.c:75  prev = cmpxchg(lock, old, new); in drm_lock_take()
drm_lock.c:118  prev = cmpxchg(lock, old, new); in drm_lock_transfer()
drm_lock.c:141  prev = cmpxchg(lock, old, new); in drm_legacy_lock_free()
drm_lock.c:319  prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT); in drm_legacy_idlelock_release()
/linux/lib/
llist.c:33  } while (cmpxchg(&head->first, first, new_first) != first); in llist_add_batch()
llist.c:63  entry = cmpxchg(&head->first, old_entry, next); in llist_del_first()
errseq.c:95  cur = cmpxchg(eseq, old, new); in errseq_set()
errseq.c:201  cmpxchg(eseq, old, new); in errseq_check_and_advance()
/linux/net/rxrpc/
call_event.c:343  cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET); in rxrpc_process_call()
call_event.c:350  cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET); in rxrpc_process_call()
call_event.c:357  cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET); in rxrpc_process_call()
call_event.c:366  cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET); in rxrpc_process_call()
call_event.c:373  cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET); in rxrpc_process_call()
/linux/arch/ia64/include/uapi/asm/
cmpxchg.h:133  #define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))   [macro]
cmpxchg.h:136  #define cmpxchg_local cmpxchg
/linux/drivers/accessibility/speakup/
selection.c:71  if (cmpxchg(&speakup_sel_work.tty, NULL, tty)) { in speakup_set_selection()
selection.c:124  if (cmpxchg(&speakup_paste_work.tty, NULL, tty)) { in speakup_paste_selection()
/linux/arch/sh/kernel/cpu/sh2/
smp-j2.c:28  while (cmpxchg(pmsg, messages, 0) != messages); in j2_ipi_interrupt_handler()
smp-j2.c:118  while (cmpxchg(pmsg, old, old|(1U<<message)) != old); in j2_send_ipi()
/linux/Documentation/locking/
rt-mutex.rst:42  without waiters. The optimized fastpath operations require cmpxchg
rt-mutex.rst:66  with ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
rt-mutex.rst:72  To prevent a cmpxchg of the owner releasing the lock, we need to
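
The rt-mutex.rst lines describe the lock fastpath: when there are no waiters, ownership is taken and released with a single cmpxchg on the owner word. A toy sketch of that idea, assuming invented type and field names (a real rt-mutex also folds a "has waiters" bit into the same word, which is omitted here):

/*
 * Toy sketch of a cmpxchg lock fastpath in the spirit of the rt-mutex
 * text above; types and names are invented.
 */
#include <stddef.h>
#include <stdio.h>

#define cmpxchg(ptr, old, new) __sync_val_compare_and_swap((ptr), (old), (new))

struct toy_mutex {
        void *owner;    /* NULL when the lock is free */
};

/* Fastpath acquire: succeed only if nobody owns the lock. */
static int toy_trylock(struct toy_mutex *m, void *task)
{
        return cmpxchg(&m->owner, NULL, task) == NULL;
}

/* Fastpath release: succeed only if we are still the owner. */
static int toy_unlock(struct toy_mutex *m, void *task)
{
        return cmpxchg(&m->owner, task, NULL) == task;
}

int main(void)
{
        struct toy_mutex m = { NULL };
        int me;

        printf("lock:   %d\n", toy_trylock(&m, &me));  /* 1: acquired  */
        printf("lock:   %d\n", toy_trylock(&m, &me));  /* 0: contended */
        printf("unlock: %d\n", toy_unlock(&m, &me));   /* 1: released  */
        return 0;
}
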
/linux/drivers/dma-buf/
dma-fence-array.c:36  cmpxchg(&array->base.error, PENDING_ERROR, error); in dma_fence_array_set_pending_error()
dma-fence-array.c:42  cmpxchg(&array->base.error, PENDING_ERROR, 0); in dma_fence_array_clear_pending_error()
/linux/kernel/trace/
trace_recursion_record.c:78  old = cmpxchg(&recursed_functions[index].ip, 0, ip); in ftrace_record_recursion()
trace_recursion_record.c:112  cmpxchg(&recursed_functions[index].ip, ip, 0); in ftrace_record_recursion()
/linux/fs/nfs/
nfs3acl.c:24  if (cmpxchg(p, ACL_NOT_CACHED, sentinel) != ACL_NOT_CACHED) { in nfs3_prepare_get_acl()
nfs3acl.c:35  if (cmpxchg(p, sentinel, acl) != sentinel) in nfs3_complete_get_acl()
nfs3acl.c:44  cmpxchg(p, sentinel, ACL_NOT_CACHED); in nfs3_abort_get_acl()
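
nfs3acl.c shows a sentinel variant of the claim-with-cmpxchg idiom: one task swaps a sentinel into the cache slot so only it performs the expensive fetch, then publishes the result over the sentinel, with an abort path that restores the "not cached" marker. A rough userspace sketch, with invented names and sentinel representation:

/*
 * Sketch of the sentinel-based "single fetcher" pattern seen in
 * nfs3_prepare_get_acl()/nfs3_complete_get_acl()/nfs3_abort_get_acl();
 * names and the sentinel representation are invented.
 */
#include <stddef.h>
#include <stdio.h>

#define cmpxchg(ptr, old, new) __sync_val_compare_and_swap((ptr), (old), (new))

static int sentinel_obj;
#define NOT_CACHED ((void *)0)              /* slot is empty         */
#define FETCHING   ((void *)&sentinel_obj)  /* a fetch is in flight  */

static void *cache_slot = NOT_CACHED;

/* Returns 1 if we won the right to do the expensive fetch. */
static int prepare_get(void)
{
        return cmpxchg(&cache_slot, NOT_CACHED, FETCHING) == NOT_CACHED;
}

/* Publish the fetched object, unless the slot changed under us. */
static void complete_get(void *obj)
{
        cmpxchg(&cache_slot, FETCHING, obj);
}

/* Abort: restore the empty marker so a later caller retries the fetch. */
static void abort_get(void)
{
        cmpxchg(&cache_slot, FETCHING, NOT_CACHED);
}

int main(void)
{
        static int acl = 42;    /* stand-in for a fetched object */

        if (prepare_get())
                abort_get();            /* pretend the first fetch failed */
        if (prepare_get())
                complete_get(&acl);     /* second attempt publishes it    */
        printf("cached: %p\n", cache_slot);
        return 0;
}
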
/linux/Documentation/virt/kvm/
locking.rst:66  On fast page fault path, we will use cmpxchg to atomically set the spte W
locking.rst:69  changing these bits can be detected by cmpxchg.
locking.rst:76  is not changed during cmpxchg. This is a ABA problem, for example, below case
locking.rst:108  | if (cmpxchg(spte, old_spte, old_spte+W) |
locking.rst:119  kvm_vcpu_gfn_to_pfn_atomic, before the cmpxchg. After the pinning:
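
The locking.rst excerpt explains that the KVM fast page fault path sets the writable bit with cmpxchg, and that comparing only the spte value is ABA-prone (the entry could be zapped and rebuilt with the same bits), which is why the backing pfn is pinned before the cmpxchg. A deliberately simplified sketch of just the compare-and-set step, with an invented bit layout:

/*
 * Simplified sketch of the "cmpxchg(spte, old_spte, old_spte+W)" step
 * quoted above; the bit layout is invented, and the real fast page fault
 * path pins the backing pfn first because this value check alone is
 * ABA-prone.
 */
#include <stdint.h>
#include <stdio.h>

#define cmpxchg(ptr, old, new) __sync_val_compare_and_swap((ptr), (old), (new))

#define SPTE_W (1ULL << 1)      /* hypothetical writable bit */

/* Try to mark the spte writable; fails if anything changed it meanwhile. */
static int fast_set_writable(uint64_t *spte)
{
        uint64_t old = *spte;

        return cmpxchg(spte, old, old | SPTE_W) == old;
}

int main(void)
{
        uint64_t spte = 0x1000; /* hypothetical read-only, present spte */

        printf("fast path %s\n",
               fast_set_writable(&spte) ? "succeeded" : "must retry");
        printf("spte now: %#llx\n", (unsigned long long)spte);
        return 0;
}
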
/linux/arch/nios2/include/asm/
Kbuild:2  generic-y += cmpxchg.h
/linux/arch/h8300/include/asm/
Kbuild:3  generic-y += cmpxchg.h
/linux/arch/nds32/include/asm/
Kbuild:3  generic-y += cmpxchg.h
/linux/arch/microblaze/include/asm/
Kbuild:3  generic-y += cmpxchg.h
/linux/tools/arch/x86/include/asm/
atomic.h:70  return cmpxchg(&v->counter, old, new); in atomic_cmpxchg()
/linux/arch/s390/include/asm/
percpu.h:38  prev__ = cmpxchg(ptr__, old__, new__); \
percpu.h:141  ret__ = cmpxchg(ptr__, oval, nval); \
/linux/include/linux/
bitops.h:317  } while (cmpxchg(ptr, old__, new__) != old__); \
bitops.h:333  cmpxchg(ptr, old__, new__) != old__); \
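
The bitops.h fragments are the generic read-modify-write loop behind helpers such as set_mask_bits(): snapshot the word, compute the new value, and retry the cmpxchg until no other CPU has changed the word in between. A sketch of that loop written as a plain function rather than the kernel's macro form:

/*
 * Sketch of the read-modify-write loop from bitops.h, written as a plain
 * function instead of the kernel's macro form (cf. set_mask_bits()).
 */
#include <stdio.h>

#define cmpxchg(ptr, old, new) __sync_val_compare_and_swap((ptr), (old), (new))

/* Atomically clear 'mask' and set 'bits', returning the old value. */
static unsigned long set_mask_bits(unsigned long *ptr,
                                   unsigned long mask, unsigned long bits)
{
        unsigned long old, new;

        do {
                old = *ptr;                      /* snapshot                */
                new = (old & ~mask) | bits;      /* compute the new value   */
        } while (cmpxchg(ptr, old, new) != old); /* retry if it changed     */

        return old;
}

int main(void)
{
        unsigned long flags = 0xf0;

        set_mask_bits(&flags, 0x30, 0x01);
        printf("flags: %#lx\n", flags);          /* 0xc1 */
        return 0;
}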

Completed in 38 milliseconds
