// This is free and unencumbered software released into the public domain.
//
// Anyone is free to copy, modify, publish, use, compile, sell, or
// distribute this software, either in source code form or as a compiled
// binary, for any purpose, commercial or non-commercial, and by any
// means.
//
// In jurisdictions that recognize copyright laws, the author or authors
// of this software dedicate any and all copyright interest in the
// software to the public domain. We make this dedication for the benefit
// of the public at large and to the detriment of our heirs and
// successors. We intend this dedication to be an overt act of
// relinquishment in perpetuity of all present and future rights to this
// software under copyright law.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
//
// For more information, please refer to <http://unlicense.org/>

#include <assert.h>
#include <stdbool.h>

struct node {
	struct node * volatile next;
};

// Two sentinels; their values do not matter but they must be distinct
// and never collide with real addresses.
static struct node * const STACK_NO_VAL  = 0;
static struct node * const STACK_PENDING = (struct node *)1;

// Push a node onto the stack.
static inline void atomic_stack_push(struct node * volatile * head, struct node * n) {
	/* paranoid */ assert( n->next == STACK_NO_VAL );

	// Mark the node as pending so that if it gets popped before the
	// assignment to next below, the reader knows this isn't necessarily
	// the end of the list.
	n->next = STACK_PENDING;

	// Actually add the node to the list.
	struct node * e = __atomic_exchange_n(head, n, __ATOMIC_SEQ_CST);

	// Update the next field.
	__atomic_store_n(&n->next, e, __ATOMIC_RELAXED);
}

// Pop all nodes from the stack.
// Once popped, the nodes should be iterated over using atomic_stack_next.
static inline struct node * atomic_stack_pop_all(struct node * volatile * head) {
	// Steal the entire list for ourselves atomically.
	// Nodes can still have pending next fields, but everyone agrees
	// the nodes are now ours.
	return __atomic_exchange_n(head, STACK_NO_VAL, __ATOMIC_SEQ_CST);
}

// From a given node, advance to the next node, waiting for pending nodes
// to be resolved.
// If clear is set, the node advanced from is unlinked before the next node
// is returned.
static inline struct node * atomic_stack_next(struct node * n, bool clear) {
	// Busy-wait while the next field is still pending.
	while(STACK_PENDING == __atomic_load_n(&n->next, __ATOMIC_RELAXED))
		asm volatile("pause" : : :);

	// The field is no longer pending; once it has been resolved past the
	// sentinel, no concurrent writer will touch it again, so a plain read
	// is safe.
	struct node * r = n->next;

	// For convenience, unlink the node if desired and return.
	if(clear) n->next = STACK_NO_VAL;
	return r;
}
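
// Usage sketch (illustrative only, not part of the original file): several
// producer threads push intrusive nodes, then a single consumer drains the
// stack with atomic_stack_pop_all and walks the result with
// atomic_stack_next. The `item` struct, the thread count, and the pthread
// setup are assumptions made for this example; it also inherits the x86
// assumption from the `pause` instruction above.
#ifdef ATOMIC_STACK_EXAMPLE
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	struct node link;   // intrusive hook; first member, so casts are valid
	int value;
};

static struct node * volatile stack_head = 0;

static void * producer(void * arg) {
	struct item * it = malloc(sizeof(struct item));
	it->link.next = STACK_NO_VAL;   // required by the assert in push
	it->value = (int)(intptr_t)arg;
	atomic_stack_push(&stack_head, &it->link);
	return 0;
}

int main(void) {
	enum { N = 8 };
	pthread_t threads[N];
	for(int i = 0; i < N; i++)
		pthread_create(&threads[i], 0, producer, (void *)(intptr_t)i);
	for(int i = 0; i < N; i++)
		pthread_join(threads[i], 0);

	// Steal the whole list atomically, then walk it; clear = true unlinks
	// each node as we advance, and atomic_stack_next waits out any push
	// whose next field is still pending.
	int seen = 0;
	struct node * n = atomic_stack_pop_all(&stack_head);
	while(n != STACK_NO_VAL) {
		struct node * next = atomic_stack_next(n, true);
		struct item * it = (struct item *)n;
		printf("popped %d\n", it->value);
		free(it);
		n = next;
		seen++;
	}
	printf("drained %d nodes\n", seen);
	return 0;
}
#endif // ATOMIC_STACK_EXAMPLE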