Import atomic-polyfill 1.0.1 upstream upstream/1.0.1
author DongHun Kwak <dh0128.kwak@samsung.com>
Mon, 27 Feb 2023 02:59:05 +0000 (11:59 +0900)
committer DongHun Kwak <dh0128.kwak@samsung.com>
Mon, 27 Feb 2023 02:59:05 +0000 (11:59 +0900)
16 files changed:
.cargo_vcs_info.json [new file with mode: 0644]
.github/workflows/rust.yml [new file with mode: 0644]
.gitignore [new file with mode: 0644]
.vscode/settings.json [new file with mode: 0644]
CHANGELOG.md [new file with mode: 0644]
Cargo.toml [new file with mode: 0644]
Cargo.toml.orig [new file with mode: 0644]
LICENSE-APACHE [new file with mode: 0644]
LICENSE-MIT [new file with mode: 0644]
README.md [new file with mode: 0644]
avr-specs/avr-atmega328p.json [new file with mode: 0644]
build.rs [new file with mode: 0644]
ci.sh [new file with mode: 0755]
rust-toolchain.toml [new file with mode: 0644]
src/lib.rs [new file with mode: 0644]
src/polyfill.rs [new file with mode: 0644]

diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
new file mode 100644 (file)
index 0000000..df07db3
--- /dev/null
@@ -0,0 +1,6 @@
+{
+  "git": {
+    "sha1": "591e44f01a1011c7a37108830402f38aac0a8e9c"
+  },
+  "path_in_vcs": ""
+}
\ No newline at end of file
diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
new file mode 100644 (file)
index 0000000..822777f
--- /dev/null
@@ -0,0 +1,19 @@
+name: Rust
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+env:
+  CARGO_TERM_COLOR: always
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: Build
+        run: ./ci.sh
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644 (file)
index 0000000..1de5659
--- /dev/null
@@ -0,0 +1 @@
+target
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644 (file)
index 0000000..93512a0
--- /dev/null
@@ -0,0 +1,9 @@
+{
+  "editor.formatOnSave": true,
+  "rust-analyzer.checkOnSave.allTargets": false,
+  "rust-analyzer.cargo.target": "thumbv6m-none-eabi",
+  "rust-analyzer.procMacro.enable": true,
+  "rust-analyzer.imports.granularity.group": "module",
+  "rust-analyzer.imports.granularity.enforce": true,
+  "rust-analyzer.cargo.buildScripts.enable": true,
+}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644 (file)
index 0000000..0f6481a
--- /dev/null
@@ -0,0 +1,76 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+No unreleased changes yet
+
+## 1.0.1 - 2022-08-12
+
+- Fix `AtomicPtr` accidentally not being available when not polyfilled.
+
+## 1.0.0 - 2022-08-12
+
+- Update to `critical-section` v1.0
+
+## 0.1.10 - 2022-08-12
+
+- Fix `AtomicPtr` accidentally not being available when not polyfilled.
+
+## 0.1.9 - 2022-08-12
+
+- Switch to only two polyfill levels.
+
+The "CAS" level which uses atomic load/store and critical-section based CAS was not
+sound, because `critical-section` guarantees only "no other critical section can run concurrently",
+not "no other code can run concurrently". Therefore a CS-based CAS can still race a native atomic store.
+
+## 0.1.8 - 2022-04-12
+
+- Added AVR support.
+
+## 0.1.7 - 2022-03-22
+
+- Added support for xtensa (ESP chips), with and without ESP-IDF.
+- Reexport `core::sync::atomic::*` as-is for unknown targets, to avoid build failures if they don't have full atomic support.
+
+## 0.1.6 - 2022-02-08
+
+- Add polyfill support for `thumbv4t` targets. (Nintendo Game Boy Advance)
+- Added `get_mut()` to `AtomicBool`.
+- Added `into_inner()` to all atomics
+- Added `fmt::Debug` impl to `AtomicBool`, `AtomicPtr`.
+- Added `fmt::Pointer` impl to `AtomicPtr`.
+- Added `From<*mut T>` impl to `AtomicPtr`.
+- Added `RefUnwindSafe` impl to all atomics.
+
+## 0.1.5 - 2021-11-02
+
+- Updated critical-section to v0.2.5. Fixes `std` implementation to allow reentrant (nested) critical sections. This would previously deadlock.
+
+## 0.1.4 - 2021-09-20
+
+- Added support for RISC-V.
+- Added support for "full polyfill" level, where load/stores are polyfilled, not just CAS operations.
+- Added support for `AtomicU64`, `AtomicI64`.
+
+## 0.1.3 - 2021-08-07
+
+- Only import `cortex-m` when needed (#4)
+- Fix panic on `fetch_update` due to incorrect ordering (#5)
+
+## 0.1.2 - 2021-03-29
+
+- Added missing reexport of `fence` and `compiler_fence` in polyfilled mode.
+
+## 0.1.1 - 2021-03-04
+
+- Added polyfills for AtomicU8, AtomicU16, AtomicUsize, AtomicI8, AtomicI16, AtomicI32, AtomicIsize
+
+## 0.1.0 - 2021-03-04
+
+- First release
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644 (file)
index 0000000..b1eed48
--- /dev/null
@@ -0,0 +1,29 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+name = "atomic-polyfill"
+version = "1.0.1"
+authors = ["Dario Nieuwenhuis <dirbaio@dirbaio.net>"]
+description = "Atomic polyfills, for targets where they're not available."
+readme = "README.md"
+categories = [
+    "embedded",
+    "no-std",
+    "concurrency",
+]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/embassy-rs/atomic-polyfill"
+resolver = "2"
+
+[dependencies.critical-section]
+version = "1.0.0"
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
new file mode 100644 (file)
index 0000000..637308f
--- /dev/null
@@ -0,0 +1,18 @@
+[package]
+authors = ["Dario Nieuwenhuis <dirbaio@dirbaio.net>"]
+description = "Atomic polyfills, for targets where they're not available."
+edition = "2021"
+license = "MIT OR Apache-2.0"
+name = "atomic-polyfill"
+readme = "README.md"
+repository = "https://github.com/embassy-rs/atomic-polyfill"
+version = "1.0.1"
+
+categories = [
+  "embedded",
+  "no-std",
+  "concurrency",
+]
+
+[dependencies]
+critical-section = "1.0.0"
diff --git a/LICENSE-APACHE b/LICENSE-APACHE
new file mode 100644 (file)
index 0000000..16fe87b
--- /dev/null
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/LICENSE-MIT b/LICENSE-MIT
new file mode 100644 (file)
index 0000000..dacc57b
--- /dev/null
@@ -0,0 +1,25 @@
+Copyright (c) 2020 Dario Nieuwenhuis
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644 (file)
index 0000000..a401091
--- /dev/null
+++ b/README.md
@@ -0,0 +1,47 @@
+# atomic-polyfill
+
+[![Documentation](https://docs.rs/atomic-polyfill/badge.svg)](https://docs.rs/atomic-polyfill)
+
+This crate polyfills atomics on targets where they're not available, using critical sections. It is intended to be a drop-in replacement for `core::sync::atomic`.
+
+There are two "levels" of polyfilling:
+- Native: No polyfilling is performed, the native `core::sync::atomic::AtomicXX` is reexported.
+- Full: Both load/store and compare-and-set operations are polyfilled.
+
+Polyfilling requires a [`critical-section`](https://github.com/rust-embedded/critical-section) implementation for the current target. Check the `critical-section` README for details.
+
+## Target support
+
+The right polyfill level is automatically picked based on the target and the atomic width:
+
+| Target             | Level            | Level for u64/i64 |
+|--------------------|------------------|-------------------|
+| thumbv4t           | Full             | Full              |
+| thumbv6m           | Full             | Full              |
+| thumbv7*, thumbv8* | Native           | Full              |
+| riscv32imc         | Full             | Full              |
+| riscv32imac        | Native           | Full              |
+| xtensa-*-espidf    | Native           | Native            |
+| xtensa-esp32-*     | Native           | Full              |
+| xtensa-esp32s2-*   | Full             | Full              |
+| xtensa-esp32s3-*   | Native           | Full              |
+| xtensa-esp8266-*   | Full             | Full              |
+| AVR                | Full             | Full              |
+
+For targets not listed above, `atomic-polyfill` assumes nothing and reexports `core::sync::atomic::*`. No polyfilling is done. PRs for polyfilling more targets are welcome :)
+
+## License
+
+This work is licensed under either of
+
+- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or
+  http://www.apache.org/licenses/LICENSE-2.0)
+- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+## Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
diff --git a/avr-specs/avr-atmega328p.json b/avr-specs/avr-atmega328p.json
new file mode 100644 (file)
index 0000000..e236b08
--- /dev/null
@@ -0,0 +1,27 @@
+{
+  "arch": "avr",
+  "atomic-cas": false,
+  "cpu": "atmega328p",
+  "data-layout": "e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-a:8",
+  "eh-frame-header": false,
+  "exe-suffix": ".elf",
+  "executables": true,
+  "late-link-args": {
+    "gcc": [
+      "-lgcc"
+    ]
+  },
+  "linker": "avr-gcc",
+  "linker-is-gnu": true,
+  "llvm-target": "avr-unknown-unknown",
+  "max-atomic-width": 8,
+  "no-default-libraries": false,
+  "pre-link-args": {
+    "gcc": [
+      "-mmcu=atmega328p",
+      "-Wl,--as-needed"
+    ]
+  },
+  "target-c-int-width": "16",
+  "target-pointer-width": "16"
+}
diff --git a/build.rs b/build.rs
new file mode 100644 (file)
index 0000000..0b5003e
--- /dev/null
+++ b/build.rs
@@ -0,0 +1,84 @@
+use std::env;
+use std::fmt;
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+enum PolyfillLevel {
+    // Native, ie no polyfill. Just reexport from core::sync::atomic
+    Native,
+    // Full polyfill: polyfill both load/store and CAS with critical sections
+    Polyfill,
+}
+
+impl fmt::Display for PolyfillLevel {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let s = match *self {
+            Self::Native => "native",
+            Self::Polyfill => "polyfill",
+        };
+        write!(f, "{}", s)
+    }
+}
+
+fn main() {
+    let target = env::var("TARGET").unwrap();
+
+    use PolyfillLevel::*;
+
+    let patterns = [
+        ("riscv32imac-*", (Native, Polyfill)),
+        ("riscv32gc-*", (Native, Polyfill)),
+        ("riscv32imc-*-espidf", (Native, Native)),
+        ("riscv32*", (Polyfill, Polyfill)),
+        ("avr-*", (Polyfill, Polyfill)),
+        ("thumbv4t-*", (Polyfill, Polyfill)),
+        ("thumbv6m-*", (Polyfill, Polyfill)),
+        ("thumbv7m-*", (Native, Polyfill)),
+        ("thumbv7em-*", (Native, Polyfill)),
+        ("thumbv8m.base-*", (Native, Polyfill)),
+        ("thumbv8m.main-*", (Native, Polyfill)),
+        ("xtensa-*-espidf", (Native, Native)),
+        ("xtensa-esp32-*", (Native, Polyfill)),
+        ("xtensa-esp32s2-*", (Polyfill, Polyfill)),
+        ("xtensa-esp32s3-*", (Native, Polyfill)),
+        ("xtensa-esp8266-*", (Polyfill, Polyfill)),
+    ];
+
+    if let Some((_, (level, level64))) = patterns
+        .iter()
+        .find(|(pattern, _)| matches(pattern, &target))
+    {
+        if *level == PolyfillLevel::Polyfill {
+            println!("cargo:rustc-cfg=polyfill_u8");
+            println!("cargo:rustc-cfg=polyfill_u16");
+            println!("cargo:rustc-cfg=polyfill_u32");
+            println!("cargo:rustc-cfg=polyfill_usize");
+            println!("cargo:rustc-cfg=polyfill_i8");
+            println!("cargo:rustc-cfg=polyfill_i16");
+            println!("cargo:rustc-cfg=polyfill_i32");
+            println!("cargo:rustc-cfg=polyfill_isize");
+            println!("cargo:rustc-cfg=polyfill_ptr");
+            println!("cargo:rustc-cfg=polyfill_bool");
+        }
+
+        if *level64 == PolyfillLevel::Polyfill {
+            println!("cargo:rustc-cfg=polyfill_u64");
+            println!("cargo:rustc-cfg=polyfill_i64");
+        }
+    } else {
+        // If we don't know about the target, just reexport the entire `core::atomic::*`
+        // This doesn't polyfill anything, but it's guaranteed to never fail build.
+        println!("cargo:rustc-cfg=reexport_core");
+    }
+}
+
+// tiny glob replacement to avoid pulling in more crates.
+// Supports 0 or 1 wildcards `*`
+fn matches(pattern: &str, val: &str) -> bool {
+    if let Some(p) = pattern.find('*') {
+        let prefix = &pattern[..p];
+        let suffix = &pattern[p + 1..];
+        val.len() >= prefix.len() + suffix.len() && val.starts_with(prefix) && val.ends_with(suffix)
+    } else {
+        val == pattern
+    }
+}
diff --git a/ci.sh b/ci.sh
new file mode 100755 (executable)
index 0000000..d49da2e
--- /dev/null
+++ b/ci.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+cargo build
+cargo build --target thumbv6m-none-eabi
+cargo build --target thumbv7em-none-eabi
+cargo build --target riscv32imc-unknown-none-elf
+cargo build --target riscv32imac-unknown-none-elf
+cargo build --target i686-unknown-linux-gnu
+cargo build --target x86_64-unknown-linux-gnu
+cargo build --target riscv64gc-unknown-linux-gnu
+
+# without --release, it fails with "error: ran out of registers during register allocation"
+cargo build --release -Zbuild-std=core --target avr-specs/avr-atmega328p.json
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
new file mode 100644 (file)
index 0000000..145ef55
--- /dev/null
@@ -0,0 +1,14 @@
+# Before upgrading check that everything is available on all tier1 targets here:
+# https://rust-lang.github.io/rustup-components-history
+[toolchain]
+channel = "nightly-2022-07-28"
+components = [ "rust-src", "rustfmt" ]
+targets = [
+    "thumbv6m-none-eabi",
+    "thumbv7em-none-eabi",
+    "riscv32imc-unknown-none-elf",
+    "riscv32imac-unknown-none-elf",
+    "i686-unknown-linux-gnu",
+    "x86_64-unknown-linux-gnu",
+    "riscv64gc-unknown-linux-gnu",
+]
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644 (file)
index 0000000..c4c109b
--- /dev/null
@@ -0,0 +1,9 @@
+#![no_std]
+
+#[cfg(reexport_core)]
+pub use core::sync::atomic::*;
+
+#[cfg(not(reexport_core))]
+mod polyfill;
+#[cfg(not(reexport_core))]
+pub use polyfill::*;
diff --git a/src/polyfill.rs b/src/polyfill.rs
new file mode 100644 (file)
index 0000000..56cbe8a
--- /dev/null
@@ -0,0 +1,462 @@
// Fences and Ordering never need polyfilling, so they always come from core.
pub use core::sync::atomic::{compiler_fence, fence, Ordering};

// Generates either a re-export of the native core atomic type or, when the
// `$cfg` flag is set, a drop-in replacement backed by an `UnsafeCell` whose
// every access runs inside `critical_section::with`. The `polyfill_*` cfg
// flags are presumably emitted by build.rs for targets lacking the native
// type — confirm in build.rs.
macro_rules! atomic_int {
    ($int_type:ident,$atomic_type:ident, $cfg:ident) => {
        // Native support available: just re-export the real type.
        #[cfg(not($cfg))]
        pub use core::sync::atomic::$atomic_type;

        // No native support: emulate with a critical-section-guarded cell.
        #[cfg($cfg)]
        #[repr(transparent)]
        pub struct $atomic_type {
            inner: core::cell::UnsafeCell<$int_type>,
        }

        // SAFETY: every shared access to `inner` below goes through
        // `critical_section::with`, which (per the critical-section crate's
        // contract) serializes access, so Send/Sync are sound.
        #[cfg($cfg)]
        unsafe impl Send for $atomic_type {}
        #[cfg($cfg)]
        unsafe impl Sync for $atomic_type {}
        // Mirrors std's atomics, which are RefUnwindSafe too.
        #[cfg($cfg)]
        impl core::panic::RefUnwindSafe for $atomic_type {}

        #[cfg($cfg)]
        impl Default for $atomic_type {
            /// Creates an atomic initialized to the integer default (zero).
            #[inline]
            fn default() -> Self {
                Self::new(Default::default())
            }
        }

        #[cfg($cfg)]
        impl From<$int_type> for $atomic_type {
            /// Wraps a plain integer in the atomic type.
            #[inline]
            fn from(v: $int_type) -> Self {
                Self::new(v)
            }
        }

        #[cfg($cfg)]
        impl core::fmt::Debug for $atomic_type {
            // Debug-formats the current value (loaded with SeqCst, matching std).
            fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                core::fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
            }
        }

        #[cfg($cfg)]
        impl $atomic_type {
            /// Creates a new atomic integer holding `v`.
            pub const fn new(v: $int_type) -> Self {
                Self {
                    inner: core::cell::UnsafeCell::new(v),
                }
            }

            /// Consumes the atomic and returns the contained value.
            pub fn into_inner(self) -> $int_type {
                self.inner.into_inner()
            }

            /// Returns a mutable reference; `&mut self` already guarantees
            /// exclusive access, so no critical section is needed.
            pub fn get_mut(&mut self) -> &mut $int_type {
                self.inner.get_mut()
            }

            /// Loads the value. The ordering argument is ignored: the
            /// critical section serializes all accesses.
            pub fn load(&self, _order: Ordering) -> $int_type {
                // SAFETY: nothing else touches `inner` inside the critical section.
                return critical_section::with(|_| unsafe { *self.inner.get() });
            }

            /// Stores `val`, ignoring the ordering (see `load`).
            pub fn store(&self, val: $int_type, _order: Ordering) {
                // SAFETY: nothing else touches `inner` inside the critical section.
                return critical_section::with(|_| unsafe { *self.inner.get() = val });
            }

            /// Stores `val` and returns the previous value.
            pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
                self.op(order, |_| val)
            }

            /// Strong compare-exchange. This polyfill never fails
            /// spuriously, so delegating to the weak variant is exact.
            pub fn compare_exchange(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                self.compare_exchange_weak(current, new, success, failure)
            }

            /// Stores `new` if the value equals `current`. Returns the
            /// previous value: `Ok` on success, `Err` on mismatch.
            pub fn compare_exchange_weak(
                &self,
                current: $int_type,
                new: $int_type,
                _success: Ordering,
                _failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                critical_section::with(|_| {
                    // SAFETY: exclusive access within the critical section.
                    let val = unsafe { &mut *self.inner.get() };
                    let old = *val;
                    if old == current {
                        *val = new;
                        Ok(old)
                    } else {
                        Err(old)
                    }
                })
            }

            /// Wrapping add; returns the previous value.
            pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
                self.op(order, |old| old.wrapping_add(val))
            }

            /// Wrapping subtract; returns the previous value.
            pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
                self.op(order, |old| old.wrapping_sub(val))
            }

            /// Bitwise AND; returns the previous value.
            pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
                self.op(order, |old| old & val)
            }

            /// Bitwise NAND (`!(old & val)`); returns the previous value.
            pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
                self.op(order, |old| !(old & val))
            }

            /// Bitwise OR; returns the previous value.
            pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
                self.op(order, |old| old | val)
            }

            /// Bitwise XOR; returns the previous value.
            pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
                self.op(order, |old| old ^ val)
            }

            /// Applies `f` to the current value; stores a `Some(new)` result
            /// and returns `Ok(old)`, or returns `Err(old)` if `f` yields
            /// `None`. Orderings are ignored (see `load`).
            pub fn fetch_update<F>(
                &self,
                _set_order: Ordering,
                _fetch_order: Ordering,
                mut f: F,
            ) -> Result<$int_type, $int_type>
            where
                F: FnMut($int_type) -> Option<$int_type>,
            {
                critical_section::with(|_| {
                    // SAFETY: exclusive access within the critical section.
                    let val = unsafe { &mut *self.inner.get() };
                    let old = *val;
                    if let Some(new) = f(old) {
                        *val = new;
                        Ok(old)
                    } else {
                        Err(old)
                    }
                })
            }

            /// Stores the maximum of the current value and `val`; returns
            /// the previous value.
            pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
                self.op(order, |old| old.max(val))
            }

            /// Stores the minimum of the current value and `val`; returns
            /// the previous value.
            pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
                self.op(order, |old| old.min(val))
            }

            // Shared read-modify-write primitive: applies `f` inside a
            // critical section, stores the result, returns the previous value.
            fn op(&self, _order: Ordering, f: impl FnOnce($int_type) -> $int_type) -> $int_type {
                critical_section::with(|_| {
                    // SAFETY: exclusive access within the critical section.
                    let val = unsafe { &mut *self.inner.get() };
                    let old = *val;
                    *val = f(old);
                    old
                })
            }
        }
    };
}
+
// Instantiate the re-export-or-polyfill pair for every integer atomic.
// Each `polyfill_*` cfg flag is presumably set per target by build.rs —
// confirm against build.rs.
atomic_int!(u8, AtomicU8, polyfill_u8);
atomic_int!(u16, AtomicU16, polyfill_u16);
atomic_int!(u32, AtomicU32, polyfill_u32);
atomic_int!(u64, AtomicU64, polyfill_u64);
atomic_int!(usize, AtomicUsize, polyfill_usize);
atomic_int!(i8, AtomicI8, polyfill_i8);
atomic_int!(i16, AtomicI16, polyfill_i16);
atomic_int!(i32, AtomicI32, polyfill_i32);
atomic_int!(i64, AtomicI64, polyfill_i64);
atomic_int!(isize, AtomicIsize, polyfill_isize);
+
// AtomicBool: re-export the native type unless `polyfill_bool` is set
// (presumably by build.rs for targets without native bool atomics — confirm).
#[cfg(not(polyfill_bool))]
pub use core::sync::atomic::AtomicBool;

// Polyfilled variant: an UnsafeCell accessed only inside critical sections.
#[cfg(polyfill_bool)]
#[repr(transparent)]
pub struct AtomicBool {
    inner: core::cell::UnsafeCell<bool>,
}

#[cfg(polyfill_bool)]
impl Default for AtomicBool {
    /// Creates an `AtomicBool` initialized to `false`.
    #[inline]
    fn default() -> Self {
        Self::new(false)
    }
}

#[cfg(polyfill_bool)]
impl From<bool> for AtomicBool {
    /// Wraps a plain `bool` in the atomic type.
    #[inline]
    fn from(v: bool) -> Self {
        Self::new(v)
    }
}

#[cfg(polyfill_bool)]
impl core::fmt::Debug for AtomicBool {
    // Debug-formats the current value (loaded with SeqCst, matching std).
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        core::fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
    }
}

// SAFETY: all shared access to `inner` goes through `critical_section::with`
// (see the methods below), which serializes access.
#[cfg(polyfill_bool)]
unsafe impl Send for AtomicBool {}
#[cfg(polyfill_bool)]
unsafe impl Sync for AtomicBool {}
// Mirrors std's AtomicBool, which is RefUnwindSafe too.
#[cfg(polyfill_bool)]
impl core::panic::RefUnwindSafe for AtomicBool {}
+
#[cfg(polyfill_bool)]
impl AtomicBool {
    /// Creates a new `AtomicBool` holding `v`.
    pub const fn new(v: bool) -> AtomicBool {
        Self {
            inner: core::cell::UnsafeCell::new(v),
        }
    }

    /// Consumes the atomic and returns the contained value.
    pub fn into_inner(self) -> bool {
        self.inner.into_inner()
    }

    /// Returns a mutable reference; `&mut self` already guarantees exclusive
    /// access, so no critical section is needed.
    pub fn get_mut(&mut self) -> &mut bool {
        self.inner.get_mut()
    }

    /// Loads the value. The ordering argument is ignored: the critical
    /// section serializes all accesses.
    pub fn load(&self, _order: Ordering) -> bool {
        // SAFETY: nothing else touches `inner` inside the critical section.
        critical_section::with(|_| unsafe { *self.inner.get() })
    }

    /// Stores `val`, ignoring the ordering (see `load`).
    pub fn store(&self, val: bool, _order: Ordering) {
        // SAFETY: nothing else touches `inner` inside the critical section.
        critical_section::with(|_| unsafe { *self.inner.get() = val });
    }

    /// Stores `val` and returns the previous value.
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        self.op(order, |_| val)
    }

    /// Strong compare-exchange. This polyfill never fails spuriously, so
    /// delegating to the weak variant is exact.
    pub fn compare_exchange(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        self.compare_exchange_weak(current, new, success, failure)
    }

    /// Stores `new` if the value equals `current`. Returns the previous
    /// value: `Ok` on success, `Err` on mismatch.
    pub fn compare_exchange_weak(
        &self,
        current: bool,
        new: bool,
        _success: Ordering,
        _failure: Ordering,
    ) -> Result<bool, bool> {
        critical_section::with(|_| {
            // SAFETY: exclusive access within the critical section.
            let slot = unsafe { &mut *self.inner.get() };
            let prev = *slot;
            if prev != current {
                return Err(prev);
            }
            *slot = new;
            Ok(prev)
        })
    }

    /// Logical AND; returns the previous value.
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        self.op(order, |prev| prev & val)
    }

    /// Logical NAND (`!(prev & val)`); returns the previous value.
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        self.op(order, |prev| !(prev & val))
    }

    /// Logical OR; returns the previous value.
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        self.op(order, |prev| prev | val)
    }

    /// Logical XOR; returns the previous value.
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        self.op(order, |prev| prev ^ val)
    }

    /// Applies `f` to the current value; stores a `Some(new)` result and
    /// returns `Ok(prev)`, or returns `Err(prev)` if `f` yields `None`.
    /// Orderings are ignored (see `load`).
    pub fn fetch_update<F>(
        &self,
        _set_order: Ordering,
        _fetch_order: Ordering,
        mut f: F,
    ) -> Result<bool, bool>
    where
        F: FnMut(bool) -> Option<bool>,
    {
        critical_section::with(|_| {
            // SAFETY: exclusive access within the critical section.
            let slot = unsafe { &mut *self.inner.get() };
            let prev = *slot;
            match f(prev) {
                Some(next) => {
                    *slot = next;
                    Ok(prev)
                }
                None => Err(prev),
            }
        })
    }

    /// Maximum of current value and `val` (`true` wins); returns the
    /// previous value.
    pub fn fetch_max(&self, val: bool, order: Ordering) -> bool {
        self.op(order, |prev| prev.max(val))
    }

    /// Minimum of current value and `val` (`false` wins); returns the
    /// previous value.
    pub fn fetch_min(&self, val: bool, order: Ordering) -> bool {
        self.op(order, |prev| prev.min(val))
    }

    // Shared read-modify-write primitive: applies `f` inside a critical
    // section, stores the result, and returns the previous value.
    fn op(&self, _order: Ordering, f: impl FnOnce(bool) -> bool) -> bool {
        critical_section::with(|_| {
            // SAFETY: exclusive access within the critical section.
            let slot = unsafe { &mut *self.inner.get() };
            let prev = *slot;
            *slot = f(prev);
            prev
        })
    }
}
+
// AtomicPtr: re-export the native type unless `polyfill_ptr` is set
// (presumably by build.rs for targets without native pointer atomics — confirm).
#[cfg(not(polyfill_ptr))]
pub use core::sync::atomic::AtomicPtr;

// Polyfilled variant: an UnsafeCell accessed only inside critical sections.
#[cfg(polyfill_ptr)]
#[repr(transparent)]
pub struct AtomicPtr<T> {
    inner: core::cell::UnsafeCell<*mut T>,
}

#[cfg(polyfill_ptr)]
impl<T> Default for AtomicPtr<T> {
    /// Creates a null `AtomicPtr<T>`.
    #[inline]
    fn default() -> Self {
        Self::new(core::ptr::null_mut())
    }
}

#[cfg(polyfill_ptr)]
impl<T> From<*mut T> for AtomicPtr<T> {
    /// Wraps a raw pointer in the atomic type.
    #[inline]
    fn from(v: *mut T) -> Self {
        Self::new(v)
    }
}

#[cfg(polyfill_ptr)]
impl<T> core::fmt::Debug for AtomicPtr<T> {
    // Debug-formats the current pointer (loaded with SeqCst, matching std).
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        core::fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
    }
}

#[cfg(polyfill_ptr)]
impl<T> core::fmt::Pointer for AtomicPtr<T> {
    // Pointer-formats the current value, like std's AtomicPtr.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        core::fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)
    }
}

// SAFETY: all shared access to `inner` goes through `critical_section::with`
// (see the methods below), which serializes access.
#[cfg(polyfill_ptr)]
unsafe impl<T> Sync for AtomicPtr<T> {}
#[cfg(polyfill_ptr)]
unsafe impl<T> Send for AtomicPtr<T> {}
// Mirrors std's AtomicPtr, which is RefUnwindSafe too.
#[cfg(polyfill_ptr)]
impl<T> core::panic::RefUnwindSafe for AtomicPtr<T> {}
+
#[cfg(polyfill_ptr)]
impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr` holding `v`.
    pub const fn new(v: *mut T) -> AtomicPtr<T> {
        Self {
            inner: core::cell::UnsafeCell::new(v),
        }
    }

    /// Consumes the atomic and returns the contained pointer.
    pub fn into_inner(self) -> *mut T {
        self.inner.into_inner()
    }

    /// Returns a mutable reference; `&mut self` already guarantees exclusive
    /// access, so no critical section is needed.
    pub fn get_mut(&mut self) -> &mut *mut T {
        self.inner.get_mut()
    }

    /// Loads the pointer. The ordering argument is ignored: the critical
    /// section serializes all accesses.
    pub fn load(&self, _order: Ordering) -> *mut T {
        // SAFETY: nothing else touches `inner` inside the critical section.
        critical_section::with(|_| unsafe { *self.inner.get() })
    }

    /// Stores `val`, ignoring the ordering (see `load`).
    pub fn store(&self, val: *mut T, _order: Ordering) {
        // SAFETY: nothing else touches `inner` inside the critical section.
        critical_section::with(|_| unsafe { *self.inner.get() = val });
    }

    /// Stores `val` and returns the previous pointer.
    pub fn swap(&self, val: *mut T, order: Ordering) -> *mut T {
        self.op(order, |_| val)
    }

    /// Strong compare-exchange. This polyfill never fails spuriously, so
    /// delegating to the weak variant is exact.
    pub fn compare_exchange(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        self.compare_exchange_weak(current, new, success, failure)
    }

    /// Stores `new` if the pointer equals `current`. Returns the previous
    /// pointer: `Ok` on success, `Err` on mismatch.
    pub fn compare_exchange_weak(
        &self,
        current: *mut T,
        new: *mut T,
        _success: Ordering,
        _failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        critical_section::with(|_| {
            // SAFETY: exclusive access within the critical section.
            let slot = unsafe { &mut *self.inner.get() };
            let prev = *slot;
            if prev != current {
                return Err(prev);
            }
            *slot = new;
            Ok(prev)
        })
    }

    /// Applies `f` to the current pointer; stores a `Some(new)` result and
    /// returns `Ok(prev)`, or returns `Err(prev)` if `f` yields `None`.
    /// Orderings are ignored (see `load`).
    pub fn fetch_update<F>(
        &self,
        _set_order: Ordering,
        _fetch_order: Ordering,
        mut f: F,
    ) -> Result<*mut T, *mut T>
    where
        F: FnMut(*mut T) -> Option<*mut T>,
    {
        critical_section::with(|_| {
            // SAFETY: exclusive access within the critical section.
            let slot = unsafe { &mut *self.inner.get() };
            let prev = *slot;
            match f(prev) {
                Some(next) => {
                    *slot = next;
                    Ok(prev)
                }
                None => Err(prev),
            }
        })
    }

    // Shared read-modify-write primitive: applies `f` inside a critical
    // section, stores the result, and returns the previous pointer.
    fn op(&self, _order: Ordering, f: impl FnOnce(*mut T) -> *mut T) -> *mut T {
        critical_section::with(|_| {
            // SAFETY: exclusive access within the critical section.
            let slot = unsafe { &mut *self.inner.get() };
            let prev = *slot;
            *slot = f(prev);
            prev
        })
    }
}