Skip to content

Commit

Permalink
Port AtomicU64 to compile for MIPS
Browse files Browse the repository at this point in the history
Rust's `std::sync` namespace does not provide `AtomicU64` and
`AtomicI64` for MIPS and PowerPC architectures.

This makes the project incapable of being embedded into routers, as
mentioned on spacejam#1070

This commit changes the references of `AtomicU64` and `AtomicI64` to use
a portable version, which falls back to a `crossbeam` lock when Atomics are
not available.

To compile against MIPS, the `block` initialization had to change, as
the Mutex is not `Copy`, and Rust's initialization of arrays only
supports `Copy` types. The `array-init` crate supports non-Copy types,
and that is why it was introduced.

It also introduces a `mutex` cargo feature, which allows compiling for
x64 or any other architecture using the lock-based implementation,
useful for testing without cross-compiling.

Tests pass with:

```
cargo test --features testing
cargo test --features testing --features mutex

cross build --target mips-unknown-linux-musl
```

Cross-compiled tests fail due to sub-process spawning not getting
hooked on qemu binfmt setup and spawning with the wrong architecture
(x64 container trying to run MIPS executable).

Creating a MIPS Debian VM using QEMU compiles (although very slowly) and
runs almost all of the tests correctly (except the quiescent cpu time, which
is very slow to run on my VM).
  • Loading branch information
bltavares committed Jun 21, 2020
1 parent 28700b3 commit 2d2d11a
Show file tree
Hide file tree
Showing 6 changed files with 307 additions and 8 deletions.
2 changes: 2 additions & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ measure_allocs = []
pretty_backtrace = ["color-backtrace"]
io_uring = ["rio"]
docs = []
mutex = []

[dependencies]
crossbeam-epoch = "0.8.2"
Expand All @@ -47,6 +48,7 @@ parking_lot = "0.10.2"
color-backtrace = { version = "0.4.2", optional = true }
rio = { version = "0.9.3", optional = true }
backtrace = "0.3.49"
array-init = "0.1.1"

[target.'cfg(any(target_os = "linux", target_os = "macos", target_os="windows"))'.dependencies]
fs2 = "0.4.3"
Expand Down
2 changes: 1 addition & 1 deletion scripts/cross_compile.sh
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ set -e
targets="wasm32-wasi wasm32-unknown-unknown aarch64-fuchsia aarch64-linux-android \
i686-linux-android i686-unknown-linux-gnu \
x86_64-linux-android x86_64-fuchsia \
aarch64-apple-ios"
aarch64-apple-ios mips-unknown-linux-musl"

rustup update

Expand Down
296 changes: 296 additions & 0 deletions src/atomic_shim.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,296 @@
//! Inline of https://github.com/bltavares/atomic-shim

#[cfg(not(any(
target_arch = "mips",
target_arch = "powerpc",
feature = "mutex"
)))]
pub use std::sync::atomic::{AtomicI64, AtomicU64};
/// Lock-based fallback for `AtomicU64`/`AtomicI64`, used on targets (MIPS,
/// PowerPC) where `std` does not provide native 64-bit atomics, or when the
/// `mutex` feature forces the fallback for testing on other architectures.
#[cfg(any(target_arch = "mips", target_arch = "powerpc", feature = "mutex"))]
mod shim {
    use crossbeam_utils::sync::ShardedLock;
    use std::sync::atomic::Ordering;

    /// Drop-in replacement for `std::sync::atomic::AtomicU64` backed by a
    /// reader-writer lock. The `Ordering` arguments are accepted for API
    /// compatibility only and are ignored: the lock serializes all accesses.
    #[derive(Debug, Default)]
    pub struct AtomicU64 {
        value: ShardedLock<u64>,
    }

    impl AtomicU64 {
        /// Creates a new shim atomic holding `v`.
        pub fn new(v: u64) -> Self {
            Self { value: ShardedLock::new(v) }
        }

        /// Read-modify-write helper: applies `op` to the stored value while
        /// holding the write lock, and returns the value seen *before* the
        /// update (the convention of every `fetch_*` method below).
        #[allow(dead_code)]
        fn update(&self, op: impl FnOnce(u64) -> u64) -> u64 {
            let mut guard = self.value.write().unwrap();
            let prev = *guard;
            *guard = op(prev);
            prev
        }

        /// Returns a mutable reference to the value. No locking is required:
        /// `&mut self` already guarantees exclusive access.
        #[allow(dead_code)]
        pub fn get_mut(&mut self) -> &mut u64 {
            self.value.get_mut().unwrap()
        }

        /// Consumes the shim and returns the contained value.
        #[allow(dead_code)]
        pub fn into_inner(self) -> u64 {
            self.value.into_inner().unwrap()
        }

        /// Loads the current value.
        #[allow(dead_code)]
        pub fn load(&self, _: Ordering) -> u64 {
            *self.value.read().unwrap()
        }

        /// Stores `value`, discarding the previous value.
        #[allow(dead_code)]
        pub fn store(&self, value: u64, _: Ordering) {
            self.update(|_| value);
        }

        /// Stores `value` and returns the previous value.
        #[allow(dead_code)]
        pub fn swap(&self, value: u64, _: Ordering) -> u64 {
            self.update(|_| value)
        }

        /// Stores `new` when the current value equals `current`. Always
        /// returns the previous value; as in the std API, the swap happened
        /// iff the return value equals `current`.
        #[allow(dead_code)]
        pub fn compare_and_swap(
            &self,
            current: u64,
            new: u64,
            _: Ordering,
        ) -> u64 {
            self.update(|prev| if prev == current { new } else { prev })
        }

        /// Stores `new` when the current value equals `current`.
        /// Returns `Ok(previous)` on success and `Err(actual)` on failure.
        #[allow(dead_code)]
        pub fn compare_exchange(
            &self,
            current: u64,
            new: u64,
            _: Ordering,
            _: Ordering,
        ) -> Result<u64, u64> {
            let mut guard = self.value.write().unwrap();
            if *guard == current {
                *guard = new;
                Ok(current)
            } else {
                Err(*guard)
            }
        }

        /// Identical to [`Self::compare_exchange`]: the lock-based
        /// implementation never fails spuriously, so the weak variant does
        /// not need to differ.
        #[allow(dead_code)]
        pub fn compare_exchange_weak(
            &self,
            current: u64,
            new: u64,
            success: Ordering,
            failure: Ordering,
        ) -> Result<u64, u64> {
            self.compare_exchange(current, new, success, failure)
        }

        /// Wrapping addition; returns the previous value.
        #[allow(dead_code)]
        pub fn fetch_add(&self, val: u64, _: Ordering) -> u64 {
            self.update(|prev| prev.wrapping_add(val))
        }

        /// Wrapping subtraction; returns the previous value.
        #[allow(dead_code)]
        pub fn fetch_sub(&self, val: u64, _: Ordering) -> u64 {
            self.update(|prev| prev.wrapping_sub(val))
        }

        /// Bitwise AND; returns the previous value.
        #[allow(dead_code)]
        pub fn fetch_and(&self, val: u64, _: Ordering) -> u64 {
            self.update(|prev| prev & val)
        }

        /// Bitwise NAND (`!(prev & val)`); returns the previous value.
        #[allow(dead_code)]
        pub fn fetch_nand(&self, val: u64, _: Ordering) -> u64 {
            self.update(|prev| !(prev & val))
        }

        /// Bitwise OR; returns the previous value.
        #[allow(dead_code)]
        pub fn fetch_or(&self, val: u64, _: Ordering) -> u64 {
            self.update(|prev| prev | val)
        }

        /// Bitwise XOR; returns the previous value.
        #[allow(dead_code)]
        pub fn fetch_xor(&self, val: u64, _: Ordering) -> u64 {
            self.update(|prev| prev ^ val)
        }
    }

    impl From<u64> for AtomicU64 {
        fn from(value: u64) -> Self {
            AtomicU64::new(value)
        }
    }

    /// Drop-in replacement for `std::sync::atomic::AtomicI64` backed by a
    /// reader-writer lock. The `Ordering` arguments are accepted for API
    /// compatibility only and are ignored: the lock serializes all accesses.
    #[derive(Debug, Default)]
    pub struct AtomicI64 {
        value: ShardedLock<i64>,
    }

    impl AtomicI64 {
        /// Creates a new shim atomic holding `v`.
        pub fn new(v: i64) -> Self {
            Self { value: ShardedLock::new(v) }
        }

        /// Read-modify-write helper: applies `op` to the stored value while
        /// holding the write lock, and returns the value seen *before* the
        /// update (the convention of every `fetch_*` method below).
        #[allow(dead_code)]
        fn update(&self, op: impl FnOnce(i64) -> i64) -> i64 {
            let mut guard = self.value.write().unwrap();
            let prev = *guard;
            *guard = op(prev);
            prev
        }

        /// Returns a mutable reference to the value. No locking is required:
        /// `&mut self` already guarantees exclusive access.
        #[allow(dead_code)]
        pub fn get_mut(&mut self) -> &mut i64 {
            self.value.get_mut().unwrap()
        }

        /// Consumes the shim and returns the contained value.
        #[allow(dead_code)]
        pub fn into_inner(self) -> i64 {
            self.value.into_inner().unwrap()
        }

        /// Loads the current value.
        #[allow(dead_code)]
        pub fn load(&self, _: Ordering) -> i64 {
            *self.value.read().unwrap()
        }

        /// Stores `value`, discarding the previous value.
        #[allow(dead_code)]
        pub fn store(&self, value: i64, _: Ordering) {
            self.update(|_| value);
        }

        /// Stores `value` and returns the previous value.
        #[allow(dead_code)]
        pub fn swap(&self, value: i64, _: Ordering) -> i64 {
            self.update(|_| value)
        }

        /// Stores `new` when the current value equals `current`. Always
        /// returns the previous value; as in the std API, the swap happened
        /// iff the return value equals `current`.
        #[allow(dead_code)]
        pub fn compare_and_swap(
            &self,
            current: i64,
            new: i64,
            _: Ordering,
        ) -> i64 {
            self.update(|prev| if prev == current { new } else { prev })
        }

        /// Stores `new` when the current value equals `current`.
        /// Returns `Ok(previous)` on success and `Err(actual)` on failure.
        #[allow(dead_code)]
        pub fn compare_exchange(
            &self,
            current: i64,
            new: i64,
            _: Ordering,
            _: Ordering,
        ) -> Result<i64, i64> {
            let mut guard = self.value.write().unwrap();
            if *guard == current {
                *guard = new;
                Ok(current)
            } else {
                Err(*guard)
            }
        }

        /// Identical to [`Self::compare_exchange`]: the lock-based
        /// implementation never fails spuriously, so the weak variant does
        /// not need to differ.
        #[allow(dead_code)]
        pub fn compare_exchange_weak(
            &self,
            current: i64,
            new: i64,
            success: Ordering,
            failure: Ordering,
        ) -> Result<i64, i64> {
            self.compare_exchange(current, new, success, failure)
        }

        /// Wrapping addition; returns the previous value.
        #[allow(dead_code)]
        pub fn fetch_add(&self, val: i64, _: Ordering) -> i64 {
            self.update(|prev| prev.wrapping_add(val))
        }

        /// Wrapping subtraction; returns the previous value.
        #[allow(dead_code)]
        pub fn fetch_sub(&self, val: i64, _: Ordering) -> i64 {
            self.update(|prev| prev.wrapping_sub(val))
        }

        /// Bitwise AND; returns the previous value.
        #[allow(dead_code)]
        pub fn fetch_and(&self, val: i64, _: Ordering) -> i64 {
            self.update(|prev| prev & val)
        }

        /// Bitwise NAND (`!(prev & val)`); returns the previous value.
        #[allow(dead_code)]
        pub fn fetch_nand(&self, val: i64, _: Ordering) -> i64 {
            self.update(|prev| !(prev & val))
        }

        /// Bitwise OR; returns the previous value.
        #[allow(dead_code)]
        pub fn fetch_or(&self, val: i64, _: Ordering) -> i64 {
            self.update(|prev| prev | val)
        }

        /// Bitwise XOR; returns the previous value.
        #[allow(dead_code)]
        pub fn fetch_xor(&self, val: i64, _: Ordering) -> i64 {
            self.update(|prev| prev ^ val)
        }
    }

    impl From<i64> for AtomicI64 {
        fn from(value: i64) -> Self {
            AtomicI64::new(value)
        }
    }
}

#[cfg(any(
target_arch = "mips",
target_arch = "powerpc",
feature = "mutex"
))]
pub use shim::{AtomicI64, AtomicU64};
4 changes: 3 additions & 1 deletion src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -175,6 +175,7 @@ macro_rules! testing_assert {
}

mod arc;
mod atomic_shim;
mod batch;
mod binary_search;
mod concurrency_control;
Expand Down Expand Up @@ -306,6 +307,7 @@ pub use self::{
use {
self::{
arc::Arc,
atomic_shim::{AtomicI64 as AtomicLsn, AtomicU64},
binary_search::binary_search_lub,
concurrency_control::Protector,
context::Context,
Expand All @@ -330,7 +332,7 @@ use {
fmt::{self, Debug},
io::{Read, Write},
sync::atomic::{
AtomicI64 as AtomicLsn, AtomicU64, AtomicUsize,
AtomicUsize,
Ordering::{Acquire, Relaxed, Release, SeqCst},
},
},
Expand Down
5 changes: 3 additions & 2 deletions src/lru.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,14 @@

use std::convert::TryFrom;
use std::ptr;
use std::sync::atomic::{AtomicPtr, AtomicU64, AtomicUsize, Ordering};
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};

use crate::{
debug_delay,
dll::{DoublyLinkedList, Node},
fastlock::FastLock,
Guard, PageId,
atomic_shim::AtomicU64,
};

#[cfg(any(test, feature = "lock_free_delays"))]
Expand All @@ -33,7 +34,7 @@ impl Default for AccessBlock {
fn default() -> AccessBlock {
AccessBlock {
len: AtomicUsize::new(0),
block: unsafe { std::mem::transmute([0_u64; MAX_QUEUE_ITEMS]) },
block: array_init::array_init(|_| AtomicU64::default()),
next: AtomicPtr::default(),
}
}
Expand Down
6 changes: 2 additions & 4 deletions src/tree.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,12 @@ use std::{
borrow::Cow,
fmt::{self, Debug},
ops::{self, Deref, RangeBounds},
sync::{
atomic::{AtomicU64, Ordering::SeqCst},
},
sync::atomic::Ordering::SeqCst,
};

use parking_lot::RwLock;

use crate::{pagecache::NodeView, *};
use crate::{atomic_shim::AtomicU64, pagecache::NodeView, *};

#[derive(Debug, Clone)]
pub(crate) struct View<'g> {
Expand Down

0 comments on commit 2d2d11a

Please sign in to comment.