
Commit

v.1.6.8, stable rust, multi_chain switch
JohnnyFFM committed Dec 22, 2018
1 parent b576e81 commit 03182b1
Showing 10 changed files with 119 additions and 39 deletions.
2 changes: 1 addition & 1 deletion Cargo.lock

Cargo.lock is a generated file, so its diff is not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "scavenger"
version = "1.6.7"
version = "1.6.8"
license = "GPL-3.0"
authors = ["PoC Consortium <bots@cryptoguru.org>"]
description = """
8 changes: 6 additions & 2 deletions config.yaml
@@ -8,8 +8,8 @@ plot_dirs:
# - '/first/linux/plot/dir'
# - '/second/linux/plot/dir'

# url: 'http://pool.dev.burst-test.net:8124' # testnet pool
url: 'http://wallet.dev.burst-test.net:6876' # testnet wallet
# url: 'http://pool.dev.burst-test.net:8124' # testnet pool
# url: 'http://dummypool.megash.it' # dummypool with constant scoop number for benchmarking

hdd_reader_thread_count: 0 # default 0 (=number of disks)
@@ -44,10 +44,14 @@ show_progress: true # default true
show_drive_stats: false # default false
benchmark_only: 'disabled' # default disabled, options (disabled, I/O, XPU)

multi_chain: false # enable multi-chain mining
maximum_fork_difference: 1440 # maximum block-height difference to last block
minimum_block_height: 500000 # don't work blocks lower than this height

# Low noise log patterns
console_log_pattern: "{({d(%H:%M:%S)} [{l}]):16.16} {m}{n}"
logfile_log_pattern: "{({d(%Y-%m-%d %H:%M:%S)} [{l}]):26.26} {m}{n}"

# More detailed log patterns
#console_log_pattern: "{d(%H:%M:%S.%3f%z)} [{h({l}):<5}] [{T}] [{t}] - {M}:{m}{n}"
#logfile_log_pattern: "{d(%Y-%m-%dT%H:%M:%S.%3f%z)} [{h({l}):<5}] [{T}]-[{t}] [{f}:{L}] - {M}:{m}{n}"
#logfile_log_pattern: "{d(%Y-%m-%dT%H:%M:%S.%3f%z)} [{h({l}):<5}] [{T}]-[{t}] [{f}:{L}] - {M}:{m}{n}"
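
The three keys added above control when the miner switches to a newly announced block: multi_chain bypasses the height checks entirely, while maximum_fork_difference and minimum_block_height reject blocks that look like a deep fork or a foreign chain. As an illustration only (the values are the defaults introduced in this commit, except multi_chain, which is flipped to true here), a config.yaml that opts into chain switching would contain:

multi_chain: true               # accept any new generation signature, even from another chain
maximum_fork_difference: 1440   # max allowed drop in height before a new block is treated as a fork (only checked when multi_chain is false)
minimum_block_height: 500000    # lowest height that will be worked (only checked when multi_chain is false)
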
21 changes: 21 additions & 0 deletions src/config.rs
@@ -87,6 +87,15 @@ pub struct Cfg {

#[serde(default = "default_benchmark_only")]
pub benchmark_only: String,

#[serde(default = "default_multi_chain")]
pub multi_chain: bool,

#[serde(default = "default_maximum_fork_difference")]
pub maximum_fork_difference: u64,

#[serde(default = "default_minimum_block_height")]
pub minimum_block_height: u64,
}

fn default_secret_phrase() -> HashMap<u64, String> {
@@ -193,6 +202,18 @@ fn default_benchmark_only() -> String {
"disabled".to_owned()
}

fn default_multi_chain() -> bool {
false
}

fn default_maximum_fork_difference() -> u64 {
1440
}

fn default_minimum_block_height() -> u64 {
500000
}

pub fn load_cfg(config: &str) -> Cfg {
let cfg_str = fs::read_to_string(config).expect("failed to open config");
let cfg: Cfg = serde_yaml::from_str(&cfg_str).expect("failed to parse config");
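
The new Cfg fields above rely on serde's field-level defaults: when a key is absent from config.yaml, serde calls the named function instead of failing, so configs written for 1.6.7 keep parsing. Below is a minimal, self-contained sketch of that behaviour with a trimmed-down struct, not the real Cfg; it assumes the serde crate with the "derive" feature plus serde_yaml.

use serde::Deserialize; // assumes serde = { features = ["derive"] } in Cargo.toml

#[derive(Debug, Deserialize)]
struct CfgSketch {
    #[serde(default = "default_multi_chain")]
    multi_chain: bool,
    #[serde(default = "default_maximum_fork_difference")]
    maximum_fork_difference: u64,
    #[serde(default = "default_minimum_block_height")]
    minimum_block_height: u64,
}

fn default_multi_chain() -> bool { false }
fn default_maximum_fork_difference() -> u64 { 1440 }
fn default_minimum_block_height() -> u64 { 500_000 }

fn main() {
    // A config that sets only one of the new keys; the others fall back to their defaults.
    let cfg: CfgSketch = serde_yaml::from_str("multi_chain: true").unwrap();
    assert!(cfg.multi_chain);                        // taken from the YAML
    assert_eq!(cfg.maximum_fork_difference, 1440);   // filled in by the default fn
    assert_eq!(cfg.minimum_block_height, 500_000);   // filled in by the default fn
    println!("{:?}", cfg);
}
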
12 changes: 8 additions & 4 deletions src/logger.rs
@@ -57,7 +57,8 @@ pub fn init_logger(cfg: &Cfg) -> log4rs::Handle {
Appender::builder()
.filter(Box::new(ThresholdFilter::new(level_console)))
.build("stdout", Box::new(stdout)),
).build(Root::builder().appender("stdout").build(LevelFilter::Info))
)
.build(Root::builder().appender("stdout").build(LevelFilter::Info))
.unwrap()
} else {
let logfile = RollingFileAppender::builder()
@@ -69,16 +70,19 @@ pub fn init_logger(cfg: &Cfg) -> log4rs::Handle {
Appender::builder()
.filter(Box::new(ThresholdFilter::new(level_console)))
.build("stdout", Box::new(stdout)),
).appender(
)
.appender(
Appender::builder()
.filter(Box::new(ThresholdFilter::new(level_logfile)))
.build("logfile", Box::new(logfile)),
).build(
)
.build(
Root::builder()
.appender("stdout")
.appender("logfile")
.build(LevelFilter::Trace),
).unwrap()
)
.unwrap()
};
log4rs::init_config(config).unwrap()
}
40 changes: 32 additions & 8 deletions src/miner.rs
@@ -41,12 +41,15 @@ pub struct Miner {
request_handler: RequestHandler,
rx_nonce_data: mpsc::Receiver<NonceData>,
target_deadline: u64,
account_id_to_target_deadline : HashMap<u64,u64>,
account_id_to_target_deadline: HashMap<u64, u64>,
state: Arc<Mutex<State>>,
reader_task_count: usize,
get_mining_info_interval: u64,
core: Core,
wakeup_after: i64,
multi_chain: bool,
maximum_fork_difference: u64,
minimum_block_height: u64,
}

pub struct State {
@@ -289,7 +292,7 @@ impl Miner {
cfg.send_proxy_details,
),
state: Arc::new(Mutex::new(State {
generation_signature : "".to_owned(),
generation_signature: "".to_owned(),
height: 0,
account_id_to_best_deadline: HashMap::new(),
base_target: 1,
@@ -300,6 +303,9 @@
get_mining_info_interval: cfg.get_mining_info_interval,
core,
wakeup_after: cfg.hdd_wakeup_after * 1000, // s -> ms
multi_chain: cfg.multi_chain,
maximum_fork_difference: cfg.maximum_fork_difference,
minimum_block_height: cfg.minimum_block_height,
}
}

@@ -315,18 +321,29 @@
// there might be a way to solve this without two nested moves
let get_mining_info_interval = self.get_mining_info_interval;
let wakeup_after = self.wakeup_after;
let multi_chain = self.multi_chain;
let maximum_fork_difference = self.maximum_fork_difference;
let minimum_block_height = self.minimum_block_height;
handle.spawn(
Interval::new(
Instant::now(),
Duration::from_millis(get_mining_info_interval),
).for_each(move |_| {
)
.for_each(move |_| {
let state = state.clone();
let reader = reader.clone();
request_handler.get_mining_info().then(move |mining_info| {
match mining_info {
Ok(mining_info) => {
let mut state = state.lock().unwrap();
if mining_info.generation_signature != state.generation_signature {
let new_block =
mining_info.generation_signature != state.generation_signature;
if (new_block && multi_chain)
|| (new_block
&& (state.height == 0 || mining_info.height
> (state.height - maximum_fork_difference))
&& mining_info.height > minimum_block_height)
{
for best_deadlines in state.account_id_to_best_deadline.values_mut()
{
*best_deadlines = u64::MAX;
@@ -368,7 +385,8 @@
}
future::ok(())
})
}).map_err(|e| panic!("interval errored: err={:?}", e)),
})
.map_err(|e| panic!("interval errored: err={:?}", e)),
);

let target_deadline = self.target_deadline;
@@ -386,7 +404,12 @@
.account_id_to_best_deadline
.get(&nonce_data.account_id)
.unwrap_or(&u64::MAX);
if best_deadline > deadline && deadline < *(account_id_to_target_deadline.get(&nonce_data.account_id).unwrap_or(&target_deadline)) {
if best_deadline > deadline
&& deadline
< *(account_id_to_target_deadline
.get(&nonce_data.account_id)
.unwrap_or(&target_deadline))
{
state
.account_id_to_best_deadline
.insert(nonce_data.account_id, deadline);
@@ -400,7 +423,7 @@
0,
);
/* tradeoff between non-verbosity and information: stopped informing about
found deadlines, but reporting accepted deadlines instead.
found deadlines, but reporting accepted deadlines instead.
info!(
"deadline captured: account={}, nonce={}, deadline={}",
nonce_data.account_id, nonce_data.nonce, deadline
@@ -425,7 +448,8 @@
}
}
Ok(())
}).map_err(|e| panic!("interval errored: err={:?}", e)),
})
.map_err(|e| panic!("interval errored: err={:?}", e)),
);

self.core.run(future::empty::<(), ()>()).unwrap();
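
The condition added to the mining-info polling loop above decides whether a polled block starts a new round. Read as a predicate: the generation signature must have changed, and then either multi_chain is on, or the reported height must be plausible for the current chain (not too far below the last height, and above the configured floor). Below is a standalone sketch of that logic with illustrative names; the real check lives inline in the interval closure, and saturating_sub here simply avoids the u64 underflow that the real code only guards against via the height == 0 test.

/// Sketch of the new-block check introduced in this commit (not the exact code path).
fn should_start_round(
    multi_chain: bool,
    maximum_fork_difference: u64,
    minimum_block_height: u64,
    last_height: u64,
    last_gensig: &str,
    new_height: u64,
    new_gensig: &str,
) -> bool {
    let new_block = new_gensig != last_gensig;
    if !new_block {
        return false; // same generation signature: nothing to do
    }
    if multi_chain {
        return true; // any new signature starts a round, even from another chain
    }
    // Single-chain mode: reject deep forks and blocks below the configured floor.
    let fork_ok = last_height == 0
        || new_height > last_height.saturating_sub(maximum_fork_difference);
    fork_ok && new_height > minimum_block_height
}

fn main() {
    // First mining info ever seen (last_height == 0) is accepted if the height clears the floor.
    assert!(should_start_round(false, 1440, 500_000, 0, "", 650_000, "aa"));
    // A block far below the last height looks like a different chain and is skipped.
    assert!(!should_start_round(false, 1440, 500_000, 650_000, "aa", 100_000, "bb"));
    // With multi_chain enabled the same block is accepted.
    assert!(should_start_round(true, 1440, 500_000, 650_000, "aa", 100_000, "bb"));
}
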
49 changes: 34 additions & 15 deletions src/ocl.rs
@@ -156,7 +156,8 @@ impl GpuBuffer {
core::MEM_READ_WRITE,
context.gdim1[0],
None,
).unwrap()
)
.unwrap()
};

let best_offset_gpu = unsafe {
@@ -183,7 +184,8 @@ impl GpuBuffer {
core::MEM_READ_ONLY | core::MEM_USE_HOST_PTR,
context.gdim1[0] * 64,
Some(&data),
).unwrap()
)
.unwrap()
};

GpuBuffer {
@@ -215,7 +217,8 @@ impl Buffer for GpuBuffer {
&(*locked_context).gdim1[0] * 64,
None::<Event>,
None::<&mut Event>,
).unwrap(),
)
.unwrap(),
));
}
}
@@ -260,7 +263,8 @@ impl GpuContext {
&CString::new("").unwrap(),
None,
None,
).unwrap();
)
.unwrap();
let queue = core::create_command_queue(&context, &device_id, None).unwrap();
let kernel1 = core::create_kernel(&program, "calculate_deadlines").unwrap();
let kernel2 = core::create_kernel(&program, "find_min").unwrap();
@@ -311,7 +315,8 @@ pub fn find_best_deadline_gpu(
&gensig,
None::<Event>,
None::<&mut Event>,
).unwrap();
)
.unwrap();
}

if gpu_context.mapping {
@@ -323,7 +328,8 @@ pub fn find_best_deadline_gpu(
&*temp2,
None::<Event>,
None::<&mut Event>,
).unwrap();
)
.unwrap();
} else {
unsafe {
core::enqueue_write_buffer(
@@ -334,7 +340,8 @@ pub fn find_best_deadline_gpu(
&data2,
None::<Event>,
None::<&mut Event>,
).unwrap();
)
.unwrap();
}
}

@@ -352,26 +359,35 @@ pub fn find_best_deadline_gpu(
Some(gpu_context.ldim1),
None::<Event>,
None::<&mut Event>,
).unwrap();
)
.unwrap();
}

core::set_kernel_arg(&gpu_context.kernel2, 0, ArgVal::mem(&buffer.deadlines_gpu)).unwrap();
core::set_kernel_arg(&gpu_context.kernel2, 1, ArgVal::primitive(&(nonce_count as u64))).unwrap();
core::set_kernel_arg(
&gpu_context.kernel2,
1,
ArgVal::primitive(&(nonce_count as u64)),
)
.unwrap();
core::set_kernel_arg(
&gpu_context.kernel2,
2,
ArgVal::local::<u32>(&gpu_context.ldim2[0]),
).unwrap();
)
.unwrap();
core::set_kernel_arg(
&gpu_context.kernel2,
3,
ArgVal::mem(&buffer.best_offset_gpu),
).unwrap();
)
.unwrap();
core::set_kernel_arg(
&gpu_context.kernel2,
4,
ArgVal::mem(&buffer.best_deadline_gpu),
).unwrap();
)
.unwrap();

unsafe {
core::enqueue_kernel(
@@ -383,7 +399,8 @@ pub fn find_best_deadline_gpu(
Some(gpu_context.ldim2),
None::<Event>,
None::<&mut Event>,
).unwrap();
)
.unwrap();
}

let mut best_offset = vec![0u64; 1];
@@ -398,7 +415,8 @@ pub fn find_best_deadline_gpu(
&mut best_offset,
None::<Event>,
None::<&mut Event>,
).unwrap();
)
.unwrap();
}
unsafe {
core::enqueue_read_buffer(
@@ -409,7 +427,8 @@ pub fn find_best_deadline_gpu(
&mut best_deadline,
None::<Event>,
None::<&mut Event>,
).unwrap();
)
.unwrap();
}

(best_deadline[0], best_offset[0])