···
use bincode::Options as BincodeOptions;
use links::CollectedLink;
use metrics::{counter, describe_counter, describe_histogram, histogram, Unit};
+use ratelimit::Ratelimiter;
+use rocksdb::backup::{BackupEngine, BackupEngineOptions};
AsColumnFamilyRef, ColumnFamilyDescriptor, DBWithThreadMode, IteratorMode, MergeOperands,
MultiThreaded, Options, PrefixRange, ReadOptions, WriteBatch,
···
use std::collections::{HashMap, HashSet};
use std::marker::PhantomData;
-use std::path::Path;
+use std::path::{Path, PathBuf};
atomic::{AtomicU64, Ordering},
-use std::time::Instant;
+use std::time::{Duration, Instant};
+use tokio_util::sync::CancellationToken;
static DID_IDS_CF: &str = "did_ids";
static TARGET_IDS_CF: &str = "target_ids";
···
did_id_table: IdTable<Did, DidIdValue, true>,
target_id_table: IdTable<TargetKey, TargetId, false>,
+backup_task: Arc<Option<thread::JoinHandle<Result<()>>>>,
trait IdTableValue: ValueFromRocks + Clone {
···
+backup_task: None.into(),
-pub fn start_backup(&self, path: impl AsRef<Path>) -> Result<()> {
-use rocksdb::backup::{BackupEngine, BackupEngineOptions};
-eprintln!("getting ready to start backup...");
-BackupEngine::open(&BackupEngineOptions::new(path)?, &rocksdb::Env::new()?)?;
-std::thread::spawn({
-let db = self.db.clone();
-eprintln!("backup starting.");
-let t0 = Instant::now();
-if let Err(e) = engine.create_new_backup(&db) {
-eprintln!("oh no, backup failed: {e:?}");
+pub fn start_backup(
+auto: Option<(u64, Option<usize>)>,
+stay_alive: CancellationToken,
+let task = if let Some((interval_hrs, copies)) = auto {
+eprintln!("backups: starting background task...");
+self.backup_task(path, interval_hrs, copies, stay_alive)
+eprintln!("backups: starting a one-off backup...");
+let db = self.db.clone();
+move || Self::do_backup(db, path)
+self.backup_task = Arc::new(Some(task));
+copies: Option<usize>,
+stay_alive: CancellationToken,
+) -> std::thread::JoinHandle<Result<()>> {
+let db = self.db.clone();
+thread::spawn(move || {
+Ratelimiter::builder(1, Duration::from_secs(interval_hrs * 60 * 60)).build()?;
+let minimum_sleep = Duration::from_secs(1);
+if let Err(sleep) = limit.try_wait() {
+eprintln!("backups: background: next backup scheduled in {sleep:?}");
+let waiting = Instant::now();
+let remaining = sleep - waiting.elapsed();
+if stay_alive.is_cancelled() {
+} else if remaining <= Duration::ZERO {
+} else if remaining < minimum_sleep {
+thread::sleep(remaining);
+thread::sleep(minimum_sleep);
+eprintln!("backups: background: starting backup...");
+if let Err(e) = Self::do_backup(db.clone(), &path) {
+eprintln!("backups: background: backup failed: {e:?}");
-eprintln!("yay, backup worked?");
+eprintln!("backups: background: backup succeeded yay");
+if let Some(copies) = copies {
+eprintln!("backups: background: trimming to {copies} saved backups...");
+if let Err(e) = Self::trim_backups(copies, &path) {
+eprintln!("backups: background: failed to trim backups: {e:?}");
+eprintln!("backups: background: trimming worked!")
-eprintln!("backup finished after {:.2}s", t0.elapsed().as_secs_f32());
-eprintln!("backups should be happening in bg thread.");
+fn do_backup(db: Arc<DBWithThreadMode<MultiThreaded>>, path: impl AsRef<Path>) -> Result<()> {
+BackupEngine::open(&BackupEngineOptions::new(path)?, &rocksdb::Env::new()?)?;
+eprintln!("backups: starting backup...");
+let t0 = Instant::now();
+if let Err(e) = engine.create_new_backup(&db) {
+eprintln!("backups: oh no, backup failed: {e:?}");
+eprintln!("backups: yay, backup worked?");
+"backups: backup finished after {:.2}s",
+t0.elapsed().as_secs_f32()
+fn trim_backups(num_backups_to_keep: usize, path: impl AsRef<Path>) -> Result<()> {
+BackupEngine::open(&BackupEngineOptions::new(path)?, &rocksdb::Env::new()?)?;
+engine.purge_old_backups(num_backups_to_keep)?;
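The hunk above only shows changed lines, so the control flow of the new background task is hard to follow from the diff alone. Below is a minimal, self-contained sketch of the same machinery: a rate-limited loop that takes a RocksDB backup roughly every `interval_hrs` hours until the `CancellationToken` fires, and optionally trims old backups afterwards. `do_backup` and `trim_backups` follow the names in the diff; `backup_loop`, the use of `anyhow::Result`, and the exact error handling are assumptions rather than the upstream code.

use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};

use anyhow::Result;
use ratelimit::Ratelimiter;
use rocksdb::backup::{BackupEngine, BackupEngineOptions};
use rocksdb::{DBWithThreadMode, MultiThreaded};
use tokio_util::sync::CancellationToken;

fn backup_loop(
    db: Arc<DBWithThreadMode<MultiThreaded>>,
    path: PathBuf,
    interval_hrs: u64,
    copies: Option<usize>,
    stay_alive: CancellationToken,
) -> Result<()> {
    // One backup "token" refills every interval_hrs hours; try_wait() consumes it.
    let limit = Ratelimiter::builder(1, Duration::from_secs(interval_hrs * 60 * 60)).build()?;
    let minimum_sleep = Duration::from_secs(1);
    loop {
        // Err(sleep) reports how long until the next token becomes available.
        if let Err(sleep) = limit.try_wait() {
            eprintln!("backups: next backup scheduled in {sleep:?}");
            let waiting = Instant::now();
            loop {
                // Sleep in ~1s slices so a cancellation is noticed promptly.
                let remaining = sleep.saturating_sub(waiting.elapsed());
                if stay_alive.is_cancelled() {
                    return Ok(()); // shut down without taking another backup
                } else if remaining.is_zero() {
                    break; // time for the next backup
                } else if remaining < minimum_sleep {
                    thread::sleep(remaining);
                } else {
                    thread::sleep(minimum_sleep);
                }
            }
            continue; // re-check the limiter before backing up
        }
        if let Err(e) = do_backup(db.clone(), &path) {
            eprintln!("backups: backup failed: {e:?}");
        } else if let Some(copies) = copies {
            if let Err(e) = trim_backups(copies, &path) {
                eprintln!("backups: failed to trim backups: {e:?}");
            }
        }
    }
}

fn do_backup(db: Arc<DBWithThreadMode<MultiThreaded>>, path: impl AsRef<Path>) -> Result<()> {
    let mut engine =
        BackupEngine::open(&BackupEngineOptions::new(path)?, &rocksdb::Env::new()?)?;
    let t0 = Instant::now();
    engine.create_new_backup(&db)?;
    eprintln!("backups: backup finished after {:.2}s", t0.elapsed().as_secs_f32());
    Ok(())
}

fn trim_backups(num_backups_to_keep: usize, path: impl AsRef<Path>) -> Result<()> {
    let mut engine =
        BackupEngine::open(&BackupEngineOptions::new(path)?, &rocksdb::Env::new()?)?;
    engine.purge_old_backups(num_backups_to_keep)?;
    Ok(())
}

A `start_backup` wrapper along the lines of the diff would then spawn this with `thread::spawn(move || backup_loop(db, path, interval_hrs, copies, stay_alive))`, which yields the `JoinHandle<Result<()>>` stored in the new `backup_task` field.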
···
eprintln!("rocks: flushing memtables failed: {e:?}");
+match Arc::get_mut(&mut self.backup_task) {
+Some(maybe_task) => {
+if let Some(task) = maybe_task.take() {
+eprintln!("waiting for backup task to complete...");
+if let Err(e) = task.join() {
+eprintln!("failed to join backup task: {e:?}");
+None => eprintln!("rocks: failed to get backup task, likely a bug."),
self.db.cancel_all_background_work(true);
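On shutdown, the diff joins the backup thread before asking RocksDB to stop its own background work. A short sketch of that sequence follows, assuming the hypothetical `Storage` struct and `close` method stand in for the real store type, that the caller cancels the token at this point, and that this is the last live clone of the store so `Arc::get_mut` succeeds.

use std::sync::Arc;
use std::thread;

use anyhow::Result;
use rocksdb::{DBWithThreadMode, MultiThreaded};
use tokio_util::sync::CancellationToken;

// Hypothetical stand-in for the store type in the diff: it owns the DB handle
// and, optionally, the JoinHandle of the background backup thread.
struct Storage {
    db: Arc<DBWithThreadMode<MultiThreaded>>,
    backup_task: Arc<Option<thread::JoinHandle<Result<()>>>>,
}

impl Storage {
    fn close(&mut self, stay_alive: &CancellationToken) {
        // Ask the backup loop to stop at its next wake-up (at most ~1s away).
        stay_alive.cancel();
        // Arc::get_mut only succeeds when no other clone still holds the
        // handle, which is why the field is Arc<Option<JoinHandle<...>>>.
        match Arc::get_mut(&mut self.backup_task) {
            Some(maybe_task) => {
                if let Some(task) = maybe_task.take() {
                    eprintln!("waiting for backup task to complete...");
                    if let Err(e) = task.join() {
                        eprintln!("failed to join backup task: {e:?}");
                    }
                }
            }
            None => eprintln!("rocks: failed to get backup task, likely a bug."),
        }
        // Only after the backup thread is gone do we stop RocksDB's own
        // background work, mirroring the order in the diff.
        self.db.cancel_all_background_work(true);
    }
}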