added hard reset for navigation timeout after 3 hours
@@ -78,7 +78,7 @@ pub async fn run_full_update(
     }

     logger::log_info("Step 5: Building companies.jsonl with parallel processing and validation...").await;
-    let count = build_companies_jsonl_streaming_parallel(&paths, pool, shutdown_flag).await?;
+    let count = build_companies_jsonl_streaming_parallel(&paths, pool, shutdown_flag, _config, &None).await?;
     logger::log_info(&format!(" ✓ Saved {} companies", count)).await;

     if !shutdown_flag.load(Ordering::SeqCst) {
@@ -1,16 +1,18 @@
-// src/corporate/update_parallel.rs - UPDATED WITH DATA INTEGRITY FIXES
-// PARALLELIZED VERSION with atomic commits and validation
+// src/corporate/update_parallel.rs - FIXED: Proper Hard Reset Implementation
 //
-// Key improvements over original:
-// - Page validation to prevent stale content extraction
-// - Shutdown-aware task processing
-// - Better error recovery with browser state cleanup
-// - All original fsync and checkpoint logic preserved
+// Critical fixes:
+// 1. Hard reset actually performed (no premature break)
+// 2. Error counter reset after hard reset
+// 3. Per-ISIN status tracking (not per-company)
+// 4. Proper task draining before reset
+// 5. Queue rebuilding after reset

 use super::{types::*, yahoo::*, helpers::*};
 use crate::util::directories::DataPaths;
 use crate::util::logger;
 use crate::scraper::webdriver::ChromeDriverPool;
+use crate::scraper::hard_reset::perform_hard_reset;
+use crate::config::Config;

 use tokio::sync::mpsc;
 use tokio::io::AsyncWriteExt;
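The reset signal itself is just a string sentinel: lower layers raise an anyhow error whose message contains HARD_RESET_REQUIRED, and each caller checks the message and propagates instead of retrying. A minimal sketch of that convention, assuming only the anyhow crate (navigate is a hypothetical stand-in for a scrape step):

    use anyhow::{anyhow, Result};

    // The reset request is an ordinary anyhow error carrying a
    // well-known marker string; no dedicated error enum is needed.
    const RESET_SENTINEL: &str = "HARD_RESET_REQUIRED";

    // Hypothetical low-level step that detects an unrecoverable
    // browser state and asks for a pool-wide hard reset.
    fn navigate(timed_out: bool) -> Result<()> {
        if timed_out {
            return Err(anyhow!(RESET_SENTINEL));
        }
        Ok(())
    }

    fn main() -> Result<()> {
        match navigate(true) {
            Err(e) if e.to_string().contains(RESET_SENTINEL) => {
                // Top level: drain tasks and rebuild the pool
                // instead of retrying the failed operation.
                println!("hard reset requested");
                Ok(())
            }
            other => other,
        }
    }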
@@ -36,17 +38,13 @@ struct CompanyProcessResult {
     is_update: bool,
 }

-/// UPDATED: Abort-safe incremental JSONL persistence with validation
-///
-/// New safety features:
-/// - Page validation before extraction
-/// - Shutdown checks at all critical points
-/// - Browser state cleanup on errors
-/// - All writes still atomic with fsync
+/// Abort-safe incremental JSONL persistence with proper hard reset handling
 pub async fn build_companies_jsonl_streaming_parallel(
     paths: &DataPaths,
     pool: &Arc<ChromeDriverPool>,
     shutdown_flag: &Arc<AtomicBool>,
+    config: &Config,
+    monitoring: &Option<crate::monitoring::MonitoringHandle>,
 ) -> anyhow::Result<usize> {
     // Configuration constants
     const CHECKPOINT_INTERVAL: usize = 50;
@@ -54,9 +52,19 @@ pub async fn build_companies_jsonl_streaming_parallel(
     const FSYNC_INTERVAL_SECS: u64 = 10;
     const CONCURRENCY_LIMIT: usize = 100;

+    // Create hard reset controller
+    let reset_controller = pool.get_reset_controller();
+
+    // Wrap pool in mutex for potential replacement
+    let pool_mutex = Arc::new(tokio::sync::Mutex::new(Arc::clone(pool)));
+
+    // Synchronization for hard reset
+    let reset_in_progress = Arc::new(tokio::sync::Mutex::new(false));
+
     let path = DataPaths::new(".")?;
     let corporate_path = path.data_dir().join("corporate").join("by_name");
     let securities_path = corporate_path.join("common_stocks.json");
+    let securities_path_cloned = securities_path.clone();

     if !securities_path.exists() {
         logger::log_warn("No common_stocks.json found").await;
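The pool_mutex shape above (an Arc<Mutex<Arc<ChromeDriverPool>>>) is what makes replacement possible: tasks clone the inner Arc as a snapshot and keep using it, while the reset path swaps a fresh pool into the slot. A self-contained sketch of that snapshot-and-swap pattern, with a dummy Pool type standing in for ChromeDriverPool:

    use std::sync::Arc;
    use tokio::sync::Mutex;

    // Stand-in for ChromeDriverPool.
    struct Pool {
        generation: u32,
    }

    #[tokio::main]
    async fn main() {
        // Outer Arc<Mutex<...>> is shared; the inner Arc<Pool> is the
        // swappable handle.
        let pool_mutex = Arc::new(Mutex::new(Arc::new(Pool { generation: 1 })));

        // A task takes a cheap snapshot; it keeps working against the
        // old pool even if a reset swaps the inner Arc while it runs.
        let snapshot = {
            let guard = pool_mutex.lock().await;
            Arc::clone(&*guard)
        };

        // Hard reset: build a new pool and swap it in; live snapshots
        // keep the old pool alive until they are dropped.
        {
            let mut guard = pool_mutex.lock().await;
            *guard = Arc::new(Pool { generation: 2 });
        }

        assert_eq!(snapshot.generation, 1);
        assert_eq!(pool_mutex.lock().await.generation, 2);
    }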
@@ -137,9 +145,9 @@ pub async fn build_companies_jsonl_streaming_parallel(
     let companies_path_clone = companies_path.clone();
     let log_path_clone = log_path.clone();
     let existing_companies_writer = Arc::new(tokio::sync::Mutex::new(existing_companies.clone()));
+    let existing_companies_writer_clone = Arc::clone(&existing_companies_writer);

     let write_tx_for_writer = write_tx.clone();

     let writer_task = tokio::spawn(async move {
         let mut log_file = log_file_init;
         let mut writes_since_fsync = 0;
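All persistence funnels through this single writer task fed by an mpsc channel, so file appends and fsyncs stay serialized no matter how many scrape tasks run; the LogCommand::Write/Checkpoint/Shutdown sends elsewhere in the file drive it. A reduced sketch of that consumer loop (payload type and fsync bookkeeping simplified to placeholders):

    use tokio::sync::mpsc;

    // Simplified version of the commands the writer task consumes.
    enum LogCommand {
        Write(String),
        Checkpoint,
        Shutdown,
    }

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = mpsc::channel::<LogCommand>(1024);

        let writer = tokio::spawn(async move {
            let mut written = 0usize;
            while let Some(cmd) = rx.recv().await {
                match cmd {
                    // Single consumer => appends are naturally serialized.
                    LogCommand::Write(line) => {
                        // (real code appends the JSONL line and counts
                        // writes toward the next fsync)
                        let _ = line;
                        written += 1;
                    }
                    LogCommand::Checkpoint => {
                        // (real code flushes and fsyncs here)
                    }
                    LogCommand::Shutdown => break,
                }
            }
            written
        });

        tx.send(LogCommand::Write("{\"name\":\"ACME\"}".into())).await.unwrap();
        tx.send(LogCommand::Checkpoint).await.unwrap();
        tx.send(LogCommand::Shutdown).await.unwrap();

        assert_eq!(writer.await.unwrap(), 1);
    }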
@@ -278,112 +286,299 @@ pub async fn build_companies_jsonl_streaming_parallel(
         (count, new_count, updated_count)
     });

-    // === PARALLEL PROCESSING PHASE ===
-    logger::log_info(&format!(
-        "Starting parallel processing of {} companies (concurrency limit: {})",
-        securities.len(),
-        CONCURRENCY_LIMIT
-    )).await;
-
-    let mut processing_tasks = FuturesUnordered::new();
-    let mut processed = 0;
-
-    for (name, company_info) in securities.into_iter() {
-        // Check shutdown before creating new tasks
-        if shutdown_flag.load(Ordering::SeqCst) {
-            logger::log_warn("Shutdown detected, stopping task creation").await;
-            break;
-        }
-
-        // Wait if we hit concurrency limit
-        while processing_tasks.len() >= CONCURRENCY_LIMIT {
-            if let Some(result) = processing_tasks.next().await {
-                match result {
-                    Ok(Ok(Some(company_result))) => {
-                        let company_result: CompanyProcessResult = company_result;
-                        let _ = write_tx_for_writer.send(LogCommand::Write(company_result.company)).await?;
-                        processed += 1;
-                    }
-                    Ok(Ok(None)) => {
-                        processed += 1;
-                    }
-                    Ok(Err(e)) => {
-                        logger::log_warn(&format!("Company processing error: {}", e)).await;
-                        processed += 1;
-                    }
-                    Err(e) => {
-                        logger::log_error(&format!("Task panic: {}", e)).await;
-                        processed += 1;
-                    }
-                }
-            }
-        }
-
-        if shutdown_flag.load(Ordering::SeqCst) {
-            break;
-        }
-
-        // Spawn new task
-        let pool = pool.clone();
-        let shutdown_flag = shutdown_flag.clone();
-        let existing_entry = existing_companies.get(&name).cloned();
-
-        let task = tokio::spawn(async move {
-            process_single_company_validated(
-                name,
-                company_info,
-                existing_entry,
-                &pool,
-                &shutdown_flag
-            ).await
-        });
-
-        processing_tasks.push(task);
-
-        if processed % 10 == 0 && processed > 0 {
-            logger::log_info(&format!("Progress: {}/{} companies processed", processed, total)).await;
-        }
-    }
-
-    // Wait for remaining tasks
-    logger::log_info(&format!(
-        "Waiting for {} remaining tasks to complete...",
-        processing_tasks.len()
-    )).await;
-
-    while let Some(result) = processing_tasks.next().await {
-        if shutdown_flag.load(Ordering::SeqCst) {
-            logger::log_warn("Shutdown detected during final task wait").await;
-            break;
-        }
-
-        match result {
-            Ok(Ok(Some(company_result))) => {
-                if write_tx_for_writer.send(LogCommand::Write(company_result.company)).await.is_err() {
-                    logger::log_error("Writer task died").await;
-                    break;
-                }
-            }
-            Ok(Err(e)) => {
-                logger::log_warn(&format!("Company processing error: {}", e)).await;
-                processed += 1;
-            }
-        }
-    }
+    // === MAIN PROCESSING LOOP ===
+    let total = securities.len();
+    logger::log_info(&format!("Processing {} companies with concurrency limit {}", total, CONCURRENCY_LIMIT)).await;
+
+    let mut tasks = FuturesUnordered::new();
+    let mut pending = securities.into_iter().collect::<Vec<_>>();
+    let mut processed = 0;
+    let mut hard_reset_count = 0;
+
+    // Spawn initial batch
+    for _ in 0..CONCURRENCY_LIMIT.min(pending.len()) {
+        if let Some((name, company_info)) = pending.pop() {
+            let current_pool = {
+                let pool_guard = pool_mutex.lock().await;
+                Arc::clone(&*pool_guard)
+            };
+
+            if shutdown_flag.load(Ordering::SeqCst) {
+                break;
+            }
+
+            let existing = existing_companies.get(&name).cloned();
+            let shutdown_flag_clone = Arc::clone(shutdown_flag);
+
+            let task = tokio::spawn(async move {
+                process_single_company_validated(
+                    name,
+                    company_info,
+                    existing,
+                    &current_pool,
+                    &shutdown_flag_clone,
+                ).await
+            });
+
+            tasks.push(task);
+        }
+    }
+
+    // Process results and spawn new tasks
+    while let Some(task_result) = tasks.next().await {
+        // Check for shutdown
+        if shutdown_flag.load(Ordering::SeqCst) {
+            logger::log_warn("Shutdown signal received, stopping processing").await;
+            break;
+        }
+
+        match task_result {
+            Ok(Ok(Some(result))) => {
+                // Success: send to writer
+                let _ = write_tx_for_writer.send(LogCommand::Write(result.company)).await;
+                processed += 1;
+
+                // Log progress every 100 companies
+                if processed % 100 == 0 {
+                    logger::log_info(&format!(
+                        "Progress: {}/{} companies processed ({} resets)",
+                        processed,
+                        total,
+                        hard_reset_count
+                    )).await;
+                }
+
+                // Spawn next task if available
+                if let Some((name, company_info)) = pending.pop() {
+                    let current_pool = {
+                        let pool_guard = pool_mutex.lock().await;
+                        Arc::clone(&*pool_guard)
+                    };
+
+                    let existing = existing_companies.get(&name).cloned();
+                    let shutdown_flag_clone = Arc::clone(shutdown_flag);
+
+                    let task = tokio::spawn(async move {
+                        process_single_company_validated(
+                            name,
+                            company_info,
+                            existing,
+                            &current_pool,
+                            &shutdown_flag_clone,
+                        ).await
+                    });
+
+                    tasks.push(task);
+                }
+            }
+            Ok(Ok(None)) => {
+                // No result (shutdown or skip)
+                processed += 1;
+
+                if let Some((name, company_info)) = pending.pop() {
+                    let current_pool = {
+                        let pool_guard = pool_mutex.lock().await;
+                        Arc::clone(&*pool_guard)
+                    };
+
+                    let existing = existing_companies.get(&name).cloned();
+                    let shutdown_flag_clone = Arc::clone(shutdown_flag);
+
+                    let task = tokio::spawn(async move {
+                        process_single_company_validated(
+                            name,
+                            company_info,
+                            existing,
+                            &current_pool,
+                            &shutdown_flag_clone,
+                        ).await
+                    });
+
+                    tasks.push(task);
+                }
+            }
+            Ok(Err(e)) => {
+                let error_msg = e.to_string();
+
+                if error_msg.contains("HARD_RESET_REQUIRED") {
+                    // ✅ FIX: Don't break, perform actual hard reset
+
+                    // Check if reset already in progress (race condition protection)
+                    let mut reset_lock = reset_in_progress.lock().await;
+                    if *reset_lock {
+                        logger::log_info("Hard reset already in progress, skipping duplicate").await;
+                        processed += 1;
+                        continue;
+                    }
+                    *reset_lock = true;
+                    drop(reset_lock); // Release lock during reset
+
+                    logger::log_error("🔴 HARD RESET THRESHOLD REACHED - INITIATING RESET SEQUENCE").await;
+                    logger::log_warn("Draining active tasks before hard reset...").await;
+
+                    // Save remaining pending count
+                    let remaining_count = pending.len();
+
+                    // Stop spawning new tasks
+                    pending.clear();
+
+                    // Wait for all active tasks to complete
+                    let mut drained = 0;
+                    while let Some(_) = tasks.next().await {
+                        drained += 1;
+                        if drained % 10 == 0 {
+                            logger::log_info(&format!("Drained {} tasks...", drained)).await;
+                        }
+                    }
+
+                    logger::log_info(&format!(
+                        "All tasks drained ({} active). {} companies need reprocessing.",
+                        drained,
+                        remaining_count
+                    )).await;
+
+                    // Perform the actual hard reset
+                    match perform_hard_reset(&pool_mutex, config, paths, monitoring, shutdown_flag).await {
+                        Ok(()) => {
+                            logger::log_info("✅ Hard reset completed successfully").await;
+                            hard_reset_count += 1;
+
+                            // ✅ FIX: Reset the error counter
+                            {
+                                let pool_guard = pool_mutex.lock().await;
+                                let current_pool = Arc::clone(&*pool_guard);
+                                current_pool.get_reset_controller().reset();
+                            }
+                            logger::log_info("✓ Error counter cleared").await;
+
+                            // ✅ FIX: Rebuild pending list from existing_companies
+                            // Only re-add companies that haven't been written yet
+                            let written_companies = {
+                                let companies = existing_companies_writer_clone.lock().await;
+                                companies.keys().cloned().collect::<std::collections::HashSet<_>>()
+                            };
+
+                            // Create new pending list: all companies minus those already written
+                            let all_companies_list: Vec<(String, CompanyInfo)> = {
+                                // Need to reload securities since we cleared pending
+                                let content = tokio::fs::read_to_string(&securities_path_cloned).await?;
+                                let all_securities: HashMap<String, CompanyInfo> = serde_json::from_str(&content)?;
+                                all_securities.into_iter()
+                                    .filter(|(name, _)| !written_companies.contains(name))
+                                    .collect()
+                            };
+
+                            pending = all_companies_list;
+
+                            logger::log_info(&format!(
+                                "Restarting with {} remaining companies (out of {} total)",
+                                pending.len(),
+                                total
+                            )).await;
+
+                            // Respawn initial batch with NEW pool
+                            for _ in 0..CONCURRENCY_LIMIT.min(pending.len()) {
+                                if let Some((name, company_info)) = pending.pop() {
+                                    let current_pool = {
+                                        let pool_guard = pool_mutex.lock().await;
+                                        Arc::clone(&*pool_guard)
+                                    };
+
+                                    let existing = existing_companies.get(&name).cloned();
+                                    let shutdown_flag_clone = Arc::clone(shutdown_flag);
+
+                                    let task = tokio::spawn(async move {
+                                        process_single_company_validated(
+                                            name,
+                                            company_info,
+                                            existing,
+                                            &current_pool,
+                                            &shutdown_flag_clone,
+                                        ).await
+                                    });
+
+                                    tasks.push(task);
+                                }
+                            }
+
+                            // Clear reset flag
+                            let mut reset_lock = reset_in_progress.lock().await;
+                            *reset_lock = false;
+                            drop(reset_lock);
+
+                            // ✅ Continue processing (don't spawn duplicate task)
+                            continue;
+                        }
+                        Err(reset_err) => {
+                            logger::log_error(&format!("Hard reset failed: {}", reset_err)).await;
+
+                            // Clear reset flag
+                            let mut reset_lock = reset_in_progress.lock().await;
+                            *reset_lock = false;
+                            drop(reset_lock);
+
+                            // Exit if hard reset fails
+                            break;
+                        }
+                    }
+                } else {
+                    // Regular error
+                    logger::log_warn(&format!("Company processing error: {}", error_msg)).await;
+                    processed += 1;
+
+                    // Spawn next task
+                    if let Some((name, company_info)) = pending.pop() {
+                        let current_pool = {
+                            let pool_guard = pool_mutex.lock().await;
+                            Arc::clone(&*pool_guard)
+                        };
+
+                        let existing = existing_companies.get(&name).cloned();
+                        let shutdown_flag_clone = Arc::clone(shutdown_flag);
+
+                        let task = tokio::spawn(async move {
+                            process_single_company_validated(
+                                name,
+                                company_info,
+                                existing,
+                                &current_pool,
+                                &shutdown_flag_clone,
+                            ).await
+                        });
+
+                        tasks.push(task);
+                    }
+                }
+            }
+            Err(e) => {
+                // Task panic
+                logger::log_error(&format!("Task panic: {}", e)).await;
+                processed += 1;
+
+                // Spawn next task
+                if let Some((name, company_info)) = pending.pop() {
+                    let current_pool = {
+                        let pool_guard = pool_mutex.lock().await;
+                        Arc::clone(&*pool_guard)
+                    };
+
+                    let existing = existing_companies.get(&name).cloned();
+                    let shutdown_flag_clone = Arc::clone(shutdown_flag);
+
+                    let task = tokio::spawn(async move {
+                        process_single_company_validated(
+                            name,
+                            company_info,
+                            existing,
+                            &current_pool,
+                            &shutdown_flag_clone,
+                        ).await
+                    });
+
+                    tasks.push(task);
+                }
+            }
+        }
+    }
+
+    logger::log_info("Main processing loop completed").await;

     // Signal writer to finish
     let _ = write_tx_for_writer.send(LogCommand::Checkpoint).await;
     let _ = write_tx_for_writer.send(LogCommand::Shutdown).await;
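The reset choreography in the hunk above boils down to four steps: stop feeding the queue, drain every in-flight future, swap the pool, and rebuild the queue from whatever has not been persisted yet. A distilled sketch of that control flow with dummy string work items (futures crate assumed; the pool swap is elided to a comment):

    use futures::stream::{FuturesUnordered, StreamExt};
    use std::collections::HashSet;

    #[tokio::main]
    async fn main() {
        let mut pending: Vec<String> = vec!["B".into(), "C".into(), "D".into()];
        let mut tasks = FuturesUnordered::new();
        tasks.push(tokio::spawn(async { "A".to_string() }));

        // 1. Stop spawning: remember how much work remains, then clear it.
        let remaining = pending.len();
        pending.clear();

        // 2. Drain: wait for every in-flight task to finish.
        let mut written: HashSet<String> = HashSet::new();
        while let Some(done) = tasks.next().await {
            written.insert(done.unwrap());
        }

        // 3. Swap the pool here (see the pool_mutex sketch earlier).

        // 4. Rebuild: everything not yet persisted goes back in the queue.
        let all = ["A", "B", "C", "D"];
        pending = all
            .iter()
            .filter(|name| !written.contains(**name))
            .map(|s| s.to_string())
            .collect();

        assert_eq!(pending.len(), remaining);
    }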
@@ -394,8 +589,8 @@ pub async fn build_companies_jsonl_streaming_parallel(
         .unwrap_or((0, 0, 0));

     logger::log_info(&format!(
-        "Completed: {} total companies ({} new, {} updated)",
-        final_count, final_new, final_updated
+        "✅ Completed: {} total companies ({} new, {} updated, {} hard resets)",
+        final_count, final_new, final_updated, hard_reset_count
     )).await;

     Ok(final_count)
@@ -415,10 +610,25 @@ async fn scrape_with_retry(
     if shutdown_flag.load(Ordering::SeqCst) {
         return Err(anyhow!("Aborted due to shutdown"));
     }

+    if pool.should_perform_hard_reset() {
+        logger::log_error("HARD_RESET_REQUIRED detected before scrape attempt").await;
+        return Err(anyhow!("HARD_RESET_REQUIRED"));
+    }
+
     match scrape_company_details_by_isin(pool, isin, shutdown_flag).await {
         Ok(result) => return Ok(result),
         Err(e) => {
+            // Check if this is a hard reset required error
+            let error_msg = e.to_string();
+            if error_msg.contains("HARD_RESET_REQUIRED") {
+                logger::log_error(&format!(
+                    "Hard reset required error for ISIN {}, propagating immediately",
+                    isin
+                )).await;
+                return Err(e); // Propagate immediately, don't retry
+            }
+
             if retries >= max_retries {
                 logger::log_error(&format!(
                     "All {} retries exhausted for ISIN {}: {}",
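scrape_with_retry thus splits failures into two classes: the reset sentinel aborts immediately, while any other error burns one retry. A compact sketch of that loop, with a hypothetical attempt function and an illustrative linear backoff (the real delay logic lives outside this hunk):

    use anyhow::{anyhow, Result};
    use std::time::Duration;

    const RESET_SENTINEL: &str = "HARD_RESET_REQUIRED";

    // Hypothetical fallible operation standing in for one scrape attempt.
    async fn attempt(n: u32) -> Result<&'static str> {
        if n < 2 { Err(anyhow!("transient timeout")) } else { Ok("details") }
    }

    // Shape of scrape_with_retry: transient errors are retried after a
    // delay, while the reset sentinel aborts at once so draining can start.
    async fn with_retry(max_retries: u32) -> Result<&'static str> {
        let mut retries = 0;
        loop {
            match attempt(retries).await {
                Ok(v) => return Ok(v),
                Err(e) => {
                    if e.to_string().contains(RESET_SENTINEL) {
                        return Err(e); // fatal: never retry past a reset request
                    }
                    if retries >= max_retries {
                        return Err(e); // transient, but the retry budget is spent
                    }
                    retries += 1;
                    tokio::time::sleep(Duration::from_millis(100 * retries as u64)).await;
                }
            }
        }
    }

    #[tokio::main]
    async fn main() {
        assert_eq!(with_retry(3).await.unwrap(), "details");
    }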
@@ -443,7 +653,7 @@ async fn scrape_with_retry(
         }
     }

-/// UPDATED: Process single company with validation and shutdown checks
+/// Process single company with validation and shutdown checks
 async fn process_single_company_validated(
     name: String,
     company_info: CompanyInfo,
@@ -485,7 +695,7 @@ async fn process_single_company_validated(
         }
     }

-    // Process each ISIN with validation
+    // ✅ FIX: Process each ISIN independently with per-ISIN status checking
     for (isin, figi_tickers) in unique_isin_ticker_pairs {
         // Check shutdown before each ISIN
         if shutdown_flag.load(Ordering::SeqCst) {
@@ -506,9 +716,10 @@ async fn process_single_company_validated(
             }
         }

-        let has_yahoo_ticker = tickers.iter().any(|t| t.starts_with("YAHOO:"));
+        // ✅ FIX: Check if THIS SPECIFIC ISIN has Yahoo data
+        let has_yahoo_ticker_for_this_isin = tickers.iter().any(|t| t.starts_with("YAHOO:"));

-        if !has_yahoo_ticker {
+        if !has_yahoo_ticker_for_this_isin {
             logger::log_info(&format!("Fetching Yahoo details for {} (ISIN: {})", name, isin)).await;

             match scrape_with_retry(pool, &isin, 3, shutdown_flag).await {
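The per-ISIN fix works because each ISIN's ticker list doubles as its status record: any YAHOO:-prefixed entry means that specific ISIN was already attempted, and YAHOO:ERROR marks a failure so a later run can retry it. A small sketch of reading those markers (illustrative ISINs and tickers, no scraping):

    use std::collections::HashMap;

    fn main() {
        // Ticker lists double as per-ISIN status records.
        let mut isin_tickers: HashMap<&str, Vec<String>> = HashMap::new();
        isin_tickers.insert("US0378331005", vec!["AAPL".into(), "YAHOO:AAPL".into()]);
        isin_tickers.insert("US02079K3059", vec!["GOOGL".into()]);
        isin_tickers.insert("US5949181045", vec!["MSFT".into(), "YAHOO:ERROR".into()]);

        for (isin, tickers) in &isin_tickers {
            let attempted = tickers.iter().any(|t| t.starts_with("YAHOO:"));
            let failed = tickers.iter().any(|t| t == "YAHOO:ERROR");

            // Scrape when this specific ISIN was never attempted; a failed
            // marker can be treated as retryable on a later run.
            if !attempted {
                println!("{isin}: fetch Yahoo details");
            } else if failed {
                println!("{isin}: previously failed, eligible for retry");
            }
        }
    }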
@@ -539,11 +750,24 @@
                         logger::log_warn(&format!("Shutdown during scrape for ISIN {}", isin)).await;
                         break;
                     }
+
+                    // Check if this is a hard reset required error
+                    let error_msg = e.to_string();
+                    if error_msg.contains("HARD_RESET_REQUIRED") {
+                        logger::log_error(&format!(
+                            "Hard reset required during ISIN {} processing, propagating error",
+                            isin
+                        )).await;
+                        return Err(e); // ← CRITICAL: Propagate immediately
+                    }
+
                     logger::log_warn(&format!(
                         "✗ Yahoo lookup error for ISIN {} (company: {}): {}",
                         isin, name, e
                     )).await;
-                    // Continue with next ISIN
+
+                    // ✅ FIX: Mark this ISIN as failed to enable retry
+                    tickers.push("YAHOO:ERROR".to_string());
                 }
             }
         }
@@ -558,6 +782,11 @@ async fn process_single_company_validated(
         return Ok(None);
     }

+    if pool.should_perform_hard_reset() {
+        logger::log_error("HARD_RESET_REQUIRED detected during company processing").await;
+        return Err(anyhow!("HARD_RESET_REQUIRED"));
+    }
+
     if !isin_tickers_map.is_empty() {
         let company_entry = CompanyCrossPlatformInfo {
             name: name.clone(),
@@ -74,6 +74,11 @@ pub async fn scrape_company_details_by_isin(
         logger::log_warn(&format!("Shutdown detected, skipping ISIN: {}", isin)).await;
         return Ok(None);
     }

+    if pool.should_perform_hard_reset() {
+        logger::log_warn("HARD_RESET_REQUIRED detected before starting ISIN scrape").await;
+        return Err(anyhow!("HARD_RESET_REQUIRED"));
+    }
+
     let isin_owned = isin.to_string();
     let shutdown_clone = Arc::clone(shutdown_flag);