Add parallelized scraping instances for company Yahoo ticker seeding

This commit is contained in:
2025-12-18 13:05:23 +01:00
parent d26e833d93
commit 9c66f0d361
7 changed files with 842 additions and 68 deletions

View File

@@ -14,11 +14,27 @@ use util::directories::DataPaths;
use util::{logger, opnv};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::process::Command;
#[tokio::main]
async fn main() -> Result<()> {
let output = if cfg!(target_os = "windows") {
Command::new("cmd")
.args(["/C", "docker desktop start"])
.output()
.expect("failed to execute process")
} else {
Command::new("sh")
.arg("-c")
.arg("echo hello")
.output()
.expect("failed to execute process")
};
let _start_docker_desktop = output.stdout;
cleanup_all_proxy_containers().await.ok();
let config = Config::load().map_err(|err| {
eprintln!("Failed to load config: {}", err);
err
@@ -40,7 +56,7 @@ async fn main() -> Result<()> {
// === Step 1: Fetch VPNBook configs ===
let proxy_pool: Option<Arc<DockerVpnProxyPool>> = if config.enable_vpn_rotation {
logger::log_info("VPN Rotation Enabled Fetching latest VPNBook configs").await;
let temp_pool = Arc::new(ChromeDriverPool::new_with_proxy_and_task_limit(config.max_parallel_instances, None, config.max_tasks_per_instance).await?);
let temp_pool = Arc::new(ChromeDriverPool::new_with_proxy_and_task_limit(config.max_parallel_instances, None, 1).await?);
let (username, password, _files) = opnv::fetch_vpnbook_configs(&temp_pool, paths.cache_dir()).await?;
logger::log_info(&format!("VPNBook credentials → User: {}", username)).await;