File size: 1,297 Bytes
9c35031
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import concurrent.futures
import requests
import gzip
import os
import sys

def download_file(url, path, timeout=60):
    """Download one Common Crawl file into the directory *path*.

    Args:
        url: File path relative to the Common Crawl data root
            (e.g. "crawl-data/CC-MAIN-.../file.warc.gz").
        path: Destination directory; must already exist.
        timeout: Seconds to wait for connect/read before aborting
            (new optional parameter; default keeps callers working).

    Returns:
        A human-readable status string naming the written file.

    Raises:
        requests.HTTPError: on a 4xx/5xx response.
        requests.RequestException: on connection failure or timeout.
    """
    url = "https://data.commoncrawl.org/" + url
    file_name = url.split('/')[-1]
    file_path = os.path.join(path, file_name)

    # timeout= prevents a dead connection from hanging a worker thread forever.
    with requests.get(url, stream=True, timeout=timeout) as response:
        # Fail loudly on HTTP errors instead of silently saving the
        # server's error page as if it were the requested file.
        response.raise_for_status()
        with open(file_path, "wb") as file:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:  # skip keep-alive chunks
                    file.write(chunk)
    return f"Downloaded {file_path}"

def main(file_path, num_workers, directory):
    """Read a gzipped list of URLs and download each one concurrently.

    Args:
        file_path: Path to a gzip-compressed text file, one URL per line.
        num_workers: Number of concurrent downloader threads.
        directory: Destination directory passed through to download_file.
    """
    with gzip.open(file_path, 'rt', encoding='utf-8') as handle:
        url_list = handle.read().splitlines()

    with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as pool:
        # Map each in-flight future back to its URL so failures can be reported.
        pending = {
            pool.submit(download_file, entry, directory): entry
            for entry in url_list
        }
        for finished in concurrent.futures.as_completed(pending):
            entry = pending[finished]
            try:
                print(finished.result())
            except Exception as exc:
                print(f"Failed to download {entry}: {exc}")

if __name__ == "__main__":
    # Validate argv up front: a missing argument previously produced a raw
    # IndexError traceback; a usage message is clearer for the operator.
    if len(sys.argv) != 4:
        sys.exit(
            f"Usage: {sys.argv[0]} <gzipped_url_list> <num_workers> <output_directory>"
        )
    gzipped_file_path = sys.argv[1]   # gzip file with one URL path per line
    num_workers = int(sys.argv[2])    # thread-pool size
    directory = sys.argv[3]           # destination directory for downloads
    main(gzipped_file_path, num_workers, directory)