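"""Download Common Crawl files in parallel.

Reads a gzip-compressed listing of Common Crawl paths (one relative
path per line, e.g. a wet.paths.gz file) and downloads each file into
a target directory using a thread pool.
"""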
import concurrent.futures
import gzip
import os
import sys

import requests


def download_file(url, path):
    # Paths in the listing are relative to the Common Crawl data host.
    url = "https://data.commoncrawl.org/" + url
    file_name = url.split('/')[-1]
    file_path = os.path.join(path, file_name)

    with requests.get(url, stream=True) as response:
        # Raise on HTTP errors (e.g. 404) so main() can report the failure.
        response.raise_for_status()
        # Stream to disk in chunks so large files need not fit in memory.
        with open(file_path, "wb") as file:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:  # skip keep-alive chunks
                    file.write(chunk)
    return f"Downloaded {file_path}"


def main(file_path, num_workers, directory):
    # The input is a gzip-compressed text file with one path per line.
    with gzip.open(file_path, 'rt', encoding='utf-8') as file:
        urls = [line for line in file.read().splitlines() if line]

    # A thread pool suits this I/O-bound workload; each worker handles one download.
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
        future_to_url = {executor.submit(download_file, url, directory): url for url in urls}
        for future in concurrent.futures.as_completed(future_to_url):
            url = future_to_url[future]
            try:
                print(future.result())
            except Exception as e:
                print(f"Failed to download {url}: {e}")


if __name__ == "__main__":
    if len(sys.argv) != 4:
        sys.exit(f"Usage: {sys.argv[0]} <paths_file.gz> <num_workers> <output_directory>")
    gzipped_file_path = sys.argv[1]
    num_workers = int(sys.argv[2])
    directory = sys.argv[3]
    os.makedirs(directory, exist_ok=True)  # ensure the output directory exists
    main(gzipped_file_path, num_workers, directory)