- """Downloads all domain data sets from FAOSTAT website."""
- from faostat_data_primap.download import (
- download_all_domains,
- )
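- # The commented-out block below appears to mirror the packaged
- # download_all_domains implementation imported above, kept for reference.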
- # def download_all_domains(
- #     domains: dict[str, dict[str, str]] = domains,
- #     downloaded_data_path: pathlib.Path = downloaded_data_path,
- # ) -> list[str]:
- # """
- # Download and unpack all climate-related domains from the FAO stat website.
- #
- # Extract the date when the data set was last updated and create a directory
- # with the same name. Download the zip files for each domain if
- # it does not already exist. Unpack the zip file and save in
- # the same directory.
- #
- # Parameters
- # ----------
- # sources
- # Name of data set, url to domain overview,
- # and download url
- #
- # Returns
- # -------
- # List of input files that have been fetched or found locally.
- #
- # """
- #     downloaded_files = []
- #     for ds_name, urls in domains.items():
- #         url = urls["url_domain"]
- #         url_download = urls["url_download"]
- #         url_methodology = urls["url_methodology"]
- #
- #         soup = get_html_content(url)
- #
- #         last_updated = get_last_updated_date(soup, url)
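- #         # The local copy is versioned by this date: each download is
- #         # stored under a directory named after it.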
- #
- #         if not downloaded_data_path.exists():
- #             downloaded_data_path.mkdir()
- #
- #         ds_path = downloaded_data_path / ds_name
- #         if not ds_path.exists():
- #             ds_path.mkdir()
- #
- #         local_data_dir = ds_path / last_updated
- #         if not local_data_dir.exists():
- #             local_data_dir.mkdir()
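- #         # Resulting layout: downloaded_data_path/<ds_name>/<last_updated>/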
- #
- #         download_methodology(save_path=local_data_dir, url_download=url_methodology)
- #
- #         local_filename = local_data_dir / f"{ds_name}.zip"
- #
- #         download_file(url_download=url_download, save_path=local_filename)
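- #         # download_file is assumed to skip the request when the zip
- #         # already exists locally (per the docstring above).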
- #
- #         downloaded_files.append(str(local_filename))
- #
- #         unzip_file(local_filename)
- #
- #     return downloaded_files
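- # For reference, a sketch of the ``domains`` mapping iterated above,
- # inferred from the key lookups in the loop; the entry name and URLs
- # are placeholders, not real configuration values:
- #
- # domains = {
- #     "<domain_name>": {
- #         "url_domain": "<domain overview page>",
- #         "url_download": "<bulk download zip>",
- #         "url_methodology": "<methodology document>",
- #     },
- # }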
- if __name__ == "__main__":
-     download_all_domains()
|