ft: separated Model class into TimeIndependent and TimeDependent subclasses, with corresponding methods related to their execution nature.

tests: adapted tests to the new Model subclasses
sty: formatted code with black -l 96, flake8 and pydocstyle
ft: created a ModelFactory class, which handles instantiation of the appropriate model type (see the sketch below)
refac: changed TimeIndependentModel arg use_db to store_db
rep: removed obsolete files
build: homogenized dev requirements in setup.cfg and requirements_dev (used for tox)
examples: removed version pins from the case_f/pymock requirements
dep: added new developer dependencies for linting/testing
pabloitu committed Jul 28, 2024
1 parent 5795d21 commit 84f8e71
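
The diff for floatcsep/model.py itself is not loaded on this page, so the following is only a minimal sketch of the class layout the commit message describes. The names TimeIndependentModel, TimeDependentModel, ModelFactory, and store_db come from the commit notes; every method name, signature, and discriminator below is an illustrative assumption, not the actual implementation.

# Illustrative sketch only -- method names, signatures, and the "func"
# discriminator key are assumptions, not the actual floatcsep code.
from abc import ABC, abstractmethod


class Model(ABC):
    """Common interface for forecast models (sketch)."""

    def __init__(self, name, path):
        self.name = name
        self.path = path

    @abstractmethod
    def create_forecast(self, time_window):
        """Produce a forecast for the given time window."""


class TimeIndependentModel(Model):
    """A file-based forecast, reused across time windows."""

    def __init__(self, name, path, store_db=False):
        super().__init__(name, path)
        self.store_db = store_db  # renamed from use_db in this commit

    def create_forecast(self, time_window):
        # A time-independent forecast only needs to be read once and,
        # optionally, stored to a database.
        ...


class TimeDependentModel(Model):
    """A model whose source code is run to generate each forecast."""

    def create_forecast(self, time_window):
        # A time-dependent model is re-executed for every time window.
        ...


class ModelFactory:
    """Decides which Model subclass to instantiate from a config entry."""

    @staticmethod
    def create_model(config: dict) -> Model:
        if "func" in config:  # hypothetical discriminator, for illustration
            return TimeDependentModel(config["name"], config["path"])
        return TimeIndependentModel(
            config["name"], config["path"], store_db=config.get("store_db", False)
        )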
Showing 23 changed files with 2,075 additions and 8,267 deletions.
6 changes: 3 additions & 3 deletions examples/case_e/models.yml
@@ -1,12 +1,12 @@
 - ALM:
     path: models/gulia-wiemer.ALM.italy.10yr.2010-01-01.xml
     forecast_unit: 10
-    use_db: True
+    store_db: True
 - MPS04:
     path: models/meletti.MPS04.italy.10yr.2010-01-01.xml
     forecast_unit: 10
-    use_db: True
+    store_db: True
 - TripleS-CPTI:
     path: models/zechar.TripleS-CPTI.italy.10yr.2010-01-01.xml
     forecast_unit: 10
-    use_db: True
+    store_db: True
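
For context, a hedged sketch of how an entry above might be consumed after the rename. Only the keys (path, forecast_unit, store_db) come from the diff; the loading code itself is an assumption.

# Sketch: reading the models.yml entries above (loader code is assumed).
import yaml

with open("examples/case_e/models.yml") as f:
    model_configs = yaml.safe_load(f)  # a list of {name: attributes} dicts

for entry in model_configs:
    name, attrs = next(iter(entry.items()))
    print(name, attrs["path"], attrs.get("store_db", False))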
4 changes: 2 additions & 2 deletions examples/case_f/pymock/setup.cfg
@@ -14,8 +14,8 @@ url = https://git.gfz-potsdam.de/csep/it_experiment/models/pymock
 packages =
     pymock
 install_requires =
-    numpy==1.23.4
-    matplotlib==3.4.3
+    numpy
+    matplotlib
 python_requires = >=3.7
 zip_safe = no

2 changes: 1 addition & 1 deletion floatcsep/__init__.py
@@ -7,4 +7,4 @@
 from floatcsep import model
 from floatcsep import readers

-__version__ = '0.1.3'
+__version__ = "0.1.4"
182 changes: 100 additions & 82 deletions floatcsep/accessors.py
@@ -14,25 +14,32 @@
TIMEOUT = 180


-def query_gcmt(start_time, end_time, min_magnitude=5.0,
-               max_depth=None,
-               catalog_id=None,
-               min_latitude=None, max_latitude=None,
-               min_longitude=None, max_longitude=None):
-
-    eventlist = _query_gcmt(start_time=start_time,
-                            end_time=end_time,
-                            min_magnitude=min_magnitude,
-                            min_latitude=min_latitude,
-                            max_latitude=max_latitude,
-                            min_longitude=min_longitude,
-                            max_longitude=max_longitude,
-                            max_depth=max_depth)
-
-    catalog = CSEPCatalog(data=eventlist,
-                          name='gCMT',
-                          catalog_id=catalog_id,
-                          date_accessed=utc_now_datetime())
+def query_gcmt(
+    start_time,
+    end_time,
+    min_magnitude=5.0,
+    max_depth=None,
+    catalog_id=None,
+    min_latitude=None,
+    max_latitude=None,
+    min_longitude=None,
+    max_longitude=None,
+):
+
+    eventlist = _query_gcmt(
+        start_time=start_time,
+        end_time=end_time,
+        min_magnitude=min_magnitude,
+        min_latitude=min_latitude,
+        max_latitude=max_latitude,
+        min_longitude=min_longitude,
+        max_longitude=max_longitude,
+        max_depth=max_depth,
+    )
+
+    catalog = CSEPCatalog(
+        data=eventlist, name="gCMT", catalog_id=catalog_id, date_accessed=utc_now_datetime()
+    )
     return catalog
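
A usage sketch for query_gcmt as reformatted above; the time range and bounding box are arbitrary example values.

# Usage sketch; dates and bounds are arbitrary examples.
from datetime import datetime

catalog = query_gcmt(
    start_time=datetime(2010, 1, 1),
    end_time=datetime(2020, 1, 1),
    min_magnitude=5.0,
    min_latitude=36.0,
    max_latitude=48.0,
    min_longitude=6.0,
    max_longitude=19.0,
)
# Returns a CSEPCatalog named "gCMT" with the access date recorded.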


@@ -51,25 +58,23 @@ def from_zenodo(record_id, folder, force=False):
"""
# Grab the urls and filenames and checksums
r = requests.get(f"https://zenodo.org/api/records/{record_id}")
download_urls = [f['links']['self'] for f in r.json()['files']]
filenames = [(f['key'], f['checksum']) for f in r.json()['files']]
r = requests.get(f"https://zenodo.org/api/records/{record_id}", timeout=3)
download_urls = [f["links"]["self"] for f in r.json()["files"]]
filenames = [(f["key"], f["checksum"]) for f in r.json()["files"]]

# Download and verify checksums
for (fname, checksum), url in zip(filenames, download_urls):
full_path = os.path.join(folder, fname)
if os.path.exists(full_path):
value, digest = _check_hash(full_path, checksum)
if value != digest:
print(
f"Checksum is different: re-downloading {fname}"
f" from Zenodo...")
print(f"Checksum is different: re-downloading {fname}" f" from Zenodo...")
_download_file(url, full_path)
elif force:
print(f"Re-downloading {fname} from Zenodo...")
_download_file(url, full_path)
else:
print(f'Found file {fname}. Checksum OK.')
print(f"Found file {fname}. Checksum OK.")

else:
print(f"Downloading {fname} from Zenodo...")
@@ -96,24 +101,31 @@ def from_git(url, path, branch=None, depth=1, **kwargs):
         the pygit repository
     """

-    kwargs.update({'depth': depth})
+    kwargs.update({"depth": depth})
     git.refresh()

     try:
         repo = git.Repo(path)
     except (git.NoSuchPathError, git.InvalidGitRepositoryError):
         repo = git.Repo.clone_from(url, path, branch=branch, **kwargs)
-        git_dir = os.path.join(path, '.git')
+        git_dir = os.path.join(path, ".git")
         if os.path.isdir(git_dir):
             shutil.rmtree(git_dir)

     return repo
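
A usage sketch for from_git as diffed above; the URL and destination path are placeholders.

# Usage sketch; URL and path are placeholders.
repo = from_git(
    url="https://git.example.org/models/some-model.git",
    path="models/some_model",
    branch="main",
    depth=1,
)
# On a fresh clone the .git directory is deleted afterwards, leaving a
# plain source tree rather than a working git repository.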


-def _query_gcmt(start_time, end_time, min_magnitude=3.50,
-                min_latitude=None, max_latitude=None,
-                min_longitude=None, max_longitude=None,
-                max_depth=1000, extra_gcmt_params=None):
+def _query_gcmt(
+    start_time,
+    end_time,
+    min_magnitude=3.50,
+    min_latitude=None,
+    max_latitude=None,
+    min_longitude=None,
+    max_longitude=None,
+    max_depth=1000,
+    extra_gcmt_params=None,
+):
"""
Return GCMT eventlist from IRIS web service.
For details see "https://service.iris.edu/fdsnws/event/1/"
Expand All @@ -134,38 +146,44 @@ def _query_gcmt(start_time, end_time, min_magnitude=3.50,
"""
extra_gcmt_params = extra_gcmt_params or {}

eventlist = gcmt_search(minmagnitude=min_magnitude,
minlatitude=min_latitude,
maxlatitude=max_latitude,
minlongitude=min_longitude,
maxlongitude=max_longitude,
starttime=start_time.isoformat(),
endtime=end_time.isoformat(),
maxdepth=max_depth, **extra_gcmt_params)
eventlist = gcmt_search(
minmagnitude=min_magnitude,
minlatitude=min_latitude,
maxlatitude=max_latitude,
minlongitude=min_longitude,
maxlongitude=max_longitude,
starttime=start_time.isoformat(),
endtime=end_time.isoformat(),
maxdepth=max_depth,
**extra_gcmt_params,
)

return eventlist

-def gcmt_search(format='text',
-                starttime=None,
-                endtime=None,
-                updatedafter=None,
-                minlatitude=None,
-                maxlatitude=None,
-                minlongitude=None,
-                maxlongitude=None,
-                latitude=None,
-                longitude=None,
-                maxradius=None,
-                catalog='GCMT',
-                contributor=None,
-                maxdepth=1000,
-                maxmagnitude=10.0,
-                mindepth=-100,
-                minmagnitude=0,
-                offset=1,
-                orderby='time-asc',
-                host=None,
-                verbose=False):
+
+def gcmt_search(
+    format="text",
+    starttime=None,
+    endtime=None,
+    updatedafter=None,
+    minlatitude=None,
+    maxlatitude=None,
+    minlongitude=None,
+    maxlongitude=None,
+    latitude=None,
+    longitude=None,
+    maxradius=None,
+    catalog="GCMT",
+    contributor=None,
+    maxdepth=1000,
+    maxmagnitude=10.0,
+    mindepth=-100,
+    minmagnitude=0,
+    offset=1,
+    orderby="time-asc",
+    host=None,
+    verbose=False,
+):
     """Search the IRIS database for events matching input criteria.
     This search function is a wrapper around the ComCat Web API described here:
     https://service.iris.edu/fdsnws/event/1/
@@ -225,16 +243,16 @@ def gcmt_search(format='text',

     for key, value in inputargs.items():
         if value is True:
-            newargs[key] = 'true'
+            newargs[key] = "true"
             continue
         if value is False:
-            newargs[key] = 'false'
+            newargs[key] = "false"
             continue
         if value is None:
             continue
         newargs[key] = value

-    del newargs['verbose']
+    del newargs["verbose"]

     events = _search_gcmt(**newargs)

@@ -249,11 +267,11 @@ def _search_gcmt(**_newargs):
     paramstr = urlencode(_newargs)
     url = HOST_CATALOG + paramstr
     fh = request.urlopen(url, timeout=TIMEOUT)
-    data = fh.read().decode('utf8').split('\n')
+    data = fh.read().decode("utf8").split("\n")
     fh.close()
     eventlist = []
     for line in data[1:]:
-        line_ = line.split('|')
+        line_ = line.split("|")
         if len(line_) != 1:
             id_ = line_[0]
             time_ = datetime.fromisoformat(line_[1])
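
For reference, the loop above skips the header row (data[1:]) and splits each remaining row on "|". A sketch of the row shape it expects, assuming the FDSN "text" event format; all field values below are made up.

# Illustrative only: the shape of one row the parser above consumes.
from datetime import datetime

row = "1234567|2020-01-01T12:00:00|42.35|13.40|10.0|GCMT|GCMT|GCMT|1234567|Mw|5.5|GCMT|EXAMPLE REGION"
fields = row.split("|")
event_id = fields[0]
event_time = datetime.fromisoformat(fields[1])  # mirrors the parsing above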
@@ -280,47 +298,47 @@ def _download_file(url: str, filename: str) -> None:
     progress_bar_length = 72
     block_size = 1024

-    r = requests.get(url, stream=True)
-    total_size = r.headers.get('content-length', False)
+    r = requests.get(url, timeout=3, stream=True)
+    total_size = r.headers.get("content-length", False)
     if not total_size:
         with requests.head(url) as h:
             try:
-                total_size = int(h.headers.get('Content-Length', 0))
+                total_size = int(h.headers.get("Content-Length", 0))
             except TypeError:
                 total_size = 0
     else:
         total_size = int(total_size)
     download_size = 0
     if total_size:
-        print(
-            f'Downloading file with size of {total_size / block_size:.3f} kB')
+        print(f"Downloading file with size of {total_size / block_size:.3f} kB")
     else:
-        print(f'Downloading file with unknown size')
-    with open(filename, 'wb') as f:
+        print(f"Downloading file with unknown size")
+    with open(filename, "wb") as f:
         for data in r.iter_content(chunk_size=block_size):
             download_size += len(data)
             f.write(data)
             if total_size:
-                progress = int(
-                    progress_bar_length * download_size / total_size)
+                progress = int(progress_bar_length * download_size / total_size)
                 sys.stdout.write(
-                    '\r[{}{}] {:.1f}%'.format('█' * progress, '.' *
-                                              (progress_bar_length - progress),
-                                              100 * download_size / total_size)
+                    "\r[{}{}] {:.1f}%".format(
+                        "█" * progress,
+                        "." * (progress_bar_length - progress),
+                        100 * download_size / total_size,
+                    )
                 )
                 sys.stdout.flush()
-    sys.stdout.write('\n')
+    sys.stdout.write("\n")


 def _check_hash(filename, checksum):
     """
     Checks if existing file hash matches checksum from url
     """
-    algorithm, value = checksum.split(':')
+    algorithm, value = checksum.split(":")
     if not os.path.exists(filename):
-        return value, 'invalid'
+        return value, "invalid"
     h = hashlib.new(algorithm)
-    with open(filename, 'rb') as f:
+    with open(filename, "rb") as f:
         while True:
             data = f.read(4096)
             if not data:
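
A usage sketch for _check_hash as diffed above. Zenodo reports checksums as "<algorithm>:<hexdigest>", which the split(":") above separates; the digest below is a placeholder value.

# Usage sketch; the md5 digest is a placeholder.
value, digest = _check_hash(
    "datasets/zenodo/data.csv", "md5:0123456789abcdef0123456789abcdef"
)
if value != digest:  # mirrors the caller in from_zenodo above
    print("Checksum mismatch: file missing or corrupted")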
(Diff truncated: the remaining 19 of the 23 changed files are not shown.)
