diff --git a/docs/releases/1.6.2.md b/docs/releases/1.6.2.md
new file mode 100644
index 0000000..536d3f9
--- /dev/null
+++ b/docs/releases/1.6.2.md
@@ -0,0 +1,11 @@
+# Jetstream v1.6.2 Release Notes
+
+## Major changes
+
+- Fixed issue #131
+- Fixed parsing of account info when the cluster does not supply accounting info
+- Fixed "RuntimeError: generator ignored GeneratorExit" exception handling
+
+## Dev Notes
+
+- Security issues resolved from dependabot
diff --git a/jetstream/__init__.py b/jetstream/__init__.py
index 43780b6..43cf3c1 100644
--- a/jetstream/__init__.py
+++ b/jetstream/__init__.py
@@ -8,7 +8,7 @@
 __author__ = 'Ryan Richholt'
 __email__ = 'rrichholt@tgen.org'
-__version__ = '1.6.1'
+__version__ = '1.6.2'
 
 
 # Configure parallel library dependencies (Used by numpy)
diff --git a/jetstream/backends/slurm.py b/jetstream/backends/slurm.py
index 0f1aec6..e5003bd 100755
--- a/jetstream/backends/slurm.py
+++ b/jetstream/backends/slurm.py
@@ -446,11 +446,11 @@ def launch_sacct(*job_ids, delimiter=sacct_delimiter, raw=False):
 def parse_sacct(data, delimiter=sacct_delimiter, id_pattern=job_id_pattern):
     """Parse stdout from sacct to a dictionary of job ids and data."""
     jobs = dict()
-    lines = iter(data.strip().splitlines())
-    header = next(lines).strip().split(delimiter)
+    lines = iter(data.splitlines())
+    header = next(lines).split(delimiter)
 
     for line in lines:
-        row = dict(zip(header, line.strip().split(delimiter)))
+        row = dict(zip(header, line.split(delimiter)))
 
         try:
             match = id_pattern.match(row['JobID'])
diff --git a/jetstream/pipelines.py b/jetstream/pipelines.py
index 7f00b2a..4110497 100644
--- a/jetstream/pipelines.py
+++ b/jetstream/pipelines.py
@@ -163,7 +163,7 @@ def find_pipelines(*dirs):
             p = Pipeline(path)
             log.debug(f'Found {p} at {path}')
             yield p
-        except :
+        except Exception:
             log.debug(f'Failed to load: {path}')
             yield from find_pipelines(path)
 
diff --git a/jetstream/runner.py b/jetstream/runner.py
index ad9e275..9def43e 100644
--- a/jetstream/runner.py
+++ b/jetstream/runner.py
@@ -237,8 +237,7 @@ def process_exec_directives(self, task):
         exec_directive = task.directives.get('exec')
 
         if exec_directive:
-            env = {'runner': self, 'task': task}
-            exec(exec_directive, None, env)
+            exec(exec_directive)
             self._workflow_graph = self.workflow.reload_graph()
             self._workflow_iterator = iter(self.workflow.graph)
 
diff --git a/requirements.txt b/requirements.txt
index a7879ec..cb77929 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,8 +1,8 @@
 confuse==1.0.0
 decorator==4.4.0
 filelock==3.0.12
-Jinja2==2.10.1
+Jinja2==2.11.3
 MarkupSafe==1.1.1
 networkx==2.3
-PyYAML==5.1.1
+PyYAML==5.4
 ulid-py==0.0.9
\ No newline at end of file
diff --git a/tests/templates/dependencies_3.jst b/tests/templates/dependencies_3.jst
index 010b03e..b79931e 100644
--- a/tests/templates/dependencies_3.jst
+++ b/tests/templates/dependencies_3.jst
@@ -1,16 +1,16 @@
-# This template tests dynamic features with the exec directive. 
+# This template tests dynamic features with the exec directive.
 # During a run, add_tasks will add a new task to the workflow. The
-# last task will wait for the new task to complete via the 
+# last task will wait for the new task to complete via the
 # after-re directive.
 # Expected stdout: "Hello, world! All done!"
 
 - name: start
-  cmd: printf 'Hello, ' 
+  cmd: printf 'Hello, '
 
 - name: add_tasks
   after: start
   exec: |
-    runner.workflow.new_task(
+    self.workflow.new_task(
       name='dynamic_task',
       cmd="printf 'world! '"
     )
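
A note on the `parse_sacct` change in `jetstream/backends/slurm.py`: dropping the `strip()` calls matters because stripping a line before splitting can also remove trailing empty fields. The sketch below illustrates that failure mode with a tab delimiter; the real `sacct_delimiter` is defined outside this diff, so the delimiter here is an assumption for illustration only.

```python
# Hedged illustration of the parse_sacct fix; the tab delimiter is an
# assumption (the real sacct_delimiter is not shown in this diff). When
# the delimiter is whitespace-like, str.strip() eats trailing empty
# fields, so rows from clusters that supply no accounting info lose columns.
delimiter = '\t'
header = 'JobID\tState\tAccount'.split(delimiter)
line = '1234\tCOMPLETED\t'  # Account field empty: no accounting info supplied

old_row = dict(zip(header, line.strip().split(delimiter)))  # pre-1.6.2 behavior
new_row = dict(zip(header, line.split(delimiter)))          # 1.6.2 behavior

print(old_row)  # {'JobID': '1234', 'State': 'COMPLETED'} -- 'Account' key lost
print(new_row)  # {'JobID': '1234', 'State': 'COMPLETED', 'Account': ''}
```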
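
The `except Exception:` fix in `find_pipelines` is what resolves the "RuntimeError: generator ignored GeneratorExit" item in the release notes. A bare `except:` catches `BaseException`, including the `GeneratorExit` thrown into a generator when it is closed early; if the generator swallows it and yields again, Python raises that RuntimeError. A standalone demonstration (not Jetstream code):

```python
# Standalone demonstration of why a bare except inside a generator
# raises "generator ignored GeneratorExit".
def pipelines_broken(paths):
    for path in paths:
        try:
            yield path.upper()   # stand-in for yielding a Pipeline
        except:                  # also catches GeneratorExit
            continue             # swallows it, yields again -> RuntimeError

def pipelines_fixed(paths):
    for path in paths:
        try:
            yield path.upper()
        except Exception:        # GeneratorExit only subclasses BaseException
            continue

gen = pipelines_broken(['a', 'b'])
next(gen)
try:
    gen.close()   # throws GeneratorExit into the suspended generator
except RuntimeError as e:
    print(e)      # generator ignored GeneratorExit

gen = pipelines_fixed(['a', 'b'])
next(gen)
gen.close()       # GeneratorExit propagates; generator closes cleanly
```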
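
The `runner.py` and `dependencies_3.jst` changes go together: calling `exec(exec_directive)` with no explicit namespaces runs the directive in the scope of `process_exec_directives` itself, so exec directives now reach the runner through `self` instead of the previously injected `runner` name. A minimal sketch of that scoping behavior, using a stand-in class rather than the real Runner:

```python
# Minimal sketch of the exec() scoping change; Demo is a stand-in class,
# not the real jetstream Runner. Called bare inside a method, exec() sees
# the method's local names ('self', 'directive'), which is why the test
# template now writes self.workflow.new_task(...) rather than runner.workflow....
class Demo:
    def process_exec_directives(self, directive):
        exec(directive)  # no globals/locals passed: runs in this method's scope

Demo().process_exec_directives("print(type(self).__name__)")  # prints: Demo
```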