diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index f22f642..18138a5 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -1,7 +1,13 @@
name: flake8 Lint
-on: [push, pull_request]
-
+on:
+  push:
+    paths:
+      - "cyto_ml/**"
+  pull_request:
+    paths:
+      - "cyto_ml/**"
+
jobs:
flake8-lint:
runs-on: ubuntu-latest
diff --git a/.github/workflows/pages_graphs.yml b/.github/workflows/pages_graphs.yml
new file mode 100644
index 0000000..fc087ff
--- /dev/null
+++ b/.github/workflows/pages_graphs.yml
@@ -0,0 +1,59 @@
+name: Pages and Graphviz re-render
+on:
+  push:
+    paths:
+      - 'docs/**/*'
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
+permissions:
+ contents: read
+ pages: write
+ id-token: write
+
+jobs:
+ build:
+ name: Rebuild graphs and pages
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: docs
+ steps:
+ - uses: actions/checkout@v4
+ - name: Setup Ruby
+ uses: ruby/setup-ruby@v1
+ with:
+ ruby-version: '3.3' # Not needed with a .ruby-version file
+ bundler-cache: true # runs 'bundle install' and caches installed gems automatically
+ cache-version: 0 # Increment this number if you need to re-download cached gems
+ working-directory: '${{ github.workspace }}/docs'
+ - name: Setup Pages
+ id: pages
+ uses: actions/configure-pages@v3
+ - name: Build with Jekyll
+ # Outputs to the './_site' directory by default
+      # The diagram SVGs aren't produced by Jekyll; the Graphviz step below renders them straight into ./_site
+ run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
+ env:
+ JEKYLL_ENV: production
+ - uses: ts-graphviz/setup-graphviz@v2
+ - name: Diagrams
+        run: bash ../scripts/render_diagrams.sh
+ - name: Upload artifact
+ # Automatically uploads an artifact from the './_site' directory by default
+ uses: actions/upload-pages-artifact@v1
+ with:
+ path: "docs/_site"
+
+ # Deployment job
+ deploy:
+ environment:
+ name: github-pages
+ url: ${{ steps.deployment.outputs.page_url }}
+ runs-on: ubuntu-latest
+ needs: build
+ steps:
+ - name: Deploy to GitHub Pages
+ id: deployment
+ uses: actions/deploy-pages@v2
\ No newline at end of file
diff --git a/.github/workflows/pytest_coverage.yml b/.github/workflows/pytest_coverage.yml
index 559f1b7..37418c8 100644
--- a/.github/workflows/pytest_coverage.yml
+++ b/.github/workflows/pytest_coverage.yml
@@ -2,6 +2,7 @@
on:
pull_request:
branches: [ "main" ]
+    paths: [ "cyto_ml/**" ]
jobs:
tests:
diff --git a/docs/Gemfile b/docs/Gemfile
new file mode 100644
index 0000000..7139f47
--- /dev/null
+++ b/docs/Gemfile
@@ -0,0 +1,35 @@
+source "https://rubygems.org"
+# Hello! This is where you manage which Jekyll version is used to run.
+# When you want to use a different version, change it below, save the
+# file and run `bundle install`. Run Jekyll with `bundle exec`, like so:
+#
+# bundle exec jekyll serve
+#
+# This will help ensure the proper Jekyll version is running.
+# Happy Jekylling!
+#gem "jekyll", "~> 4.3.3"
+# This is the default theme for new Jekyll sites. You may change this to anything you like.
+gem "minima", "~> 2.5"
+# This site is published via GitHub Pages, so the "github-pages" gem below is used
+# in place of a standalone "jekyll" gem. To upgrade, run `bundle update github-pages`.
+gem "github-pages", "~> 231", group: :jekyll_plugins
+gem "webrick"
+gem "just-the-docs"
+# If you have any plugins, put them here!
+group :jekyll_plugins do
+ gem "jekyll-feed", "~> 0.12"
+end
+
+# Windows and JRuby do not include zoneinfo files, so bundle the tzinfo-data gem
+# and associated library.
+platforms :mingw, :x64_mingw, :mswin, :jruby do
+ gem "tzinfo", ">= 1", "< 3"
+ gem "tzinfo-data"
+end
+
+# Performance-booster for watching directories on Windows
+gem "wdm", "~> 0.1.1", :platforms => [:mingw, :x64_mingw, :mswin]
+
+# Lock `http_parser.rb` gem to `v0.6.x` on JRuby builds since newer versions of the gem
+# do not have a Java counterpart.
+gem "http_parser.rb", "~> 0.6.0", :platforms => [:jruby]
\ No newline at end of file
diff --git a/docs/_config.yml b/docs/_config.yml
new file mode 100644
index 0000000..abaea44
--- /dev/null
+++ b/docs/_config.yml
@@ -0,0 +1,12 @@
+title: Plankton ML / pipelines
+email: jowals@ceh.ac.uk
+description: >- # this means to ignore newlines until "baseurl:"
+ This repository contains code, proof of concepts, test cases and workflows for low-investment methods to apply image machine learning to plankton characterisation.
+baseurl: "" # the subpath of your site, e.g. /blog
+url: "" # the base hostname & protocol for your site, e.g. http://example.com
+github_username: metazool
+
+# Build settings
+theme: just-the-docs
+plugins:
+ - jekyll-feed
diff --git a/docs/diagrams/as_is/instrument_to_store.dot b/docs/diagrams/as_is/instrument_to_store.dot
new file mode 100644
index 0000000..964949c
--- /dev/null
+++ b/docs/diagrams/as_is/instrument_to_store.dot
@@ -0,0 +1,34 @@
+# http://www.graphviz.org/content/cluster
+
+digraph G {
+ rankdir=LR;
+ graph [fontname = "Handlee"];
+ node [fontname = "Handlee"];
+ edge [fontname = "Handlee"];
+
+ bgcolor=transparent;
+
+ scope [shape=rect label="Microscope \n(FlowCam)"];
+ pc [shape=rect label="Local PC"]
+
+ scope2 [shape=rect label="Laser Imaging \n(Flow Cytometer)"];
+ pc2 [shape=rect label="Local PC"]
+
+ san [shape=cylinder label="SAN \nprivate cloud"]
+ vm [shape=rect label="VM \nprivate cloud"]
+ store [shape=cylinder label="S3 \nobject store"]
+
+ vm->store [label="triggered by app?" fontsize=10];
+ scope->pc
+ scope2->pc2
+
+ pc2->san [label="physically, via USB stick", fontsize=10];
+ pc->san [label="physically, via USB stick", fontsize=10];
+
+
+    san->vm [dir=back label="manually run script" fontsize=10];
+
+}
+
+
+
diff --git a/docs/diagrams/could_be/instrument_to_store.dot b/docs/diagrams/could_be/instrument_to_store.dot
new file mode 100644
index 0000000..7ae69ee
--- /dev/null
+++ b/docs/diagrams/could_be/instrument_to_store.dot
@@ -0,0 +1,33 @@
+# http://www.graphviz.org/content/cluster
+
+digraph G {
+ rankdir=LR;
+ graph [fontname = "Handlee"];
+ node [fontname = "Handlee"];
+ edge [fontname = "Handlee"];
+
+ bgcolor=transparent;
+
+ scope [shape=rect label="Microscope \n(FlowCam)"];
+ pc [shape=rect label="Local PC"]
+
+ scope2 [shape=rect label="Laser imaging \n(Flow Cytometer)"];
+ pc2 [shape=rect label="Local PC"]
+
+ san [shape=cylinder label="SAN \nprivate cloud"]
+ engine [shape=rect label="Workflow engine"]
+ tasks [label="Task graph"]
+ store [shape=cylinder label="S3 \nobject store"]
+
+ engine->tasks
+ tasks->san;
+    tasks->store;
+ scope->pc
+ scope2->pc2
+
+ pc2->san [label="pull on a schedule?", dir=back,fontsize=10];
+
+ pc->san [label="push on a schedule?", fontsize=10];
+
+}
+
diff --git a/docs/diagrams/could_be/task_graph.dot b/docs/diagrams/could_be/task_graph.dot
new file mode 100644
index 0000000..216984d
--- /dev/null
+++ b/docs/diagrams/could_be/task_graph.dot
@@ -0,0 +1,22 @@
+# http://www.graphviz.org/content/cluster
+
+digraph G {
+ rankdir=LR;
+
+ edge [fontname = "Handlee"];
+
+ graph [fontsize=10 fontname="Handlee"];
+ node [shape=record fontsize=10 fontname="Handlee"];
+
+ bgcolor=transparent;
+
+ subgraph cluster_0 {
+ style=filled;
+ color=lightgrey;
+ node [color=white,style=filled];
+ store -> chunk -> sift -> profile -> upload;
+ label = "Task flow";
+ fontsize = 20;
+ }
+}
+
diff --git a/docs/diagrams/index.md b/docs/diagrams/index.md
new file mode 100644
index 0000000..6d7c8fb
--- /dev/null
+++ b/docs/diagrams/index.md
@@ -0,0 +1,33 @@
+---
+# Feel free to add content and custom Front Matter to this file.
+# To modify the layout, see https://jekyllrb.com/docs/themes/#overriding-theme-defaults
+
+layout: home
+title: Plankton ML - workflow diagrams
+---
+
+# Workflow Diagrams
+
+Views of the flow of data from the imaging instrument to cloud-accessible storage.
+
+### As is
+
+Data saved during a session with the microscope is downloaded onto a USB key, then uploaded from a researcher's laptop into a shared storage area on a site-specific SAN.
+
+Later, a data scientist logs into a virtual machine in the on-premise "private cloud" and runs a series of scripts to read the data, process it for analysis, and upload it to S3 storage hosted at JASMIN. Authorisation in this chain requires personal credentials.
+
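+<!-- SVG rendered from diagrams/as_is/instrument_to_store.dot by scripts/render_diagrams.sh -->
+![As-is: instrument to store](as_is/instrument_to_store.svg)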
+
+
+The file naming conventions embed metadata which doesn't follow the same path as the data, and the samples have spatio-temporal properties which could be recorded.
+
+### Could be
+
+The PC that drives the instrument is connected to the storage network, but not the internet (for security standards compliance reasons). What are the current precedents, either for saving output directly to shared storage, or for a watcher process that pulls or pushes data from a lab PC to networked storage?
+
+An automated workflow (could be Apache Airflow or Beam based - the FDRI project is trialling components) watches for new source data, distributes the preprocessing with Dask or Spark if necessary, and continuously publishes analysis-ready data _and metadata_ to cloud storage, as in the task graph and sketch below.
+
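+<!-- SVGs rendered from the .dot sources by scripts/render_diagrams.sh -->
+![Could-be: instrument to store](could_be/instrument_to_store.svg)
+![Could-be: task graph](could_be/task_graph.svg)
+
+A minimal sketch of that task flow, assuming Airflow's TaskFlow API; the task names mirror `diagrams/could_be/task_graph.dot` and every function body is a placeholder:
+
+```python
+from datetime import datetime
+
+from airflow.decorators import dag, task
+
+
+@dag(schedule="@hourly", start_date=datetime(2024, 7, 1), catchup=False)
+def plankton_tasks():
+    @task
+    def store() -> list[str]:
+        """Poll shared storage for newly exported collage images."""
+        return []  # placeholder
+
+    @task
+    def chunk(collages: list[str]) -> list[str]:
+        """Break each collage back down into single-plankton tiles."""
+        return collages  # placeholder
+
+    @task
+    def sift(tiles: list[str]) -> list[str]:
+        """Drop the blank frames the onboard detection lets through."""
+        return tiles  # placeholder
+
+    @task
+    def profile(tiles: list[str]) -> list[dict]:
+        """Derive shape metrics and other metadata for each tile."""
+        return [{"path": t} for t in tiles]  # placeholder
+
+    @task
+    def upload(records: list[dict]) -> None:
+        """Publish analysis-ready data and metadata to the S3 object store."""
+
+    upload(profile(sift(chunk(store()))))
+
+
+plankton_tasks()
+```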
+
+
+
diff --git a/docs/flowcam/images/20240725_154135.jpg b/docs/flowcam/images/20240725_154135.jpg
new file mode 100644
index 0000000..1e5b396
Binary files /dev/null and b/docs/flowcam/images/20240725_154135.jpg differ
diff --git a/docs/flowcam/images/20240725_154320.jpg b/docs/flowcam/images/20240725_154320.jpg
new file mode 100644
index 0000000..87d07d8
Binary files /dev/null and b/docs/flowcam/images/20240725_154320.jpg differ
diff --git a/docs/flowcam/images/20240725_154511.jpg b/docs/flowcam/images/20240725_154511.jpg
new file mode 100644
index 0000000..1ce0d81
Binary files /dev/null and b/docs/flowcam/images/20240725_154511.jpg differ
diff --git a/docs/flowcam/images/20240725_154600.jpg b/docs/flowcam/images/20240725_154600.jpg
new file mode 100644
index 0000000..85b0c5a
Binary files /dev/null and b/docs/flowcam/images/20240725_154600.jpg differ
diff --git a/docs/flowcam/images/20240725_154812.jpg b/docs/flowcam/images/20240725_154812.jpg
new file mode 100644
index 0000000..5dce502
Binary files /dev/null and b/docs/flowcam/images/20240725_154812.jpg differ
diff --git a/docs/flowcam/images/20240725_155207.jpg b/docs/flowcam/images/20240725_155207.jpg
new file mode 100644
index 0000000..7f11743
Binary files /dev/null and b/docs/flowcam/images/20240725_155207.jpg differ
diff --git a/docs/flowcam/images/20240725_155433.jpg b/docs/flowcam/images/20240725_155433.jpg
new file mode 100644
index 0000000..e480e92
Binary files /dev/null and b/docs/flowcam/images/20240725_155433.jpg differ
diff --git a/docs/flowcam/images/20240725_161537.jpg b/docs/flowcam/images/20240725_161537.jpg
new file mode 100644
index 0000000..093724d
Binary files /dev/null and b/docs/flowcam/images/20240725_161537.jpg differ
diff --git a/docs/flowcam/images/20240725_161806.jpg b/docs/flowcam/images/20240725_161806.jpg
new file mode 100644
index 0000000..e5ba236
Binary files /dev/null and b/docs/flowcam/images/20240725_161806.jpg differ
diff --git a/docs/flowcam/images/20240725_162442.jpg b/docs/flowcam/images/20240725_162442.jpg
new file mode 100644
index 0000000..c0de46d
Binary files /dev/null and b/docs/flowcam/images/20240725_162442.jpg differ
diff --git a/docs/flowcam/images/20240725_163521.jpg b/docs/flowcam/images/20240725_163521.jpg
new file mode 100644
index 0000000..ef52e78
Binary files /dev/null and b/docs/flowcam/images/20240725_163521.jpg differ
diff --git a/docs/flowcam/images/YouCut_20240729_123745260.mp4 b/docs/flowcam/images/YouCut_20240729_123745260.mp4
new file mode 100644
index 0000000..216f7db
Binary files /dev/null and b/docs/flowcam/images/YouCut_20240729_123745260.mp4 differ
diff --git a/docs/flowcam/images/YouCut_20240729_124027460.mp4 b/docs/flowcam/images/YouCut_20240729_124027460.mp4
new file mode 100644
index 0000000..f1fb11a
Binary files /dev/null and b/docs/flowcam/images/YouCut_20240729_124027460.mp4 differ
diff --git a/docs/flowcam/index.md b/docs/flowcam/index.md
new file mode 100644
index 0000000..3fb20a2
--- /dev/null
+++ b/docs/flowcam/index.md
@@ -0,0 +1,79 @@
+---
+# Feel free to add content and custom Front Matter to this file.
+# To modify the layout, see https://jekyllrb.com/docs/themes/#overriding-theme-defaults
+
+layout: home
+title: Plankton ML - FlowCam walkthrough
+---
+
+# FlowCam walkthrough
+
+A report of a visit to the FlowCam instrument in Lancaster on 25/07/2024, to see the flow from specimen to analysis data, see the interpretation through a researcher's eyes, and understand the problem better.
+
+## Sampling
+
+The samples are collected once every two weeks; the ones we analysed had been collected a day or two earlier. At this stage the plankton are suspended in ether.
+
+
+They're washed through a very fine sieve and then diluted back into a beaker of water.
+
+
+
+The diluted sample runs through the hose and into this flat section between two glass plates, which is where the camera points.
+
+
+Running a sample through analysis involves swirling a hose through the water while it pumps through the flat section at an adjustable pressure.
+
+
+
+
+At this stage the researchers are looking for relative volumes of half a dozen plankton genera, with an index to their types and typical features helpfully printed out and stuck to the window.
+
+
+
+An onboard model does basic object detection of specific plankton as they pass through the flat section. You can see that it's picking up _a lot_ of empty space - it's either slightly out of sync with the flow through the device, or has a very low acceptance threshold for what it perceives as possible plankton, or both.
+
+
+
+
+This results in a set of "collage" images, which are what we see _at the start of_ the pipeline in this project. In the sample we recorded there were 350 pages of these, and over 250 of them were purely blank images; the sketch below shows one way they could be filtered.
+
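+A hypothetical blank-frame filter, assuming plain greyscale variance is enough to separate empty frames from frames containing specimens:
+
+```python
+import numpy as np
+from PIL import Image
+
+
+def is_blank(path: str, threshold: float = 5.0) -> bool:
+    """True when an image has too little variance to contain a specimen."""
+    pixels = np.asarray(Image.open(path).convert("L"), dtype=float)
+    return pixels.std() < threshold
+
+
+print(is_blank("collage_page_001.png"))  # filename is illustrative
+```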
+
+
+There's quite a lot of computer vision happening onboard the FlowCam. Here it's doing edge detection and deriving a set of metrics for shapes (area, circularity, complexity) which could be used for shallow ML approaches; comparable metrics can be derived off-device, as sketched below.
+
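+A sketch of deriving comparable shape metrics off-device with `scikit-image`; the Otsu threshold and the circularity formula are assumptions, not necessarily what the FlowCam itself computes:
+
+```python
+import numpy as np
+from skimage import filters, io, measure
+
+image = io.imread("single_plankton.png", as_gray=True)  # illustrative filename
+mask = image < filters.threshold_otsu(image)  # dark specimen, light background
+
+for region in measure.regionprops(measure.label(mask)):
+    if region.perimeter:  # skip degenerate one-pixel regions
+        circularity = 4 * np.pi * region.area / region.perimeter**2
+        print(region.area, round(circularity, 3))
+```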
+
+
+We don't see a data dump of these in a way that can be recoupled with the exported collage images - I'm told that involves a licence we don't have the resources for - nor any sign of a programmatic interface for development on the FlowCam.
+
+
+
+There's a good deal of potentially interesting intermediate image data left behind on the device, including snapshots of the raw flow through the camera and all the binary masks of the collages from which the shape analysis is done.
+
+
+
+The exported collage images are managed using a file naming convention which includes geographic location (WGS84 lon/lat) and sample depth as well as date. This gets detached from the single-plankton images in the current workflow, and we very much need to preserve it. I'd wondered whether depending so heavily on file naming conventions was an overhead, but it looks like a good affordance for the researcher's workflow in the FlowCam application: they take the previous session's name and tweak a small part of it. A parser for a hypothetical convention is sketched below.
+
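+A sketch of recovering that metadata before it detaches; the filename layout here is purely hypothetical and the real convention differs:
+
+```python
+import re
+
+# Hypothetical layout: <site>_<yyyymmdd>_lon<lon>_lat<lat>_depth<m>m
+PATTERN = re.compile(
+    r"(?P<site>[A-Za-z]+)_(?P<date>\d{8})_lon(?P<lon>-?\d+\.\d+)"
+    r"_lat(?P<lat>-?\d+\.\d+)_depth(?P<depth>\d+)m"
+)
+
+match = PATTERN.match("WindermereN_20240725_lon-2.98_lat54.37_depth5m")
+if match:
+    print(match.groupdict())  # {'site': 'WindermereN', 'date': '20240725', ...}
+```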
+
+
+The FlowCam unit has a built-in PC running Windows, rather than attaching to an external one. I didn't ask which version, whether there's any lifecycle for it to receive updates, or whether support through updates carries an extra manufacturer cost.
+
+
+
+It has Ethernet available but isn't connected to the network; the reason cited is the default implementation of Cyber Essentials Plus for risk management. Transfer of data is done via USB stick, and when the disk fills with intermediate images they're simply deleted.
+
+
+
+## Next steps
+
+* [Diagrams](../diagrams/) show the as-is and could-be versions of a data pipeline which takes the exported FlowCam images, breaks them back down into single plankton samples and publishes them to an object store for use with model building
+* The workflow loses metadata at too many points, though a lot of it is knitted back up by the file naming convention
+* The arduous part from the researcher's point of view is paging through images, classifying them by hand and eye. A retrospective model interpretation done in the cloud isn't going to reduce the immediate work in the lab; at worst it risks casualising it, by reducing the _apparent_ need for expertise in interpretation at the point of sampling
+* The recommendation is to take small steps (a "hello world" Python application, then `scikit-image`, then a PyTorch-based classifier - see the sketch after this list) to run models on the instrument itself as a direct assistance to the researcher. For a single-purpose application that's hard to justify, but as a feasibility test of a general approach influencing future design (there's already precedent for [running .NET applications directly on a flow cytometer](https://github.com/OBAMANEXT/cyz2json)) it's quite interesting
+
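+A minimal sketch of that eventual classifier step, assuming a stock `torchvision` backbone rather than the actual Cefas/scivision plankton model:
+
+```python
+import torch
+from PIL import Image
+from torchvision import models, transforms
+
+model = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)
+model.eval()
+
+preprocess = transforms.Compose([
+    transforms.Resize(256),
+    transforms.CenterCrop(224),
+    transforms.ToTensor(),
+    transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                         std=[0.229, 0.224, 0.225]),
+])
+
+tile = Image.open("single_plankton.png").convert("RGB")  # illustrative filename
+with torch.no_grad():
+    logits = model(preprocess(tile).unsqueeze(0))
+print(logits.argmax(dim=1).item())  # ImageNet class index; real labels would differ
+```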
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000..92bf977
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,20 @@
+---
+# Feel free to add content and custom Front Matter to this file.
+# To modify the layout, see https://jekyllrb.com/docs/themes/#overriding-theme-defaults
+
+layout: home
+title: Plankton ML
+---
+
+# Plankton ML
+
+This is a small experimental project on automating the analysis of plankton images. It aims to:
+
+* Inform related work on reproducible analytical pipelines for bioimage machine learning by grounding them in a concrete use case
+* Evaluate reusable components (e.g. the Cefas plankton model from scivision) and associated trade-offs
+* Evolve a shared template for similar smaller projects undertaken by members of the RSE group in the Environmental Data Service, UK Centre for Ecology and Hydrology
+
+Please see the associated GitHub repository, which has [outline tasks in Issues](https://github.com/NERC-CEH/plankton_ml/issues) and [prototype work in pull requests](https://github.com/NERC-CEH/plankton_ml/pulls).
+
+
+
diff --git a/scripts/render_diagrams.sh b/scripts/render_diagrams.sh
new file mode 100644
index 0000000..dadb863
--- /dev/null
+++ b/scripts/render_diagrams.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Render diagrams as SVG (originally a Copilot-generated script)
+# Run from the docs/ directory; output mirrors diagrams/ under _site/
+
+DIR="./diagrams/"
+SITE="./_site/diagrams/"
+
+# Loop through each subdirectory
+for sub_dir in "$DIR"*/; do
+    # Loop through each dot file in the subdirectory
+    for dotfile in "$sub_dir"*.dot; do
+        # Get the base name without extension
+        base_name=$(basename "$dotfile" .dot)
+
+        # Mirror the diagrams/ tree under _site/diagrams/
+        dir_path="${sub_dir/#$DIR/$SITE}"
+        mkdir -p "$dir_path"
+        output="$dir_path$base_name.svg"
+
+        # Render the dot file to SVG
+        dot -Tsvg "$dotfile" -o "$output"
+
+        # Print a success message
+        echo "Rendered $dotfile to $output"
+    done
+done
+
+