From 8749b5f560d855ba6ee9a7157ec1492d7d6f0089 Mon Sep 17 00:00:00 2001
From: Thomas Weber
Date: Wed, 10 Jan 2024 15:57:00 +0000
Subject: [PATCH] 2.2.5 with Dockerfile

---
 docs/workshop.md                    | 217 +++++++------
 .../Dockerfile-2.2.5.dockerfile     | 301 ++++++++++++++++++
 workflow/envs/scNOVA/scNOVA_DL.yaml |   2 +
 3 files changed, 419 insertions(+), 101 deletions(-)
 create mode 100644 github-actions-runner/Dockerfile-2.2.5.dockerfile

diff --git a/docs/workshop.md b/docs/workshop.md
index d9938ee5..618f2caf 100644
--- a/docs/workshop.md
+++ b/docs/workshop.md

# Mosaicatcher workshop

## AFAC

Context: you are working on a fancy project, and at some point Jan suggests generating some Strand-seq data.

It is the first time you are working with Strand-seq and you are starting to panic. But you remember hearing that a complex tool was developed in the lab to process Strand-seq data in a systematic way: ta-dam, MOSAICATCHER.

Prerequisite: I asked you to select a sample to process during today's workshop.

TD: feedback on how they traced back the name of the sample, the associated run/flowcell, the date when it was sequenced, etc.

So here is the plan for today:

- Small intro (~20-30 min) about MosaiCatcher: the different steps, options, branches and possibilities
- Output examples
- SV trustworthiness
- Web report analysis of RPE-MIXTURE
- Hands-on: pipeline install, module load, test data execution
- `vim scNOVA_input_user/input_subclonality.txt`
- Then trigger the pipeline on YOUR data
- Once this is running, web report analysis together with questions
- Then, Strand-Scape:
  - Still in beta, some microservices unstable; main application for QC and web report consultation
  - MosaiCatcher trigger removed, too complex in the backend
  - Cell selection with username

When copying input data to a new location, preserve the timestamps so that snakemake does not consider the copied files as newer than the existing results:

cp --preserve=timestamps FROM_ TO_
snakemake ...
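A minimal sketch of that idea (the paths below are illustrative, not part of the workshop material): copy the sample folder with `--preserve=timestamps`, then check that the modification times are identical on both sides before launching snakemake.

```bash
# Copy a sample folder into the folder used as data_location,
# keeping the original modification times (illustrative paths).
cp -r --preserve=timestamps /g/korbel/shared/SAMPLE_NAME /g/korbel/USERNAME/strandseq_data/SAMPLE_NAME

# Check that the timestamps are unchanged on both copies.
ls -l --time-style=full-iso /g/korbel/shared/SAMPLE_NAME
ls -l --time-style=full-iso /g/korbel/USERNAME/strandseq_data/SAMPLE_NAME
```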
## Technical prerequisites

- SSHFS/SFTP connection to visualise/download/access the files created (WinSCP/FileZilla/Cyberduck)
- Functional terminal connected to the EMBL cluster (if not, follow the SSH key configuration here: https://www.embl.org/internal-information/it-services/hpc-resources-heidelberg/)
- Have a workspace on /g/korbel

## Workshop prerequisites

- Pick a sample name to be processed
- Download this MosaiCatcher report: https://oc.embl.de/index.php/s/WBgrzBjyzdYdVJA/download

## EMBL cheatsheet

### Connect to seneca

ssh USERNAME@seneca.embl.de

### Connect to the login nodes

ssh USERNAME@login0[1,2,3,4].embl.de (login01 to login04)

**ℹ️ Important Note**

Snakemake important arguments/options:

- `--rerun-triggers`
- `--touch`

## MosaiCatcher important files

- Counts: PARENT_FOLDER/SAMPLE_NAME/counts/SAMPLE_NAME.txt.raw.gz
- Counts statistics: PARENT_FOLDER/SAMPLE_NAME/counts/SAMPLE_NAME.info_raw
- Ashleys predictions: PARENT_FOLDER/SAMPLE_NAME/cell_selection/labels.tsv
- Counts plot: PARENT_FOLDER/SAMPLE_NAME/plots/CountComplete.raw.pdf
- Counts normalised plot: PARENT_FOLDER/SAMPLE_NAME/plots/CountComplete.normalised.pdf
- Phased W/C regions: PARENT_FOLDER/SAMPLE_NAME/strandphaser/strandphaser_phased_haps_merged.txt
- SV calls (stringent): PARENT_FOLDER/SAMPLE_NAME/mosaiclassifier/sv_calls/stringent_filterTRUE.tsv
- SV calls (lenient): PARENT_FOLDER/SAMPLE_NAME/mosaiclassifier/sv_calls/lenient_filterFALSE.tsv
- Plots folder: PARENT_FOLDER/SAMPLE_NAME/plots/
- scNOVA outputs:

## CLI usage of the pipeline

### Quick Start

Notes

- Config definition is crucial: set via the command line or via a YAML file, it defines where to stop, which mode, which branch and which options are used
- Profile

2. Load snakemake

A. Use module load OR create a dedicated conda environment

```bash
module load snakemake/7.32.4-foss-2022b
```

**Reminder:** You will need to verify that this conda environment is activated and provides the right snakemake before each execution (the `which snakemake` command should output something like .../[ana|mini]conda3/envs/snakemake/bin/snakemake)

3. Run on example data on only one small chromosome (`<disk>` must be replaced by your disk letter/name)

First use the `--dry-run` option of snakemake to make sure the Graph of Execution is properly connected. (In combination with `--dry-run`, we use the `local/conda` profile, as snakemake still presents a bug when looking for the singularity container.)

```bash
# Config options used below:
#   data_location                 DATA LOCATION
#   ashleys_pipeline=True         DOWNLOAD & TRIGGER ASHLEYS QC UPSTREAM MODULE
#   ashleys_pipeline_only=True    STOP AFTER ASHLEYS QC - VALIDATION PURPOSE
#   multistep_normalisation=True  TRIGGER MARCO'S MULTISTEP NORMALISATION
#   MultiQC=True                  TRIGGER samtools stats, FastQC & MultiQC reporting
#   --profile                     EXECUTION PROFILE TO BE USED
#   --dry-run                     ONLY CHECK THAT EVERYTHING CONNECTS WELL AND IS READY FOR COMPUTING
snakemake \
    --cores 6 \
    --configfile .tests/config/simple_config.yaml \
    --config \
    data_location=.tests/data_CHR17 \
    ashleys_pipeline=True \
    ashleys_pipeline_only=True \
    multistep_normalisation=True \
    MultiQC=True \
    --profile workflow/snakemake_profiles/local/conda/ \
    --dry-run
```

If no error message, you are good to go!
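Before switching from the dry run to the real run below, here is a minimal sketch of how the two options listed earlier under "Snakemake important arguments/options" are typically used, for example after input data has been copied with `cp --preserve=timestamps` (the flag values shown are illustrative, not part of the workshop commands):

```bash
# Only consider file modification times (not code/params/software changes)
# when snakemake decides which rules need to be re-run.
snakemake \
    --cores 6 \
    --configfile .tests/config/simple_config.yaml \
    --profile workflow/snakemake_profiles/local/conda/ \
    --rerun-triggers mtime \
    --dry-run

# If existing outputs are known to be valid, mark them as up to date
# (touch them) instead of recomputing them.
snakemake \
    --cores 6 \
    --configfile .tests/config/simple_config.yaml \
    --profile workflow/snakemake_profiles/local/conda/ \
    --touch
```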
Then launch the actual run: the same command without `--dry-run`, this time using the EMBL slurm profile.

```bash
# Same config options as the dry run above; --dry-run removed and the
# EMBL slurm execution profile used instead of the local one.
snakemake \
    --cores 24 \
    --configfile .tests/config/simple_config.yaml \
    --config \
    data_location=.tests/data_CHR17 \
    ashleys_pipeline=True \
    ashleys_pipeline_only=True \
    multistep_normalisation=True \
    MultiQC=True \
    --profile workflow/snakemake_profiles/HPC/slurm_EMBL/
```

Then have a look at the files produced:

```bash
# Count statistics per cell
cat .tests/data_CHR17/RPE-BM510/counts/RPE-BM510.info_raw
# Raw binned counts (gzipped)
zcat .tests/data_CHR17/RPE-BM510/counts/RPE-BM510.txt.raw.gz | less
# Ashleys cell selection predictions
cat .tests/data_CHR17/RPE-BM510/cell_selection/labels.tsv
```

Look at the plots: .tests/data_CHR17/RPE-BM510/plots

---

**ℹ️ Note**

- Steps 0 - 2 are required only during the first execution
- After the first execution, do not forget to go into the git repository and to activate the snakemake environment

---

---

**ℹ️ Note for 🇪🇺 EMBL users**

- Use the following profile to run on the EMBL cluster: `--profile workflow/snakemake_profiles/HPC/slurm_EMBL`

---

REPORT

```bash
# Same configuration as above but with ashleys_pipeline_only=False (full
# pipeline) and --report to produce a self-contained report archive.
snakemake \
    --cores 24 \
    --configfile .tests/config/simple_config.yaml \
    --config \
    data_location=.tests/data_CHR17 \
    ashleys_pipeline=True \
    ashleys_pipeline_only=False \
    multistep_normalisation=True \
    MultiQC=True \
    --profile workflow/snakemake_profiles/HPC/slurm_EMBL/ \
    --report TEST_DATA_REPORT.zip \
    --report-stylesheet workflow/report/custom-stylesheet.css
```

Questions???

## 🔬 Start running your own analysis

The commands above can be reused on your own Strand-Seq data: point `data_location=` to your own input folder and pick the execution profile matching your environment (the EMBL slurm profile used here for HPC execution, or one of the local profiles: `workflow/snakemake_profiles/local/conda_singularity/` if singularity is installed, `workflow/snakemake_profiles/local/conda/` otherwise). A report of your own run can be generated in the same way by adding `--report <REPORT>.zip --report-stylesheet workflow/report/custom-stylesheet.css`.

## System requirements

This workflow is meant to be run in a Unix-based operating system (tested on Ubuntu 18.04 & CentOS 7).

Minimum system requirements vary based on the use case. We highly recommend running it in a server environment with 32+ GB RAM and 12+ cores.

- [Conda install instructions](https://conda.io/miniconda.html)
- [Singularity install instructions](https://sylabs.io/guides/3.0/user-guide/quick_start.html#quick-installation-steps)

## Detailed usage

### 🐍 1. Mosaicatcher basic conda environment install

MosaiCatcher leverages snakemake built-in features such as execution within containers and predefined modular conda environments.
That's why it is only necessary to create an environment that relies on [snakemake](https://github.com/snakemake/snakemake) (to execute the pipeline) and [pandas](https://github.com/pandas-dev/pandas) (to handle basic configuration). If you plan to generate the HTML Web report including plots, it is also necessary to install [imagemagick](https://github.com/ImageMagick/ImageMagick).

If possible, it is also highly recommended to install and use the `mamba` package manager instead of `conda`, which is much more efficient.

```bash
conda install -c conda-forge mamba
mamba create -n snakemake -c bioconda -c conda-forge -c defaults -c anaconda snakemake
conda activate snakemake
```

### ⤵️ 2. Clone repository & go into workflow directory

After cloning the repo, go into the `workflow` directory, which corresponds to the pipeline entry point.

```bash
git clone --recurse-submodules https://github.com/friendsofstrandseq/mosaicatcher-pipeline.git
cd mosaicatcher-pipeline
```

SCNOVA

```bash
# Build scNOVA's input_subclonality.txt from the ashleys labels: keep the cells whose
# second column equals 1, strip the .sort.mdup.bam suffix and assign them all to a single subclone named "clone".
mkdir -p .tests/data_CHR17/RPE-BM510/scNOVA_input_user
awk 'BEGIN {FS=OFS="\t"} NR==1 {print "Filename", "Subclonality"} NR>1 && $2==1 {sub(/\.sort\.mdup\.bam/, "", $1); print $1, "clone"}' .tests/data_CHR17/RPE-BM510/cell_selection/labels.tsv > .tests/data_CHR17/RPE-BM510/scNOVA_input_user/input_subclonality.txt
```

```bash
# Same configuration as before, continuing after ashleys QC (ashleys_pipeline_only=False)
# and with scNOVA=True to trigger the scNOVA downstream module.
snakemake \
    --cores 24 \
    --configfile .tests/config/simple_config.yaml \
    --config \
    data_location=.tests/data_CHR17 \
    ashleys_pipeline=True \
    ashleys_pipeline_only=False \
    multistep_normalisation=True \
    MultiQC=True \
    scNOVA=True \
    --profile workflow/snakemake_profiles/HPC/slurm_EMBL/
```

########################################################################

### ⚙️ 3.
MosaiCatcher execution (without preprocessing) diff --git a/github-actions-runner/Dockerfile-2.2.5.dockerfile b/github-actions-runner/Dockerfile-2.2.5.dockerfile new file mode 100644 index 00000000..f3f52ebc --- /dev/null +++ b/github-actions-runner/Dockerfile-2.2.5.dockerfile @@ -0,0 +1,301 @@ +FROM condaforge/mambaforge:latest +LABEL io.github.snakemake.containerized="true" +LABEL io.github.snakemake.conda_env_hash="d4782fccfb377c4218ce7ef33158c7aa5e1737dea84ae3d06d9b1ea685e74870" + +# Step 1: Retrieve conda environments + +# Conda environment: +# source: https://github.com/snakemake/snakemake-wrappers/raw/v1.7.0/bio/bwa/index/environment.yaml +# prefix: /conda-envs/5681728a49bd83ceed09ba194330c858 +# channels: +# - bioconda +# - conda-forge +# - defaults +# dependencies: +# - bwa ==0.7.17 +RUN mkdir -p /conda-envs/5681728a49bd83ceed09ba194330c858 +ADD https://github.com/snakemake/snakemake-wrappers/raw/v1.7.0/bio/bwa/index/environment.yaml /conda-envs/5681728a49bd83ceed09ba194330c858/environment.yaml + +# Conda environment: +# source: https://github.com/snakemake/snakemake-wrappers/raw/v1.7.0/bio/fastqc/environment.yaml +# prefix: /conda-envs/08d4368302a4bdf7eda6b536495efe7d +# channels: +# - bioconda +# - conda-forge +# - defaults +# dependencies: +# - fastqc ==0.11.9 +RUN mkdir -p /conda-envs/08d4368302a4bdf7eda6b536495efe7d +ADD https://github.com/snakemake/snakemake-wrappers/raw/v1.7.0/bio/fastqc/environment.yaml /conda-envs/08d4368302a4bdf7eda6b536495efe7d/environment.yaml + +# Conda environment: +# source: https://raw.githubusercontent.com/friendsofstrandseq/ashleys-qc-pipeline/2.2.5/workflow/envs/ashleys_base.yaml +# prefix: /conda-envs/87c04f5d115eff742eca84455513deba +# name: ashleys_base +# channels: +# - conda-forge +# - bioconda +# dependencies: +# - samtools +# - tabix +# - bwa +# - sambamba +# - mosaicatcher +# # - alfred +# - ashleys-qc +# - pandas +# # PUBLISHDIR +# - rsync +# # MULTIQC +# - multiqc +# # Fix sklearn update +# - scikit-learn=1.2.2 +RUN mkdir -p /conda-envs/87c04f5d115eff742eca84455513deba +ADD https://raw.githubusercontent.com/friendsofstrandseq/ashleys-qc-pipeline/2.2.5/workflow/envs/ashleys_base.yaml /conda-envs/87c04f5d115eff742eca84455513deba/environment.yaml + +# Conda environment: +# source: https://raw.githubusercontent.com/friendsofstrandseq/ashleys-qc-pipeline/2.2.5/workflow/envs/ashleys_rtools.yaml +# prefix: /conda-envs/9b847fc31baae8e01dfb7ce438a56b71 +# name: rtools +# channels: +# - conda-forge +# - bioconda +# - r +# - anaconda +# dependencies: +# # - bioconductor-biocparallel +# # - bioconductor-bsgenome +# # - bioconductor-bsgenome.hsapiens.ucsc.hg19 +# # - bioconductor-bsgenome.hsapiens.ucsc.hg38 +# # - bioconductor-fastseg +# # - bioconductor-genomicalignments +# - bioconductor-genomicranges +# # - bioconductor-rsamtools +# # - bioconductor-s4vectors +# - r-assertthat +# - r-base +# # - r-biocmanager +# - r-cowplot +# - r-data.table +# # - r-devtools +# # - r-doparallel +# # - r-foreach +# - r-ggplot2 +# # - r-gtools +# - r-reshape2 +# # - r-zoo +# # - r-dplyr +# # - r-mc2d +# # - r-pheatmap +# # - bioconductor-complexheatmap +# # - r-gplots +# - r-scales +# - r-rcolorbrewer +# # - r-stringr +# - r-cairo +# - fonts-anaconda +# # NEW +# - bioconductor-edger +# - r-r.utils +# # PLATE PLOT +# - r-dplyr +# - r-platetools +# - r-viridis +# # GC_correction +# - r-tidyr +# - r-ggpubr +# # SOLVE R lib issue +# - r-stringi=1.7.12 +RUN mkdir -p /conda-envs/9b847fc31baae8e01dfb7ce438a56b71 +ADD 
https://raw.githubusercontent.com/friendsofstrandseq/ashleys-qc-pipeline/2.2.5/workflow/envs/ashleys_rtools.yaml /conda-envs/9b847fc31baae8e01dfb7ce438a56b71/environment.yaml + +# Conda environment: +# source: workflow/envs/mc_base.yaml +# prefix: /conda-envs/c80307395eddf442c2fb6870f40d822b +# name: mc-base +# channels: +# - conda-forge +# - bioconda +# dependencies: +# - pandas +# - intervaltree +# - scipy +# - pysam +# - tqdm +# - perl +# - pypdf2 +# - parmap +# # NEW +# - pyyaml +# - seaborn +# - matplotlib +# # SOLVE se-pe detection +# - samtools +# # ArbiGent Hufsah deps +# - pytables +# - xopen +RUN mkdir -p /conda-envs/c80307395eddf442c2fb6870f40d822b +COPY workflow/envs/mc_base.yaml /conda-envs/c80307395eddf442c2fb6870f40d822b/environment.yaml + +# Conda environment: +# source: workflow/envs/mc_bioinfo_tools.yaml +# prefix: /conda-envs/f251d84cdc9f25d0e14b48e780261d66 +# name: mc-bioinfo-tools +# channels: +# - conda-forge +# - bioconda +# dependencies: +# - bcftools +# - freebayes +# - mosaicatcher +# - samtools +# - tabix +# - whatshap +RUN mkdir -p /conda-envs/f251d84cdc9f25d0e14b48e780261d66 +COPY workflow/envs/mc_bioinfo_tools.yaml /conda-envs/f251d84cdc9f25d0e14b48e780261d66/environment.yaml + +# Conda environment: +# source: workflow/envs/rtools.yaml +# prefix: /conda-envs/598c87b6c764d05e0c66953cc67f2931 +# name: rtools +# channels: +# - bioconda +# - conda-forge +# - r +# - anaconda +# dependencies: +# # # NEW +# - strandphaser +# # ############### +# - bioconductor-biocparallel +# - bioconductor-bsgenome +# - bioconductor-bsgenome.hsapiens.ucsc.hg38 +# - bioconductor-complexheatmap +# # - bioconductor-fastseg +# - bioconductor-genomicalignments +# - bioconductor-genomicranges +# - bioconductor-rsamtools +# # - bioconductor-s4vectors +# - fonts-anaconda +# - r-assertthat +# - r-base +# - r-biocmanager +# - r-cairo +# - r-cowplot +# - r-data.table +# - r-devtools +# - r-doparallel +# - r-dplyr +# - r-foreach +# - r-ggplot2 +# - r-gplots +# - r-gtools +# - r-mc2d +# - r-rcolorbrewer +# - r-reshape2 +# - r-scales +# - r-stringr +# # SV_CALLS_DEV +# # - r-zoo +# - r-r.utils +# - r-ggnewscale +# # HEATMAP +# - r-tidyr +# # ARBIGENT +# - r-reshape +# - r-optparse +# - r-tidyr +# - r-ggbeeswarm +# - r-pheatmap +# # GC_correction +# - r-ggpubr +# - bioconductor-edger +# # SOLVE R lib issue +# - r-stringi=1.7.12 +RUN mkdir -p /conda-envs/598c87b6c764d05e0c66953cc67f2931 +COPY workflow/envs/rtools.yaml /conda-envs/598c87b6c764d05e0c66953cc67f2931/environment.yaml + +# Conda environment: +# source: workflow/envs/scNOVA/scNOVA_DL.yaml +# prefix: /conda-envs/667f8c581ac0b8e1ef11912ad74bf264 +# name: scNOVA_DL +# channels: +# - conda-forge +# - anaconda +# dependencies: +# - tensorflow=1.15.0 +# - scikit-learn=0.21.3 +# - python=3.7.4 +# - matplotlib=3.1.1 +# - pandas=0.25.3 +# - h5py=2.10.0 +# - numpy +# # scNOVA archive +# - unzip +# # Fix +# - pip: +# - cffi +RUN mkdir -p /conda-envs/667f8c581ac0b8e1ef11912ad74bf264 +COPY workflow/envs/scNOVA/scNOVA_DL.yaml /conda-envs/667f8c581ac0b8e1ef11912ad74bf264/environment.yaml + +# Conda environment: +# source: workflow/envs/scNOVA/scNOVA_R.yaml +# prefix: /conda-envs/193f60d48796dd17eb847ea689b863a9 +# name: scNOVA +# channels: +# - bioconda +# - conda-forge +# - r +# dependencies: +# - bioconductor-deseq2=1.30.0 +# - r-matrixstats=0.58.0 +# - r-pheatmap=1.0.12 +# - r-gplots=3.1.1 +# - r-umap=0.2.7.0 +# - r-rtsne=0.15 +# - r-factoextra=1.0.7 +# - r-pracma=2.3.3 +# - bioconductor-chromvar=1.12.0 +# - r-nabor=0.5.0 +# - 
bioconductor-motifmatchr=1.12.0 +# - bioconductor-bsgenome.hsapiens.ucsc.hg38=1.4.3 +# - bioconductor-jaspar2016=1.18.0 +# - r-codetools=0.2_18 +# - r-fitdistrplus +# - r-doparallel +# - r-foreach +RUN mkdir -p /conda-envs/193f60d48796dd17eb847ea689b863a9 +COPY workflow/envs/scNOVA/scNOVA_R.yaml /conda-envs/193f60d48796dd17eb847ea689b863a9/environment.yaml + +# Conda environment: +# source: workflow/envs/scNOVA/scNOVA_bioinfo_tools.yaml +# prefix: /conda-envs/ca9641251a8cb0057003875ad776c49f +# name: scNOVA_bioinfo_tools +# channels: +# - conda-forge +# - bioconda +# - anaconda +# dependencies: +# - samtools +# - biobambam +# - bedtools +RUN mkdir -p /conda-envs/ca9641251a8cb0057003875ad776c49f +COPY workflow/envs/scNOVA/scNOVA_bioinfo_tools.yaml /conda-envs/ca9641251a8cb0057003875ad776c49f/environment.yaml + +# Step 2: Generate conda environments + +RUN mamba env create --prefix /conda-envs/5681728a49bd83ceed09ba194330c858 --file /conda-envs/5681728a49bd83ceed09ba194330c858/environment.yaml && \ + mamba env create --prefix /conda-envs/08d4368302a4bdf7eda6b536495efe7d --file /conda-envs/08d4368302a4bdf7eda6b536495efe7d/environment.yaml && \ + mamba env create --prefix /conda-envs/87c04f5d115eff742eca84455513deba --file /conda-envs/87c04f5d115eff742eca84455513deba/environment.yaml && \ + mamba env create --prefix /conda-envs/9b847fc31baae8e01dfb7ce438a56b71 --file /conda-envs/9b847fc31baae8e01dfb7ce438a56b71/environment.yaml && \ + mamba env create --prefix /conda-envs/c80307395eddf442c2fb6870f40d822b --file /conda-envs/c80307395eddf442c2fb6870f40d822b/environment.yaml && \ + mamba env create --prefix /conda-envs/f251d84cdc9f25d0e14b48e780261d66 --file /conda-envs/f251d84cdc9f25d0e14b48e780261d66/environment.yaml && \ + mamba env create --prefix /conda-envs/598c87b6c764d05e0c66953cc67f2931 --file /conda-envs/598c87b6c764d05e0c66953cc67f2931/environment.yaml && \ + mamba env create --prefix /conda-envs/667f8c581ac0b8e1ef11912ad74bf264 --file /conda-envs/667f8c581ac0b8e1ef11912ad74bf264/environment.yaml && \ + mamba env create --prefix /conda-envs/193f60d48796dd17eb847ea689b863a9 --file /conda-envs/193f60d48796dd17eb847ea689b863a9/environment.yaml && \ + mamba env create --prefix /conda-envs/ca9641251a8cb0057003875ad776c49f --file /conda-envs/ca9641251a8cb0057003875ad776c49f/environment.yaml && \ + mamba clean --all -y +# CUSTOM PART +RUN wget https://zenodo.org/record/7697400/files/BSgenome.T2T.CHM13.V2_1.0.0.tar.gz -P /workflow/data/ref_genomes/ +COPY /workflow/scripts/utils/install_R_package.R /conda-envs/ +RUN chmod -R 0777 /conda-envs/598c87b6c764d05e0c66953cc67f2931/lib/R/library && /conda-envs/598c87b6c764d05e0c66953cc67f2931/bin/Rscript /conda-envs/install_R_package.R /workflow/data/ref_genomes/BSgenome.T2T.CHM13.V2_1.0.0.tar.gz diff --git a/workflow/envs/scNOVA/scNOVA_DL.yaml b/workflow/envs/scNOVA/scNOVA_DL.yaml index 775c36d8..a9cef6ce 100644 --- a/workflow/envs/scNOVA/scNOVA_DL.yaml +++ b/workflow/envs/scNOVA/scNOVA_DL.yaml @@ -13,3 +13,5 @@ dependencies: # scNOVA archive - unzip # Fix + - pip: + - cffi