-
Notifications
You must be signed in to change notification settings - Fork 0
/
GM_28Jun2023.html
1533 lines (1397 loc) · 74.1 KB
/
GM_28Jun2023.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<title>Group Meeting 28 June 2023</title>
<script type="module">
// Load the three.js for 3d object visualization
import * as THREE from "https://cdn.jsdelivr.net/npm/three@0.127.0/build/three.module.js"
import { OrbitControls } from "https://cdn.jsdelivr.net/npm/three@0.127.0/examples/jsm/controls/OrbitControls.js"
import { PLYLoader } from "https://cdn.jsdelivr.net/npm/three@0.127.0/examples/jsm/loaders/PLYLoader.js"
window.THREE = THREE;
window.OrbitControls = OrbitControls;
window.PLYLoader = PLYLoader
</script>
<!-- Load reveal.js -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.6.0/css/reveal.min.css" integrity="sha512-V5fKCVKOFy36w8zJmLzPH5R6zU6KvuHOvxfMRczx2ZeqTjKRGSBO9yiZjCKEJS3n6EmENwrH/xvSwXqxje+VVA==" crossorigin="anonymous" referrerpolicy="no-referrer" />
<script src="https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.6.0/js/reveal.min.js" integrity="sha512-QYXU3Cojl94ZRiZRjUZpyg1odj9mKTON9MsTMzGNx/L3JqvMA3BQNraZwsZ83UeisO+QMVfFa83SyuYYJzR9hw==" crossorigin="anonymous" referrerpolicy="no-referrer"></script>
<!-- Load jQuery-->
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/1.9.1/jquery.min.js" integrity="sha512-jGR1T3dQerLCSm/IGEGbndPwzszJBlKQ5Br9vuB0Pw2iyxOy+7AK+lJcCC8eaXyz/9du+bkCy4HXxByhxkHf+w==" crossorigin="anonymous" referrerpolicy="no-referrer"></script>
<!-- Load 3DMol.js -->
<script src="https://3Dmol.org/build/3Dmol-min.js"></script>
<script src="https://3Dmol.org/build/3Dmol.ui-min.js"></script>
<!-- Load MathJax for LaTeX representation -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.9/MathJax.js?config=TeX-AMS_HTML"></script>
<style>
/* --- Global slide layout: each section fills the viewport. --- */
body {
margin: 0;
}
section {
position: absolute !important;
top:0 !important;
left:0 !important;
height: 100% !important;
/*width: 100% !important;*/
}
canvas {
display: block;
}
/* --- Title-page typography. --- */
.presentation_title {
position: absolute !important;
font-size: 45px !important;
font-weight: bold !important;
line-height: 1.25 !important;
padding-bottom: 20px !important;
}
/* Right-aligned byline/date/affiliation lines under the title. */
.presentation_title2 {
padding-right: 50px !important;
line-height: 1.25 !important;
padding-bottom: 15px !important;
text-align: right !important;
font-size: 20px !important;
}
.page-number {
position: absolute;
bottom: 35px;
right: 8%;
color: black;
font-size: 36px;
}
/* --- Fixed corner images shown on slides. --- */
.uzh_logo{
position: absolute !important;
top: 2.5vh !important;
left: 2.5vw !important;
height: 10vh;
width: auto;
}
.symbol_fig{
position: absolute;
top: 2.5vh;
right: 2.5vw;
height: 10vh;
/*filter: blur(2px); */
}
/* --- Recolor the reveal.js progress bar. --- */
.reveal .progress{
height: 8px !important;
background-color: #F3E99F !important;
}
.reveal .progress span {
background-color: #FF6D60 !important;
transition: width 0.5s ease-out !important;
opacity: 1 !important;
z-index: 0 !important;
}
/* Red dot overlay used as a laser pointer; starts hidden
   (opacity 0) — presumably toggled/moved by script, TODO confirm. */
.laser-pointer {
position: absolute;
width: 20px;
height: 20px;
background-color: red;
border-radius: 50%;
opacity: 0;
pointer-events: none;
z-index: 1000;
transition: opacity 0.3s;
}
</style>
<style>
/* --- Per-slide content styles. --- */
.slide-title {
position: absolute !important;
top: 12.5% !important;
left: 2.5% !important;
padding: 10px !important;
font-size: 25px !important;
font-weight: bold !important;
z-index:auto !important;
}
/* Left text and right figure style pair*/
.slide_content_style1{
position: absolute !important;
top: 20% !important;
left: 2.5% !important;
width: 45% !important;
padding: 10px !important;
font-size: 20px !important;
text-align: left !important;
}
.slide_item_style1{
padding-bottom: 20px !important;
font-size: 20px !important;
}
/* Figure column occupying the right half of the slide. */
.image-style1{
position: absolute !important;
top: 20% !important;
left: 50% !important;
width: 50% !important;
height: 65% !important;
}
.tablecell_style1{
font-size: 15px !important;
}
/* Emphasized take-home message lines. */
.highlight_style1{
color: darkred !important;
font-size: 25px !important;
padding-top: 15px !important;
padding-bottom: 15px !important;
font-weight: bold !important;
}
/* Full-width figure anchored in the lower part of the slide. */
.image-style2{
position: absolute !important;
top: 60% !important;
/*left: 50% !important;*/
width: 100% !important;
}
.image-style3{
position: absolute !important;
top: 60% !important;
left: 0% !important;
width: 50% !important;
}
.caption_style{
font-size: 15px !important;
position: relative;
/*display: block;*/
width:50% !important;
}
/* Small gray citation footer pinned near the bottom of a slide. */
.reference_style{
position: absolute !important;
bottom: 8% !important;
left: 0 !important;
color: rgb(108, 108, 108) !important;
font-size: 8px !important;
width:80%;
text-align: left;
}
/* Square aspect-ratio container for 3Dmol.js viewers
   (padding-top: 100% makes height track width). */
.mol-container{
position: relative !important;
padding-top: 100%;
}
/* Pill-shaped subtitle/caption badge.
   Note: a redundant leading `background: #e6f0b680` declaration was
   removed — it was fully overridden by the gradient shorthand below;
   the gradient plus the explicit background-color are the two
   declarations that take effect. */
.content_subtitle{
font-size: 15px !important;
padding: 5px !important;
margin-top: 10px !important;
margin-bottom: 20px !important;
background: radial-gradient(circle at center, #e6f0b680, #e6f0b630) !important;
border-radius: 10px !important;
background-color: #e6f0b680 !important;
}
/* Stretch the 3Dmol.js canvas to fill its container. */
.mol-container canvas {
position: absolute !important;
top: 0 !important;
bottom: 0 !important;
left: 0 !important;
right: 0 !important;
padding: 0 !important;
}
ul li{
padding-bottom:10px !important;
font-size: 25px !important;
}
/* Centered chapter-break headline (dead-centered via translate). */
.chapter_break{
position: absolute !important;
top : 50% !important;
left: 50% !important;
width:100% !important;
font-size: 45px !important;
font-weight: bold !important;
line-height: 1.25 !important;
transform: translate(-50%, -50%) !important;
}
.image-gray {
filter: grayscale(100%);
}
</style>
<style>
/* --- Hover-zoom image gallery. --- */
/* Flex container that centers and wraps gallery figures. */
.gallery {
position: relative;
display: flex;
flex-wrap: wrap;
justify-content: center;
align-items: center;
}
.gallery-item {
position: relative !important;
margin-right: 40px !important;
margin-left: 40px !important;
margin-top: 20px !important;
margin-bottom:50px !important;
}
/* Thumbnails animate smoothly between normal and zoomed states. */
.gallery-item img {
width: auto;
height: 150px;
transition: all 0.3s ease-out;
}
/* Default hover zoom (3x); the _style2/3/4 variants override the
   transform for figures that need a smaller zoom or an offset. */
.gallery-item:hover img {
transform: scale(3);
}
.gallery-item_style2:hover img {
transform: scale(1.5) !important;
}
.gallery-item_style3:hover img {
transform: scale(2) translateY(20%) translateX(20%) !important;
}
.gallery-item_style4:hover img {
transform: scale(2) translateX(-10%) !important;
}
/* Caption slides up into view under the figure on hover. */
.gallery-item figcaption {
position: absolute;
bottom: -20px;
left: 0;
width: 100%;
text-align: center;
border-radius: 10px;
background-color: #fff0dbd0;
padding: 10px;
transform: translateY(100%);
transition: transform 0.3s ease-out;
}
.gallery-item:hover figcaption {
transform: translateY(170px);
}
</style>
</head>
<body>
<div class="reveal">
<div class="laser-pointer"></div>
<div class="slides" id="slide_container">
<!-- Title page -->
<section data-state="Title_Page">
<img id="greeting_bgimage" src="/interval_background.png" style="position: absolute; top:10px; right:-300px; opacity:0.25; transform: rotate(-30deg); filter: blur(2px); height: 700px; width: auto;">
<div style="position:relative; height:300px; width:100%; top:5%"><p class="presentation_title" style="top:50px; width:100%">Improving molecular interaction recognition with molecular dynamics</p></div>
<p class="presentation_title2">Yang Zhang</p>
<p class="presentation_title2">28\(^{th}\) June, 2023</p>
<p class="presentation_title2">Caflisch group Meeting</p>
<p class="presentation_title2">Department of Biochemistry, UZH</p>
</section>
<!--Template page-->
<!--<section data-state="slide_">-->
<!-- <p class="slide-title">This is a Template Slide for Demonstration</p>-->
<!-- <div class="slide_content_style1">-->
<!-- <p>This is a Template Main Context1</p>-->
<!-- <p>This is a Template Main Context2</p>-->
<!-- <p>This is a Template Main Context3</p>-->
<!-- </div>-->
<!-- <p class="reference_style">This is a template reference.</p>-->
<!--</section>-->
<section data-state="slide_OutlinePage">
<p class="slide-title">Outline</p>
<div class="slide_content_style1" style="width: 100% !important;">
<p class="slide_item_style1" style="padding-left: 10%; ">1. Introduction </p>
<p class="slide_item_style1" style="padding-left: 10%; ">2. Interface between ML and affinity prediction / MD simulation </p>
<p class="slide_item_style1" style="padding-left: 10%; ">3. Data-driven method to re-construct the binding pocket landscape </p>
<p class="slide_item_style1" style="padding-left: 10%; ">4. Augmenting prediction with molecular dynamics data </p>
<p class="slide_item_style1" style="padding-left: 10%; ">5. Progress and future work </p>
</div>
<div class="image-style1">
<img id="outline_bgimage" src="/interval_background.png" style="position: absolute; top:10px; right:-300px; opacity:0.25; transform: rotate(-30deg); height: 700px; width: auto; filter: blur(5px);" >
</div>
</section>
<section data-state="slide_VoxelBasedMethod">
<p class="slide-title">Introduction to Graph-based and 3D voxel-based methods</p>
<div class="slide_content_style1">
<table>
<tr>
<th class="content_subtitle" style="background: radial-gradient(circle at center, #95E1D380, #95E1D330) !important; width:50%; text-align: center">Pros</th>
<th class="content_subtitle" style="background: radial-gradient(circle at center, #F3818180, #F3818130) !important; width:50%; text-align: center">Cons</th>
</tr>
<tr ><td class="content_subtitle" colspan="2">Graph-based method</td></tr>
<tr>
<td style="padding-left:15px; height: 90px; background: radial-gradient(circle at center, #95E1D380, #95E1D330) !important">
<ul>
<li class="tablecell_style1">Invariance to rotation and translation</li>
<li class="tablecell_style1">Lower computational cost</li>
<li class="tablecell_style1">Greater scalability and interpretability</li>
</ul>
</td>
<td style="padding-left:15px; height: 90px; background: radial-gradient(circle at center, #F3818180, #F3818130) !important">
<ul>
<li class="tablecell_style1">Loss of precise spatial relationships</li>
<li class="tablecell_style1">Difficulty handling conformational changes</li>
<li class="tablecell_style1">Limited representation of molecular properties</li>
</ul>
</td>
</tr>
<tr><td class="content_subtitle" colspan="2">Voxel-based method</td></tr>
<tr>
<td style="padding-left:15px; height: 90px; background: radial-gradient(circle at center, #95E1D380, #95E1D330) !important">
<ul>
<li class="tablecell_style1">Straight-forward and intuitive structural representation</li>
<li class="tablecell_style1">Retains spatial configuration</li>
<li class="tablecell_style1">Incorporates multiple molecular properties</li>
<li class="tablecell_style1">Fine control over structural resolution</li>
</ul>
</td>
<td style="padding-left:15px; height: 90px; background: radial-gradient(circle at center, #F3818180, #F3818130) !important">
<ul>
<li class="tablecell_style1">Sensitive to rotation and translation</li>
<li class="tablecell_style1">High computational complexity</li>
<li class="tablecell_style1">High demand for memory capacity</li>
</ul>
</td>
</tr>
</table>
<p class="highlight_style1">The degree of complexity of molecular patterns needs to be reduced</p>
</div>
<div class="image-style1">
<img id="graphexample_img" src="/Example_GraphBasedMethod.png" style="height:40% !important"><br>
<p for="graphexample_img" class="content_subtitle">Fig1: Example graph-based method</p><br>
<img id="voxelexample_img" src="/Example_VoxelBasedMethod.png" style="height:40% !important; "><br>
<p for="voxelexample_img" class="content_subtitle">Fig2: Example voxel-based method</p>
</div>
<p class="reference_style">
Ragoza, Matthew, et al. "Protein–ligand scoring with convolutional neural networks." Journal of chemical information and modeling 57.4 (2017): 942-957.<br>
Duvenaud, David K., et al. "Convolutional networks on graphs for learning molecular fingerprints." Advances in neural information processing systems 28 (2015).<br>
Li, Yanjun, et al. "DeepAtom: A framework for protein-ligand binding affinity prediction." 2019 IEEE International Conference on Bioinformatics and Biomedicine (BIBM). IEEE, 2019.
</p>
</section>
<section data-state="">
<div class="chapter_break_container">
<p class="chapter_break">Interface between ML and affinity prediction / MD simulation</p>
<img title="Needed for the ">
</div>
</section>
<section data-state="slide_">
<p class="slide-title">Current methods: MD simulation in machine learning </p>
<div class="slide_content_style1">
<img src="/current_MD_method1.png" style="width:100% !important; ">
<p class="content_subtitle" style="width:100% !important; padding-bottom: 30px">Method1: Encode the atoms with their 2D descriptor and 3D descriptor of trajectory</p>
<img src="/current_MD_method3.png" style="width:100% !important; ">
<p class="content_subtitle" style="width:100% !important;">Method2: For each atom, encode their potential energy \(E_{i}\) with neural network </p>
</div>
<div class="image-style1">
<img src="/current_MD_method2.png" style="width:100% !important; ">
<p class="content_subtitle" style="width:100% !important;">Method3: Encode the 3D coordinates of atoms to RGB channels of 2D images</p>
</div>
<p class="reference_style">
Ash, Jeremy, and Denis Fourches. "Characterizing the chemical space of ERK2 kinase inhibitors using descriptors computed from molecular dynamics trajectories." Journal of chemical information and modeling 57.6 (2017): 1286-1299.<br>
Gastegger, Michael, Jörg Behler, and Philipp Marquetand. "Machine learning molecular dynamics for the simulation of infrared spectra." Chemical science 8.10 (2017): 6924-6935.<br>
Plante, Ambrose, et al. "A machine learning approach for the discovery of ligand-specific functional mechanisms of GPCRs." Molecules 24.11 (2019): 2097.
</p>
</section>
<section data-state="slide_">
<p class="slide-title">3D voxel-based binding affinity prediction</p>
<div class="slide_content_style1">
<p class="slide_item_style1">Training set: PDBBind-refined set is used as high quality training dataset and test set</p>
<p class="slide_item_style1">Learning method: 3D convolutional neural network</p>
<p class="slide_item_style1">Feature engineering: Both simple and complex features are used differently</p>
<p class="slide_item_style1">Feature engineering: Different combinations/arrangement of features</p>
<p class="highlight_style1">Predicted results are biased</p>
<p class="highlight_style1">No shared architecture for systematic tests</p>
</div>
<div class="image-style1">
<img src="/pafnucy_input.jpeg" style="height:40% !important; "><br>
<p class="content_subtitle">Fig1. Input tensor represented with a 4D tensor in pafnucy</p>
<img src="/gnina_atomdensity.jpeg" style="height:40% !important; ">
<p class="content_subtitle">Fig2. Visualization of atom densities used as input to CNN scoring for Gnina</p>
</div>
<p class="reference_style">
Stepniewska-Dziubinska, Marta M., Piotr Zielenkiewicz, and Pawel Siedlecki. "Development and evaluation of a deep learning model for protein–ligand binding affinity prediction." Bioinformatics 34.21 (2018): 3666-3674.<br>
Ragoza, Matthew, et al. "Protein–ligand scoring with convolutional neural networks." Journal of chemical information and modeling 57.4 (2017): 942-957.
</p>
</section>
<section data-state="slide_common_methods">
<p class="slide-title">Key hypothesis for 3D-based affinity prediction software</p>
<div class="slide_content_style1">
<p class="slide_item_style1">1. Ligand binding is primarily determined by the local atomic configurations</p>
<p class="slide_item_style1">2. Substructure reduces structural complexity of the binding pocket </p>
<p class="slide_item_style1">3. Molecular properties are able to be mapped to 3D voxels</p>
<p class="slide_item_style1">4. Various atomic/molecular properties help neural network prediction</p>
<p class="content_subtitle"><span class="highlight_style1" style="line-height: 40px">Objective: </span><br><span class="slide_item_style1" style="margin-bottom: 10px">Develop a framework to preprocess molecular dynamics trajectories and prepare input data for voxel-based binding affinity prediction.</span><br> </p>
</div>
<div style="position: absolute; top:20%; left:50%; width:450px !important; height: 450px !important">
<div id="slide_RefPDBAndBox" style="width: 100%; height: 100%"></div>
<p class="content_subtitle" style="font-size: 12px"><span style="font-weight: bold;">Stage1:</span> Protein(gray cartoon); ligand and pocket(green sticks), bounding box(light green cuboid), grid points(red dots) and molecule blocks (purple cuboids)</p>
</div>
</section>
<section data-state="slide_">
<p class="slide-title">Improve the prediction with protein dynamics </p>
<div class="slide_content_style1">
<p class="slide_item_style1">1. Protein is flexible in natural condition and soaked in solvent</p>
<p class="slide_item_style1">2. Small conformational change could lead to significant change in 3D features</p>
<p class="slide_item_style1">3. Molecular dynamics simulation provides an interface to study the protein flexibility and solvent effect</p>
<p class="slide_item_style1">4. Dynamics features for static atomic configurations might enable predicting dynamic features</p>
</div>
<div class="image-style1">
<img src="/example_dynamic_protein.gif" style="width: 75% !important;">
<p class="content_subtitle">Fig. Example ligand in molecular dynamics simulation </p>
</div>
</section>
<section data-state="">
<div class="chapter_break_container">
<p class="chapter_break">Data-driven method to re-construct the binding pocket landscape</p>
<img title="Needed for the ">
</div>
</section>
<section data-state="slide_">
<p class="slide-title">Feature database construction </p>
<!--Attach the representation of the Package Archetecture-->
<div class="gallery" style="top:30% !important;">
<figure class="gallery-item ColorSwapOnHover image-gray">
<img style="height: 200px !important; " src="/MD_workflow_2.png">
<figcaption>Fig1: Batch MD simulation using the ACGui</figcaption>
</figure>
<figure class="gallery-item ColorSwapOnHover image-gray">
<img src="/Step1_2_Trajectory_processing_pipeline.jpg">
<figcaption>Fig2: Automated pipeline for trajectory decomposition</figcaption>
</figure>
<figure class="gallery-item ColorSwapOnHover image-gray">
<img src="/Step1_3_Example_feature_block.png">
<figcaption>Fig3: Feature generation, representation, and storage</figcaption>
</figure>
</div>
</section>
<section data-state="slide_TrajectoryPreparation">
<p class="slide-title">Training material (Trajectory) generation </p>
<div class="slide_content_style1">
<p class="slide_item_style1">1. ACGui batch MD simulation is used for standard simulation system preparation </p>
<p class="slide_item_style1">2. Protein-ligand complexes are sourced from the PDBBind refined set</p>
<p class="slide_item_style1">3. In the first batch of simulations, 75 pairs of complexes are simulated 4 times for 50ns per run</p>
<p class="slide_item_style1">4. Sequence embedding is applied to eliminate protein redundancy </p>
</div>
<div class="image-style1">
<figure class="gallery-item gallery-item_style2" style="width: 100% !important; ">
<img id="traj_prep" src="/MD_workflow_2.png" style="width: 100% !important; height: auto;">
<figcaption class="content_subtitle" for="traj_prep">Fig. Workflow of the ACGui batch MD simulation. </figcaption>
</figure>
</div>
<p class="reference_style">
Liu, Zhihai, et al. "Forging the basis for developing protein–ligand interaction scoring functions." Accounts of chemical research 50.2 (2017): 302-309.<br>
Ranjan, Chitta, Samaneh Ebrahimi, and Kamran Paynabar. "Sequence graph transform (SGT): A feature embedding function for sequence data mining." arXiv preprint arXiv:1608.03533 (2016).
</p>
</section>
<section data-state="slide_ExampleMolBlock">
<p class="slide-title">Segmenting a molecule block </p>
<!--Example image of a molecule block (box) as well as protein structure -->
<div class="slide_content_style1">
<p class="slide_item_style1">1. Each molecule block is segmented by consecutive residues, top 6 segments are retained</p>
<p class="slide_item_style1">2. Atoms from consecutive residues (within N residues) are considered to be in one segment </p>
<p class="slide_item_style1">3. For the computation of chemical features, full residues are maintained if at least one atom falls into the bounding box (Stage1)</p>
<p class="slide_item_style1">4. Segments are ordered from the most atoms to the least atoms (Stage2)</p>
<p class="slide_item_style1">5. Each segment computes multiple types of descriptors as fingerprint(topological, chemical and geometric) </p>
</div>
<div class="image-style1" style="top:10% !important; ">
<div id="mol_block_pdb" class="mol-container" style="left: 50% !important; transform: translate(-50%) !important; height: 200px !important; width: 100% !important;"></div>
<p for="mol_block_pdb" class="content_subtitle" style="margin-top: 10px !important; position: relative; text-align: left !important; ">Stage1: Example of a "molecule block" shown in stick representation, with the bounding box colored purple. </p><br>
<div id="mol_block_mesh" style="height: 200px !important; width: 100% !important; position: relative; left: 50% !important; transform: translate(-50%) !important; "></div>
<p for="mol_block_mesh" class="content_subtitle" style="margin-top: 10px !important; position: relative; text-align: left !important; ">Stage2: Visualization of the segmented "molecule block" as a triangular mesh. Segments are arranged in a gradient from dark to bright colors, indicating their order. </p>
</div>
</section>
<section data-state="slide_Intro3DInterpolation">
<p class="slide-title">Feature interpolation from coordinates to mesh grids</p>
<!--Attach the function to interpolate and the method to distribute features -->
<div class="slide_content_style1">
<p class="slide_item_style1">1. Initialize a grid over each molecule block and create a Kd-tree for neighbor search</p>
<p class="slide_item_style1">2. Select a coordinate and query the grid point within a specified distance</p>
<p class="slide_item_style1">3. Rasterize the "weight" of the coordinate (e.g. atomic mass) using Gaussian: <br><span style="padding-left:100px">\(w_i = \frac{w_0}{\sigma \sqrt{2\pi}}exp\left(-\frac{\left(r_i-r_0\right)^2}{\sigma^2}\right)\)</span> </p>
<!-- <p class="slide_item_style1" style="padding-left: 20px !important; ">where \(r_i\) is the grid point coordinate and the \(r_0\) is the reference atom coordinate, \(w_0\) is the feature weight of that atom, and \(\sigma\) is the smoothness of the interpolation </p>-->
<p class="slide_item_style1">4. Repeat steps 2 and 3 until all atoms in the bounding box are processed </p>
</div>
<div class="image-style1">
<img id="feature_interp_exp1" src="/tmp05dodkbp_final_feat.png" style="height: 36%">
<img id="feature_struct_exp1" src="/tmp05dodkbp_final_conf.png" style="height: 36%">
<p class="content_subtitle">Fig1: Example structure 1 within bounding box (right) and its rasterized features (left); Grid points are sized according to the cumulative weight of atom numbers. Residues are depicted with gold sticks, and the bounding box is colored pink.</p>
<img id="feature_interp_exp2" src="/tmpo_gbx3oc_final_feat.png" style="height: 36%">
<img id="feature_struct_exp2" src="/tmpo_gbx3oc_final_conf.png" style="height: 36%">
<p class="content_subtitle">Fig2: Example structure 2 within bounding box (right) and its rasterized features (left); Color scheme is as the previous figure. </p>
</div>
</section>
<section data-state="slide_RepresentingMolBlock">
<p class="slide-title">Molecule block embedding</p>
<!-- Attach a picture of several residues inside a box -->
<div class="slide_content_style1">
<p class="slide_item_style1 highlight_style1" style="margin-bottom: 10px !important; ">
The fingerprint has to be equivariant to rotation and translation
</p>
<p class="slide_item_style1">1. Topological, chemical, and geometric features are concatenated to represent a single segment</p>
<p class="slide_item_style1">2. Six segment vectors are concatenated to represent a molecule block</p>
<p class="slide_item_style1">3. The geometric feature is computed by segments' molecular surface</p>
<p class="slide_item_style1">4. Each molecular surface is down-sampled (as point clouds) and saved for further use</p>
<p class="slide_item_style1">5. Viewpoint components (discuss later) represent the relative positions between segments</p>
<!-- <p class="slide_item_style1">4. The feature vectors of each segment are ultimately joined by their "relative position" features</p>-->
</div>
<div class="image-style1" style="width: 50%" >
<table style="text-align: left; width: 100%; ">
<tbody style="font-size: 15px">
<tr><th class="content_subtitle" style="font-weight: bold; ">Topological feature</th></tr>
<tr><td style="padding-bottom: 5px">Atom number</td></tr>
<tr><td style="padding-bottom: 5px">Carbon Number</td></tr>
<tr><td style="padding-bottom: 5px">Hydrogen Number</td></tr>
<tr><td style="padding-bottom: 5px">Nitrogen number</td></tr>
<tr><td style="padding-bottom: 5px">Oxygen Number</td></tr>
<tr><td style="padding-bottom: 5px">Pseudo LJ: \(E_{lj} = 4 * \epsilon * ((\frac{\sigma}{r})^{12} - (\frac{\sigma}{r})^{6})\)</td></tr>
<tr><td style="padding-bottom: 5px">Pseudo Elec: \(E_{el} = k*\frac{q_{1}*q_{2}}{r}\)</td></tr>
<tr><th class="content_subtitle" style="font-weight: bold; ">Chemical feature</th></tr>
<tr><td style="padding-bottom: 5px">Donor number: SMARTS search</td></tr>
<tr><td style="padding-bottom: 5px">Acceptor number: SMARTS search</td></tr>
<tr><td style="padding-bottom: 5px">Positive charge: Gasteiger algorithm</td></tr>
<tr><td style="padding-bottom: 5px">Negative charge: Gasteiger algorithm</td></tr>
<tr><th class="content_subtitle" style="font-weight: bold; ">Geometric feature</th></tr>
<tr><td style="padding-bottom: 5px">Surface Area</td></tr>
<tr><td style="padding-bottom: 5px">Occupied Volume</td></tr>
<tr><td style="padding-bottom: 5px">Mean radius</td></tr>
<tr><td style="padding-bottom: 5px">Convex hull</td></tr>
</tbody>
</table>
<p class="" style="font-weight: bold">Table1: Available features </p>
</div>
<p class="reference_style">Sanner, M. F., Olson A.J. & Spehner, J.-C. (1996). Reduced Surface: An Efficient Way to Compute Molecular Surfaces. Biopolymers 38:305-320.</p>
</section>
<section data-state="slide_">
<p class="slide-title">Combine the structural features with geometric features</p>
<div class="slide_content_style1">
<p class="slide_item_style1 highlight_style1" style="margin-bottom: 10px !important; ">Robust and specific substructure fingerprint for similarity computation </p>
<p class="slide_item_style1">Viewpoint Feature Histogram (VFH) is suitable for object recognition and pose identification</p>
<p class="slide_item_style1">Viewpoint components might help make the feature vector comparison rotationally invariant</p>
<p class="slide_item_style1"></p>
</div>
<div class="image-style1">
<img src="/Viewpoint_Segments.jpg" alt="" style="height: 50% !important; ">
<p class="content_subtitle">Fig1. Schematic 2D representation of viewpoint component and segmentation </p>
</div>
<div class="image-style2">
<img src="/VFH_sig.png" alt="" style="width:75% !important; ">
<p class="content_subtitle">Fig2. Viewpoint Feature Histogram (VFH) signatures of two different poses</p>
</div>
<p class="reference_style">
Rusu, Radu Bogdan, et al. "Fast 3d recognition and pose using the viewpoint feature histogram." 2010 IEEE/RSJ International Conference on Intelligent Robots and Systems. IEEE, 2010.<br>
Sidor, Kamil, and Marian Wysocki. "Recognition of human activities using depth maps and the viewpoint feature histogram descriptor." Sensors 20.10 (2020): 2940.
</p>
</section>
<section data-state="slide_">
<p class="slide-title">Compute structural similarity from fingerprint</p>
<div class="slide_content_style1">
<p class="slide_item_style1 highlight_style1" style="margin-bottom: 10px !important; ">
Require a robust metric to measure the similarity between two molecule blocks
</p>
<p class="slide_item_style1">Each box contains 6 segments in maximum and similarity is calculated by: </p>
<p class="slide_item_style1" style="text-align: center !important; ">
\( S=\frac{ \sum_{i=0}^{N} (C_{i} \times w_{i})}{\sum_{i=0}^{N}w_{i}} \)
</p>
<p class="slide_item_style1">where \(w_{i}\) is the weight of the segment \(i\),</p>
<p class="slide_item_style1">\( C_{i} \) is the cosine similarity between the test segment i and the target segment i:</p>
<p class="slide_item_style1" style="text-align: center !important; ">
\(C_{i}=\frac{V_{i} \cdot V_{target_i}}{\left \|V_{i} \right \| \ \left\|V_{target_i}\right\|}\)
</p>
<!--S=\frac{ \sum_{i=0}^{N} (C_{i} \times w_{i})}{\sum_{i=0}^{N}w_{i}} -->
<!--C_{i}=\frac{V_{i} \cdot V_{target}}{\left \|V_{i} \right \| \ \left\|V_{target}\right\|}-->
</div>
<div class="image-style1">
<img style="height:80% !important; width: auto !important;" src="/cosine_similarity.jpg">
<p class="content_subtitle">Fig. A paradigm for cosine similarity computation</p>
</div>
<p class="reference_style">https://en.wikipedia.org/wiki/Cosine_similarity<br>https://aitechtrend.com/how-cosine-similarity-can-improve-your-machine-learning-models/</p>
</section>
<!--<section data-state="slide_">-->
<!-- <p class="slide-title">This is a Template Slide for Demonstration</p>-->
<!-- <div class="slide_content_style1">-->
<!-- <p>This is a Template Main Context1</p>-->
<!-- <p>This is a Template Main Context2</p>-->
<!-- <p>This is a Template Main Context3</p>-->
<!-- </div>-->
<!-- <p class="reference_style">This is a template reference.</p>-->
<!--</section>-->
<section data-state="">
<div class="chapter_break_container">
<p class="chapter_break">Augmenting prediction with molecular dynamics data</p>
<img title="Needed for the ">
</div>
</section>
<section data-state="slide_">
<!--Page 17-->
<p class="slide-title">Binding affinity data augmentation workflow</p>
<div class="slide_content_style1">
<p class="slide_item_style1">Step 1: Load trajectories to a trajectory loader and register needed features </p>
<p class="slide_item_style1">Step 2: Set the box size to ~20\(Å^{3}\) with grid resolution of ~0.5Å; Align it to the center of ligand</p>
<p class="slide_item_style1">Step 3: Compute the time series of closest-atom-pairs distances (CAPD) </p>
<p class="slide_item_style1">Step 4: For each representative frame, compute the penalty by its deviation from CAPD</p>
<p class="slide_item_style1">Step 5: Label frames by </p>
<p class="slide_item_style1" style="text-align: center !important;">\(L = K_{i/d} \times (1-p)\)</p>
<p class="slide_item_style1">Step 6: Store the feature vectors and labels to database as training dataset</p>
</div>
<div class="image-style1">
<img src="/Augment_current_methods.jpg" style="height:90% !important;">
<p class="content_subtitle">Fig. Augment of binding affinity data from representative frames of trajectories</p>
</div>
</section>
<section data-state="slide_">
<p class="slide-title">Protein dynamics landscape re-construction</p>
<div class="gallery" style="top:30% !important;">
<figure class="gallery-item ColorSwapOnHover image-gray" >
<img src="/Step2_1_Image_pocket.png">
<figcaption>Fig1: An example binding pose to predict</figcaption>
</figure>
<figure class="gallery-item ColorSwapOnHover image-gray" >
<img src="/Step2_2_MBlock_retrieval.jpg">
<figcaption>Fig2: Binding pocket extraction, "molecule block" retrieval and feature reassembly</figcaption>
</figure>
<figure class="gallery-item ColorSwapOnHover image-gray gallery-item_style4" style="height: auto !important; width: 30% !important; ">
<img style="height: 100% !important; width: 100%" src="/Step_3DCNN_prediction.png">
<figcaption>Fig3: Affinity prediction via machine learning model (3D-CNN) </figcaption>
</figure>
</div>
</section>
<section data-state="slide_PocketReassembleDetails">
<p class="slide-title">Pocket reassembly workflow</p>
<div class="slide_content_style1">
<p class="slide_item_style1">Step 1. Extract the binding site at the interface and establish the corresponding mesh grid </p>
<p class="slide_item_style1">Step 2. Sample sub-blocks within the bounding box and retrieve features from database </p>
<p class="slide_item_style1">Step 3. Retain sub-blocks with similarity greater than a threshold (~0.9) </p>
<p class="slide_item_style1">Step 4. Align the two sub-blocks and obtain the transformation matrix </p>
<p class="slide_item_style1">Step 5. Transform the feature coordinate and map voxels to the target mesh grid</p>
</div>
<div class="image-style1">
<img id="img_pocket_assemble" src="/Step2_2_MBlock_retrieval.jpg" style="width: 100%; ">
<p class="content_subtitle">Fig1. Illustrating the process of converting a static docked pose into features suitable for training. </p>
</div>
</section>
<section data-state="slide_ICPRegistration">
<p class="slide-title">Align feature vectors</p>
<div class="slide_content_style1">
<p class="slide_item_style1"><span style="font-weight: bold;">Iterative closest point (ICP)</span> is a widely-used method for aligning two point clouds</p>
<p class="slide_item_style1"><span style="font-weight: bold;">Point cloud</span> is stored when depositing the feature vectors into the database</p>
<p class="slide_item_style1">Two main steps are iterated during point cloud registration: </p>
<p class="slide_item_style1" style="padding-left: 20px; ">1. Find correspondence set \(\kappa = \{(p,q)\}\) from target point cloud \(P\), and source point cloud \(Q\) </p>
<p class="slide_item_style1" style="padding-left: 20px; ">2. Update the transformation matrix by minimizing an objective function. </p>
<p class="slide_item_style1">For example, the objective function of Point-to-Point ICP algorithm is </p>
<p class="slide_item_style1" style="text-align: center !important;">\(E(T) = \sum_{(p,q)\in\kappa}{\left\|p-Tq \right\|^2}\)</p>
<!-- <p class="slide_item_style1">The output is a refined transformation matrix (Rotation matrix + translation)</p>-->
</div>
<div class="image-style1">
<img style="height:80% !important; width: auto !important;" src="https://camo.githubusercontent.com/f28d342e5dc26c660ba6184a1265fd55cf69607e2ea4cadb11919a2d8ef63491/68747470733a2f2f6361732d61737369676e6d656e742e72656164746865646f63732e696f2f656e2f6c61746573742f5f696d616765732f6963705f616e696d6174696f6e2e676966">
<p class="content_subtitle">Fig. A visual demonstration of how the ICP algorithm aligns two point clouds</p>
</div>
<p class="reference_style">Besl, Paul J., and Neil D. McKay. "Method for registration of 3-D shapes." Sensor fusion IV: control paradigms and data structures. Vol. 1611. Spie, 1992.<br>https://github.com/yassram/iterative-closest-point</p>
</section>
<section data-state="slide_">
<p class="slide-title">Network architecture</p>
<div class="slide_content_style1">
<p class="highlight_style1">General-purpose network architecture</p>
<p class="slide_item_style1">ResNet: Solved the vanishing gradient problem enabling very deep network structure</p>
<p class="slide_item_style1">DenseNet: Improved gradient flow, feature reuse, and network efficiency</p>
<p class="highlight_style1">Other network architecture</p>
<img src="/gnina_networks.jpeg" style="width:80% !important;">
<p class="content_subtitle" style="text-align: center !important">Fig3. Network architecture used in Gnina</p>
</div>
<div class="image-style3">
</div>
<div class="image-style1">
<img src="/RoseNet_prediction_method.jpeg" style="width:100% !important;">
<p class="content_subtitle">Fig1. Input features used in RosENet (based on ResNet)</p>
<img src="/DenseNet_prediction_method.jpeg" style="margin-top:20px; width:100% !important;" alt="">
<p class="content_subtitle">Fig2. Schematic of the DenseNet architecture in a model</p>
</div>
<p class="reference_style" style="margin-bottom: -45px !important">
Imrie, Fergus, et al. "Protein family-specific models using deep neural networks and transfer learning improve virtual screening and highlight the need for more data." Journal of chemical information and modeling 58.11 (2018): 2319-2330.<br>
Hassan-Harrirou, Hussein, Ce Zhang, and Thomas Lemmin. "RosENet: improving binding affinity prediction by leveraging molecular mechanics energies with an ensemble of 3D convolutional neural networks." Journal of chemical information and modeling 60.6 (2020): 2791-2802.<br>
Francoeur, Paul G., et al. "Three-dimensional convolutional neural networks and a cross-docked data set for structure-based drug design." Journal of chemical information and modeling 60.9 (2020): 4200-4215.
</p>
</section>
<section data-state="">
<div class="chapter_break_container">
<p class="chapter_break">Progress and future work</p>
<img title="Needed for the ">
</div>
</section>
<section data-state="slide_KeyChildModule">
<p class="slide-title">Development of key subsystems</p>
<div class="slide_content_style1">
<p class="slide_item_style1"><span style="color:#285430; font-weight: bold">Training material:</span> Trajectories of complexes selected from PDBBind </p>
<p class="slide_item_style1"><span style="color:#285430; font-weight: bold">Trajectory loader:</span> Arrayify trajectories and automate featurization pipeline</p>
<p class="slide_item_style1"><span style="color:#285430; font-weight: bold">Molecule block generator:</span> Crop arbitrary molecule blocks </p>
<!-- Fingerprint of molecule blocks-->
<p class="slide_item_style1"><span style="color:#285430; font-weight: bold">Featurizer:</span> Transform molecule blocks into feature vectors</p>
<p class="slide_item_style1"><span style="font-weight: bold">Database depositor:</span> Feature vector storage, retrieval, and comparison</p>
<p class="slide_item_style1" style="padding-left: 40px"><span style="color:#285430; font-weight: bold">Temporary solution:</span> HDF (Hierarchical Data Format) file</p>
<p class="slide_item_style1"><span style="color:#C58940; font-weight: bold">Input structure processor:</span> Feature retrieval and pocket reassembly</p>
<p class="slide_item_style1"><span style="font-weight: bold">Neural network:</span> Predict binding affinities</p>
</div>
<div class="image-style1">
<figure class="gallery-item gallery-item_style2" style="width: 100% !important;">
<img id="img_childmodules" src="/Overall_workflow.jpg" style="width: 100% !important; height: auto; ">
<figcaption class="content_subtitle" for="img_childmodules">Fig. Overall workflow of trajectory featurization and machine learning model training, and the organization of corresponding sub-systems</figcaption>
</figure>
</div>
</section>
<section data-state="slide_FutureWork">
<p class="slide-title">Future Work</p>
<div class="slide_content_style1">
<p class="slide_item_style1">1. Augment ligand affinity data with molecular dynamics trajectory</p>
<p class="slide_item_style1">2. Re-train some existing machine learning models with the augmented data</p>
<p class="slide_item_style1">3. Build a feature database for molecule block storage/retrieval (16 examples in Stage1)</p>
<p class="slide_item_style1">4. Develop an efficient algorithm for aligning two molecule blocks</p>
<p class="slide_item_style1">5. Add more registrable features in the trajectory processing pipeline</p>
</div>
<div style="position: absolute; top:20%; left:50%; width:450px !important; height: 450px !important">
<div id="final_ply_display" style="width: 100%; height: 100%"> </div>
<p class="content_subtitle">Stage1: 3D visualization of 16 example molecule blocks</p>
</div>
</section>
<section data-state="Page_Acknowledgement">
<p class="slide-title">Acknowledgement</p>
<ul class="slide_content_style1">
<li>Amedeo Caflisch</li>
<li>Andreas Vitalis</li>
<p class="highlight_style1" style="margin-left: -20px !important;">ACGui Developers: </p>
<li>Fabian Radler</li>
<li>Cassiano Langini</li>
<p class="highlight_style1" style="margin-left: -20px !important;">Computationalists:</p>
<li>Francesco Cocina</li>
<li>Julian Widmer</li>
<p class="highlight_style1" style="margin-left: -20px !important;">All the members in Caflisch group</p>
</ul>
<!-- <div class="slide_content_style1" style="width:50%">-->
<!-- </div>-->
<div class="image-style1" style="top:10% !important;">
<img id="img_group" src="/Group_photo.jpg" style="width: 100%; height:auto">
<img id="img_group2" src="/Group_photo2.png" style="width: 100%; height:auto">
</div>
</section>
<!--<section data-state="slide_">-->
<!-- <p class="slide-title">This is a Template Slide for Demonstration</p>-->
<!-- <div class="slide_content_style1">-->
<!-- <p>This is a Template Main Context1</p>-->
<!-- <p>This is a Template Main Context2</p>-->
<!-- <p>This is a Template Main Context3</p>-->
<!-- </div>-->
<!-- <p class="reference_style">This is a template reference.</p>-->
<!--</section>-->
</div>
</div>
<script>
// Define the function to load ply file
async function getGithubContents(url){
    // Fetch a file through the GitHub contents API and return its
    // base64-encoded content. Three routes are tried in order:
    //   1. the content is inlined in the response,
    //   2. the response carries a `git_url` pointing at the blob,
    //   3. only a `sha` is present, so the blob URL is rebuilt by hand
    //      (presumably for files too large to inline — TODO confirm).
    // Returns undefined when none of the routes apply.
    // Fix: the previous version assigned `ret`/`data`/`ret_blob`/`data2`
    // without declarations, leaking them as implicit globals.
    const ret = await fetch(url, {headers: github_auth});
    const data = await ret.json();
    if (Object.hasOwn(data, "content") && data.content.length > 0) {
        console.log("Download file from github: route 1", url);
        return data.content;
    } else if (Object.hasOwn(data, "git_url")) {
        console.log("Download file from github: route 2", data.git_url);
        const retBlob = await fetch(data.git_url, {headers: github_auth});
        const blobData = await retBlob.json();
        return blobData.content;
    } else if (Object.hasOwn(data, "sha")) {
        const blobUrl = url.replace(/contents.*/, "git/blobs") + "/" + data.sha;
        console.log("Download file from github: route 3", blobUrl);
        const retBlob = await fetch(blobUrl, {headers: github_auth});
        const blobData = await retBlob.json();
        return blobData.content;
    }
}
async function loadImageBase64(fileurl) {
    // Load an image from a GitHub contents URL and return it as a
    // base64 data URI suitable for an <img src="...">.
    // Fix: unknown extensions used to yield the literal prefix
    // "undefined" + data; now a MIME map with a generic fallback is used.
    const MIME_BY_EXT = {
        png: 'data:image/png;base64,',
        jpg: 'data:image/jpeg;base64,',
        jpeg: 'data:image/jpeg;base64,',
        gif: 'data:image/gif;base64,',
    };
    try {
        const data = await getGithubContents(fileurl);
        const ext = fileurl.split('/').pop().split('.').pop().toLowerCase();
        const dataUriPrefix = MIME_BY_EXT[ext] ?? 'data:application/octet-stream;base64,';
        return dataUriPrefix + data;
    } catch (error) {
        // Best-effort: log and return undefined so a single failed image
        // does not break the whole slide deck.
        console.error('An error occurred while fetching the file:', error);
    }
}
async function setupImage(image, figurl){
    // Resolve `image` (an element, or an element id) and set its src from
    // `figurl`, which may be a base64 data URI, an http(s) URL, a bare
    // file name inside the repo, or a local file path.
    let img;
    if (image instanceof HTMLElement) {
        img = image;
        console.log("image is an HTMLElement");
    } else if (typeof image === 'string') {
        img = document.getElementById(image);
        if (img == null) {
            console.log("No such image id: ", image, " in the slide.");
            return;
        }
    } else {
        // Fix: previously fell through with `img` undefined and threw a
        // TypeError on the first `img.src` assignment below.
        console.log("setupImage: unsupported image argument: ", image);
        return;
    }
    if (figurl.startsWith("data")) {
        // Base64 data URI as input — use it directly.
        img.src = figurl;
    } else if (figurl.startsWith("http")) {
        // Remote URL as input — fetch and inline it.
        img.src = await loadImageBase64(figurl);
    } else if (figurl.length > 0 && figurl.length < 70) {
        // Short strings are treated as image file names inside the repo.
        // NOTE(review): the 70-character cutoff is a heuristic — confirm.
        img.src = await loadImageBase64(repo_url + figurl);
    } else {
        // Actual file in the root folder as input.
        img.src = figurl;
    }
}
// Load all of images in the repo to an object: FIGURES
async function getImagesFromRepo() {
    // Download every image file in the repo and cache it as a base64 data
    // URI in the global FIGURES object, keyed by bare file name.
    const IMAGE_EXTS = ['.png', '.jpg', '.jpeg', '.gif'];
    try {
        // Get the contents listing of the repo.
        const response = await fetch(repo_url, {headers: github_auth});
        const data = await response.json();
        for (const file of data) {
            if (IMAGE_EXTS.some((ext) => file.name.endsWith(ext))) {
                // Strip any query string and directory prefix.
                // Fix: `filename` was previously assigned without a
                // declaration, leaking an implicit global.
                const filename = file.name.split('?')[0].split('/').pop();
                FIGURES[filename] = await loadImageBase64(repo_url + filename);
            }
        }
    } catch (error) {
        console.error("An error occurred:", error);
    }
}
// Shortcut function to get the name of an element or an element id;
// The <src> attribute of the image should use the name of the image file
function getFileName(element){
    // Return the file-name portion of an image's src attribute.
    // `element` may be an element id (string) or the element itself.
    // Returns undefined for any other argument type.
    let elem;
    if (typeof element === 'string' || element instanceof String) {
        elem = document.getElementById(element);
    } else if (element instanceof HTMLElement) {
        elem = element;
    } else {
        return undefined; // neither an id nor an element — made explicit
    }
    // Last path segment of the src URL (query string, if any, included —
    // matching the original behavior).
    return elem.src.split("/").pop();
}
function loadPLYMesh(plycontent, scene, camera, light) {
    // Parse a PLY string into a vertex-colored, flat-shaded THREE.Mesh,
    // translated so its bounding-box center sits at the origin.
    const geometry = new PLYLoader().parse(plycontent);
    geometry.computeVertexNormals();
    const mesh = new THREE.Mesh(
        geometry,
        new THREE.MeshStandardMaterial({vertexColors: true, flatShading: true})
    );
    geometry.computeBoundingBox();
    const boxCenter = geometry.boundingBox.getCenter(new THREE.Vector3());
    mesh.position.set(-boxCenter.x, -boxCenter.y, -boxCenter.z);
    // Keep the light co-located with the camera on every render pass.
    mesh.onBeforeRender = function (renderer, scene, camera) {
        light.position.copy(camera.position);
    };
    return mesh;
}
async function addPLYtoStage(fileurl, scene, renderer, camera, light, offsets){
    // Fetch a base64-encoded PLY file from GitHub, build a centered mesh,
    // shift it by the given [x, y, z] offsets and add it to the scene.
    // Returns the stage handles so callers can chain setup steps.
    const plycontent = atob(await getGithubContents(fileurl));
    // Fix: `themesh` was a function-scoped `var`; `const` is correct here.
    const themesh = loadPLYMesh(plycontent, scene, camera, light);
    themesh.geometry.computeBoundingBox();
    const boundingBox = themesh.geometry.boundingBox;
    const center = boundingBox.getCenter(new THREE.Vector3());
    themesh.position.set(-center.x + offsets[0], -center.y + offsets[1], -center.z + offsets[2]);
    scene.add(themesh);
    return [scene, renderer, camera, light];
}
async function initPLYObject(divid, fileurl){
// Initialize the scene with a ply object
const THREE = window.THREE;
const OrbitControls = window.OrbitControls ;
console.log("Adding Ply object")
// Setup for Renderer
const renderer = new THREE.WebGLRenderer();
var camera
if (divid.length > 0){
var container = document.getElementById(divid);
console.log("using divid", divid, "Width", container.clientWidth, "Height: ", container.clientHeight)
renderer.setSize(container.clientWidth, container.clientHeight);
container.appendChild(renderer.domElement);
const aspectRatio = container.clientWidth / container.clientHeight;
camera = new THREE.PerspectiveCamera(75, aspectRatio, 0.1, 1000);
camera.position.z = 5;
} else {
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, 0.1, 1000);
camera.position.z = 5;