{
"lang": "en",
"data": {
"FORM": {
"appserviceplan": {
"name": "Specify the name which should be used for this Service Plan.",
"platform": "Select the O/S type for the App Services to be hosted in this plan. Supported values include `Windows` and `Linux`.",
"location": "Select the Azure Region where the Service Plan should exist.",
"serversize": "Select the SKU size for the plan.",
"noofinstances": "Specify the number of Workers (instances) to be allocated."
},
"addTaintForm": {
"key": "Specify the Key for taint. Example- `taint-key`.",
"value": "Specify the Value for taint. Example- `taint-value`.",
"effect": "Select the taint effect."
},
"cwrule-target-add": {
"Target Name": "Specify the name of the rule you want to add targets to.",
"Target Type": "Select the Target type."
},
"azureAddVMScaleSet": {
"Name": "The name of the virtual machine scale set resource.",
"Subnets": "Select the subnet.",
"InstanceType": "The size of the Virtual Machine.",
"Capacity": "The number of virtual machines in the scale set.",
"ImageId": "Choose the Image for the VM. Image should be compatible with the agent platform. Select type as \"Other\" if you don't see desired option in dropdown.",
"Username": "The administrator username for the VM.",
"Password": "The administrator username for the VM."
},
"lbListener": {
"lbType": "Type of AWS loadbalancer. It supports following two types\n1. **Application**: Use this type when user wants to expose HTTP or HTTPS port using the Loadbalancer.\n2. **Network**: For any other ports use this type.\n",
"ContainerPort": "Port exposed by the container.",
"ExternalPort": "Port user want to expose using loadbalancer. It may not be same as to the container port.\n",
"customCidrs": "Specify CIDR and click on Add. You can add multiple CIDR Values.\n",
"visibility": "Visibility of the service to be exposed which can be either Internal or Public.\n1 **Internal Only**: When this option is selected service will accessible within the infrastructure created in AWS.\n2. **Public**: When this option is selected, service will be accessible to the internet. \n",
"HealthCheck": "Health check URL for this container. This required parameter when Application loadbalancer is selected. \nIt helps AWS ALB to decide whether service is up and running.\n",
"BackendProtocol": "Protocol for the service exposed by the container.",
"BeProtocolVersion": "Protocol version only applicable when application loadbalancer is selected\n1. **HTTP1**: Send requests to targets using HTTP/1.1. Supported when the request protocol is HTTP/1.1 or HTTP/2.\n2. **HTTP2**: Send requests to targets using HTTP/2. Supported when the request protocol is HTTP/2 or gRPC, but gRPC-specific features are not available.\n3. **gRPC**: Send requests to targets using gRPC. Supported when the request protocol is gRPC.\n",
"Certificates": "AWS Certificate to be attached to loadbalancer to expose the service over SSL.",
"TgCount": "Target group count.",
"HealthyThresholdCount": "The number of consecutive health checks successes required before considering an unhealthy target healthy.",
"UnHealthyThresholdCount": "The number of consecutive health check failures required before considering a target unhealthy.",
"HealthCheckTimeoutSeconds": "The amount of time, in seconds, during which no response means a failed health check.",
"HealthCheckIntervalSeconds": "The approximate amount of time between health checks of an individual target.",
"HttpSuccessCode": "The HTTP codes to use when checking for a successful response from a target. You can specify multiple values (for example, \"200,202\") or a range of values (for example, \"200-299\").",
"GrpcSuccessCode": "The gRPC codes to use when checking for a successful response from a target. You can specify multiple values (for example, \"20,25\") or a range of values (for example, \"0-99\"). Only applicable when protocol version is selected as **GRPc**."
},
"dataPipelineAddImport": {
"Name": "Unique Data Pipeline Name",
"PipeLineDef": "Please provide Data Pipeline defination json. Provide EmrCluster details If using existing EmrCluster.\n ```js\n {\n \"PipelineObjects\": [\n {\n \"Id\": \"Default\",\n \"Name\": \"Default\",\n \"Fields\": [\n {\n \"Key\": \"failureAndRerunMode\",\n \"StringValue\": \"CASCADE\"\n },\n {\n \"Key\": \"pipelineLogUri\",\n \"StringValue\": \"s3://YOUR-S3-FOLDER/logs/data-pipelines/\"\n },\n {\n \"Key\": \"scheduleType\",\n \"StringValue\": \"cron\"\n }\n ]\n },\n {\n \"Id\": \"EmrConfigurationId_Q9rpL\",\n \"Name\": \"DefaultEmrConfiguration1\",\n \"Fields\": [\n {\n \"Key\": \"configuration\",\n \"RefValue\": \"EmrConfigurationId_LFzOl\"\n },\n {\n \"Key\": \"type\",\n \"StringValue\": \"EmrConfiguration\"\n },\n {\n \"Key\": \"classification\",\n \"StringValue\": \"spark-env\"\n }\n ]\n },\n {\n \"Id\": \"ActionId_SUEgm\",\n \"Name\": \"TriggerNotificationOnFail\",\n \"Fields\": [\n {\n \"Key\": \"subject\",\n \"StringValue\": \"Backcountry-clickstream-delta-hourly: #{node.@pipelineId} Error: #{node.errorMessage}\"\n },\n {\n \"Key\": \"message\",\n \"StringValue\": \"Backcountry-clickstream-delta-hourly failed to run\"\n },\n {\n \"Key\": \"type\",\n \"StringValue\": \"SnsAlarm\"\n },\n {\n \"Key\": \"topicArn\",\n \"StringValue\": \"arn:aws:sns:us-west-2:269378226633:duploservices-pravin-test-del77-128329325849\"\n }\n ]\n },\n {\n \"Id\": \"EmrActivityObj\",\n \"Name\": \"EmrActivityObj\",\n \"Fields\": [\n {\n \"Key\": \"schedule\",\n \"RefValue\": \"ScheduleId_NfOUF\"\n },\n {\n \"Key\": \"step\",\n \"StringValue\": \"#{myEmrStep}\"\n },\n {\n \"Key\": \"runsOn\",\n \"RefValue\": \"EmrClusterObj\"\n },\n {\n \"Key\": \"type\",\n \"StringValue\": \"EmrActivity\"\n }\n ]\n },\n {\n \"Id\": \"EmrConfigurationId_LFzOl\",\n \"Name\": \"DefaultEmrConfiguration2\",\n \"Fields\": [\n {\n \"Key\": \"property\",\n \"RefValue\": \"PropertyId_NA18c\"\n },\n {\n \"Key\": \"classification\",\n \"StringValue\": \"export\"\n },\n {\n \"Key\": \"type\",\n \"StringValue\": \"EmrConfiguration\"\n }\n ]\n },\n {\n \"Id\": \"EmrClusterObj\",\n \"Name\": \"EmrClusterObj\",\n \"Fields\": [\n {\n \"Key\": \"taskInstanceType\",\n \"StringValue\": \"#{myTaskInstanceType}\"\n },\n {\n \"Key\": \"onFail\",\n \"RefValue\": \"ActionId_SUEgm\"\n },\n {\n \"Key\": \"maximumRetries\",\n \"StringValue\": \"1\"\n },\n {\n \"Key\": \"configuration\",\n \"RefValue\": \"EmrConfigurationId_Q9rpL\"\n },\n {\n \"Key\": \"coreInstanceCount\",\n \"StringValue\": \"#{myCoreInstanceCount}\"\n },\n {\n \"Key\": \"masterInstanceType\",\n \"StringValue\": \"#{myMasterInstanceType}\"\n },\n {\n \"Key\": \"releaseLabel\",\n \"StringValue\": \"#{myEMRReleaseLabel}\"\n },\n {\n \"Key\": \"type\",\n \"StringValue\": \"EmrCluster\"\n },\n {\n \"Key\": \"terminateAfter\",\n \"StringValue\": \"3 Hours\"\n },\n {\n \"Key\": \"bootstrapAction\",\n \"StringValue\": \"#{myBootstrapAction}\"\n },\n {\n \"Key\": \"taskInstanceCount\",\n \"StringValue\": \"#{myTaskInstanceCount}\"\n },\n {\n \"Key\": \"coreInstanceType\",\n \"StringValue\": \"#{myCoreInstanceType}\"\n },\n {\n \"Key\": \"applications\",\n \"StringValue\": \"spark\"\n }\n ]\n },\n {\n \"Id\": \"ScheduleId_NfOUF\",\n \"Name\": \"Every 10 hr\",\n \"Fields\": [\n {\n \"Key\": \"period\",\n \"StringValue\": \"10 Hours start time 2\"\n },\n {\n \"Key\": \"startDateTime\",\n \"StringValue\": \"2022-01-07T21:21:00\"\n },\n {\n \"Key\": \"type\",\n \"StringValue\": \"Schedule\"\n },\n {\n \"Key\": \"endDateTime\",\n \"StringValue\": \"2022-01-08T15:44:28\"\n }\n ]\n },\n {\n 
\"Id\": \"PropertyId_NA18c\",\n \"Name\": \"DefaultProperty1\",\n \"Fields\": [\n {\n \"Key\": \"type\",\n \"StringValue\": \"Property\"\n },\n {\n \"Key\": \"value\",\n \"StringValue\": \"/usr/bin/python3\"\n },\n {\n \"Key\": \"key\",\n \"StringValue\": \"PYSPARK_PYTHON\"\n }\n ]\n }\n ],\n \"ParameterValues\": [\n {\n \"Id\": \"myEMRReleaseLabel\",\n \"StringValue\": \"emr-6.1.0\"\n },\n {\n \"Id\": \"myMasterInstanceType\",\n \"StringValue\": \"m3.xlarge\"\n },\n {\n \"Id\": \"myBootstrapAction\",\n \"StringValue\": \"s3://YOUR-S3-FOLDER/bootstrap_actions/your_boottrap_and_python_lib_installer.sh\"\n },\n {\n \"Id\": \"myEmrStep\",\n \"StringValue\": \"command-runner.jar,spark-submit,--packages,io.delta:delta-core_2.12:0.8.0,--conf,spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension,--conf,spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog,--num-executors,2,--executor-cores,2,--executor-memory,2G,--conf,spark.driver.memoryOverhead=4096,--conf,spark.executor.memoryOverhead=4096,--conf,spark.dynamicAllocation.enabled=false,--name,PixelClickstreamData,--py-files,s3://YOUR-S3-FOLDER/libraries/librariy1.zip,--py-files,s3://YOUR-S3-FOLDER/libraries/librariy2.zip,s3://YOUR-S3-FOLDER/your_script.py, your_script_arg1, your_script_arg2\"\n },\n {\n \"Id\": \"myEmrStep\",\n \"StringValue\": \"command-runner.jar,aws,athena,start-query-execution,--query-string,MSCK REPAIR TABLE your_database.your_table,--result-configuration,OutputLocation=s3://YOUR-S3-FOLDER/logs/your_query_parquest\"\n },\n {\n \"Id\": \"myCoreInstanceType\",\n \"StringValue\": \"m3.xlarge\"\n },\n {\n \"Id\": \"myCoreInstanceCount\",\n \"StringValue\": \"1\"\n }\n ],\n \"ParameterObjects\": [\n {\n \"Id\": \"myEC2KeyPair\",\n \"Attributes\": [\n {\n \"Key\": \"helpText\",\n \"StringValue\": \"An existing EC2 key pair to SSH into the master node of the EMR cluster as the user \\\"hadoop\\\".\"\n },\n {\n \"Key\": \"description\",\n \"StringValue\": \"EC2 key pair\"\n },\n {\n \"Key\": \"optional\",\n \"StringValue\": \"true\"\n },\n {\n \"Key\": \"type\",\n \"StringValue\": \"String\"\n }\n ]\n },\n {\n \"Id\": \"myEmrStep\",\n \"Attributes\": [\n {\n \"Key\": \"helpLink\",\n \"StringValue\": \"https://docs.aws.amazon.com/console/datapipeline/emrsteps\"\n },\n {\n \"Key\": \"watermark\",\n \"StringValue\": \"s3://myBucket/myPath/myStep.jar,firstArg,secondArg\"\n },\n {\n \"Key\": \"helpText\",\n \"StringValue\": \"A step is a unit of work you submit to the cluster. 
You can specify one or more steps\"\n },\n {\n \"Key\": \"description\",\n \"StringValue\": \"EMR step(s)\"\n },\n {\n \"Key\": \"isArray\",\n \"StringValue\": \"true\"\n },\n {\n \"Key\": \"type\",\n \"StringValue\": \"String\"\n }\n ]\n },\n {\n \"Id\": \"myTaskInstanceType\",\n \"Attributes\": [\n {\n \"Key\": \"helpText\",\n \"StringValue\": \"Task instances run Hadoop tasks.\"\n },\n {\n \"Key\": \"description\",\n \"StringValue\": \"Task node instance type\"\n },\n {\n \"Key\": \"optional\",\n \"StringValue\": \"true\"\n },\n {\n \"Key\": \"type\",\n \"StringValue\": \"String\"\n }\n ]\n },\n {\n \"Id\": \"myCoreInstanceType\",\n \"Attributes\": [\n {\n \"Key\": \"default\",\n \"StringValue\": \"m1.medium\"\n },\n {\n \"Key\": \"helpText\",\n \"StringValue\": \"Core instances run Hadoop tasks and store data using the Hadoop Distributed File System (HDFS).\"\n },\n {\n \"Key\": \"description\",\n \"StringValue\": \"Core node instance type\"\n },\n {\n \"Key\": \"type\",\n \"StringValue\": \"String\"\n }\n ]\n },\n {\n \"Id\": \"myEMRReleaseLabel\",\n \"Attributes\": [\n {\n \"Key\": \"default\",\n \"StringValue\": \"emr-5.13.0\"\n },\n {\n \"Key\": \"helpText\",\n \"StringValue\": \"Determines the base configuration of the instances in your cluster, including the Hadoop version.\"\n },\n {\n \"Key\": \"description\",\n \"StringValue\": \"EMR Release Label\"\n },\n {\n \"Key\": \"type\",\n \"StringValue\": \"String\"\n }\n ]\n },\n {\n \"Id\": \"myCoreInstanceCount\",\n \"Attributes\": [\n {\n \"Key\": \"default\",\n \"StringValue\": \"2\"\n },\n {\n \"Key\": \"description\",\n \"StringValue\": \"Core node instance count\"\n },\n {\n \"Key\": \"type\",\n \"StringValue\": \"Integer\"\n }\n ]\n },\n {\n \"Id\": \"myTaskInstanceCount\",\n \"Attributes\": [\n {\n \"Key\": \"description\",\n \"StringValue\": \"Task node instance count\"\n },\n {\n \"Key\": \"optional\",\n \"StringValue\": \"true\"\n },\n {\n \"Key\": \"type\",\n \"StringValue\": \"Integer\"\n }\n ]\n },\n {\n \"Id\": \"myBootstrapAction\",\n \"Attributes\": [\n {\n \"Key\": \"helpLink\",\n \"StringValue\": \"https://docs.aws.amazon.com/console/datapipeline/emr_bootstrap_actions\"\n },\n {\n \"Key\": \"helpText\",\n \"StringValue\": \"Bootstrap actions are scripts that are executed during setup before Hadoop starts on every cluster node.\"\n },\n {\n \"Key\": \"description\",\n \"StringValue\": \"Bootstrap action(s)\"\n },\n {\n \"Key\": \"isArray\",\n \"StringValue\": \"true\"\n },\n {\n \"Key\": \"optional\",\n \"StringValue\": \"true\"\n },\n {\n \"Key\": \"type\",\n \"StringValue\": \"String\"\n }\n ]\n },\n {\n \"Id\": \"myMasterInstanceType\",\n \"Attributes\": [\n {\n \"Key\": \"default\",\n \"StringValue\": \"m1.medium\"\n },\n {\n \"Key\": \"helpText\",\n \"StringValue\": \"The Master instance assigns Hadoop tasks to core and task nodes, and monitors their status.\"\n },\n {\n \"Key\": \"description\",\n \"StringValue\": \"Master node instance type\"\n },\n {\n \"Key\": \"type\",\n \"StringValue\": \"String\"\n }\n ]\n }\n ]\n }\n ```",
"PipeLineDefAws": "Please provide Data Pipeline defination json. Provide EmrCluster details If using existing EmrCluster.\n ```js\n {\n \"objects\": [\n {\n \"failureAndRerunMode\": \"CASCADE\",\n \"resourceRole\": \"DataPipelineDefaultResourceRole\",\n \"role\": \"DataPipelineDefaultRole\",\n \"pipelineLogUri\": \"s3://YOUR-S3-FOLDER/logs/data-pipelines/\",\n \"scheduleType\": \"cron\",\n \"name\": \"Default\",\n \"id\": \"Default\"\n },\n {\n \"configuration\": {\n \"ref\": \"EmrConfigurationId_LFzOl\"\n },\n \"name\": \"DefaultEmrConfiguration1\",\n \"id\": \"EmrConfigurationId_Q9rpL\",\n \"type\": \"EmrConfiguration\",\n \"classification\": \"spark-env\"\n },\n {\n \"role\": \"DataPipelineDefaultRole\",\n \"subject\": \"Backcountry-clickstream-delta-hourly: #{node.@pipelineId} Error: #{node.errorMessage}\",\n \"name\": \"TriggerNotificationOnFail\",\n \"id\": \"ActionId_SUEgm\",\n \"message\": \"Backcountry-clickstream-delta-hourly failed to run\",\n \"type\": \"SnsAlarm\",\n \"topicArn\": \"arn:aws:sns:us-west-2:269378226633:duploservices-pravin-test-del77-128329325849\"\n },\n {\n \"schedule\": {\n \"ref\": \"ScheduleId_NfOUF\"\n },\n \"name\": \"EmrActivityObj\",\n \"step\": \"#{myEmrStep}\",\n \"runsOn\": {\n \"ref\": \"EmrClusterObj\"\n },\n \"id\": \"EmrActivityObj\",\n \"type\": \"EmrActivity\"\n },\n {\n \"name\": \"DefaultEmrConfiguration2\",\n \"property\": {\n \"ref\": \"PropertyId_NA18c\"\n },\n \"id\": \"EmrConfigurationId_LFzOl\",\n \"classification\": \"export\",\n \"type\": \"EmrConfiguration\"\n },\n {\n \"taskInstanceType\": \"#{myTaskInstanceType}\",\n \"onFail\": {\n \"ref\": \"ActionId_SUEgm\"\n },\n \"maximumRetries\": \"1\",\n \"configuration\": {\n \"ref\": \"EmrConfigurationId_Q9rpL\"\n },\n \"coreInstanceCount\": \"#{myCoreInstanceCount}\",\n \"masterInstanceType\": \"#{myMasterInstanceType}\",\n \"releaseLabel\": \"#{myEMRReleaseLabel}\",\n \"type\": \"EmrCluster\",\n \"terminateAfter\": \"3 Hours\",\n \"availabilityZone\": \"us-west-2b\",\n \"bootstrapAction\": \"#{myBootstrapAction}\",\n \"taskInstanceCount\": \"#{myTaskInstanceCount}\",\n \"name\": \"EmrClusterObj\",\n \"coreInstanceType\": \"#{myCoreInstanceType}\",\n \"keyPair\": \"#{myEC2KeyPair}\",\n \"id\": \"EmrClusterObj\",\n \"applications\": [\n \"spark\"\n ]\n },\n {\n \"period\": \"10 Hours start time 2\",\n \"startDateTime\": \"2022-01-07T21:21:00\",\n \"name\": \"Every 10 hr\",\n \"id\": \"ScheduleId_NfOUF\",\n \"type\": \"Schedule\",\n \"endDateTime\": \"2022-01-08T15:44:28\"\n },\n {\n \"name\": \"DefaultProperty1\",\n \"id\": \"PropertyId_NA18c\",\n \"type\": \"Property\",\n \"value\": \"/usr/bin/python3\",\n \"key\": \"PYSPARK_PYTHON\"\n }\n ],\n \"parameters\": [\n {\n \"helpText\": \"An existing EC2 key pair to SSH into the master node of the EMR cluster as the user \\\"hadoop\\\".\",\n \"description\": \"EC2 key pair\",\n \"optional\": \"true\",\n \"id\": \"myEC2KeyPair\",\n \"type\": \"String\"\n },\n {\n \"helpLink\": \"https://docs.aws.amazon.com/console/datapipeline/emrsteps\",\n \"watermark\": \"s3://myBucket/myPath/myStep.jar,firstArg,secondArg\",\n \"helpText\": \"A step is a unit of work you submit to the cluster. 
You can specify one or more steps\",\n \"description\": \"EMR step(s)\",\n \"isArray\": \"true\",\n \"id\": \"myEmrStep\",\n \"type\": \"String\"\n },\n {\n \"helpText\": \"Task instances run Hadoop tasks.\",\n \"description\": \"Task node instance type\",\n \"optional\": \"true\",\n \"id\": \"myTaskInstanceType\",\n \"type\": \"String\"\n },\n {\n \"default\": \"m1.medium\",\n \"helpText\": \"Core instances run Hadoop tasks and store data using the Hadoop Distributed File System (HDFS).\",\n \"description\": \"Core node instance type\",\n \"id\": \"myCoreInstanceType\",\n \"type\": \"String\"\n },\n {\n \"default\": \"emr-5.13.0\",\n \"helpText\": \"Determines the base configuration of the instances in your cluster, including the Hadoop version.\",\n \"description\": \"EMR Release Label\",\n \"id\": \"myEMRReleaseLabel\",\n \"type\": \"String\"\n },\n {\n \"default\": \"2\",\n \"description\": \"Core node instance count\",\n \"id\": \"myCoreInstanceCount\",\n \"type\": \"Integer\"\n },\n {\n \"description\": \"Task node instance count\",\n \"optional\": \"true\",\n \"id\": \"myTaskInstanceCount\",\n \"type\": \"Integer\"\n },\n {\n \"helpLink\": \"https://docs.aws.amazon.com/console/datapipeline/emr_bootstrap_actions\",\n \"helpText\": \"Bootstrap actions are scripts that are executed during setup before Hadoop starts on every cluster node.\",\n \"description\": \"Bootstrap action(s)\",\n \"isArray\": \"true\",\n \"optional\": \"true\",\n \"id\": \"myBootstrapAction\",\n \"type\": \"String\"\n },\n {\n \"default\": \"m1.medium\",\n \"helpText\": \"The Master instance assigns Hadoop tasks to core and task nodes, and monitors their status.\",\n \"description\": \"Master node instance type\",\n \"id\": \"myMasterInstanceType\",\n \"type\": \"String\"\n }\n ],\n \"values\": {\n \"myEMRReleaseLabel\": \"emr-6.1.0\",\n \"myMasterInstanceType\": \"m3.xlarge\",\n \"myBootstrapAction\": \"s3://YOUR-S3-FOLDER/bootstrap_actions/your_boottrap_and_python_lib_installer.sh\",\n \"myEmrStep\": [\n \"command-runner.jar,spark-submit,--packages,io.delta:delta-core_2.12:0.8.0,--conf,spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension,--conf,spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog,--num-executors,2,--executor-cores,2,--executor-memory,2G,--conf,spark.driver.memoryOverhead=4096,--conf,spark.executor.memoryOverhead=4096,--conf,spark.dynamicAllocation.enabled=false,--name,PixelClickstreamData,--py-files,s3://YOUR-S3-FOLDER/libraries/librariy1.zip,--py-files,s3://YOUR-S3-FOLDER/libraries/librariy2.zip,s3://YOUR-S3-FOLDER/your_script.py, your_script_arg1, your_script_arg2\",\n \"command-runner.jar,aws,athena,start-query-execution,--query-string,MSCK REPAIR TABLE your_database.your_table,--result-configuration,OutputLocation=s3://YOUR-S3-FOLDER/logs/your_query_parquest\"\n ],\n \"myCoreInstanceType\": \"m3.xlarge\",\n \"myCoreInstanceCount\": \"1\"\n }\n }\n\n ```"
},
"awsBillingAlert": {
"Disable": "Enable or Disable the Billing Alert. If disabled, no alert will be triggered.",
"Previous Month Spend": "If selected, the threshold will be set to the monthly spend of the previous month. If not, the threshold must be set manually.",
"BudgetAmount": "Specify the custom threshold which will be used to compute the alert. Enter a positive dollar amount.",
"AlertTrigger": "Select the percentage of the threshold above which the alert will be triggered.",
"AlertTriggerOther": "Specify the custom trigger value. Enter a positive integer percent value.",
"EmailNotifications": "Select the admin users to receive the billing alert email."
},
"AddServiceBasic": {
"ServiceName": "Any friendly name to identify your service.",
"AllocationTag": "A string value that needs to be a substring of the corresponding allocation tag value set on a Host. If there is no host with such an allocation tag value then service will not get deployed. simplest example is if you want the container to be deployed in a certain set of hosts only then set an allocation tag on those hosts called web and then set the allocation tag here to be web.",
"IsDaemonset": "Enable Daemonset to run instance of this service on each host in selected tenant",
"IsAnyHostAllowed": "If \"Yes\", the service can run on any host in the same plan as the selected tenant.",
"asgName": "Selecting an auto scaling group here will ensure that the replicas of this service will match the nodes in the auto scaling group. The value set for replicas in this form will be overridden",
"ReplicaCollocation": "If this is enabled then 2 containers (replicas) of the same service can be allocated to same node. But if this is false then each replica of a certain service needs to be on a separate host. Note still a single host can hold containers from different services",
"LBSyncedDeployment": "This enables Zero Downtime Updates. When this flag is turned on then during update before tearing down a certain replica, the replica is set to draining mode in the load balancer, then removed from the LB and then an update performed then added back to the LB. This is done for each replica one by one. If this flag is turned off the containers are updated one after the other without co-ordination with the load balancer. While turning this on enables zero down time but makes the rollout slow. If the flag is off we have typically seen a 5 to 20 sec glitch for customers during upgrade. When this flag is turned on then the update takes a few minutes for each replica as against a few seconds when it is off.",
"LivenessProbe": "Example of livenessprobe\n```js\n{\n \"failureThreshold\":3,\n\n \"httpGet\":{\n \"path\":\"/healthz/live\",\n \"port\":80,\n\n \"scheme\":\"HTTP\"\n },\n \"periodSeconds\":10,\n \"successThreshold\":1,\n\n \"timeoutSeconds\":5\n}\n```\n",
"ReadinessProbe": "\"Example of readinessprobe\n```js\n{\n \"failureThreshold\":3,\n\n \"httpGet\": \n {\n \"path\":\"/healthz/ready\",\n \"port\":80,\n\n \"scheme\":\"HTTP\"\n },\n \"periodSeconds\":10,\n \"successThreshold\":1,\n\n \"timeoutSeconds\":5\n}\n```\n",
"securityContext": "Example of Security context\n```js\n{\n \"Capabilities\":\n {\n \"Add\": [\n \"NET_BIND_SERVICE\"\n ],\n \"Drop\":\n [\n \"ALL\"\n ]\n },\n \"ReadOnlyRootFilesystem\": false,\n\n \"RunAsNonRoot\": true,\n \"RunAsUser\": 1000\n}\n```\n",
"k8sPodConfig": "Any generic Kubernetes Pod configuration can be passed here, for\nexample resource limits, restart policy. As an example: \n```js\n{\n \"RestartPolicy\":\n \"Always\",\n \"envFrom\": [\n {\n \"configMapRef\": {\n \"name\":\n \"api-configs\"\n }\n },\n {\n \"secretRef\": {\n\n \"name\": \"api-secrets\"\n }\n },\n {\n \"configMapRef\":\n {\n \"name\": \"api-db\",\n \"optional\": true\n }\n\n }\n ],\n \"resources\": {\n \"limits\": {\n \"memory\":\n \"1Gi\"\n },\n \"requests\": {\n \"memory\": \"384Mi\"\n\n }\n }\n}\n```\n",
"podSecurityContext": "Example: \n```js\n{\n\"FsGroup\":1001,\n\"RunAsUser\":1001\n}\n```\n",
"EnvVariablesK8s": "Environment variables to be passed to the containers in the YAML format.\n```yaml\n---\n- Name: DB_HOST\n Value: abc.com\n- Name: DB_USER\n Value: myuser\n- Name: DB_PASSWORD\n Value: mypassword\n# using secrets\n- Name: SECRET_USERNAME\n valueFrom:\n secretKeyRef:\n name: mysecret\n key: username\n# using ConfigMap data \n- Name: LOG_LEVEL\n valueFrom:\n configMapKeyRef:\n name: env-config\n key: log_level\n```",
"NetworkMode": "If host network is selected then the container will share the same networking stack as host. This means any port in the container is accessible as is on host ip address without any explicit port mapping",
"ReplicationStrategy": "Replication Strategy can be used to manage the replica count for this Deployment/StatefulSet. Replication Strategy has three options as below.\n 1. **Static**: This option can be used to set fixed count for the replicas.\n 2. **Daemonset**: If this option is selected, DuploCloud will make sure pods will run on every host created in this tenant.\n 3. **Horizontal Pod Autoscaler**: This is more advanced option which user can use to automcatically scale up or down the replicas as per the different metrics like CPU Usage, Memory Usage, Ingress requests per second etc. [Click Here](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/) for more information on *Horizontal Pod Autoscaler*.\n",
"HPAConfig": "Horizontal pod autoscaler specifications as documented [here](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/). Sample value can be like below.\n```yaml\nmaxReplicas: 5\nmetrics:\n - resource:\n name: cpu\n target:\n averageUtilization: 80\n type: Utilization\n type: Resource\nminReplicas: 2\n```",
"envVariables": "Environment Variables to be passed in json format. Refer sample as below.\n```js\n{\n \"discovery.type\": \"single-node\",\n \"plugins.security.disabled\" : \"true\",\n \"compatibility.override_main_response_version\" : \"true\"\n}\n",
"ReplicaPlacement": "Select replication placement strategy.\n1. **First Available**: Replicas are not required to be spread across availability zones. Replicas prefer to be scheduled on different hosts, but this is not required.\n2. **Place on Different Hosts**: Replicas are not required to be spread across availability zones. Replicas are required to be placed on different hosts.\n3. **Spread Across Zones**: Replicas are required to be spread across availability zones. Replicas prefer to be scheduled on different hosts, but this is not required.\n4. **Different Hosts and Spread Across Zones**: Replicas are required to be spread across availability zones. Replicas are required to be placed on different hosts.\n",
"TolerateSpotInstance": "Select to run pods in Spot Instance. `tolerations` property will be added in `Other Container Config` to support running of pods in Spot Instance."
},
"emrClusterAdd": {
"Name": "Unique Cluster Name",
"ReleaseLabel": "Choose ReleaseLabel or to provide ReleaseLabel not in list choose Other.",
"IdleTimeBeforeShutdown": "Idle time (when no job is running) in hours before terminating the cluster.",
"StepConcurrencyLevel": "Number of steps that can be executed concurrently. This setting should depend on the available resources.",
"Applications": "Applications to be installed in cluster (master and slaves).\n```js\n[\n{\n\"Name\" : \"Hadoop\"\n},\n{\n\"Name\" : \"JupyterHub\"\n},\n{\n\"Name\" : \"Spark\"\n},\n{\n\"Name\" : \"Hive\"\n}\n]\n```\n",
"Configurations": "Configurations to be installed in cluster (master and slaves).\n```js\n[\n{\n\"Classification\" : \"hive-site\",\n\"Properties\" : {\n\"hive.metastore.client.factory.class\" : \"com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory\",\n\"spark.sql.catalog.my_catalog\" : \"org.apache.iceberg.spark.SparkCatalog\",\n\"spark.sql.catalog.my_catalog.catalog-impl\" : \"org.apache.iceberg.aws.glue.GlueCatalog\",\n\"spark.sql.catalog.my_catalog.io-impl\" : \"org.apache.iceberg.aws.s3.S3FileIO\",\n\"spark.sql.catalog.my_catalog.lock-impl\" : \"org.apache.iceberg.aws.glue.DynamoLockManager\",\n\"spark.sql.catalog.my_catalog.lock.table\" : \"myGlueLockTable\",\n\"spark.sql.catalog.sampledb.warehouse\" : \"s3://name-of-my-bucket/icebergcatalog\"\n}\n}\n]\n```",
"BootstrapActions": "BootstrapActions during cluster setup.\n```js\n[\n{\n\"Name\": \"InstallApacheIceberg\",\n\"ScriptBootstrapAction\": {\n\"Args\": [\n\"name\",\n\"value\"\n],\n\"Path\": \"s3://name-of-my-bucket/bootstrap-iceberg.sh\"\n}\n}\n]\n```",
"Steps": "Jobs to be executed on cluster. Please update s3 and py file.\n```js\n[\n{\n\"ActionOnFailure\" : \"CONTINUE\",\n\"Name\" : \"sparkstepTest\",\n\"HadoopJarStep\" : {\n\"Jar\" : \"command-runner.jar\",\n\"Args\" : [\n\"spark-submit\",\n\"s3://YOUR-S3-FOLDER/script3.py\"\n]\n}\n}\n]\n```",
"InstanceFleets": "InstanceFleets example.\n```js\n[\n{\n\"Name\" : \"Masterfleet\",\n\"InstanceFleetType\" : \"MASTER\",\n\"TargetSpotCapacity\" : 1,\n\"LaunchSpecifications\" : {\n\"SpotSpecification\" : {\n\"TimeoutDurationMinutes\" : 120,\n\"TimeoutAction\" : \"SWITCH_TO_ON_DEMAND\"\n}\n},\n\"InstanceTypeConfigs\" : [\n{\n\"InstanceType\" : \"m5.xlarge\",\n\"BidPrice\" : \"0.89\"\n}\n]\n},\n{\n\"Name\" : \"Corefleet\",\n\"InstanceFleetType\" : \"CORE\",\n\"TargetSpotCapacity\" : 1,\n\"TargetOnDemandCapacity\" : 1,\n\"LaunchSpecifications\" : {\n\"OnDemandSpecification\" : {\n\"AllocationStrategy\" : \"lowest-price\",\n\"CapacityReservationOptions\" : {\n\"UsageStrategy\" : \"use-capacity-reservations-first\",\n\"CapacityReservationResourceGroupArn\" : \"String\"\n}\n},\n\"SpotSpecification\" : {\n\"AllocationStrategy\" : \"capacity-optimized\",\n\"TimeoutDurationMinutes\" : 120,\n\"TimeoutAction\" : \"TERMINATE_CLUSTER\"\n}\n},\n\"InstanceTypeConfigs\" : [\n{\n\"InstanceType\" : \"m4.xlarge\",\n\"BidPriceAsPercentageOfOnDemandPrice\" : 100\n}\n]\n},\n{\n\"Name\" : \"Taskfleet\",\n\"InstanceFleetType\" : \"TASK\",\n\"TargetSpotCapacity\" : 1,\n\"LaunchSpecifications\" : {\n\"OnDemandSpecification\" : {\n\"AllocationStrategy\" : \"lowest-price\",\n\"CapacityReservationOptions\" : {\n\"CapacityReservationPreference\" : \"none\"\n}\n},\n\"SpotSpecification\" : {\n\"TimeoutDurationMinutes\" : 120,\n\"TimeoutAction\" : \"TERMINATE_CLUSTER\"\n}\n},\n\"InstanceTypeConfigs\" : [\n{\n\"InstanceType\" : \"m4.xlarge\",\n\"BidPrice\" : \"0.89\"\n}\n]\n}\n]\n\n```",
"InstanceGroups": "InstanceGroups example.\n```js\n[\n{\n\"Name\": \"Master\",\n\"Market\": \"ON_DEMAND\",\n\"InstanceRole\": \"MASTER\",\n\"InstanceType\": \"m4.large\",\n\"InstanceCount\": 1,\n\"EbsConfiguration\": {\n\"EbsBlockDeviceConfigs\": [\n{\n\"VolumeSpecification\": {\n\"VolumeType\": \"gp2\",\n\"SizeInGB\": 10\n},\n\"VolumesPerInstance\": 1\n}\n\n],\n\"EbsOptimized\": false\n}\n},\n{\n\"Name\": \"Core\",\n\"Market\": \"ON_DEMAND\",\n\"InstanceRole\": \"CORE\",\n\"InstanceType\": \"m4.large\",\n\"InstanceCount\": 1,\n\"EbsConfiguration\": {\n\"EbsBlockDeviceConfigs\": [\n{\n\"VolumeSpecification\": {\n\"VolumeType\": \"gp2\",\n\"SizeInGB\": 10\n},\n\"VolumesPerInstance\": 1\n}\n\n],\n\"EbsOptimized\": false\n}\n},\n{\n\"Name\": \"Task\",\n\"Market\": \"ON_DEMAND\",\n\"InstanceRole\": \"TASK\",\n\"InstanceType\": \"m4.large\",\n\"InstanceCount\": 1,\n\"EbsConfiguration\": {\n\"EbsBlockDeviceConfigs\": [\n{\n\"VolumeSpecification\": {\n\"VolumeType\": \"gp2\",\n\"SizeInGB\": 10\n},\n\"VolumesPerInstance\": 1\n}\n\n],\n\"EbsOptimized\": false\n}\n}\n\n]\n\n```",
"ManagedScalingPolicy": "ManagedScalingPolicy example.\n```js\n{\n\"ComputeLimits\" : {\n\"UnitType\" : \"Instances\",\n\"MinimumCapacityUnits\" : 2,\n\"MaximumCapacityUnits\" : 5,\n\"MaximumOnDemandCapacityUnits\" : 5,\n\"MaximumCoreCapacityUnits\" : 3\n}\n}\n```",
"VisibleToAllUsers": "Cluster Visible To All Users."
},
"addAcceleratorForm": {
"acceleratorCount": "Specify the number of GPUs.",
"acceleratorType": "Specify Accelerator Type(GPU Type). Google does not offer every instance/GPU combination in every region/zone. \nEnter the compatible type based on the instance type and zone. Example: `nvidia-tesla-a100` Accelarator Type is supported for Zone `us-west4-b` for Instance Type `a2-highgpu-1g`.\nFor GPU regions and zone availability, click [here](https://cloud.google.com/compute/docs/gpus/gpu-regions-zones).\nMore details on running GPUs in GKE Standard Pool, click [here](https://cloud.google.com/kubernetes-engine/docs/how-to/gpus).\n",
"gpuPartitionSize": "Specify GPC Partition Size. Example- `1g.5gb`. Refer [UserGuide](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).",
"maxSharedClientsPerGpu": "Specify maximum shared client.",
"gpuSharingStrategy": "Select GCP Sharing Strategy.",
"gpuDriverInstallationConfig": "Select the GPU Driver Installation Strategy\n1. **Default:** Installs the default driver version for your GKE version.\n2. **Latest:** Installs the latest available driver version for your GKE version. Available only for nodes that use Container-Optimized OS.\n3. **Disable GPU driver auto installation:** Disables GPU driver auto installation and needs manual installation.\n4. **Do not install any GPU driver:** Skips automatic driver auto installation."
},
"cwrule-add": {
"Rule Name": "The name of the rule.",
"Description": "The description of the rule.",
"ScheduleExpression": "The scheduling expression. Use Rate Expressions. For example, `rate(5 minutes)` or `rate(1 day)`. Refer [here](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#RateExpressions).",
"State": "Select the state of the rule."
},
"addTimestreamTable": {
"TableName": "The name of the Timestream table.",
"MemoryStoreRetentionHours": "The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766.",
"MagneticStoreRetentionPeriodInDays": "The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000.",
"EnableMagneticStorageWrites": "Select to enable magnetic store writes.",
"S3BucketError": "Select S3 location for storing the error logs.",
"S3FolderError": "Specify the S3 folder location name.",
"KmsKeyId": "Select the KMS key for the S3 location."
},
"VantaControl": {
"EnableVanta": "Enable Vanta monitor. GuardDuty will be enabled by default when this setting is enabled.",
"tenant": "Select the tenant you want to configure the Vanta monitoring.",
"guardDutyEmail": "Enter email id. SNS Topic subscription will be sent to this email id.",
"description": "Specify the description.",
"production": "When Production is set as True, SNS Topic will be configured in the Infrastructure region fo the Tenant.",
"contains": "Enables User Data.",
"owner": "Enter email of the owner."
},
"bucket": {
"bucketName": "Specify the name of the bucket.",
"EnableVersioning": "Select to enable bucket's Versioning configuration.",
"AllowPublicAccess": "Select to enable public access to a bucket.",
"Labels": "Specify key/value label pairs to assign to the bucket.",
"MultiRegion": "(Optional) Multi-Region for availability across largest area. If Location Type input not provided, by default Bucket will be created in `us (Multiple Regions in United States)`.",
"Region": "(Optional) Select Single Region. Location Type cannot be edited, after bucket creation.",
"RegionOptions": "Select Region. Location Type cannot be edited, after bucket creation.",
"MultiRegionOptions": "Select Region. If not provided Bucket will be created in `us (Multiple Regions in United States)`. Location Type cannot be edited, after bucket creation."
},
"AddStorageClass": {
"name": "Storage Class Name.",
"provisioner": "Each StorageClass has a provisioner that determines what volume plugin is used for provisioning PVs. This field must be specified.",
"reclaimPolicy": "PersistentVolumes that are dynamically created by a StorageClass will have the reclaim policy specified in the reclaimPolicy field of the class, which can be either `Delete`or `Retain`. If no reclaimPolicy is specified when a StorageClass object is created, it will default to `Delete`.\nPersistentVolumes that are created manually and managed via a StorageClass will have whatever reclaim policy they were assigned at creation.\n",
"volumeBindingMode": "The Volume Binding Mode field controls when volume binding and dynamic provisioning should occur. When unset, `Immediate` mode is used by default. \nThe `Immediate` mode indicates that volume binding and dynamic provisioning occurs once the PersistentVolumeClaim is created. \nFor storage backends that are topology-constrained and not globally accessible from all Nodes in the cluster, PersistentVolumes will be bound or provisioned without knowledge of the Pod's scheduling requirements. \nThis may result in unschedulable Pods. \nA cluster administrator can address this issue by specifying the `WaitForFirstConsumer` mode which will delay the binding and provisioning of a PersistentVolume until a Pod using the PersistentVolumeClaim is created. PersistentVolumes will be selected or provisioned conforming to the topology that is specified by the Pod's scheduling constraints. \n",
"allowVolumeExpansion": "PersistentVolumes can be configured to be expandable. This feature when set to true, allows the users to resize the volume by editing the corresponding PVC object.",
"parameters": "Storage Classes have parameters that describe volumes belonging to the storage class. Different parameters may be accepted depending on the provisioner. \nFor example, the value io1, for the parameter type, and the parameter iopsPerGB are specific to EBS. When a parameter is omitted, some default is used.\nSample Value EFS provisioner \n```yml\nprovisioningMode: efs-ap\nfileSystemId: fs-0f5c4430534311cf1\ndirectoryPerms: \"700\"\ngidRangeStart: \"1000\" # optional\ngidRangeEnd: \"2000\" # optional\nbasePath: \"/dynamic_provisioning\" # optional\n\n## For Azure Storage Account Mount\nresourceGroup: rg-duplo-storage-account # Name of the RG in which the Storage Acc is residing\nsecretName: storage-account-volume-secret # Name of the secret to use (the secret should contain storage account name and key)\nshareName: storage-account-file-share # Name of the Azure file share\n```\n",
"storageClassAnnotations": "Kubernetes annotations in key value format. Sample value is like below\n```yml\nkey1: value1\nkey2: value2\n```\n",
"storageClassLabels": "Kubernetes labels in key value format. Sample value is like below\n```yml\nkey1: value1\nkey2: value2\n```\n",
"allowedTopologies": "When a cluster operator specifies the `WaitForFirstConsumer` volume binding mode, it is no longer necessary to restrict provisioning to specific topologies in most situations. However, if still required, `allowedTopologies` can be specified.\nSample Value Allowed Toplogies\n```yml\n- matchLabelExpressions:\n - key: failure-domain.beta.kubernetes.io/zone\n values:\n - us-central-1a\n - us-central-1b\n```"
},
"taskdef": {
"Name": "Specify a name for your task definition. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.",
"Image": "Image for your docker container.",
"memory": "The amount of memory (in MiB) used by the task. It can be expressed as an integer using MiB, for example 1024, or as a string using GB, for example '1GB' or '1 gb'.",
"vcpus": "The number of CPU units used by the task. It can be expressed as an integer using CPU units, for example 1024, or as a string using vCPUs, for example '1 vCPU' or '1 vcpu'.",
"Port": "Port mappings allow containers to access ports on the host container instance to send or receive traffic.",
"Protocol": "Protocol for this port.",
"environmentvars": "Environment variables to be passed to the container in the JSON format as below.\n```js\n[\n {\n \"Name\": \"<env_var_name>\"\n \"Value\": \"<env_var_value>\"\n }\n]\n```\n",
"command": "The command that is passed to the container. This parameter maps to **Cmd** in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information about the Docker CMD parameter, click[here](https://docs.docker.com/engine/reference/builder/#cmd).\nFollowing is the example value to make you container sleep for debugging.\n```js\n[\n \"sleep\",\n \"500000\"\n]\n```\n",
"healthcheck": "Health check configuration which helps to determine if container is healthy in a JSON format. JSON has following attributes.\n1. **command**: A string array representing the command that the container runs to determine if it is healthy. The string array can start with CMD to execute the command arguments directly, or CMD-SHELL to run the command with the container's default shell. If neither is specified, CMD is used by default.\n2. **interval**: The time period in seconds between each health check execution. You may specify between 5 and 300 seconds. The default value is 30 seconds.\n3. **timeout**: The time period in seconds to wait for a health check to succeed before it is considered a failure. You may specify between 2 and 60 seconds. The default value is 5 seconds.\n4. **retries**: The number of times to retry a failed health check before the container is considered unhealthy. You may specify between 1 and 10 retries. The default value is three retries.\n5. **startPeriod**: The optional grace period within which to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. You may specify between 0 and 300 seconds. The startPeriod is disabled by default.\nFollowing is the example to perform health check by calling an API\n```js\n{\n \"command\" : [ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]\n \"interval\": 20,\n \"timeout\" : 5,\n \"retries\" : 10,\n \"startPeriod\" : 20\n}\n```\n",
"Secret": "This is another way of setting up the environment values from AWS secrets in a JSON format. \n```js\n[\n {\n \"Name\": \"<env_var_name>\",\n \"ValueFrom\": \"<aws_secret_arn>:<key_in_aws_secret>::\"\n },\n {\n \"Name\": \"DB_HOST\",\n \"ValueFrom\": \"arn:aws:secretsmanager:us-west-2:2432432434343:secret:db-secret:DB_HOST::\"\n }\n]\n```\n",
"containerotherconfig": "All other advance properties documented [here](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeregisterTaskDefinition.html) applicable only for the Fargate in a JSON format.\nSample value can look like below.\n```js\n{\n \"LogConfiguration\": {\n \"LogDriver\": {\n \"Value\": \"awslogs\"\n },\n \"Options\": {\n \"awslogs-create-group\": \"true\",\n \"awslogs-group\": \"/ecs/duploservices-nonprod-api\",\n \"awslogs-region\": \"us-west-2\",\n \"awslogs-stream-prefix\": \"ecs\"\n },\n \"SecretOptions\": []\n }\n}\n```\n",
"volumes": "Volumes which can be mounted within container as documented [here](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#volumes) in a json format.\nSample Value with all the possible options can look like as below.\n```js\n[ \n { \n \"dockerVolumeConfiguration\": { \n \"autoprovision\": boolean,\n \"driver\": \"string\",\n \"driverOpts\": { \n \"string\" : \"string\" \n },\n \"labels\": { \n \"string\" : \"string\" \n },\n \"scope\": \"string\"\n },\n \"efsVolumeConfiguration\": { \n \"authorizationConfig\": { \n \"accessPointId\": \"string\",\n \"iam\": \"string\"\n },\n \"fileSystemId\": \"string\",\n \"rootDirectory\": \"string\",\n \"transitEncryption\": \"string\",\n \"transitEncryptionPort\": number\n },\n \"fsxWindowsFileServerVolumeConfiguration\": { \n \"authorizationConfig\": { \n \"credentialsParameter\": \"string\",\n \"domain\": \"string\"\n },\n \"fileSystemId\": \"string\",\n \"rootDirectory\": \"string\"\n },\n \"host\": { \n \"sourcePath\": \"string\"\n },\n \"name\": \"string\"\n }\n]\n```\n",
"Essential_1": "If enabled, container fails or stops for any reason, all other containers that are part of the task are stopped. \nIf disabled, then its failure doesn't affect the rest of the containers in a task."
},
"AddBYOHHost": {
"Name": "A friendly name to BYOH host.",
"DirectAddress": "IPv4 address to which DuploCloud will use to communicate with agent installed on your host.",
"FleetType": "Fleet type represents the type of container orchestrator running your Host.\n1. **Linux Docker/Native:** Select this option if operating system running on your host is Linux.\n2. **Docker Windows:** Select this option if operating system running on your host is Windows\n3. **None:** Select this option if no agent/container orchestrator is running on the host.\n",
"Username": "Username to login to your host. This is an optional field.",
"Password": "Password to login to your host. This is an optional field.",
"PrivateKey": "Private Key to login to your host using SSH. This is again an optional field. User can either specify `Password` or `Private Key`."
},
"emr_serverless_add_sub_form_basics": {
"Name": "The name of the application.",
"ReleaseLabel_other": "The EMR release version associated with the application.",
"Type": "The type of application you want to start, such as `spark` or `hive`.",
"Architecture": "The CPU architecture of an application. Valid values are `arm64 new` or `x86_64`. Default value is `x86_64`.",
"DriverImageUri": "To use the custom Image URI for drivers in your application, specify the ECR location of the image `account-id.dkr.ecr.region-id.amazonaws.com/your-Amazon-ECR-repo-name[:tag] or [@sha]`.\nMust be compatible with the selected EMR release [6.9.0 and above] and located in the same region.\n",
"ExecutorImageUri": "To use the custom Image URI for executors, specify the ECR location of the image `account-id.dkr.ecr.region-id.amazonaws.com/your-Amazon-ECR-repo-name[:tag] or [@sha]`.\nMust be compatible with the selected EMR release [6.9.0 and above] and located in the same region.\n"
},
"backupRetentionPeriod": {
"BackupRetentionPeriod": "Specify in days to save automated backups of your DB. Valid values 1-35."
},
"AddK8sDaemonset": {
"Name": "Specify the name of the Daemonset, must be unique",
"IsTenantLocal": "When set to true the daemon set will deploy only in hosts within this tenant as against an entire cluster",
"Configuration": "Add Daemonset Configurations."
},
"firestoreDatabase": {
"Name": "Specify the Name for your database.",
"type": "Select the type of the database.",
"locationId": "Select the location of the database.",
"pointInTimeRecoveryEnablement": "Choose to enable the PointInTimeRecovery feature on this database. If `POINT_IN_TIME_RECOVERY_ENABLED` is selected, reads are supported on selected versions of the data from within the past 7 days. Default value is `POINT_IN_TIME_RECOVERY_DISABLED`.",
"deleteProtectionState": "Select the State of delete protection for the database. When delete protection is enabled, this database cannot be deleted. Default is set to `DELETE_PROTECTION_DISABLED`."
},
"jobDefinition": {
"OtherContainerProperties": "All Other Container Properties for the Register Job Definition Request parameters as documented [here](https://docs.aws.amazon.com/batch/latest/APIReference/API_RegisterJobDefinition.html) only for the **ContainerProperties** section.\nSample Value for the Other Container properties to override **Command** is as below\n```js\n{\n \"Command\" : [\"sleep\", \"5\"]\n}\n```\n",
"OtherJobProperties": "All Other Job Properties for the Register Job Definition Request parameters as documented [here](https://docs.aws.amazon.com/batch/latest/APIReference/API_RegisterJobDefinition.html) except the **ContainerProperties** section.\nSample Value for the Other Job properties to set **RetryStrategy** is as below\n```js\n{\n \"RetryStrategy\": {\n \"EvaluateOnExit\": [\n {\n \"Action\": \"EXIT\",\n \"OnExitCode\": \"1*\",\n \"OnReason\": \"reason*\",\n \"OnStatusReason\": \"status\"\n }\n ]\n }\n}\n```"
},
"cloudfront-add": {
"Name": "Any friendly name to the cloudfront distribution",
"certificate": "Certificate ARN to be used by the distribution. Make sure the certificate is in the us-east-1 region.",
"DefaultRootObject": "Default root object object that will be returned while accessing root of the domain. Example: index.html. Should not start with \"/\"",
"defaultCachePolicyId": "Default Cache policy applied to the Distribution\n [Click Here](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) to know more about Amazon Managed policy\n You can also create your custom cache policy and provide the ID of the policy\n ",
"defaultTargetOriginId": "Target Origin to apply the cache policy. This Origin will be used as the default (\"*\") source by the cloudfront",
"itemAliasName0": "The number of CNAME aliases, that you want to associate with this distribution. End User will be able to access the disribution using this URL",
"itemDomainName0": "Origin Domain Name where the content will be fetched by the CDN. It can be S3 bucket or custom URL (api service url etc)",
"itemId0": "Unique Id for the origin. This Id will be refered in default cache behavior and custom cache behavior.",
"itemPath0": "Path that will suffixed to the origin domain name (url) while fetching content.\n For S3: If the content that need to be served is under prefix static. You should enter \"/static\" in path.\n For custom url: If all the API have a prefix like v1. You should enter \"/v1\" in path ",
"customCachePolicyId0": "Cache policy applied to the this custom path\n [Click Here](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) to know more about Amazon Managed policy\n You can also create your custom cache policy and provide the ID of the policy\n ",
"customCachePathPattern0": "The pattern (for example, images/*.jpg ) that specifies which requests to apply the behavior to. When CloudFront receives a viewer request, the requested path is compared with path patterns in the order in which cache behaviors are listed in the distribution.",
"customTargetOriginId0": "Target Origin to apply the cache policy. This Origin will be used as source by the cloudfront if the request URL matches the path pattern"
},
"AddSbAccessPolicy": {
"Name": "Specify the name of the Authorization Rule.",
"canManage": "Enable to configure authorization rule to have manage permissions to the ServiceBus Queue.",
"canSend": "Enable to configure authorization rule to have Send permissions to the ServiceBus Queue.",
"canListen": "Enable to configure authorization rule to have Listen permissions to the ServiceBus Queue."
},
"AddMySQLFlexi": {
"Name": "Specify the name which should be used for this MySQL Flexible Server.",
"AdminUsername": "Specify the Administrator Login for the MySQL Flexible Server.",
"SkuTier": "Select the compute tier. Hardware list would be populated based on the Tier selection.",
"SkuName": "Select the Hardware as per the requirement.",
"AdminPassword": "Specify the Password associated with the administrator_login for the MySQL Flexible Server.",
"Version": "Select the version of the database.",
"StorageSizeGB": "Select the maximum storage allowed. Possible values are between `20` and `16384`.",
"Iops": "Specify the storage IOPS for the MySQL Flexible Server.",
"AutoGrow": "Select to enable Storage Auto Grow.",
"SubnetId": "Select the subnet to create the MySQL Flexible Server.",
"BackupRetentionDays": "Enter the backup retention days. Accepted values are between 1 and 35 days",
"GeoRedundantBackup": "Select to enable geo redundant backup.",
"ProcessorType": "Select the processor.",
"HighAvailability": "Select value\n1. **Disabled** - HighAvailability Mode disabled.\n2. **Same zone** - a standby server is always available within the same zone as the primary server\n3. **Zone redundant** - a standby server is always available within another zone in the same region as the primary server\nAuto-grow would be by default enabled when high availability zone is enabled."
},
"batchQueue": {
"tags": "Key value pair of tags to be assigned to queues\nSample Value for tags configuration:\n```js\n{\n \"key1\" : \"value1\",\n \"key2\" : \"valuev\"\n}\n```"
},
"AddRdsReplica": {
"Identifier": "Please provide a unique identifier for the RDS replica instance that is unique across all tenants. The cluster identifier is used to determine the cluster's endpoint. An identifier cannot end with a hyphen or contain two consecutive hyphens or start with a number. It should also be 49 characters or shorter and must be in all lowercase.",
"Engine": "Select Database engine for creating RDS instance.",
"EngineVersion": "Select database engine version. If not selected latest version will be used while creating database. Select type as 'Other' if you don't see desired option in dropdown list.",
"Size": "Instance size for RDS. Select type as 'Other' if you don't see desired option in dropdown list.",
"AvailabilityZone": "Select availability zone for high availability."
},
"AddNamespaceQueue": {
"Name": "Specify name of the ServiceBus Queue resource.",
"MaxSizeInMegabytes": "Select the maximum size of memory allocated for the queue.",
"LockDuration": "Specify the amount of time in seconds that the message is locked for other receivers.",
"MaxDeliveryCount": "Specify value which controls when a message is automatically dead lettered.",
"AutoGrow": "Enable/ Disable to control whether the Queue has dead letter support when a message expires.",
"EnablePartitioning": "Enable/ Disable to control whether to enable the queue to be partitioned across multiple message brokers."
},
"awsAccountSecurity": {
"EnableSecurityHub": "Enable AWS Security Hub in any region where there is infrastructure managed by DuploCloud.",
"EnableAllSecurityHubRegions": "Enable AWS Security Hub in all AWS regions managed by DuploCloud.",
"SecurityHubMembershipType": "Enable or disable multi-account AWS Security Hub:\n\n- **Local**: Disable any multi-account administration by this account in AWS Security Hub.\n- **Centralized (in this account)**: Allow this account to manage other accounts in AWS Security Hub.\n- **Centralized (in another account)**: Allow this account to be managed by another DuploCloud in AWS Security Hub. Disable any multi-account administration by this account in AWS Security Hub.\n",
"SecurityHubAdminAccount": "The AWS Account ID of the Security Hub administrator account.",
"ConfigLogBucketType": "Enable or disable cross-account AWS Config logging:\n\n- **Local**: AWS Config will log to an S3 bucket owned by this account.\n- **Centralized (in this account)**: AWS Config will log to an S3 bucket owned by this account. Allow AWS Config in other accounts to log to this same bucket.\n- **Centralized (in another account)**: AWS Config will log to an S3 bucket owned by another account.\n",
"ConfigLogBucketName": "The S3 bucket name that AWS Config will log to.",
"EnableGuardDuty": "Enable AWS Guard Duty in all AWS regions managed by DuploCloud.",
"EnableInspector": "Enable AWS Inspector in any region where there is infrastructure managed by DuploCloud.",
"EnableAllInspectorRegions": "Enable AWS Inspector in all AWS regions managed by DuploCloud.",
"IgnoreDefaultEbsEncryption": "Normally, DuploCloud enables EBS Default Encryption for all regions in which you deploy infrastructure.\n\nWhen this box is checked, DuploCloud will ignore the EBS Default Encryption settings when creating any new infrastructure.\n\nHowever, you can still edit the `EBS Encryption by Default` setting for your infrastructure - to enable EBS encryption by default, for the entire AWS region.\n",
"EnablePasswordPolicy": "Enable an account-level IAM User password policy:\n\n- Minimum password length is 14 characters\n- Require at least one uppercase letter from Latin alphabet (A-Z)\n- Require at least one lowercase letter from Latin alphabet (a-z)\n- Require at least one number\n- Require at least one non-alphanumeric character (! @ # $ % ^ & * ( ) _ + - = [ ] { } | ')\n- Password expires in 90 day(s)\n- Allow users to change their own password\n- Remember last 24 password(s) and prevent reuse\n",
"EnableCloudTrail": "Enable a multi-region CloudTrail for this AWS account.\n\nEnabling this feature will tell DuploCloud to:\n\n- Create and manage a multi-region CloudTrail in this AWS account\n- Create a CloudWatch log group named `/cloudtrail/duplo` that receives CloudTrail events\n- Create and manage an S3 bucket that receives CloudTrail log files\n",
"CloudTrailLogBucketType": "Enable or disable cross-account AWS CloudTrail logging:\n\n- **Local**: AWS CloudTrail will log to an S3 bucket owned by this account.\n- **Centralized (in this account)**: AWS CloudTrail will log to an S3 bucket owned by this account. Allow AWS CloudTrail in other accounts to log to this same bucket.\n- **Centralized (in another account)**: AWS CloudTrail will log to an S3 bucket owned by another account.\n",
"CloudTrailLogBucketAccount": "The AWS Account where AWS CloudTrail S3 logs will reside in.",
"CloudTrailLogBucketName": "The S3 bucket name that AWS CloudTrail will log to.",
"CloudTrailLogBucketKmsKeyId": "The KMS Key ID that AWS CloudTrail S3 logs will use.",
"EnableVpcFlowLogs": "Enable VPC flow logs for all VPCs created by DuploCloud.",
"DeleteDefaultVpcs": "Delete default VPCs in all AWS regions managed by DuploCloud.",
"DeleteDefaultNaclRules": "Delete default NACL rules for all VPCs created by DuploCloud.",
"RevokeDefaultSgRules": "Revoke default Security Group rules for all VPCs created by DuploCloud.",
"EnableCisCloudTrailCloudWatchAlarms": "Enables CIS CloudTrail CloudWatch Alarms in all AWS regions managed by DuploCloud.\n\nEnabling this feature and specifying an email address to receive alerts will satisfy AWS CIS controls 3.1 through 3.14.\n",
"CisCloudWatchAlarmNotificationEmail": "Specifies an email address that should receive the CIS CloudTrail CloudWatch Alarms.\n\nIn order to satisfy AWS CIS controls 3.1 through 3.14, the email recipient must first confirm the subscription to receive the alerts from AWS."
},
"S3bucketReplication": {
"Name": "Specify replica rule name.",
"selectTenant": "Select the Tenant.",
"destinationS3": "Select the destination S3 bucket for replication",
"Priority": "Specify the Priority associated with the rule. Priority must be unique between multiple rules.",
"DeleteMarkerReplication": "Enable/Disable if delete markers needs be replicated.",
"changeStorage": "Select to set the Storage Class used to store the object.",
"StorageClass": "Select the Storage Class from the supported list to store the objects."
},
"databrick": {
"Name": "Enter Databricks workspace name.",
"Tier": "Choose between Standard or Premium. View Pricing details [here](https://azure.microsoft.com/en-us/pricing/details/databricks/).",
"DisablePublicIP": "If you set this to `Disabled`, no user access is permitted from the public internet.\nIf you set this to `Enabled`, users and REST API clients on the public internet can access Azure Databricks\n",
"VnetIntegration": "Choosing `Enabled` allows you to deploy an Azure Databricks workspace in your virtual network.",
"PrivateSubnet": "Use the default private subnet name.",
"PublicSubnet": "Use the default public subnet name."
},
"EditGceVmHost": {
"Tags": "Enter Network tags.",
"Labels": "Specify Labels\n```js\n{\n\"key\" : \"value\"\n}\n```\n",
"Metadata": "Configure Compute Engine instance metadata.\n```js\n{\n\"key\" : \"value\"\n}\n```"
},
"AddSecretProviderClass": {
"name": "Secret Provider Class Name",
"provider": "Secret Provider",
"parameters": "The parameters section contains the details of the mount request and contain one of the three fields:\n* objects: This is a string containing a YAML declaration (described below) of the secrets to be mounted.\n ```yaml\n objects:\n - objectName: \"MySecret\"\n objectType: \"secretsmanager\"\n ```\nThe objects field of the SecretProviderClass can contain the following sub-fields:\n* objectName: This field is required. It specifies the name of the secret or parameter to be fetched. For Secrets Manager this is the [SecretId](https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html#API_GetSecretValue_RequestParameters) parameter and can be either the friendly name or full ARN of the secret. For SSM Parameter Store, this must be the [Name](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_GetParameter.html#API_GetParameter_RequestParameters) of the parameter and can not be a full ARN.\n* objectType: This field is optional when using a Secrets Manager ARN for objectName, otherwise it is required. This field can be either \"secretsmanager\" or \"ssmparameter\".\n* objectAlias: This optional field specifies the file name under which the secret will be mounted. When not specified the file name defaults to objectName.\n* jmesPath: This optional field specifies the specific key-value pairs to extract from a JSON-formatted secret. You can use this field to mount key-value pairs from a properly formatted secret value as individual secrets.\n If you use the jmesPath field, you must provide the following two sub-fields:\n * path: This required field is the [JMES path](https://jmespath.org/specification.html) to use for retrieval\n * objectAlias: This required field specifies the file name under which the key-value pair secret will be mounted. \n",
"secretObjects": "In some cases, you may want to create a Kubernetes Secret to mirror the mounted content. Use the optional secretObjects field to define the desired state of the synced Kubernetes secret objects. The volume mount is required for the Sync With Kubernetes Secrets\nYou can find more information about 'secretObjects' [here](https://secrets-store-csi-driver.sigs.k8s.io/topics/sync-as-kubernetes-secret.html)\n",
"secretProviderClassAnnotations": "Key Value pair of annotations to be added to SecretProviderClass\nSample Value Can be as bellow\n```yaml\nkey1: value1\nkey2: value2\n```\n",
"secretProviderClassLabels": "Key Value pair of labels to be added to SecretProviderClass\nSample Value Can be as bellow\n```yaml\nkey1: value1\nkey2: value2\n```"
},
"addHelmRelease": {
"Name": "Provide Name for the Helm Chart.",
"ReleaseName": "Provide release name to identify specific deployment of helm chart.",
"ChartName": "Provide unique name to specify the name of the chart.",
"ChartVersion": "Specify Helm Chart Version.",
"ChartReconcileStrategy": "Defaults to `Chart Version`. No new chart artifact is produced on updates to the source unless the version is changed in HelmRepository.\nUse `Revision` to produce new chart artifact on change in source revision.",
"SourceType": "Set as default HelmRepository.",
"SourceName": "Select the Helm Repo configured from the list.",
"Values": "Specify in yaml format. This field allows users to customize Helm charts. \nExample\n```yaml\n replicaCount: 2\n serviceAccount:\n create: true\n```"
},
"airflow_add_sub_form_basics": {
"Name": "The name of the Apache Airflow Environment.",
"AirflowVersion_other": "Airflow version of your environment, will be set by default to the latest version that MWAA supports.",
"WeeklyMaintenanceWindowStart_day": "Specify the start date for the weekly maintenance window.",
"WeeklyMaintenanceWindowStart_time": "Specify the time for the weekly maintenance window.",
"KmsKey": "Choose to encrypt.",
"WebserverAccessMode_bool": "Enable if you want to access the webserver over the internet."
},
"toggleIAMAuth": {
"EnableIamAuth": "Enable to set IAM database authentication. For supported regions and engine versions, refer [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RDS_Fea_Regions_DB-eng.Feature.IamDatabaseAuthentication.html)"
},
"AddGceVmHost": {
"Name": "Specify name for your GCE VM.",
"Zone": "Select the zone that the vm should be created in.",
"InstanceType": "Select the Machine Type.",
"AgentPlatform": "Select container orchestration platform.\n1. **Linux Docker/Native:** Select this option if you want to run docker native services which are Linux based.\n2. **None:** This option has to be selected when EC2 instance is not used for running containers.\n",
"ImageId": "Enter the image name. Specify in this format `projects/{project}/global/images/{image}`.",
"EnablePublicIpAddress": "Enable to assign public IP address.",
"Tags": "Enter Network tags.",
"AcceleratorType": "Specify Accelerator Type(GPU Type). Google does not offer every instance/GPU combination in every region/zone.\nEnter the compatible type based on the instance type and zone. For example- `nvidia-tesla-t4` Accelarator Type is supported for `n1-standard-1` Image Type for Zone `us-west4-a`.\nFor GPU regions and zone availability, click [here](https://cloud.google.com/compute/docs/gpus/gpu-regions-zones).\n",
"AcceleratorCount": "Specify the number of GPUs.",
"Labels": "Specify Labels\n```js\n{\n\"key\" : \"value\"\n}\n```\n",
"Metadata": "Configure Compute Engine instance metadata.\n```js\n{\n\"key\" : \"value\"\n}\n```\n",
"Base64UserData": "Configure startup-script metadata in below format. ```echo \"Hello from startup script!\" > /test.txt```"
},
"azureAddVM": {
"Name": "The name of the Virtual Machine.",
"Subnets": "Select the subnet.",
"InstanceType": "The size of the Virtual Machine.",
"ImageId": "Choose the Image for the VM. Image should be compatible with the agent platform. Select type as \"Other\" if you don't see desired option in dropdown.",
"publicIp": "Choose `Enable` to use a public IP address if you want to communicate with the virtual machine from outside the virtual network.",
"Encryption": "Choose to encrypt the given VM.",
"diskSize": "Disk size in GB.",
"allocationTags": "Allocation tags is the simplest way to constraint containers/pods with hosts/nodes. DuploCloud/Kubernetes Orchestrator will make sure containers will run on the hosts having same allocation tags.",
"joinDomain": "Choose Yes to join the VM to the managed domain.",
"joinLogAnalytics": "Select Yes to connect Azure virtual machines to Log Analytics.",
"Username": "The administrator username for the VM",
"Password": "The administrator password for the VM.",
"Name0": "Specify the Disk name.",
"VolumeId0": "Logical unit number(LUN) of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM.",
"Size0": "Disk size in GB.",
"VolumeType0": "Choose the Storage Type.",
"ComputerName": "Enter to set Computer Name for your Virtul Machine. If not specified, will be set same as the name input provided.",
"DiskControllerType": "Default value set is `SCSI`. If you want to set `NVME`, specify the supported Instance Size. Check [here](https://learn.microsoft.com/en-us/azure/virtual-machines/ebdsv5-ebsv5-series#ebdsv5-series-nvme).",
"installDuploNativeAgent": "Enable to install agent on the host. Supports only Linux/Ubuntu.",
"joinDomainType": "(optional). Select Domain Type for the host.",
"timezone": "Select the timezone for the host.",
"base64": "Base64 encoded user data.",
"securityType": "Select `Standard` or `Trusted Launch` security type. Defaults to `Standard`.\nUse Trusted Launch for the security of `Generation 2` virtual machines (VMs). [Supported Sizes](https://learn.microsoft.com/en-us/azure/virtual-machines/trusted-launch#virtual-machines-sizes)\n",
"secureboot": "Select to enable Secure Boot for your VM.",
"vtpm": "Select to enable virtual Trusted Platform Module (vTPM) for Azure VM.",
"encryptAtHost": "Select to enable Encryption at host."
},
"addStorageAccount": {
"Name": "Enter name of the storage account. This must be unique across the entire Azure service, not just within the resource group."
},
"sqlDatabase": {
"Name": "Enter the name of the instance. Use lowercase letters, numbers, or hyphens. Start with a letter.",
"DatabaseVersion": "Select the MySQL, PostgreSQL or SQL Server version to use.",
"Tier": "Select from the list of all available machine types (tiers) for the database.",
"RootPassword": "Specify the root password. Required for MS SQL Server.",
"DataDiskSizeGb": "Specify the size of data disk, in GB.",
"Labels": "Specify labels in below format\n```js\n{\n \"key\" : \"value\"\n}\n```\n"
},
"addLambda": {
"FunctionName": "Unique name for your Lambda Function.",
"Description": "Description of what your Lambda Function does.",
"PackageType": "Lambda deployment package type.\n1. **Zip:** Upload a .zip file as your deployment package using Amazon Simple Storage Service (Amazon S3).\n2. **Image:** Upload your container images to Amazon Elastic Container Registry (Amazon ECR).\n",
"RuntimeValue": "Runtime is required if the deployment package type is a `Zip` file archive. Select the runtime compatible to the function code.",
"MemorySize": "Amount of memory in MB your Lambda Function can use at runtime.",
"TimeoutInt": "The amount of time in seconds that Lambda allows a function to run before stopping it.",
"Handler": "The name of the method within your code that Lambda calls to execute your function. Handler is required if the deployment package type is a `Zip` file archive.",
"EnvironmentVariables": "Map of environment variables that are accessible from the function code during execution.\nExample:\n```js\n{\n \"Variables\": {\"foo\":\"bar\"}\n}\n```\n",
"S3Bucket": "Select the S3 bucket location containing the function's deployment package.",
"S3BucketKey": "Enter the S3 key of an object containing the function's deployment package.",
"ImageConfiguration": "Specify container image configuration values.\nExample:\n```js\n{\n \"Command\": [\n \"app.handler\"\n ],\n \"EntryPoint\": [\n \"/usr/local/bin/python\",\n \"-m\",\n \"awslambdaruntimeclient\"\n ],\n \"WorkingDirectory\": \"/var/task\"\n}\n```\n",
"ImageUri": "Enter the ECR image URI containing the function's deployment package.",
"EphemeralStorage": "Specify Ephemeral Storage in MB, allows you to configure the storage upto 10240 MB. The default value set to 512 MB."
},
"publicIpPrefixForm": {
"Name": "Specify the name of the Public IP Prefix resource.",
"PrefixLength": "Select from the Public IP prefix sizes.",
"ResourceType": "Select `ApplicationGateway`. public IP from a prefix will be used for your gateway.\nIf not selected, Public IP Prefix gets created with type as Other. This will get used for Virtual machines resources."
},
"addSecretObjects": {
"Name": "Kubernetes secret name. It should only contain alphanumeric characters and '-'.\nSelect the parameters which you want to include into the secrets. You can also change the name of the parameter.\n"
},
"batchJob": {
"OtherJobProperties": "All Other Submit Job Definition Request parameters as documented\n [here](https://docs.aws.amazon.com/batch/latest/APIReference/API_SubmitJob.html#Batch-SubmitJob-request-containerOverrides).\nSample\n Value for the Other Job properties to set **ContainerOverrides** is as below\n```js\n{\n\n \"ContainerOverrides\" : {\n \"Command\" : [ \"echo\", \"hiii\"],\n \"Environment\":\n [\n {\n \"name\": \"KEY1\",\n \"value\": \"123\"\n\n }\n ]\n }\n\n}\n```"
},
"addSecretParams": {
"Name": "Select AWS Secret/SSM Parameter",
"objectAlias": "The file name under which the secret will be mounted. When not specified the file name defaults to objectName.",
"specificKeys": "You can enable this field to mount key-value pairs from a properly formatted secret value as individual secrets.\nYou can also selectively choose subset of keys from JSON and mount them with different name in the pods."
},
"AddResourceQuota": {
"name": "Resource Quota Name",
"cpu": "This input can be used to limit the CPU consumed by all pods in a non-terminal state. \nFor 5 CPUs value should be **5**, for half CPU value should be **500m**. \n",
"memory": "This input can be used to limit the memory consumed by all pods in a non-terminal. \nFor 512Mb value should be **512Mi**, For 5GB of ram value should be **5Gi**. \n",
"otherLimits": "You can use this input to all other quotas like storage, Object count etc. Value has to be provided in YAML format like below. \n```yaml\nrequests.storage: 500Gi\npersistentvolumeclaims: 10\n```\n",
"scopeSelector": "Each quota can have an associated set of scopes. A quota will only measure usage for a resource if it matches the intersection of enumerated scopes. \nWhen a scope is added to the quota, it limits the number of resources it supports to those that pertain to the scope. Resources specified on the quota outside of the allowed set results in a validation error. \nValue has to be provided in YAML format as below. \n```yaml\nmatchExpressions:\n- operator : In\n scopeName: PriorityClass\n values: [\"medium\"]\n```"
},
"azureAddAgentPool": {
"name": "Select Id to create a node pool",
"InstanceType": "The size of the virtual machines that will form the nodes in this node pool.",
"MinSize": "Set the minimum node count.",
"MaxSize": "Set the maximum node count.",
"DesiredCapacity": "Set the Desired capacity for the autoscaling.",
"AllocationTag": "Allocation tags is the simplest way to constraint containers/pods with hosts/nodes. DuploCloud/Kubernetes Orchestrator will make sure containers will run on the hosts having same allocation tags.",
"enableAutoScaling": "Enable this when you want kubernetes cluster to sized correctly for the current running workloads.",
"azureAddAgentPoolMaxPods": "Specify to adjust the maximum number of pods per node.",
"AvailabilityZones": "(Optional)Select availability zones for your AKS agent pool nodes.",
"ScaleSetPriority": "The Virtual Machine Scale Set priority.\n 1. **Regular**: Creates a regular agent pool node with standard priority.\n 2. **Spot**: Create Spot AKS agent pool nodes.\n",
"ScaleSetEvictionPolicy": "The eviction policy define what happens when Azure evicts.",
"SpotMaxPrice": "(Optional) Defines the max price when creating or adding a Spot VM agent pool.",
"MaxPods": "Enter the maximum number of pods per node. Default is 30."
},
"redisInstance": {
"Name": "The ID of the instance or a fully qualified identifier for the instance.",
"DisplayName": "Specify optional user-provided name for the instance.",
"Tier": "Select the service tier of the instance.\n 1. **Basic**: *Tier0* standalone instance\n 2. **Standard**: *Tier1* highly available primary/replica instances\n",
"RedisVersion": "Specify the version of Redis software.",
"MemorySizeGb": "Redis memory size in GiB.",
"AuthEnabled": "Indicates whether OSS Redis AUTH is enabled for the instance. If set to \"true\" AUTH is enabled on the instance.",
"TransitEncryptionEnabled": "Select to enable the TLS mode of the Redis instance.",
"ReadReplicaEnabled": "Select to enable Read replica mode.",
"ReplicasCount": "Specify the number of replica nodes. The valid range for the Standard Tier with read replicas enabled is [1-5].",
"Redis Config": "Specify Redis Configuration. Refer [here](https://cloud.google.com/memorystore/docs/redis/supported-redis-configurations?authuser=1&_ga=2.150484978.-535098261.1654188041).\nExample: \n```js\n{\n \"activedefrag\":\"yes\",\n \"maxmemory-policy\":\"allkeys-lru\"\n}\n```\n",
"Labels": "Specify labels in below format\n```js\n{\n \"key\" : \"value\"\n}\n```\n"
},
"AddElasticSearch": {
"Name": "Name of the domain.",
"Version": "Select the version of Elasticsearch to deploy.",
"DataInstanceType": "Select the Instance type of data nodes in the cluster.",
"DataInstanceCount": "Provide Data Instance Count.",
"VolumeSize": "Provide Storage in Gb.",
"DedicatedMasterType": "Select the Instance type of the dedicated master nodes in the cluster.",
"DedicatedMasterCount": "Provide the Number of dedicated master nodes in the cluster.",
"Kms": "Select the key to encrypt the Elasticsearch domain with.",
"RequireSSL": "Enable if Https is required.",
"UseLatestTlsCipher": "Select to use latest TLS Cipher.",
"EnableNodeToNodeEncryption": "Select to enable node-to-node encryption.",
"WarmEnabled": "Enable UltraWarm storage.",
"WarmType": "Select the Instance type for the OpenSearch cluster's warm nodes.",
"WarmCount": "Specify the number of warm nodes in the cluster. Valid values are between 2 and 150.",
"ColdStorageOptions": "Select to enable cold storage for an OpenSearch domain."
},
"dataPipelineAdd": {
"Name": "Unique Data Pipeline Name",
"PipeLineDef": "Please provide Data Pipeline defination json. Provide EmrCluster details If using existing EmrCluster.\n```js\n{\n\"PipelineObjects\": [\n{\n\"Id\": \"Default\",\n\"Name\": \"Default\",\n\"Fields\": [\n{\n\"Key\": \"failureAndRerunMode\",\n\"StringValue\": \"CASCADE\"\n},\n{\n\"Key\": \"pipelineLogUri\",\n\"StringValue\": \"s3://YOUR-S3-FOLDER/logs/data-pipelines/\"\n},\n{\n\"Key\": \"scheduleType\",\n\"StringValue\": \"cron\"\n}\n]\n},\n{\n\"Id\": \"EmrConfigurationId_Q9rpL\",\n\"Name\": \"DefaultEmrConfiguration1\",\n\"Fields\": [\n{\n\"Key\": \"configuration\",\n\"RefValue\": \"EmrConfigurationId_LFzOl\"\n},\n{\n\"Key\": \"type\",\n\"StringValue\": \"EmrConfiguration\"\n},\n{\n\"Key\": \"classification\",\n\"StringValue\": \"spark-env\"\n}\n]\n},\n{\n\"Id\": \"ActionId_SUEgm\",\n\"Name\": \"TriggerNotificationOnFail\",\n\"Fields\": [\n{\n\"Key\": \"subject\",\n\"StringValue\": \"Backcountry-clickstream-delta-hourly: #{node.@pipelineId} Error: #{node.errorMessage}\"\n},\n{\n\"Key\": \"message\",\n\"StringValue\": \"Backcountry-clickstream-delta-hourly failed to run\"\n},\n{\n\"Key\": \"type\",\n\"StringValue\": \"SnsAlarm\"\n},\n{\n\"Key\": \"topicArn\",\n\"StringValue\": \"arn:aws:sns:us-west-2:269378226633:duploservices-pravin-test-del77-128329325849\"\n}\n]\n},\n{\n\"Id\": \"EmrActivityObj\",\n\"Name\": \"EmrActivityObj\",\n\"Fields\": [\n{\n\"Key\": \"schedule\",\n\"RefValue\": \"ScheduleId_NfOUF\"\n},\n{\n\"Key\": \"step\",\n\"StringValue\": \"#{myEmrStep}\"\n},\n{\n\"Key\": \"runsOn\",\n\"RefValue\": \"EmrClusterObj\"\n},\n{\n\"Key\": \"type\",\n\"StringValue\": \"EmrActivity\"\n}\n]\n},\n{\n\"Id\": \"EmrConfigurationId_LFzOl\",\n\"Name\": \"DefaultEmrConfiguration2\",\n\"Fields\": [\n{\n\"Key\": \"property\",\n\"RefValue\": \"PropertyId_NA18c\"\n},\n{\n\"Key\": \"classification\",\n\"StringValue\": \"export\"\n},\n{\n\"Key\": \"type\",\n\"StringValue\": \"EmrConfiguration\"\n}\n]\n},\n{\n\"Id\": \"EmrClusterObj\",\n\"Name\": \"EmrClusterObj\",\n\"Fields\": [\n{\n\"Key\": \"taskInstanceType\",\n\"StringValue\": \"#{myTaskInstanceType}\"\n},\n{\n\"Key\": \"onFail\",\n\"RefValue\": \"ActionId_SUEgm\"\n},\n{\n\"Key\": \"maximumRetries\",\n\"StringValue\": \"1\"\n},\n{\n\"Key\": \"configuration\",\n\"RefValue\": \"EmrConfigurationId_Q9rpL\"\n},\n{\n\"Key\": \"coreInstanceCount\",\n\"StringValue\": \"#{myCoreInstanceCount}\"\n},\n{\n\"Key\": \"masterInstanceType\",\n\"StringValue\": \"#{myMasterInstanceType}\"\n},\n{\n\"Key\": \"releaseLabel\",\n\"StringValue\": \"#{myEMRReleaseLabel}\"\n},\n{\n\"Key\": \"type\",\n\"StringValue\": \"EmrCluster\"\n},\n{\n\"Key\": \"terminateAfter\",\n\"StringValue\": \"3 Hours\"\n},\n{\n\"Key\": \"bootstrapAction\",\n\"StringValue\": \"#{myBootstrapAction}\"\n},\n{\n\"Key\": \"taskInstanceCount\",\n\"StringValue\": \"#{myTaskInstanceCount}\"\n},\n{\n\"Key\": \"coreInstanceType\",\n\"StringValue\": \"#{myCoreInstanceType}\"\n},\n{\n\"Key\": \"applications\",\n\"StringValue\": \"spark\"\n}\n]\n},\n{\n\"Id\": \"ScheduleId_NfOUF\",\n\"Name\": \"Every 10 hr\",\n\"Fields\": [\n{\n\"Key\": \"period\",\n\"StringValue\": \"10 Hours start time 2\"\n},\n{\n\"Key\": \"startDateTime\",\n\"StringValue\": \"2022-01-07T21:21:00\"\n},\n{\n\"Key\": \"type\",\n\"StringValue\": \"Schedule\"\n},\n{\n\"Key\": \"endDateTime\",\n\"StringValue\": \"2022-01-08T15:44:28\"\n}\n]\n},\n{\n\"Id\": \"PropertyId_NA18c\",\n\"Name\": \"DefaultProperty1\",\n\"Fields\": [\n{\n\"Key\": \"type\",\n\"StringValue\": \"Property\"\n},\n{\n\"Key\": \"value\",\n\"StringValue\": 
\"/usr/bin/python3\"\n},\n{\n\"Key\": \"key\",\n\"StringValue\": \"PYSPARK_PYTHON\"\n}\n]\n}\n],\n\"ParameterValues\": [\n{\n\"Id\": \"myEMRReleaseLabel\",\n\"StringValue\": \"emr-6.1.0\"\n},\n{\n\"Id\": \"myMasterInstanceType\",\n\"StringValue\": \"m3.xlarge\"\n},\n{\n\"Id\": \"myBootstrapAction\",\n\"StringValue\": \"s3://YOUR-S3-FOLDER/bootstrap_actions/your_boottrap_and_python_lib_installer.sh\"\n},\n{\n\"Id\": \"myEmrStep\",\n\"StringValue\": \"command-runner.jar,spark-submit,--packages,io.delta:delta-core_2.12:0.8.0,--conf,spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension,--conf,spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog,--num-executors,2,--executor-cores,2,--executor-memory,2G,--conf,spark.driver.memoryOverhead=4096,--conf,spark.executor.memoryOverhead=4096,--conf,spark.dynamicAllocation.enabled=false,--name,PixelClickstreamData,--py-files,s3://YOUR-S3-FOLDER/libraries/librariy1.zip,--py-files,s3://YOUR-S3-FOLDER/libraries/librariy2.zip,s3://YOUR-S3-FOLDER/your_script.py, your_script_arg1, your_script_arg2\"\n},\n{\n\"Id\": \"myEmrStep\",\n\"StringValue\": \"command-runner.jar,aws,athena,start-query-execution,--query-string,MSCK REPAIR TABLE your_database.your_table,--result-configuration,OutputLocation=s3://YOUR-S3-FOLDER/logs/your_query_parquest\"\n},\n{\n\"Id\": \"myCoreInstanceType\",\n\"StringValue\": \"m3.xlarge\"\n},\n{\n\"Id\": \"myCoreInstanceCount\",\n\"StringValue\": \"1\"\n}\n],\n\"ParameterObjects\": [\n{\n\"Id\": \"myEC2KeyPair\",\n\"Attributes\": [\n{\n\"Key\": \"helpText\",\n\"StringValue\": \"An existing EC2 key pair to SSH into the master node of the EMR cluster as the user \\\"hadoop\\\".\"\n},\n{\n\"Key\": \"description\",\n\"StringValue\": \"EC2 key pair\"\n},\n{\n\"Key\": \"optional\",\n\"StringValue\": \"true\"\n},\n{\n\"Key\": \"type\",\n\"StringValue\": \"String\"\n}\n]\n},\n{\n\"Id\": \"myEmrStep\",\n\"Attributes\": [\n{\n\"Key\": \"helpLink\",\n\"StringValue\": \"https://docs.aws.amazon.com/console/datapipeline/emrsteps\"\n},\n{\n\"Key\": \"watermark\",\n\"StringValue\": \"s3://myBucket/myPath/myStep.jar,firstArg,secondArg\"\n},\n{\n\"Key\": \"helpText\",\n\"StringValue\": \"A step is a unit of work you submit to the cluster. 
You can specify one or more steps\"\n},\n{\n\"Key\": \"description\",\n\"StringValue\": \"EMR step(s)\"\n},\n{\n\"Key\": \"isArray\",\n\"StringValue\": \"true\"\n},\n{\n\"Key\": \"type\",\n\"StringValue\": \"String\"\n}\n]\n},\n{\n\"Id\": \"myTaskInstanceType\",\n\"Attributes\": [\n{\n\"Key\": \"helpText\",\n\"StringValue\": \"Task instances run Hadoop tasks.\"\n},\n{\n\"Key\": \"description\",\n\"StringValue\": \"Task node instance type\"\n},\n{\n\"Key\": \"optional\",\n\"StringValue\": \"true\"\n},\n{\n\"Key\": \"type\",\n\"StringValue\": \"String\"\n}\n]\n},\n{\n\"Id\": \"myCoreInstanceType\",\n\"Attributes\": [\n{\n\"Key\": \"default\",\n\"StringValue\": \"m1.medium\"\n},\n{\n\"Key\": \"helpText\",\n\"StringValue\": \"Core instances run Hadoop tasks and store data using the Hadoop Distributed File System (HDFS).\"\n},\n{\n\"Key\": \"description\",\n\"StringValue\": \"Core node instance type\"\n},\n{\n\"Key\": \"type\",\n\"StringValue\": \"String\"\n}\n]\n},\n{\n\"Id\": \"myEMRReleaseLabel\",\n\"Attributes\": [\n{\n\"Key\": \"default\",\n\"StringValue\": \"emr-5.13.0\"\n},\n{\n\"Key\": \"helpText\",\n\"StringValue\": \"Determines the base configuration of the instances in your cluster, including the Hadoop version.\"\n},\n{\n\"Key\": \"description\",\n\"StringValue\": \"EMR Release Label\"\n},\n{\n\"Key\": \"type\",\n\"StringValue\": \"String\"\n}\n]\n},\n{\n\"Id\": \"myCoreInstanceCount\",\n\"Attributes\": [\n{\n\"Key\": \"default\",\n\"StringValue\": \"2\"\n},\n{\n\"Key\": \"description\",\n\"StringValue\": \"Core node instance count\"\n},\n{\n\"Key\": \"type\",\n\"StringValue\": \"Integer\"\n}\n]\n},\n{\n\"Id\": \"myTaskInstanceCount\",\n\"Attributes\": [\n{\n\"Key\": \"description\",\n\"StringValue\": \"Task node instance count\"\n},\n{\n\"Key\": \"optional\",\n\"StringValue\": \"true\"\n},\n{\n\"Key\": \"type\",\n\"StringValue\": \"Integer\"\n}\n]\n},\n{\n\"Id\": \"myBootstrapAction\",\n\"Attributes\": [\n{\n\"Key\": \"helpLink\",\n\"StringValue\": \"https://docs.aws.amazon.com/console/datapipeline/emr_bootstrap_actions\"\n},\n{\n\"Key\": \"helpText\",\n\"StringValue\": \"Bootstrap actions are scripts that are executed during setup before Hadoop starts on every cluster node.\"\n},\n{\n\"Key\": \"description\",\n\"StringValue\": \"Bootstrap action(s)\"\n},\n{\n\"Key\": \"isArray\",\n\"StringValue\": \"true\"\n},\n{\n\"Key\": \"optional\",\n\"StringValue\": \"true\"\n},\n{\n\"Key\": \"type\",\n\"StringValue\": \"String\"\n}\n]\n},\n{\n\"Id\": \"myMasterInstanceType\",\n\"Attributes\": [\n{\n\"Key\": \"default\",\n\"StringValue\": \"m1.medium\"\n},\n{\n\"Key\": \"helpText\",\n\"StringValue\": \"The Master instance assigns Hadoop tasks to core and task nodes, and monitors their status.\"\n},\n{\n\"Key\": \"description\",\n\"StringValue\": \"Master node instance type\"\n},\n{\n\"Key\": \"type\",\n\"StringValue\": \"String\"\n}\n]\n}\n]\n}\n```",
"RootActivity#scheduleType": "Valid Values are cron, ondemand, timeseries.\n[Click Here](https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-concepts-schedules.html)\n[Click Here](https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-object-schedule.html)\n\n",
"EmrActivity#step": "EmrActivity steps could be any supported steps/jobs like - spark-submit, hive, pig, athena ... etc.\n[Click Here](https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-object-emractivity.html)\n\n```js\n[ \"command-runner.jar,spark-submit,--packages,io.delta:delta-core_2.12:0.8.0,--conf,spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension,--conf,spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog,--num-executors,2,--executor-cores,2,--executor-memory,2G,--conf,spark.driver.memoryOverhead=4096,--conf,spark.executor.memoryOverhead=4096,--conf,spark.dynamicAllocation.enabled=false,--name,PixelClickstreamData,--py-files,s3://YOUR-S3-FOLDER/libraries/librariy1.zip,--py-files,s3://YOUR-S3-FOLDER/libraries/librariy2.zip,s3://YOUR-S3-FOLDER/your_script.py, your_script_arg1, your_script_arg2\",\n\"command-runner.jar,aws,athena,start-query-execution,--query-string,MSCK REPAIR TABLE your_database.your_table,--result-configuration,OutputLocation=s3://YOUR-S3-FOLDER/logs/your_query_parquest\"\n]\n```\n",
"EmrCluster#onFail": "EmrCluster onFail .e.g SNS message.\n[Click Here](https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-object-snsalarm.html)\n\n```js\n[{\n\"id\" : \"SuccessNotify\",\n\"name\" : \"SuccessNotify\",\n\"type\" : \"SnsAlarm\",\n\"topicArn\" : \"arn:aws:sns:us-east-1:28619EXAMPLE:ExampleTopic\",\n\"subject\" : \"COPY SUCCESS: #{node.@scheduledStartTime}\",\n\"message\" : \"Files were copied from #{node.input} to #{node.output}.\"\n}]\n```\n",
"EmrCluster#configuration": "EmrCluster configuration.\n[Click Here](https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-object-emractivity.html)\n\n```js\n[\n{\n\"classification\": \"core-site\",\n\"properties\": {\n\"io.file.buffer.size\": \"4096\",\n\"fs.s3.block.size\": \"67108864\"\n}\n},\n{\n\"classification\": \"hadoop-env\",\n\"properties\": {\n\n},\n\"configurations\": [\n{\n\"classification\": \"export\",\n\"properties\": {\n\"YARN_PROXYSERVER_HEAPSIZE\": \"2396\"\n}\n}\n]\n},\n{\n\"Classification\": \"spark\",\n\"Properties\": {\n\"maximizeResourceAllocation\": \"true\"\n}\n},\n{\n\"Classification\": \"spark-hive-site\",\n\"Properties\": {\n\"hive.metastore.client.factory.class\": \"com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory\",\n\"hive.metastore.glue.catalogid\": \"acct-id\"\n}\n},\n{\n\"Classification\": \"spark-env\",\n\"Properties\": {\n\n},\n\"Configurations\": [\n{\n\"Classification\": \"export\",\n\"Properties\": {\n\"PYSPARK_PYTHON\": \"/usr/bin/python34\"\n},\n\"Configurations\": [\n\n]\n}\n]\n},\n{\n\"Classification\": \"spark-defaults\",\n\"Properties\": {\n\"spark.yarn.appMasterEnv.PYSPARK_PYTHON\": \"/home/hadoop/venv/bin/python3.4\",\n\"spark.executor.memory\": \"2G\"\n}\n},\n{\n\"Classification\": \"emrfs-site\",\n\"Properties\": {\n\"fs.s3.enableServerSideEncryption\":true\n}\n}\n]\n```\n",
"EmrCluster#applicatiob": "EmrCluster application.\n[Click Here](https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-object-emractivity.html)\n\n```js\n[\n\"Spark\",\n\"Hadoop\",\n\"Pig\",\n\"Hive\",\n\"Jupyterlab\",\n]\n```"
},
"airflow_add_sub_form_configure": {
"EnvironmentClass_other": "Environment class for the cluster.",
"Schedulers": "The number of schedulers that you want to run in your environment. v2.0.2 and above accepts `2` - `5`, default `2`. v1.10.12 accepts `1`.",
"MinWorkers": "The minimum number of workers that you want to run in your environment.",
"MaxWorkers": "The maximum number of workers that can be automatically scaled up. Value need to be between `1` and `25`.",
"AirflowConfigurationOptions_str": "Specify parameters to override airflow options.\nSample Value can be as below\n```js\n{\n \"core.log_format\": \"[%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s\"\n}\n```\n"
},
"batchSchedulingPolicy": {
"Name": "Up to 128 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.",
"Tags": "A tag is a label that you assign to an AWS resource. Each tag consists of a key and an optional value. You can use tags to search and filter your resources or track your AWS costs\nSample Value for **Tags** is as below\n```js\n{\n \"Key\" : \"Value\"\n}\n\n```\n",
"ShareDistribution": "An array of SharedIdentifier objects that contain the weights for the fair share identifiers for the fair share policy. Fair share identifiers that aren't included have a default weight of 1.0.\n[Click Here](https://docs.aws.amazon.com/batch/latest/APIReference/API_ShareAttributes.html) to know more about **ShareDistribution**.\nSample Value for the Other Job properties to set **ContainerOverrides** is as below\n```js\n[\n {\n \"ShareIdentifier\": \"SomeIdentifier\",\n \"WeightFactor\": 0.1\n }\n]\n```"
},
"rdsRestorePointTime": {
"TargetName": "Specify the rds identifier name. Should be unique across all tenants.",
"latestRestorableTime": "Select to restore DB from the latest backup time.",
"type": "Select Custom, enter the date and time to which you want to restore the DB instance."
},
"thing": {
"Name": "The name of the thing.",
"IotCertificateArn": "Select IoT certificate.",
"ThingTypeName": "Select the thing type name. Defaults to `DuploDefault`",
"Attributes": "Map of attributes of the thing. Example\n```json\n{\"First\":\"examplevalue\"}\n```"
},
"addTimestreamDatabase": {
"DatabaseName": "The name of the Timestream database.",
"KmsKeyId": "Select the KMS key to be used to encrypt the data stored in the database."
},
"AddNamespace": {
"Name": "Specify the name of the ServiceBus Namespace resource.",
"SkuTier": "Select the Pricing Tier.",
"Version": "Select a specific TLS version for your namespace.",
"AutoGrow": "Enable/ Disable SAS authentication for the Service Bus namespace."
},
"emr_serverless_add_sub_form_limit": {
"MaximumCapacityCpu": "Specify the maximum allowed CPU for an application.",
"MaximumCapacityMemory": "Specify the maximum allowed resources for an application.",
"MaximumCapacityDisk": "Specify the maximum allowed disk for an application."
},
"AddPostgreSQLFlexi": {
"Name": "Specify Server Name.",
"SkuTier": "Select SKU Tier.",
"AdminUsername": "Specify the primary administrator username.",
"AdminPassword": "Specify Server Password.",
"SkuName": "Select Hardware.",
"StorageSizeGB": "Storage in GB.",
"Version": "Select the version of PostgreSQL Flexible Server to use.",
"BackupRetentionDays": "Specify The backup retention days for the PostgreSQL Flexible Server.",
"GeoRedundantBackup": "Select to enable Geo-Redundant backup.",
"SubnetId": "Select the Subnet.",
"HighAvailability": "Select High Availability."
},
"airflow_add_sub_form_dag": {
"S3Bucket": "Choose Amazon S3 storage bucket where DAG is stored.",
"DagS3Path": "Specify the relative path to the DAG folder from S3 bucket. For example, `AirflowDags/`",
"PluginsS3Path": "Specify the relative path to the plugins.zip file from S3 bucket. For example, plugins.zip. If a relative path is provided in the request, then `Plugins S3Object Version` is required.",
"RequirementsS3Path": "Specify the relative path to the requirements.txt file from S3 bucket. For example, `requirements.txt` or `AirflowDags/folder1/requirements.txt`. If a relative path is provided in the request, then `Requirements S3Object Version` is required.",
"RequirementsS3ObjectVersion": "Specify the Version Id of the the requirements.txt file version you want to use. For example, lSHNqFtO5Z7_6K6YfGpKnpyjqP2JTvSf.\nS3 Enable Versioning is required to use this in your environment.\n",
"PluginsS3ObjectVersion": "Specify the plugin.zip version. For example, lSHNqFtO5Z7_6K6YfGpKnpyjqP2JTvSf. S3 Enable Versioning is required to use this in your environment.",
"StartupScriptS3Path": "Specify a shell (.sh) script to be executed during startup on Apache Airflow components. You need to specify the relative path to the script hosted in S3 Bucket, for example, `AirflowDags/startup.sh`.\n",
"StartupScriptS3ObjectVersion": "Specify the version ID of the startup shell script from S3 Bucket. For example, YVu1x62otML9W8TQgCjm5iXWBtrGL3HP.\nS3 Enable Versioning is required to use this in your environment."
},
"K8sJobAdvancedForm": {
"specOther": "Add Job Spec in yaml format.\n```yaml\ncompletions: 3\nmanualSelector: true\nparallelism: 3\ntemplate:\n spec:\n dnsPolicy: ClusterFirst\n schedulerName: default-scheduler\n securityContext: {}\n terminationGracePeriodSeconds: 30\n volumes:\n - name: my-volume\n persistentVolumeClaim:\n claimName: my-pvc-claim\n```\n",
"medataAnnotations": "Add Job Metadata Annotations.\nFollow below format:\n```yaml\nannotation_name: value1\nannotation_type: value2\n```\n",
"medataLabels": "Add Job Metadata Labels.\n```yaml\nlabel1: value1\nlabel2: value2\n```"
},
"AddEFS": {
"Name": "Friendly name for Elastic File System. User provided name will be appended with prefix as \"duploservice-<tenant_name>-\".",
"CreationToken": "A string of up to 64 ASCII characters. Amazon EFS uses this to ensure idempotent creation.",
"ProvisionedThroughputInMibps": "The throughput, measured in MiB/s, that you want to provision for a file system that you're creating. \nValid values are 1-1024. Required if ThroughputMode is set to provisioned . The upper limit for throughput is 1024 MiB/s.\n",
"performanceMode": "The performance mode of the file system. We recommend generalPurpose performance mode for most file systems. \nFile systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created.\nNote: The maxIO mode is not supported on file systems using One Zone storage classes.\n",
"throughputMode": "Specifies the throughput mode for the file system, either bursting or provisioned . If you set ThroughputMode to provisioned , you must also set a value for ProvisionedThroughputInMibps. \nAfter you create the file system, you can decrease your file system's throughput in Provisioned Throughput mode or change between the throughput modes, as long as it\u2019s been more than 24 hours since the last decrease or throughput mode change.\n",
"Backup": "Specifies whether automatic backups are enabled on the file system that you are creating. Set the value to true to enable automatic backups. \nIf you are creating a file system that uses One Zone storage classes, automatic backups are enabled by default.\n",
"Encrypted": "A Boolean value that, if true, creates an encrypted file system. When creating an encrypted file system, you have the option of specifying an existing Key Management Service key (KMS key)."
},
"emrserverlsess_add_runjob_sub_form_script": {
"ScriptQueryInitFileBucket": "Choose S3 Bucket where the initialization script is stored.",
"ScriptQueryInitFile": "Specify S3 folder location of the initialization script that you can use to initialize tables prior to running the Hive script. Example: `samples/hive-scripts/create_table.sql`\n",
"ScriptQueryBucket": "Choose S3 bucket where the query files are stored.",
"ScriptQueryFile": "Specify the S3 folder location of the script which needs to be executed in the job.\nExample: `samples/hive-scripts/extreme_weather.sql`\n",
"ScriptS3Bucket": "Choose S3 Bucket location S3 Bucket where scripts are stored.",
"ScriptS3BucketFolder": "Specify the S3 folder location where scripts are stored. Example: `samples/spark-scripts/wordcount.py`\n",
"ScriptArguments": "Specify array of arguments passed to your main JAR or Python script. Each argument in the array must be separated by a comma. \n Example:\n ```js\n [\"s3://<YOUR_S3_BUCKET_NAME>/wordcount_output\", \"40000\"}]\n ```\n",
"ScriptSubmitParameters": "Specify additional configuration properties for your each job.\n 1. **Spark:** \n Example:\n `--conf spark.executor.cores=1 --conf spark.executor.memory=4g --conf spark.driver.cores=1 --conf spark.driver.memory=4g --conf spark.executor.instances=1 \n `\n 2. **Hive:** \n Example: \n `--hiveconf hive.exec.scratchdir=s3://<YOUR_S3_BUCKET_NAME>/hive/scratch --hiveconf hive.metastore.warehouse.dir=s3://<YOUR_S3_BUCKET_NAME>/hive/warehouse\n `\n"
},
"emr_serverless_add_sub_form_pre_capacity": {
"DriverCount": "Specify initialized Driver Capacity.",
"ExecutorCount": "Specify initialized Executors Capacity.",
"DriverCpu": "Specify vCPU per driver.",
"DriverMemory": "Specify Memory in GB per driver.",
"DriverDisk": "Specify Disk in GB per driver.",
"ExecutorCpu": "Specify vCPU per executor.",
"ExecutorMemory": "Specify Memory in GB per executor.",
"ExecutorDisk": "Specify Disk in GB per executor."
},
"AddRDS": {
"Identifier": "Please provide a unique identifier for the RDS instance that is unique across all tenants. The cluster identifier is used to determine the cluster's endpoint. An identifier cannot end with a hyphen or contain two consecutive hyphens or start with a number. It should also be 49 characters or shorter and must be in all lowercase.",
"SnapshotId": "Select this when you want to create RDS instance from existing Snapshot.",
"Engine": "Select Database engine for creating RDS instance.",
"EngineVersion": "Select database engine version. If not selected latest version will be used while creating database. Select type as 'Other' if you don't see desired option in dropdown list.",
"Username": "Specify an alphanumeric string that defines the login ID for the master user. You use the master user login to start defining all users, objects, and permissions in a databases of your DB instance. Master Username must start with a letter.",
"Password": "Specify a string that defines the password for the master user. Master Password must be at least eight characters long and listed characters are accepted ```[a-z] [A-Z] [0-9] [- * ! $ % &]```.",
"ClusterIdentifier": "Cluster Identifier",
"DbSize": "Instance size for RDS. Select type as 'Other' if you don't see desired option in dropdown list.",
"AllocatedStorage": "Storage allocation for RDS instance in GB.",
"MinCapacity": "Set the minimum capacity unit for the DB cluster. Each capacity unit is equivalent to a specific compute and memory configuration.",
"MaxCapacity": "Set the maximum capacity unit for the DB cluster. Each capacity unit is equivalent to a specific compute and memory configuration.",
"AutoPause": "Specify the amount of time to pass with no database traffic before you scale to zero processing capacity. When database traffic resumes, your Serverless cluster resumes processing capacity and scales to handle the traffic.\\",
"AutoPauseDuration": "Amount of time the cluster can be idle before scaling to zero",
"DBParameterGroupName": "Database parameters group name.",
"ClusterParameterGroupName": "Cluster parameters group name.",
"EncryptionKey": "Choose to encrypt the given instance.",
"EnableLogging": "Select this option to enable logging for the RDS instance.",
"MultiAZ": "Create database in multiple availability zones for high availability.",
"StorageType": "Select the StorageType. Default is `gp3`.",
"StoreDetailsInSecretManager": "Enable to store RDS password in AWS Secret Manager.",
"EnableIamAuth": "Enable to set IAM database authentication. For supported regions and engine versions, refer [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RDS_Fea_Regions_DB-eng.Feature.IamDatabaseAuthentication.html)",
"BackupRetentionPeriod": "Specify in days for automated backups. Valid values 1-35. (Optional)If not specified, by default Backup Retention Day would be set as 1.",
"AvailabilityZone": "Select an Availability Zone (AZ).",
"CACertificateIdentifier": "Select Certificate authority.",
"EnableMultiAZ": "Enable Multi Availability Zone."
},
"selectIotDeviePackageCert": {
"certificateId": "Choose the certificate to download the device package."
},
"UpdateEfsLifecyclePolicy": {
"TransitionToIA": "Select the duration to transition files to the IA storage class.",
"TransitionToPrimaryStorageClass": "Enable to transition a file to primary storage."
},
"emr_serverless_add_sub_form_configure": {
"AutoStartConfiguration_Enabled": "Enable the configuration for an application to automatically start on job submission.",
"AutoStopConfiguration_Enabled": "Enable the configuration for an application to automatically stop after a certain amount of time being idle.",
"IdleTimeoutMinutes": "The amount of idle time in minutes after which your application will automatically stop."
},
"K8sJobBasicForm": {
"JobName": "Specify the name of the Job, must be unique. Minimum 3 characters.",
"Schedule": "Specify schedule in Cron format. Example- `0 0 * * 0`. This will run once a week at midnight on Sunday morning. For more help on cron schedule expressions, click [here](https://crontab.guru/#0_0_*_*_0)",
"ImageName": "Specify the image name. Example- `perl:5.34.0`.",
"CleanupFinished": "Specify time in seconds.\n1. **if set as 0**: Job becomes eligible to be deleted immediately after it finishes.\n2. **if field is unset/blank**: Jobs won't be automatically deleted. \n3. **if the field is set**: Job is eligible to be automatically delete after the seconds specified\n",
"restartPolicy": "1. **Never**: Job won't restart on failure.\n2. **OnFailure**: Job will restart on failure based on the retry count set.\n",
"Retries": "Set the retry limit. Once the BackoffLimit is reached the Job will be marked as failed.\n",
"envVariables": "Environment variables to be passed to the jobs in the YAML format.\n```yaml\n- Name: VARIABLE1\n Value: abc.com\n- Name: VARIABLE2\n Value: test_value\n```\n",
"ContainerName": "Specify Name of the Container.",
"command": "Specify the command attribute for the container.\n```yaml\n # Example 1\n [\"perl\", \"-Mbignum=bpi\", \"-wle\", \"print bpi(2000)\"]\n\n # Example 2\n - /bin/sh\n - -c\n - >-\n echo \"Hello !!!\"\n```\n",
"args": "Add argument\n```yaml\n# Example 1\n[\"$(VARIABLE1)\"]\n\n# Example 2\n[\"-c\", \"while true; do echo hello; sleep 10;done\"]\n\n# Example 3\n - -c\n - echo \"Hello world!\" && sleep 5 && exit 42\n```\n",
"otherContainerConfig": "Additional Container Configurations. add here.\n```yaml\nimagePullPolicy: Always\nresources: {}\nterminationMessagePath: /dev/termination-log\nterminationMessagePolicy: File\nvolumeMounts:\n- mountPath: /opt\n name: my-volume\n```\n",
"AllocationTags": "Allocation tags is the simplest way to constraint containers/pods with hosts/nodes. DuploCloud/Kubernetes Orchestrator will make sure containers will run on the hosts having same allocation tags."
},
"topicRule": {
"Name": "Specify the name of the rule.",
"Description": "Add description to the rule.",
"SqlSelect_later": "The SQL statement used to query the topic. For SQL Expressions refer [here](https://docs.aws.amazon.com/iot/latest/developerguide/iot-sql-reference.html).",
"TopicFilter_later": "TopicFilter for the query. Defaults to `#`.",
"SqlWhere_later": "clause determines if the actions specified by a rule are carried out. Refer [here](https://docs.aws.amazon.com/iot/latest/developerguide/iot-sql-where.html).",
"SqlVersion": "The version of the SQL rules engine to use when evaluating the rule. Defaults to `2016-03-23`.",
"Disabled": "Select to enable or disable the destination.",
"Action 1": "Specify actions when a rule is invoked. For more details refer [here](https://docs.aws.amazon.com/iot/latest/developerguide/iot-rule-actions.html).",
"UseErrorAction": "Enable to activate the rules engine an error action, if any problem is identified while activating the topic rule.",
"Error Action": "Define the error action message. Refer [here](https://docs.aws.amazon.com/iot/latest/developerguide/rule-error-handling.html) for examples."
},
"AddSNS": {
"Name": "The name of the SNS topic.",
"KmsKeyId": "Select the KMS key for encryption.",
"FifoTopic": "Enable to create a SNS FIFO topic.",
"ContentBasedDeduplication": "When selected, enables content-based deduplication for FIFO topics."
},
"lbrule": {
"itemConditionType0": "Following types of Rule Conditions are supported:\n1. **Path:** Route based on path patterns in the request URLs.\n2. **Host Header:** Route based on the host name of each request.\n3. **Source IP:** Route based on the source IP address of each request.\n4. **HTTP Request Method:** Route based on the HTTP request method of each request.\nFor details, refer [here](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html#listener-rules).\n",
"Priority": "(Optional) Priority would be added by DuploCloud when not entered. Note- A listener can't have multiple rules with the same priority.",
"tgForwardActionArn": "Select the Target Group to which to route the traffic.",
"itemVal00": "Enter Value applicable as per the Rule Condition selected. For details, refer [here](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html#listener-rules)."
},
"addLogDeliveryConfiguration": {
"LogFormat": "Select Log Format. Supported format are `TEXT` and `JSON`.",
"LogType": "Select Log Type `engine log` and `slow log`.",
"LogGroup": "Sepcify Cloud Watch Log Group. If the Log Group do not exist, will create a new log group."
},
"AddServiceAdvanced": {
"VolumeMappingsK8s": "Volumes and Volume mount setting in the Duplo simplied format. If you specify Volumes here, Duplo will take care of configuring pod volumes and volume mounts.\n```yaml\n---\n# Statefulset using EBS volume\n- AccessMode: ReadWriteOnce\n Name: data\n Path: /attachedvolume\n Size: 10Gi\n# Deployment using host directory mount in read-only mode\n- Name: rootfs # Name of the volume\n Path: /rootfs # Path in the container\n ReadOnly: true # Set if it is readonly mount\n Spec: # K8s Defined volumes\n HostPath:\n Path: / # Path on the host\n# Deployment using host directory mount in read-write mode\n- Name: var-run\n Path: /var/run\n Spec:\n HostPath:\n Path: /var/run\n# Deployment mounting secret into directory\n- Name: nginx\n Path: /etc/nginx/conf.d\n Spec:\n Secret:\n SecretName: nginx\n# Deployment mounting config map into directory\n- Name: nginx\n Path: /etc/nginx/conf.d\n Spec:\n ConfigMap:\n Name: nginx\n# Deployment using PersistentVolumeClaim. \n- Name: nginx\n Path: /usr/share/nginx/html\n Spec:\n PersistentVolumeClaim:\n claimName: efs-claim\n```",
"PodConfigK8s": "Deployment and Pod confiiguration\n```yaml\nSubdomain: same-name-as-my-service\nLabels:\n label1: values1\nAnnotations:\n seccomp.security.alpha.kubernetes.io/pod: docker/default\nPodLabels:\n label1: values1\nPodAnnotations:\n seccomp.security.alpha.kubernetes.io/pod: docker/default\nRestartPolicy: Always\nPodSecurityContext:\n RunAsUser: 1000\n RunAsGroup: 3000\n FsGroup: 2000\nVolumes:\n - name: cache-volume\n emptyDir: {}\n - name: config-vol\n configMap:\n name: log-config\n items:\n - key: log_level\n path: log_level\nImagePullSecrets:\n - name: myregistrykey\nServiceAccountName: my-service-account-name\nAutomountServiceAccountToken: true\n# mount secretprovider class volume\nVolumes:\n- Name: secretvolume-name\n Csi:\n driver: secrets-store.csi.k8s.io\n readOnly: true\n VolumeAttributes:\n secretProviderClass: my-secret-provider-class\n# deployment strategy\nDeploymentStrategy:\n RollingUpdate:\n MaxSurge: 1\n MaxUnavailable: 0 \n\n```",
"ContainerConfigK8s": "Deployment and Pod configurations\n```yaml\nImagePullPolicy: IfNotPresent\nArgs:\n - '-- somearg'\nLivenessProbe:\n httpGet:\n path: /\n port: 80\n httpHeaders:\n - name: Custom-Header\n value: Awesome\n initialDelaySeconds: 3\n periodSeconds: 3\nReadinessProbe:\n exec:\n command:\n - cat\n - /tmp/healthy\n initialDelaySeconds: 5\n periodSeconds: 5\nstartupProbe:\n initialDelaySeconds: 1\n periodSeconds: 5\n timeoutSeconds: 1\n successThreshold: 1\n failureThreshold: 1\n exec:\n command:\n - cat\n - /etc/nginx/nginx.conf\nSecurityContext:\n Capabilities:\n Add:\n - NET_BIND_SERVICE\n Drop:\n - ALL\n ReadOnlyRootFilesystem: false\n RunAsNonRoot: true\n RunAsUser: 1000\nAutomountServiceAccountToken: true\nVolumesMounts:\nEnvFrom:\n- secretRef:\n name: secret_name\n- configMapRef:\n name: configmap-name\n# Mount SecretProvider\nVolumesMounts:\n- Name: volume-name\n MountPath: /mnt/secrets\n readOnly: true\nEnvFrom:\n- SecretRef:\n Name: secretobject-name\n# Set Resource Request and Limits\nresources:\n requests:\n memory: \"10Gi\"\n cpu: \"500m\"\n ephemeral-storage: 2Gi\n limits:\n memory: \"10Gi\"\n cpu: \"500m\"\n ephemeral-storage: 4Gi\n# Pod Toleration example\ntolerations:\n- key: key1\n operator: Equal\n value: value1\n effect: NoSchedule\n- key: example-key\n operator: Exists\n effect: NoExecute\n tolerationSeconds: 6000\n#lifecycle hook sample\nlifecycle:\n postStart:\n exec:\n command:\n - /bin/sh\n - '-c'\n - date > /container-started.txt\n preStop:\n exec:\n command:\n - /usr/sbin/nginx\n - '-s'\n - quit\n# StatefulSet Update Strategy\nStatefulSetUpdateStrategy:\n RollingUpdate:\n Partition: 1\n Type: RollingUpdate\n```",
"VolumeMappings": "Example of mounting a host drive into the container\n```js\n\"/home/ubuntu/data:/data\",\"/home/ubuntu/confg:/config\"\n```\n",
"otherDockerConfig": "Any custom docker create container can be passed here based\n on the documentation at https://docs.docker.com/engine/api/v1.41/#operation/ContainerCreate\n for example the following config overrides the entrypoint of the container and\n sets a few labels\n```js\n{\n \"Entrypoint\": [\n \"/bin/bash\",\n \"-c\",\n \"sleep 1h\"\n ],\n \"Labels\": {\n \"com.example.vendor\": \"Acme\",\n \"com.example.license\": \"GPL\",\n \"com.example.version\": \"1.0\"\n }\n}\n```"
},
"AddRdsAuroraReplica": {
"Identifier": "Please provide a unique identifier for the RDS replica instance that is unique across all tenants. The cluster identifier is used to determine the cluster's endpoint. An identifier cannot end with a hyphen or contain two consecutive hyphens or start with a number. It should also be 49 characters or shorter and must be in all lowercase.",
"Engine": "Select Database engine for creating RDS instance.",
"EngineVersion": "Select database engine version. If not selected latest version will be used while creating database. Select type as 'Other' if you don't see desired option in dropdown list.",
"DbSize": "Instance size for RDS. Select type as 'Other' if you don't see desired option in dropdown list.",
"MinCapacity": "Set the minimum capacity unit for the DB cluster. Each capacity unit is equivalent to a specific compute and memory configuration.",
"MaxCapacity": "Set the maximum capacity unit for the DB cluster. Each capacity unit is equivalent to a specific compute and memory configuration.",
"AvailabilityZone": "Select availability zone for high availability."
},
"IngressRule": {
"serviceName": "Name of the kubernetes service which Ingress will use as backend to serve the request. User will have to first configure a Loadbalancer/Kubernetes NodePort/Kubernetes ClusterIP for the DuploCloud service. \nYou can find the document [here](https://docs.duplocloud.com/docs/aws/quick-start/step-6-create-a-load-balancer)\n",
"port": "Port from the kubernetes service that ingress will use as backend port to serve the requests.",
"host": "If a host is provided (for e.g. example.com, foo.bar.com), the rules apply to that host.",
"path": "Specify the path (for e.g. /api /v1/api/) to do a path base routing. If host is specified then both path and host should be match for the incoming request.",
"pathType": "Each path in an Ingress is required to have a corresponding path type. Paths that do not include an explicit pathType will fail validation. There are three supported path types:\n\nImplementationSpecific: With this path type, matching is up to the IngressClass. Implementations can treat this as a separate pathType or treat it identically to Prefix or Exact path types.\n\n1. *Exact*: Matches the URL path exactly and with case sensitivity.\n\n2. *Prefix*: Matches based on a URL path prefix split by /. Matching is case sensitive and done on a path element by element basis. A path element refers to the list of labels in the path split by the / separator. A request is a match for path p if every p is an element-wise prefix of p of the request path. \n"
},
"cwtarget-ecs-add": {
"Task Definition Family": "Specify the task definition to use if the event target is ECS.",
"Task Version": "Select the task version.",
"Task Count": "Specify the number of tasks to create based on the TaskDefinition. Minimum value 1, Maximum value 10."
},
"addKeyVaultSecret": {
"name": "Specify the name of the Key Vault Secret.",
"value": "Specify the value of the Key Vault Secret",
"contentType": "Specify description of the secret contents (e.g. password, connection string, etc)."
},
"AddNodePool": {
"Name": "Specify the name for your Node Pool.",
"Zones": "Specify the zone for your Node Pool.",
"InstanceType": "Select the machine type.",
"AutoscalingEnabled": "Enable autoscaler configuration for this node pool. Per zone limits will enforce given limits on a per zone basis.",
"UseTotalCount": "Enable to limit total number of nodes independently of the spreading.",
"InitialNodeCount": "Specify initial number of nodes.",
"MinNodeCount": "Minimum number of nodes in the NodePool. Must be less than or equal to Maximum Node Count.",
"MaxNodeCount": "Maximum number of nodes in the NodePool. Must be greater than or equal to Maximum Node Count.",
"LocationPolicy": "Select the Location Policy.\n1. **BALANCED:** autoscaler tries to spread the nodes equally among zones.\n2. **ANY:** autoscaler prioritize utilization of unused reservations and to account for current resource availability constraints.\n",
"ImageType": "Select the Image Type.",
"DiscType": "Select the Disk Type.\n1. **Standard:** Suitable for large data processing workloads that primarily use sequential I/Os.\n2. **Balanced:** This disk type offers performance levels suitable for most general-purpose applications at a price point between that of standard and performance (pd-ssd) persistent disks.\n3. **SSD:** Suitable for enterprise applications and high-performance databases that require lower latency and more IOPS than standard persistent disks provide.\n",
"DiscSizeGb": "Specify Boot Disk Type in GB per node.",
"Spot": "When specified, the node pool will provision Spot instances. Spot Instances are ideal for fault-tolerant workloads and may be terminated at any time.",
"cgroupMode": "Select the Cgroup Policy. Defaults to `Cgroupv2`.",
"LoggingConfig": "Select the logging config parameter for specifying the type of logging agent used in a node pool.\n1. **Default:** Select default logging variant.\n2. **Max Throughput:** Maximum logging throughput variant.\n",
"Tags": "Enter the Network Tags. Multiple Network tags can be specified.",
"AutoRepair": "Whether or not the nodes will be automatically repaired.",
"Sysctls": "Specify Linux Node Sysctl. For Sysctl Configuration options, click [here](https://cloud.google.com/kubernetes-engine/docs/how-to/node-system-config#sysctl-options).\nRefer example here.\n```js\n{\n \"net.core.somaxconn\": \"2048\",\n \"net.ipv4.tcp_rmem\": \"4096 87380 6291456\"\n}\n```\n",
"ResourceLabels": "Labels are applied to all nodes.\n```js\n {\n \"key\" : \"value\"\n }\n ```\n",
"Metadata": "Configure Compute Engine instance metadata.\n ```js\n {\n \"key\" : \"value\"\n }\n ```\n",
"UpdateStrategy": "Select Upgrade Strategy. Defaults to Surge upgrade. For more details, click [here](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-upgrades?&_ga=2.113758275.-535098261.1654188041#surge).\n",
"MaxSurge": "Max surge is the maximum number of nodes that can be created beyond the current size of the node pool during the upgrade process.",
"MaxUnavailable": "Max unavailable is the maximum number of nodes that can be simultaneously unavailable during the upgrade process. A node is considered available if its status is Ready.",
"AllocationTags": "Allocation tags is the simplest way to constraint containers/pods with hosts/nodes. DuploCloud/Kubernetes Orchestrator will make sure containers will run on the hosts having same allocation tags.",
"AutoUpgrade": "Enable node auto-upgrade for the node pool. If enabled, node auto-upgrade helps keep the nodes in your node pool up to date with the latest release version of Kubernetes.",
"TotalMinNodeCount": "Specify minimum number of all nodes.",
"TotalMaxNodeCount": "Specify maximum number of all nodes.",
"NodePoolSoakDuration": "Time needed after draining entire blue pool. After this period, blue pool will be cleaned up. A duration in seconds with up to nine fractional digits, ending with 's'. Example- `3.5s`",
"BatchNodeCount": "Number of blue nodes to drain in a batch. Only one of the batch_percentage or batch_node_count can be specified.",
"BatchPercentage": "Percentage of the bool pool nodes to drain in a batch. The range of this field should be (0.0, 1.0). Only one of the batch_percentage or batch_node_count can be specified.",
"BatchSoakDuration": "Enter Soak time after each batch gets drained. A duration in seconds with up to nine fractional digits, ending with 's'. Example- `3.5s`."
},
"emrserverlsess_add_runjob_sub_form_app_config": {
"ApplicationConfiguration": "Specify job configurations to override the default configurations for your applications.\n 1. **Spark:** \n```js\n[{\n \"Classification\": \"spark-defaults\",\n \"Configurations\": [],\n \"Properties\": {\n \"spark.driver.cores\": \"2\",\n \"spark.driver.memory\": \"4g\",\n\t\t\t \"spark.dynamicAllocation.minExecutors\":\"1\"\n }\n }]\n```\n2. **Hive:** \n```js\n[\n {\n \"Classification\": \"hive-site\",\n \"Configurations\": [],\n \"Properties\": {\n \"hive.driver.cores\": \"2\",\n \"hive.driver.memory\": \"4g\",\n \"hive.tez.container.size\": \"8192\",\n \"hive.tez.cpu.vcores\": \"4\"\n }\n }\n ]\n```\n"
},
"emrserverlsess_add_runjob_sub_form_basics": {
"ApplicationName": "EMR Serverless Application Name.",
"Type": "EMR Serverless Application Architecture type.",
"Name": "Enter Application Run Job Name.",
"ApplicationId": "Application Id",
"ExecutionTimeoutMinutes": "Specify execution time in minutes."
},
"AddEcache": {
"CacheType": "Select the cache engine to be used for this cache cluster.",
"Name": "Enter Cluster Identifier.",
"EngineVersion": "Select version number of the cache engine to be used. If not set, defaults to the latest version.",
"Size": "Select the NodeType.",
"Replicas": "Specify the initial number of cache nodes that the cache cluster will have.",
"EnableEncryptionAtTransit": "Select if Encryption At Transit is needed.",
"ParameterGroupName": "Specify the name of the parameter group to associate with this cache cluster.",
"SnapshotArnsInput": "Specify the ARN of a Redis RDB snapshot file stored in Amazon S3. Example- `arn:aws:s3:::s3-backup-foldername/backupobject.rdb`",
"ClusteringEnabled": "Enable to create Redis in Cluster mode.",
"NoOfShards": "Specify number of Shards for Cluster.",
"SnapshotName": "Select the snapshot/backup you want to use for creating redis.",
"SnapshotRetentionLimit": "Specify retention limit in days. Accepted values - 1-35.",
"Kms": "Select KMS Key.",
"LogDeliveryConfiguration": "Enables exporting engine logs and slow logs to Amazon CloudWatch Logs. Select `CloudWatch` to configure."
},
"AddDynamoDB": {
"Name": "Specify unique name of the table.",
"PrimaryKeyName": "Specify the primary key of the table.",
"Type": "**HASH** - Select to use as the hash (partition) key.\n**Range** - Select to use as the range (sort) key.\n",
"Size": "Select the attribute. Data types allowed for primary key attributes are string or number.",
"SortKeyName": "SortKey is applicable for range attribute. Stores item in sorted order by the sort key value.",
"SortKeyType": "Select key type. Type supported is String or Number."
},
"cert": {
"SetAsActive": "Enable to create to Certificate in Active state."
},
"cwrule-target-lambda-add": {
"Lambda Function Name": "Select the Lambda function."
},
"GenericSideBar": {
"AddSteps": "Jobs to be executed on cluster. Please update s3 and py file.\n```js\n[\n{\n\"ActionOnFailure\" : \"CONTINUE\",\n\"Name\" : \"sparkstepTest\",\n\"HadoopJarStep\" : {\n\"Jar\" : \"command-runner.jar\",\n\"Args\" : [\n \"spark-submit\",\n \"s3://YOUR-S3-FOLDER/script3.py\"\n]\n}\n}\n]\n```",
"ManagedScaling": "ManagedScalingPolicy example.\n```js\n{\n\"ComputeLimits\" : {\n\"UnitType\" : \"Instances\",\n\"MinimumCapacityUnits\" : 2,\n\"MaximumCapacityUnits\" : 5,\n\"MaximumOnDemandCapacityUnits\" : 5,\n\"MaximumCoreCapacityUnits\" : 3\n}\n}\n```"
},
"lambda-add-layer": {
"SelectLayer": "Select the Layer. The list shows the layers compatible to the runtime of the function. Applicable for only `.zip` package type.",
"SelectVersion": "Select the version of the layer to use in the function."
},
"AddIngress": {
"name": "Name for your ingress object.",
"ingressClassName": "Select ingress controller name.",
"dNSPrefix": "Provide the DNS prefix to expose services using Route53 domain.",
"visibility": "Visibility can be used to manage the accessibility of services exposed by Ingress load balancer.\n*Internal Only*: Services will be accessible within tenant and to other tenants only if allowed by security rules.\n*Public*: Services will be accessible over internet.\n",
"certificateArn": "Select certificate ARN to expose services over HTTPS.",
"ingressRules": "Ingress rules specifications as documented [here](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules)\nClick on the `Add Rule` button to add the rules for the kubernetes services created using DuploCloud.\n```\n",
"ingressAnnotations": "List of Key Value pairs to annotate the Ingress Object. \nAnnotations are used for controlling the behavior of the services exposed by the Ingress. \nEach ingress controller defines the set of annotations which can be used to control the behavior.\nRefer these links: \n *AWS ALB ingress controller* [here](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/ingress/annotations/)\n *Azure Application Gateway Ingress* [here](https://azure.github.io/application-gateway-kubernetes-ingress/annotations/#list-of-supported-annotations)\n",
"httpPort": "HTTP Listener Port. If you dont want to exposed your services over HTTP, make it blank.",
"httpsPort": "HTTPS Listener Port. HTTPS Listener port can be specified only when SSL certificate ARN is specified.",
"ingressLabels": "Key Value pair of labels to be added to SecretProviderClass\nSample Value Can be as bellow\n```yaml\nkey1: value1\nkey2: value2\n```\n",
"portOverride": "Select port to override. This field allows to configure frontend listener to use different ports other than 80/443 for http/https.",
"targetType": "# Specifies how to route traffic to pods. You can choose between `instance` and `ip`.\n\n1. **instance**\nThis mode will route traffic to all ec2 instances within cluster on NodePort opened for your service. Service must be of type `NodePort` or `LoadBalancer` to use instance mode.\n\n2. **ip**\nThis mode will route traffic directly to the pod IP. Network plugin must use secondary IP addresses on ENI for pod IP to use ip mode. e.g. amazon-vpc-cni-k8s. \nService can be of any type like `ClusterIP` or `NodePort` or `LoadBalancer` to use instance mode.\nIP mode is required for sticky sessions to work with Application Load Balancers. \n"
},
"createSnapshot": {
"SnapshotName": "Specify the name for snapshot."
},
"AddAwsSecrets": {
"Name": "The name of the new secret.",
"SecretValueType": "Secret type.\n1. **JSON Key/Value pairs:** Provide your secret information, such as credentials and connection details, as key/value pairs.\n2. **Plain text:** You can choose the `Plain text` option to store your secret in plaintext string.\n",
"KeyValsObject": "Key Value pair\n```yaml\n{\n \"username\": \"USER\",\n \"password\": \"EXAMPLE-PASSWORD\"\n}\n```",
"Value": "Plain Text String."
},
"ecsservice": {
"Name": "The name of your service. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.",
"vcpus": "Task definition with revision.",
"Replicas": "ECS makes sure a specified number of replicas run across your cluster.",
"DnsPrfx": "Prefix which will get added to the base domain registered in the plan for this tenant. If not specified default value will be `<service-name>-<tenant-name>`",
"HealthCheckGracePeriodSeconds": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target health checks after a task has first started. \nThis is only used when your service is configured to use a load balancer. If your service has a load balancer defined and you don't specify a health check grace period value, the default value of 0 is used.\n",
"OldTaskDefinitionBufferSize": "Old task definitions buffer size. This can be used to limit the size the stale task definitions in buffer. Default value is 10.",
"CapacityProvider": "Add a capacity provider to the custom capacity provider strategy for the cluster. Add a capacity provider to the custom capacity provider strategy for the cluster. If there are no existing capacity providers, create a new capacity provider from the capacity providers tab on the cluster details screen."
},
"addEmrStudioSidebar": {
"Name": "EMR Studio.",
"Description": "A detailed description of the Amazon EMR Studio.",
"S3Bucket": "Select S3 for backup.",
"S3Folder": "Specify folder name in S3."
},
"AddS3Bucket": {
"Name": "Specify name of the bucket.",
"InTenantRegion": "Select the AWS region.",
"s3ObjectLock": "Enable to configure S3 Object Lock. When you create a bucket with S3 Object Lock enabled, Amazon S3 automatically enables versioning for the bucket.",
"enableBucketVersioning": "Enable to configure S3 versioning."
},
"batchEnv": {
"OtherConfigurations": "All Other Create Compute Environment Request parameters as documented [here](https://docs.aws.amazon.com/batch/latest/APIReference/API_CreateComputeEnvironment.html).\nSample Value for customized EC2 configuration.\n```js\n{\n \"ComputeResources\": {\n \"Ec2Configuration\": [\n {\n \"ImageType\": \"ECS_AL2_NVIDIA\"\n }\n ]\n }\n}\n```"
},
"AddSbPrivateEndpoint": {
"Name": "Specify Private EndPoint name.",
"SubnetId": "Select Subnet."
},
"AddSQS": {
"Name": "The name of the queue.",
"QueueType": "Select AWS supported standard or FIFO queues.",
"MessageRetentionPeriod": "The number of seconds Amazon SQS retains a message, from 60 (1 minute) to 1209600 (14 days).",
"VisibilityTimeout": "The visibility timeout for the queue in seconds. Inputs allowed from 0 to 43200 (12 hours).",
"ContentBasedDuplication": "Enables content-based deduplication for FIFO queues.",
"DuplicationScope": "Specifies whether message deduplication occurs at the message group or queue level.",
"FIFOthroughput": "Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group.",
"DelaySeconds": "Set the Delivery Delay when sending a message to a queue in seconds. Default is `0`. Maximum delay can be set is of 15 minutes (`900` seconds)."
},
"AddEC2Host": {
"Name": "Friendly name for host. User provided name will be appended by prefix as \"duploservice-<tenant_name>-\".",
"Zone": "AWS Availability Zone to create an EC2 instance.\n1. **Automatic:** Select this option to automatically assign the availability zone to EC2 host. DuploCloud will automatically assign a zone with subnet having most available IPv4 addresses.\n",
"InstanceType": "Select an instance type that meets your computing, memory, networking, or storage needs. Select type as \"Other\" if you don't see desired option in the dropdown.",
"InstanceCount": "Desired capacity for the autoscaling group.",
"minInstanceCount": "Minimum Instance Count. Autoscaling group will make sure that total no of instance will always be greater than or equal to min count.",
"maxInstanceCount": "Maximum Instance Count. Autoscaling group will make sure that total no of instance will always be less than or equal to max count.",
"IsClusterAutoscaled": "Check this when you want kubernetes cluster autoscaler to manage this cluster auto scaling.",
"allocationTags": "Allocation tags is the simplest way to constraint containers/pods with hosts/nodes. DuploCloud/Kubernetes Orchestrator will make sure containers will run on the hosts having same allocation tags.",
"diskSize": "EBS volume size in GB. If not specified volume size will be same as defined the AMI.",
"agentPlatform": "Select container orchestration platform.\n1. **Linux Docker/Native:** Select this option if you want to run docker native services which are Linux based.\n2. **Windows Docker/Native:** Select this option if you want to run docker native services which are Windows based.\n3. **EKS Linux:** Select this options if you want to run services on the Kubernetes Cluster.\n4. **None:** This option has to be selected when EC2 instance is not used for running containers.\n",
"ImageId": "AMI id for the EC2 instance. AMI should be compatible with the agent platform. Select type as \"Other\" if you don't see desired option in dropdown.",
"blockEBSOptimization": "Set this to enable block EBS optimization.",
"enableHibernation": "Hibernation stops your instance and saves the contents of the instance\u2019s RAM to the root volume. You cannot enable hibernation after EC2 host is launch.",
"metaDataServiceFlag": "Select `Disabled` to turn off access to instance metadata. Otherwise you can set `V1 and V2`, or just `V2`. If you do not specify a value, the default is V2 only.",
"base64": "Base64 encoded user data. On Linux machine you can encode script file using command ```cat <filepath> | base64 -w 0 ```.",
"tags": "Tags to be added to ec2 instance. Format for adding tags is as below.\n 1. **EKS Linux:** Platform\n```js\n{\n \"key\" : \"value\"\n}\n```\n2. **Linux Docker/Native:** Platform\n`\"key=value, key1=value1\"`\n",
"volumes": "Array of extra block devices in json format as below.\n```js\n[\n {\n \"Name\":\"/dev/sda1\", \n \"VolumeType\":\"gp2\", \n \"Size\":\"100\",\n \"DeleteOnTermination\": \"true\"\n }\n]\n```\n",
"nwInterfaces": "Extra network interfaces to be attached to the ec2 host in a JSON format as below.\n```js\n[\n {\n \"NetworkInterfaceId\": \"eni-095827b411091db43\",\n \"DeviceIndex\": 0\n },\n {\n \"NetworkInterfaceId\": \"eni-0df26c4b283cde675\",\n \"DeviceIndex\": 1\n }\n] \n```\n",
"DedicatedHostId": "Specify the Dedicated Host ID. This ID is used to launch an instance onto specified host. Example- `h-0c6ab6f38bdcb24f6`.",
"useSpotInstancesCheck1": "Enable to launch hosts using Spot Instances.",
"maximumSpotPrice": "(Optional) If not specified, Default price would be referred.\nIf user wants to set, specify price in dollars, example- `0.0245`. Refer Spot Instance pricing [here](https://aws.amazon.com/ec2/spot/pricing/).\n",
"canScaleFromZero": "Enable the Scale From Zero (BETA) feature so DuploCloud can scale up the initial host in the ASG whenever it detects that an otherwise unschedulable pod would be able to run on such a host."
},
"AddK8sHelmRepo": {
"Name": "Specify name of the Helm Repository.",
"URL": "Specify Helm Repository URL. example- `https://helm.github.io/examples`."
},
"AddPVC": {
"name": "Name for Persistent Volume Claim.",
"storageClassName": "Storage Class Name for the Persistent Volume Claim to be created.",
"volumeName": "Provide Volume Name to claim existing PV.",
"volumeMode": "Kubernetes supports two volumeModes of PersistentVolumes: `Filesystem` and `Block`. \nVolume Mode is an optional API parameter. `Filesystem` is the default mode used when volumeMode parameter is omitted. \nA volume with volumeMode: `Filesystem` is mounted into Pods into a directory. If the volume is backed by a block device and the device is empty, Kubernetes creates a filesystem on the device before mounting it for the first time. \nYou can set the value of volumeMode to `Block` to use a volume as a raw block device. Such volume is presented into a Pod as a block device, without any filesystem on it. This mode is useful to provide a Pod the fastest possible way to access a volume, without any filesystem layer between the Pod and the volume. \n",
"accessModes": "A PersistentVolume can be mounted on a host in any way supported by the resource provider. As shown below, providers will have different capabilities and each PV's access modes are set to the specific modes supported by that particular volume. For example, NFS can support multiple read/write clients, but a specific NFS PV might be exported on the server as read-only. Each PV gets its own set of access modes describing that specific PV's capabilities.\nThe access modes are:\n1. **ReadWriteOnce**\nthe volume can be mounted as read-write by a single node. ReadWriteOnce access mode still can allow multiple pods to access the volume when the pods are running on the same node.\n2. **ReadOnlyMany**\nthe volume can be mounted as read-only by many nodes.\n3. **ReadWriteMany**\nthe volume can be mounted as read-write by many nodes.\n4. **ReadWriteOncePod**\nthe volume can be mounted as read-write by a single Pod. Use ReadWriteOncePod access mode if you want to ensure that only one pod across whole cluster can read that PVC or write to it. This is only supported for CSI volumes and Kubernetes version 1.22+.\n",
"resources": "Claims, like Pods, can request specific quantities of a resource. In this case, the request is for storage. The same **[resource model]**(https://github.com/kubernetes/design-proposals-archive/blob/main/scheduling/resources.md) applies to both volumes and claims.\nSample Value\n```yml\nrequests:\n storage: 10Gi\n```\n",
"pvcAnnotations": "Kubernetes annotations in key value format. Sample value is like below\n```yml\nkey1: value1\nkey2: value2\n```\n",
"pvcLabels": "Kubernetes labels in key value format. Sample value is like below\n```yml\nkey1: value1\nkey2: value2\n```"
}
}
}
}