-
Notifications
You must be signed in to change notification settings - Fork 27
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Setup Initial Jenkins Git Structure #744
Changes from all commits
7e6f30e
14ad3a9
2fea84a
dcf9096
a63765e
ab555d6
97dd360
dd676fd
b34da81
3a9d6fc
f5226c1
6c5c2c4
1a98a1c
708df68
3f6b900
1abb6a4
24c0a37
63eae8a
81563a1
ff62416
3c7ad9d
ff1d9ac
13865ac
958fe73
6d5a519
94071fc
ca798d5
b08cdc1
32dd6f7
9b5993a
8a3013c
02d9551
93563ed
fc62873
14c4cee
cc11974
2ca87d0
fd9ba8d
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,8 +1,53 @@ | ||
from console_link.models.cluster import Cluster | ||
from console_link.models.cluster import Cluster, HttpMethod | ||
from dataclasses import dataclass | ||
import logging | ||
|
||
logger = logging.getLogger(__name__) | ||
|
||
|
||
@dataclass | ||
class ConnectionResult: | ||
connection_message: str | ||
connection_established: bool | ||
cluster_version: str | ||
|
||
|
||
def cat_indices(cluster: Cluster, as_json=False): | ||
as_json_suffix = "?format=json" if as_json else "" | ||
as_json_suffix = "?format=json" if as_json else "?v" | ||
cat_indices_path = f"/_cat/indices{as_json_suffix}" | ||
r = cluster.call_api(cat_indices_path) | ||
return r.json() if as_json else r.content | ||
|
||
|
||
def connection_check(cluster: Cluster) -> ConnectionResult: | ||
cluster_details_path = "/" | ||
caught_exception = None | ||
r = None | ||
try: | ||
r = cluster.call_api(cluster_details_path, timeout=3) | ||
except Exception as e: | ||
caught_exception = e | ||
logging.debug(f"Unable to access cluster: {cluster} with exception: {e}") | ||
if caught_exception is None: | ||
response_json = r.json() | ||
return ConnectionResult(connection_message="Successfully connected!", | ||
Check warning on line 33 in TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/clusters.py Codecov / codecov/patchTrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/clusters.py#L23-L33
|
||
connection_established=True, | ||
cluster_version=response_json['version']['number']) | ||
else: | ||
return ConnectionResult(connection_message=f"Unable to connect to cluster with error: {caught_exception}", | ||
Check warning on line 37 in TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/clusters.py Codecov / codecov/patchTrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/clusters.py#L37
|
||
connection_established=False, | ||
cluster_version=None) | ||
|
||
|
||
def run_test_benchmarks(cluster: Cluster): | ||
cluster.execute_benchmark_workload(workload="geonames") | ||
cluster.execute_benchmark_workload(workload="http_logs") | ||
cluster.execute_benchmark_workload(workload="nested") | ||
cluster.execute_benchmark_workload(workload="nyc_taxis") | ||
Check warning on line 46 in TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/clusters.py Codecov / codecov/patchTrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/clusters.py#L43-L46
|
||
|
||
|
||
# As a default we exclude system indices and searchguard indices | ||
def clear_indices(cluster: Cluster): | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Is this used anywhere? It's definitely very helpful for our testing, but I think we should put it behind a verification step where the user explicitly acknowledges that they're going to delete all the data in their cluster. (helper for that https://click.palletsprojects.com/en/8.1.x/prompts/#confirmation-prompts) There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. potentially even do something like only create a CLI command for it (or at least only enable it) if there's an env variable like There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Went ahead and added this as a CLI option, with a confirmation step added to it, thanks for the link |
||
clear_indices_path = "/*,-.*,-searchguard*,-sg7*" | ||
r = cluster.call_api(clear_indices_path, method=HttpMethod.DELETE) | ||
return r.content | ||
Check warning on line 53 in TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/clusters.py Codecov / codecov/patchTrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/clusters.py#L51-L53
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -4,6 +4,7 @@ | |
from requests.auth import HTTPBasicAuth | ||
from cerberus import Validator | ||
import logging | ||
import subprocess | ||
from console_link.models.schema_tools import contains_one_of | ||
|
||
requests.packages.urllib3.disable_warnings() # ignore: type | ||
|
@@ -84,7 +85,7 @@ | |
elif 'sigv4' in config: | ||
self.auth_type = AuthMethod.SIGV4 | ||
|
||
def call_api(self, path, method: HttpMethod = HttpMethod.GET) -> requests.Response: | ||
def call_api(self, path, method: HttpMethod = HttpMethod.GET, timeout=None) -> requests.Response: | ||
""" | ||
Calls an API on the cluster. | ||
""" | ||
|
@@ -105,7 +106,30 @@ | |
f"{self.endpoint}{path}", | ||
verify=(not self.allow_insecure), | ||
auth=auth, | ||
timeout=timeout | ||
) | ||
logger.debug(f"Cluster API call request: {r.request}") | ||
r.raise_for_status() | ||
return r | ||
|
||
def execute_benchmark_workload(self, workload: str, | ||
workload_params='target_throughput:0.5,bulk_size:10,bulk_indexing_clients:1,' | ||
'search_clients:1'): | ||
client_options = "" | ||
if not self.allow_insecure: | ||
client_options += "use_ssl:true,verify_certs:false" | ||
if self.auth_type == AuthMethod.BASIC_AUTH: | ||
if self.auth_details['password'] is not None: | ||
client_options += (f"basic_auth_user:{self.auth_details['username']}," | ||
Check warning on line 123 in TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/cluster.py Codecov / codecov/patchTrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/cluster.py#L118-L123
|
||
f"basic_auth_password:{self.auth_details['password']}") | ||
else: | ||
raise NotImplementedError(f"Auth type {self.auth_type} with AWS Secret ARN is not currently support " | ||
Check warning on line 126 in TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/cluster.py Codecov / codecov/patchTrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/cluster.py#L126
|
||
f"for executing benchmark workloads") | ||
elif self.auth_type == AuthMethod.SIGV4: | ||
raise NotImplementedError(f"Auth type {self.auth_type} is not currently support for executing " | ||
Check warning on line 129 in TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/cluster.py Codecov / codecov/patchTrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/cluster.py#L128-L129
|
||
f"benchmark workloads") | ||
logger.info(f"Running opensearch-benchmark with '{workload}' workload") | ||
subprocess.run(f"opensearch-benchmark execute-test --distribution-version=1.0.0 " | ||
Check warning on line 132 in TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/cluster.py Codecov / codecov/patchTrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/cluster.py#L131-L132
|
||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Do the benchmark outputs print to stdout or are they captured here? Kind of nice to show the user that something's happening, so I think I'm moderately in favor of not-capturing, but don't feel super strongly. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Right now they are printing to stdout which I kinda like as well. I'm open to change that though if we decide we don't like it |
||
f"--target-host={self.endpoint} --workload={workload} --pipeline=benchmark-only --test-mode " | ||
f"--kill-running-processes --workload-params={workload_params} " | ||
f"--client-options={client_options}", shell=True) |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -51,6 +51,44 @@ def test_cli_cluster_cat_indices(runner, env, mocker): | |
mock.assert_called() | ||
|
||
|
||
def test_cli_cluster_connection_check(runner, env, mocker): | ||
mock = mocker.patch('console_link.logic.clusters.connection_check') | ||
result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'connection-check'], | ||
catch_exceptions=True) | ||
# Should have been called two times. | ||
assert result.exit_code == 0 | ||
assert 'SOURCE CLUSTER' in result.output | ||
assert 'TARGET CLUSTER' in result.output | ||
mock.assert_called() | ||
|
||
|
||
def test_cli_cluster_run_test_benchmarks(runner, env, mocker): | ||
mock = mocker.patch('console_link.logic.clusters.run_test_benchmarks') | ||
result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'run-test-benchmarks'], | ||
catch_exceptions=True) | ||
mock.assert_called_once() | ||
assert result.exit_code == 0 | ||
|
||
|
||
def test_cli_cluster_clear_indices(runner, env, mocker): | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. can you do a version of this test without There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Added 👍 |
||
mock = mocker.patch('console_link.logic.clusters.clear_indices') | ||
result = runner.invoke(cli, | ||
['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'clear-indices', | ||
'--cluster', 'source', '--acknowledge-risk'], | ||
catch_exceptions=True) | ||
mock.assert_called_once() | ||
assert result.exit_code == 0 | ||
|
||
|
||
def test_cli_cluster_clear_indices_no_acknowledge(runner, env, mocker): | ||
mock = mocker.patch('console_link.logic.clusters.clear_indices') | ||
runner.invoke(cli, | ||
['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'clear-indices', | ||
'--cluster', 'source'], | ||
catch_exceptions=True) | ||
assert not mock.called | ||
|
||
|
||
def test_cli_with_metrics_get_data(runner, env, mocker): | ||
mock = mocker.patch('console_link.models.metrics_source.PrometheusMetricsSource.get_metrics') | ||
result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'metrics', 'list'], | ||
|
@@ -69,10 +107,10 @@ def test_cli_with_backfill_describe(runner, env, mocker): | |
|
||
def test_cli_snapshot_create(runner, env, mocker): | ||
mock = mocker.patch('console_link.logic.snapshot.create') | ||
|
||
# Set the mock return value | ||
mock.return_value = SnapshotStatus.COMPLETED, "Snapshot created successfully." | ||
|
||
# Test snapshot creation | ||
result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'snapshot', 'create'], | ||
catch_exceptions=True) | ||
|
@@ -87,16 +125,16 @@ def test_cli_snapshot_create(runner, env, mocker): | |
@pytest.mark.skip(reason="Not implemented yet") | ||
def test_cli_snapshot_status(runner, env, mocker): | ||
mock = mocker.patch('console_link.logic.snapshot.status') | ||
|
||
# Set the mock return value | ||
mock.return_value = SnapshotStatus.COMPLETED, "Snapshot status: COMPLETED" | ||
|
||
# Test snapshot status | ||
result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'snapshot', 'status'], | ||
catch_exceptions=True) | ||
assert result.exit_code == 0 | ||
assert "Snapshot status: COMPLETED" in result.output | ||
|
||
# Ensure the mocks were called | ||
mock.assert_called_once() | ||
|
||
|
@@ -124,7 +162,7 @@ def test_cli_cat_indices_e2e(runner, env): | |
text=target_cat_indices) | ||
result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'cat-indices'], | ||
catch_exceptions=True) | ||
|
||
assert result.exit_code == 0 | ||
assert 'SOURCE CLUSTER' in result.output | ||
assert 'TARGET CLUSTER' in result.output | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,70 @@ | ||
// Note: | ||
// 1. We are using an existing common VPC that we provide through a 'vpcId' parameter on the pipeline for now until we move | ||
// to proper Jenkins accounts and can create a setup without public subnets as well as request an extension to allow more than 5 VPCs per region | ||
// 2. There is still a manual step needed on the EC2 source load balancer to replace its security group rule which allows all traffic (0.0.0.0/0) to | ||
// allow traffic for the relevant service security group. This needs a better story around accepting user security groups in our Migration CDK. | ||
|
||
def sourceContextId = 'source-single-node-ec2' | ||
def migrationContextId = 'migration-default' | ||
def gitUrl = 'https://github.com/opensearch-project/opensearch-migrations.git' | ||
def gitBranch = 'main' | ||
def stageId = 'aws-integ' | ||
def source_cdk_context = """ | ||
{ | ||
"source-single-node-ec2": { | ||
"suffix": "ec2-source-<STAGE>", | ||
"networkStackSuffix": "ec2-source-<STAGE>", | ||
"vpcId": "$vpcId", | ||
"distVersion": "7.10.2", | ||
"cidr": "12.0.0.0/16", | ||
"distributionUrl": "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-7.10.2-linux-x86_64.tar.gz", | ||
"captureProxyEnabled": true, | ||
"securityDisabled": true, | ||
"minDistribution": false, | ||
"cpuArch": "x64", | ||
"isInternal": true, | ||
"singleNodeCluster": true, | ||
"networkAvailabilityZones": 2, | ||
"dataNodeCount": 1, | ||
"managerNodeCount": 0, | ||
"serverAccessType": "ipv4", | ||
"restrictServerAccessTo": "0.0.0.0/0" | ||
} | ||
} | ||
""" | ||
def migration_cdk_context = """ | ||
{ | ||
"migration-default": { | ||
"stage": "<STAGE>", | ||
"vpcId": "$vpcId", | ||
"engineVersion": "OS_2.11", | ||
"domainName": "os-cluster-<STAGE>", | ||
"dataNodeCount": 2, | ||
"openAccessPolicyEnabled": true, | ||
"domainRemovalPolicy": "DESTROY", | ||
"artifactBucketRemovalPolicy": "DESTROY", | ||
"trafficReplayerExtraArgs": "--speedup-factor 10.0", | ||
"fetchMigrationEnabled": true, | ||
"reindexFromSnapshotServiceEnabled": true, | ||
"sourceClusterEndpoint": "<SOURCE_CLUSTER_ENDPOINT>", | ||
"dpPipelineTemplatePath": "../../../test/dp_pipeline_aws_integ.yaml", | ||
"migrationConsoleEnableOSI": true, | ||
"migrationAPIEnabled": true | ||
} | ||
} | ||
""" | ||
|
||
@Library("migrations-shared-lib@main")_ | ||
|
||
defaultIntegPipeline( | ||
sourceContext: source_cdk_context, | ||
migrationContext: migration_cdk_context, | ||
sourceContextId: sourceContextId, | ||
migrationContextId: migrationContextId, | ||
gitUrl: gitUrl, | ||
gitBranch: gitBranch, | ||
stageId: stageId | ||
//deployStep: { | ||
// echo 'Custom Test Step' | ||
//} | ||
Comment on lines
+67
to
+69
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. intentional? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I was trying to use it as a reference that steps could be replaced if needed |
||
) |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,66 @@ | ||
// Note: | ||
// 1. We are using an existing common VPC that we provide through a 'vpcId' parameter on the pipeline for now until we move | ||
// to proper Jenkins accounts and can create a setup without public subnets as well as request an extension to allow more than 5 VPCs per region | ||
// 2. There is still a manual step needed on the EC2 source load balancer to replace its security group rule which allows all traffic (0.0.0.0/0) to | ||
// allow traffic for the relevant service security group. This needs a better story around accepting user security groups in our Migration CDK. | ||
|
||
def sourceContextId = 'source-single-node-ec2' | ||
def migrationContextId = 'migration-rfs' | ||
def gitUrl = 'https://github.com/opensearch-project/opensearch-migrations.git' | ||
def gitBranch = 'main' | ||
def stageId = 'rfs-integ' | ||
def source_cdk_context = """ | ||
{ | ||
"source-single-node-ec2": { | ||
"suffix": "ec2-source-<STAGE>", | ||
"networkStackSuffix": "ec2-source-<STAGE>", | ||
"vpcId": "$vpcId", | ||
"distVersion": "7.10.2", | ||
"distributionUrl": "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-7.10.2-linux-x86_64.tar.gz", | ||
"captureProxyEnabled": false, | ||
"securityDisabled": true, | ||
"minDistribution": false, | ||
"cpuArch": "x64", | ||
"isInternal": true, | ||
"singleNodeCluster": true, | ||
"networkAvailabilityZones": 2, | ||
"dataNodeCount": 1, | ||
"managerNodeCount": 0, | ||
"serverAccessType": "ipv4", | ||
"restrictServerAccessTo": "0.0.0.0/0" | ||
} | ||
} | ||
""" | ||
def migration_cdk_context = """ | ||
{ | ||
"migration-rfs": { | ||
"stage": "<STAGE>", | ||
"vpcId": "$vpcId", | ||
"engineVersion": "OS_2.11", | ||
"domainName": "os-cluster-<STAGE>", | ||
"dataNodeCount": 2, | ||
"openAccessPolicyEnabled": true, | ||
"domainRemovalPolicy": "DESTROY", | ||
"artifactBucketRemovalPolicy": "DESTROY", | ||
"kafkaBrokerServiceEnabled": true, | ||
"trafficReplayerServiceEnabled": false, | ||
"reindexFromSnapshotServiceEnabled": true, | ||
"sourceClusterEndpoint": "<SOURCE_CLUSTER_ENDPOINT>" | ||
} | ||
} | ||
""" | ||
|
||
@Library("migrations-shared-lib@main")_ | ||
|
||
defaultIntegPipeline( | ||
sourceContext: source_cdk_context, | ||
migrationContext: migration_cdk_context, | ||
sourceContextId: sourceContextId, | ||
migrationContextId: migrationContextId, | ||
gitUrl: gitUrl, | ||
gitBranch: gitBranch, | ||
stageId: stageId, | ||
finishStep: { | ||
echo 'Skipping step for RFS' | ||
} | ||
) |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
minor, but I'd also reiterate in these messages which cluster is being cleared.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I like it, added