diff --git a/extensions/arc/src/models/postgresModel.ts b/extensions/arc/src/models/postgresModel.ts
index 09931200c5..ff22486af6 100644
--- a/extensions/arc/src/models/postgresModel.ts
+++ b/extensions/arc/src/models/postgresModel.ts
@@ -55,7 +55,10 @@ export class PostgresModel extends ResourceModel {
 		const cpuRequest = this._config.spec.scheduling?.default?.resources?.requests?.cpu;
 		const ramRequest = this._config.spec.scheduling?.default?.resources?.requests?.memory;
 		const storage = this._config.spec.storage?.data?.size;
-		const nodes = (this._config.spec.scale?.shards ?? 0) + 1; // An extra node for the coordinator
+
+		// scale.shards was renamed to scale.workers. Check both for backwards compatibility.
+		const scale = this._config.spec.scale;
+		const nodes = (scale?.workers ?? scale?.shards ?? 0) + 1; // An extra node for the coordinator
 
 		let configuration: string[] = [];
 		configuration.push(`${nodes} ${nodes > 1 ? loc.nodes : loc.node}`);
diff --git a/extensions/arc/src/test/.gitignore b/extensions/arc/src/test/.gitignore
deleted file mode 100644
index e9ab85b883..0000000000
--- a/extensions/arc/src/test/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/env
-/__pycache__
diff --git a/extensions/arc/src/test/README.md b/extensions/arc/src/test/README.md
deleted file mode 100644
index ac0513c1e3..0000000000
--- a/extensions/arc/src/test/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Tests for deploying Arc resources via Jupyter notebook
-
-## Prerequisites
-- Python >= 3.6
-- Pip package manager
-- Azdata CLI installed and logged into an Arc controller
-
-## Running the tests
-### 1. (Optional, recommended) Create and activate a Python virtual environment
-- `python -m venv env`
-- `source env/bin/activate` (Linux)
-- `env\Scripts\activate.bat` (Windows)
-
-### 2. Upgrade pip
-- `pip install --upgrade pip`
-
-### 3. Install the dependencies
-- `pip install -r requirements.txt`
-
-### 4. Run the tests
-- `pytest`
diff --git a/extensions/arc/src/test/requirements.txt b/extensions/arc/src/test/requirements.txt
deleted file mode 100644
index 9a99b6a1cd..0000000000
--- a/extensions/arc/src/test/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-pytest==5.3.5
-notebook==6.0.3
diff --git a/extensions/arc/src/test/test_notebooks.py b/extensions/arc/src/test/test_notebooks.py
deleted file mode 100644
index a561589e06..0000000000
--- a/extensions/arc/src/test/test_notebooks.py
+++ /dev/null
@@ -1,111 +0,0 @@
-##---------------------------------------------------------------------------------------------
-## Copyright (c) Microsoft Corporation. All rights reserved.
-## Licensed under the Source EULA. See License.txt in the project root for license information.
-##--------------------------------------------------------------------------------------------
-
-import json
-import nbformat
-import os
-import random
-import string
-import sys
-import uuid
-from nbconvert.preprocessors import ExecutePreprocessor
-from subprocess import Popen, PIPE, TimeoutExpired
-
-## Variables
-notebook_path = '../../notebooks/arcDeployment/'
-
-## Helper functions
-def generate_name(prefix, length=8):
-    return (prefix + '-' + ''.join(
-        [random.choice(string.ascii_lowercase)
-         for n in range(length - len(prefix) - 1)]))
-
-def clear_env():
-    for k in [k for k in os.environ.keys() if k.startswith('AZDATA_NB_VAR_')]:
-        del os.environ[k]
-
-def azdata(commands, timeout=None, stdin=None):
-    commands.insert(0, "azdata")
-    print('Executing command: \n', ' '.join(commands))
-    proc = Popen(commands, stdin=PIPE if stdin is not None else None, stdout=PIPE, stderr=PIPE, shell=os.name=='nt')
-    try:
-        (stdout, stderr) = proc.communicate(input=stdin, timeout=timeout)
-    except TimeoutExpired:
-        # https://docs.python.org/3.5/library/subprocess.html#subprocess.Popen.communicate
-        # The child process is not killed if the timeout expires, so in order to
-        # cleanup properly we should kill the child process and finish communication.
-        proc.kill()
-        (stdout, stderr) = proc.communicate(timeout=timeout)
-        sys.stdout.buffer.write(stdout)
-        sys.stderr.buffer.write(stderr)
-        raise
-
-    sys.stdout.buffer.write(stdout)
-    if proc.returncode != 0:
-        raise Exception(stderr)
-    else:
-        sys.stderr.buffer.write(stderr)
-
-    return (stdout.decode(sys.stdout.encoding),
-            stderr.decode(sys.stderr.encoding))
-
-## Tests
-def test_postgres_create():
-    # Load the notebook
-    with open(notebook_path + 'deploy.postgres.existing.arc.ipynb') as f:
-        nb = nbformat.read(f, as_version=nbformat.NO_CONVERT)
-
-    name = generate_name('pg')
-    try:
-        # Setup the environment
-        os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_NAME'] = name
-        subscription = os.environ['AZDATA_NB_VAR_ARC_SUBSCRIPTION'] = str(uuid.uuid4())
-        resource_group = os.environ['AZDATA_NB_VAR_ARC_RESOURCE_GROUP_NAME'] = 'test'
-        namespace = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_NAMESPACE'] = 'default'
-        workers = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_WORKERS'] = '1'
-        service_type = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_SERVICE_TYPE'] = 'NodePort'
-        data_size = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_DATA_SIZE'] = '512'
-        port = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_PORT'] = '5431'
-        extensions = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_EXTENSIONS'] = 'pg_cron,postgis'
-        cpu_min = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_CPU_MIN'] = '1'
-        cpu_max = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_CPU_MAX'] = '2'
-        memory_min = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_MEMORY_MIN'] = '256'
-        memory_max = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_MEMORY_MAX'] = '1023'
-        backup_sizes = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_BACKUP_SIZES'] = '512,1023'
-        backup_full_interval = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_BACKUP_FULL_INTERVAL'] = '20'
-        backup_delta_interval = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_BACKUP_DELTA_INTERVAL'] = '10'
-        backup_retention_min = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_BACKUP_RETENTION_MIN'] = '1,1GB;2,2GB'
-        backup_retention_max = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_BACKUP_RETENTION_MAX'] = '2,2GB;3,3GB'
-
-        # Execute the notebook that creates Postgres
-        ExecutePreprocessor(timeout=1200).preprocess(nb,
-            {'metadata': {'path': notebook_path}})
-
-        # Verify that Postgres was created successfully
-        (out, _) = azdata(['postgres', 'server', 'show', '-n', name])
-        db = json.loads(out)
-        assert db['metadata']['name'] == name
-        assert db['metadata']['namespace'] == namespace
-        assert db['spec']['scale']['shards'] == int(workers)
-        assert db['spec']['service']['type'] == service_type
-        assert db['spec']['storage']['volumeSize'] == data_size + 'Mi'
-        assert db['spec']['service']['port'] == int(port)
-        assert [p['name'] for p in db['spec']['engine']['plugins']] == ['pg_cron', 'postgis']
-        assert db['spec']['scheduling']['default']['resources']['requests']['cpu'] == cpu_min
-        assert db['spec']['scheduling']['default']['resources']['limits']['cpu'] == cpu_max
-        assert db['spec']['scheduling']['default']['resources']['requests']['memory'] == memory_min + 'Mi'
-        assert db['spec']['scheduling']['default']['resources']['limits']['memory'] == memory_max + 'Mi'
-        assert [t['storage']['volumeSize'] for t in db['spec']['backups']['tiers']] == [b + 'Mi' for b in backup_sizes.split(',')]
-        assert db['spec']['backups']['fullMinutes'] == int(backup_full_interval)
-        assert db['spec']['backups']['deltaMinutes'] == int(backup_delta_interval)
-        for i in range(len(db['spec']['backups']['tiers'])):
-            assert db['spec']['backups']['tiers'][i]['retention']['minimums'] == backup_retention_min.split(';')[i].split(',')
-            assert db['spec']['backups']['tiers'][i]['retention']['maximums'] == backup_retention_max.split(';')[i].split(',')
-    except Exception:
-        # Capture cell outputs to help with debugging
-        print([c['outputs'] for c in nb['cells'] if c.get('outputs')])
-        raise
-    finally:
-        clear_env()
-        azdata(['postgres', 'server', 'delete', '-n', name])
diff --git a/extensions/arc/src/ui/dashboards/postgres/postgresComputeAndStoragePage.ts b/extensions/arc/src/ui/dashboards/postgres/postgresComputeAndStoragePage.ts
index 13ab79de93..6b6548920b 100644
--- a/extensions/arc/src/ui/dashboards/postgres/postgresComputeAndStoragePage.ts
+++ b/extensions/arc/src/ui/dashboards/postgres/postgresComputeAndStoragePage.ts
@@ -387,17 +387,14 @@ export class PostgresComputeAndStoragePage extends DashboardPage {
 	}
 
 	private editWorkerNodeCount() {
-		let currentShards = this._postgresModel.config?.spec.scale.shards;
-
-		if (!currentShards) {
-			this.workerBox!.min = 0;
-			this.workerBox!.placeHolder = '';
-		} else {
-			this.workerBox!.min = currentShards;
-			this.workerBox!.placeHolder = currentShards!.toString();
-		}
+		// scale.shards was renamed to scale.workers. Check both for backwards compatibility.
+		let scale = this._postgresModel.config?.spec.scale;
+		let currentWorkers = scale?.workers ?? scale?.shards ?? 0;
 
+		this.workerBox!.min = currentWorkers;
+		this.workerBox!.placeHolder = currentWorkers.toString();
 		this.workerBox!.value = '';
+		this.saveArgs.workers = undefined;
 	}
diff --git a/extensions/azdata/src/typings/azdata-ext.d.ts b/extensions/azdata/src/typings/azdata-ext.d.ts
index 8eef9ced13..5f660793e9 100644
--- a/extensions/azdata/src/typings/azdata-ext.d.ts
+++ b/extensions/azdata/src/typings/azdata-ext.d.ts
@@ -172,7 +172,8 @@ declare module 'azdata-ext' {
 			}
 		},
 		scale: {
-			shards: number // 1
+			shards: number, // 1 (shards was renamed to workers, kept here for backwards compatibility)
+			workers: number // 1
 		},
 		scheduling: {
 			default: {
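
Note: the two TypeScript call sites above (postgresModel.ts and postgresComputeAndStoragePage.ts) hinge on the same fallback chain, so its semantics are worth spelling out with a minimal standalone sketch. The `Scale` interface and `getNodeCount` helper below are illustrative names invented for this note, not identifiers from the change. The key point is that `??` (nullish coalescing) only falls through on `null`/`undefined`, so a new-style spec that explicitly sets `workers: 0` keeps that value instead of falling back to a stale legacy `shards` field, which `||` would wrongly do.

// Sketch only: `Scale` and `getNodeCount` are hypothetical names for illustration.
interface Scale {
	workers?: number; // new field name
	shards?: number;  // legacy field name, kept for backwards compatibility
}

function getNodeCount(scale: Scale | undefined): number {
	// Prefer the new field, fall back to the legacy one, then default to 0 workers.
	// `??` matters here: with `||`, an explicit `workers: 0` would incorrectly
	// fall through to `shards`.
	const workers = scale?.workers ?? scale?.shards ?? 0;
	return workers + 1; // plus one extra node for the coordinator
}

getNodeCount({ workers: 0, shards: 3 }); // 1 (coordinator only; explicit 0 respected)
getNodeCount({ shards: 2 });             // 3 (legacy spec still works)
getNodeCount(undefined);                 // 1 (no config yet)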