Arc Postgres - Rename shards to workers

Co-authored-by: Brian Bergeron <brberger@microsoft.com>
Author: Brian Bergeron
Date: 2020-10-28 16:57:17 -07:00
Committed by: GitHub
Parent: e679d70a4b
Commit: d1b8c15e11
7 changed files with 12 additions and 147 deletions

View File

@@ -55,7 +55,10 @@ export class PostgresModel extends ResourceModel {
const cpuRequest = this._config.spec.scheduling?.default?.resources?.requests?.cpu;
const ramRequest = this._config.spec.scheduling?.default?.resources?.requests?.memory;
const storage = this._config.spec.storage?.data?.size;
const nodes = (this._config.spec.scale?.shards ?? 0) + 1; // An extra node for the coordinator
// scale.shards was renamed to scale.workers. Check both for backwards compatibility.
const scale = this._config.spec.scale;
const nodes = (scale?.workers ?? scale?.shards ?? 0) + 1; // An extra node for the coordinator
let configuration: string[] = [];
configuration.push(`${nodes} ${nodes > 1 ? loc.nodes : loc.node}`);
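For reference, here is a minimal sketch of the fallback these new lines implement, using a simplified `scale` shape; `NodeScale` and `getNodeCount` are illustrative names rather than identifiers from the extension.

```typescript
// Illustrative types and names only, not the extension's actual declarations.
interface NodeScale {
    workers?: number; // current field name
    shards?: number;  // legacy field name, still present in older configs
}

// Prefer workers, fall back to the legacy shards field, then add the coordinator node.
function getNodeCount(scale?: NodeScale): number {
    return (scale?.workers ?? scale?.shards ?? 0) + 1;
}

console.log(getNodeCount({ workers: 3 })); // 4
console.log(getNodeCount({ shards: 3 }));  // 4 (old configs still resolve)
console.log(getNodeCount(undefined));      // 1 (coordinator only)
```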

View File

@@ -1,2 +0,0 @@
/env
/__pycache__

View File

@@ -1,21 +0,0 @@
# Tests for deploying Arc resources via Jupyter notebook
## Prerequisites
- Python >= 3.6
- pip package manager
- `azdata` CLI installed and logged in to an Arc controller
## Running the tests
### 1. (Optional, recommended) Create and activate a Python virtual environment
- `python -m venv env`
- `source env/bin/activate` (Linux)
- `env\Scripts\activate.bat` (Windows)
### 2. Upgrade pip
- `pip install --upgrade pip`
### 3. Install the dependencies
- `pip install -r requirements.txt`
### 4. Run the tests
- `pytest`

View File

@@ -1,2 +0,0 @@
pytest==5.3.5
notebook==6.0.3

View File

@@ -1,111 +0,0 @@
##---------------------------------------------------------------------------------------------
## Copyright (c) Microsoft Corporation. All rights reserved.
## Licensed under the Source EULA. See License.txt in the project root for license information.
##--------------------------------------------------------------------------------------------
import json
import nbformat
import os
import random
import string
import sys
import uuid
from nbconvert.preprocessors import ExecutePreprocessor
from subprocess import Popen, PIPE, TimeoutExpired
## Variables
notebook_path = '../../notebooks/arcDeployment/'
## Helper functions
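# Build a resource name of the given total length: the prefix, a dash, then random lowercase letters.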
def generate_name(prefix, length=8):
return (prefix + '-' + ''.join(
[random.choice(string.ascii_lowercase)
for n in range(length - len(prefix) - 1)]))
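# Remove any AZDATA_NB_VAR_* variables so settings from one test don't leak into the next.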
def clear_env():
for k in [k for k in os.environ.keys() if k.startswith('AZDATA_NB_VAR_')]:
del os.environ[k]
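# Run an azdata CLI command, echoing the command and its output, and raise on timeout or a non-zero exit code.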
def azdata(commands, timeout=None, stdin=None):
commands.insert(0, "azdata")
print('Executing command: \n', ' '.join(commands))
proc = Popen(commands, stdin=PIPE if stdin is not None else None, stdout=PIPE, stderr=PIPE, shell=os.name=='nt')
try:
(stdout, stderr) = proc.communicate(input=stdin, timeout=timeout)
except TimeoutExpired:
# https://docs.python.org/3.5/library/subprocess.html#subprocess.Popen.communicate
# The child process is not killed if the timeout expires, so in order to
# cleanup properly we should kill the child process and finish communication.
proc.kill()
(stdout, stderr) = proc.communicate(timeout=timeout)
sys.stdout.buffer.write(stdout)
sys.stderr.buffer.write(stderr)
raise
sys.stdout.buffer.write(stdout)
if proc.returncode != 0:
raise Exception(stderr)
else:
sys.stderr.buffer.write(stderr)
return (stdout.decode(sys.stdout.encoding),
stderr.decode(sys.stderr.encoding))
## Tests
def test_postgres_create():
# Load the notebook
with open(notebook_path + 'deploy.postgres.existing.arc.ipynb') as f:
nb = nbformat.read(f, as_version=nbformat.NO_CONVERT)
name = generate_name('pg')
try:
# Setup the environment
os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_NAME'] = name
subscription = os.environ['AZDATA_NB_VAR_ARC_SUBSCRIPTION'] = str(uuid.uuid4())
resource_group = os.environ['AZDATA_NB_VAR_ARC_RESOURCE_GROUP_NAME'] = 'test'
namespace = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_NAMESPACE'] = 'default'
workers = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_WORKERS'] = '1'
service_type = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_SERVICE_TYPE'] = 'NodePort'
data_size = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_DATA_SIZE'] = '512'
port = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_PORT'] = '5431'
extensions = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_EXTENSIONS'] = 'pg_cron,postgis'
cpu_min = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_CPU_MIN'] = '1'
cpu_max = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_CPU_MAX'] = '2'
memory_min = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_MEMORY_MIN'] = '256'
memory_max = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_MEMORY_MAX'] = '1023'
backup_sizes = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_BACKUP_SIZES'] = '512,1023'
backup_full_interval = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_BACKUP_FULL_INTERVAL'] = '20'
backup_delta_interval = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_BACKUP_DELTA_INTERVAL'] = '10'
backup_retention_min = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_BACKUP_RETENTION_MIN'] = '1,1GB;2,2GB'
backup_retention_max = os.environ['AZDATA_NB_VAR_POSTGRES_SERVER_GROUP_BACKUP_RETENTION_MAX'] = '2,2GB;3,3GB'
# Execute the notebook that creates Postgres
ExecutePreprocessor(timeout=1200).preprocess(nb, {'metadata': {'path': notebook_path}})
# Verify that Postgres was created successfully
(out, _) = azdata(['postgres', 'server', 'show', '-n', name])
db = json.loads(out)
assert db['metadata']['name'] == name
assert db['metadata']['namespace'] == namespace
assert db['spec']['scale']['shards'] == int(workers)
assert db['spec']['service']['type'] == service_type
assert db['spec']['storage']['volumeSize'] == data_size + 'Mi'
assert db['spec']['service']['port'] == int(port)
assert [p['name'] for p in db['spec']['engine']['plugins']] == ['pg_cron', 'postgis']
assert db['spec']['scheduling']['default']['resources']['requests']['cpu'] == cpu_min
assert db['spec']['scheduling']['default']['resources']['limits']['cpu'] == cpu_max
assert db['spec']['scheduling']['default']['resources']['requests']['memory'] == memory_min + 'Mi'
assert db['spec']['scheduling']['default']['resources']['limits']['memory'] == memory_max + 'Mi'
assert [t['storage']['volumeSize'] for t in db['spec']['backups']['tiers']] == [b + 'Mi' for b in backup_sizes.split(',')]
assert db['spec']['backups']['fullMinutes'] == int(backup_full_interval)
assert db['spec']['backups']['deltaMinutes'] == int(backup_delta_interval)
for i in range(len(db['spec']['backups']['tiers'])):
assert db['spec']['backups']['tiers'][i]['retention']['minimums'] == backup_retention_min.split(';')[i].split(',')
assert db['spec']['backups']['tiers'][i]['retention']['maximums'] == backup_retention_max.split(';')[i].split(',')
except Exception:
# Capture cell outputs to help with debugging
print([c['outputs'] for c in nb['cells'] if c.get('outputs')])
raise
finally:
clear_env()
azdata(['postgres', 'server', 'delete', '-n', name])

View File

@@ -387,17 +387,14 @@ export class PostgresComputeAndStoragePage extends DashboardPage {
}
private editWorkerNodeCount() {
let currentShards = this._postgresModel.config?.spec.scale.shards;
if (!currentShards) {
this.workerBox!.min = 0;
this.workerBox!.placeHolder = '';
} else {
this.workerBox!.min = currentShards;
this.workerBox!.placeHolder = currentShards!.toString();
}
// scale.shards was renamed to scale.workers. Check both for backwards compatibility.
let scale = this._postgresModel.config?.spec.scale;
let currentWorkers = scale?.workers ?? scale?.shards ?? 0;
this.workerBox!.min = currentWorkers;
this.workerBox!.placeHolder = currentWorkers.toString();
this.workerBox!.value = '';
this.saveArgs.workers = undefined;
}
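A rough sketch of the behavior this gives the worker-count input, assuming a plain object in place of the dashboard's `workerBox` input component (so this shows the logic only, not the actual component API):

```typescript
// Stand-in for the dashboard's input box component; illustrative only.
const workerBox = { min: 0, placeHolder: '', value: '' };

function editWorkerNodeCount(scale?: { workers?: number; shards?: number }): void {
    // Old configs expose only shards; fall back to it when workers is absent.
    const currentWorkers = scale?.workers ?? scale?.shards ?? 0;
    workerBox.min = currentWorkers;                    // presumably prevents scaling below the current count
    workerBox.placeHolder = currentWorkers.toString();
    workerBox.value = '';                              // no pending edit yet
}

editWorkerNodeCount({ shards: 2 });
console.log(workerBox); // { min: 2, placeHolder: '2', value: '' }
```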

View File

@@ -172,7 +172,8 @@ declare module 'azdata-ext' {
}
},
scale: {
shards: number // 1
shards: number, // 1 (shards was renamed to workers, kept here for backwards compatibility)
workers: number // 1
},
scheduling: {
default: {