mirror of
https://github.com/ckaczor/azuredatastudio.git
synced 2026-01-14 01:25:37 -05:00
Arc updates for March release (#14970)
* Updated Postgres Spec for where to find engine version, removed calling -ev in edit commands (#14735)
* Added spec.engine.version, took out calling engine version with edit calls
* Added text wrong place
* missed updates
* PR fix
* Update Arc Postgres troubleshooting notebook

Co-authored-by: Brian Bergeron <brberger@microsoft.com>

* Remove AzdataSession from azdata commands (#14856)
* remove session
* Add in controller-context support
* Revert "Add in controller-context support"

This reverts commit 3b39b968efbf6054041cb01cb2d8443532643a82.

* Add azdataContext to login
* Undo book change
* Undo change correctly
* Add controller context support (#14862)
* remove session
* Add in controller-context support
* Add params to fake
* Fix tests
* Add info and placeholder for controller URL/name (#14887)
* Add info and placeholder for controller URL
* add period + update name
* update memento and allow editing of namespace/URL
* vBump
* vBump
* Fix tests

Co-authored-by: nasc17 <69922333+nasc17@users.noreply.github.com>
Co-authored-by: Brian Bergeron <brian.e.bergeron@gmail.com>
Co-authored-by: Brian Bergeron <brberger@microsoft.com>
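The controller-context work in this commit replaces the old acquireAzdataSession flow: each azdata call now receives a context string instead of a session object. Below is a minimal TypeScript sketch of how that string is formed, mirroring the controllerContext getter added to controllerModel.ts in the diff; the ControllerInfo shape here is abbreviated to just the fields the getter reads.

// Sketch only - mirrors the controllerContext getter added in this commit.
// ControllerInfo is abbreviated; the real interface has more fields.
interface ControllerInfo {
	namespace: string;
	username: string;
	endpoint?: string; // optional controller URL, e.g. https://10.91.86.13:30080
}

// With no URL, the namespace alone identifies the controller (e.g. 'test-namespace');
// with a URL, a 3-part 'namespace/username/endpoint' string is used.
function controllerContext(info: ControllerInfo): string {
	return info.endpoint
		? `${info.namespace}/${info.username}/${info.endpoint}`
		: info.namespace;
}

// controllerContext({ namespace: 'test-namespace', username: 'admin', endpoint: 'https://10.91.86.13:30080' })
// => 'test-namespace/admin/https://10.91.86.13:30080'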
@@ -1,2 +1 @@
title: Azure Arc Data Services
description: A collection of notebooks to support Azure Arc Data Services.
title: Azure Arc Data Services
@@ -1,12 +1,10 @@
- title: Welcome
url: /readme
not_numbered: true
- title: Search
search: true
- title: Postgres
url: /postgres/readme
not_numbered: true
expand_sections: true
sections:
- title: TSG100 - The Azure Arc enabled PostgreSQL Hyperscale troubleshooter
url: postgres/tsg100-troubleshoot-postgres
- title: Postgres
url: /postgres/readme
not_numbered: true
sections:
- title: TSG100 - The Azure Arc enabled PostgreSQL Hyperscale troubleshooter
url: postgres/tsg100-troubleshoot-postgres

@@ -2,6 +2,10 @@

- This chapter contains notebooks for troubleshooting Postgres on Azure Arc

## Notebooks in this Chapter
- [TSG100 - The Azure Arc enabled PostgreSQL Hyperscale troubleshooter](tsg100-troubleshoot-postgres.ipynb)


[Home](../readme.md)

## Notebooks in this Chapter

- [TSG100 - The Azure Arc enabled PostgreSQL Hyperscale troubleshooter](../postgres/tsg100-troubleshoot-postgres.ipynb)

@@ -1,7 +0,0 @@
- title: Postgres
url: /postgres/readme
not_numbered: true
expand_sections: true
sections:
- title: TSG100 - The Azure Arc enabled PostgreSQL Hyperscale troubleshooter
url: postgres/tsg100-troubleshoot-postgres

@@ -2,7 +2,11 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"TSG100 - The Azure Arc enabled PostgreSQL Hyperscale troubleshooter\n",
|
||||
"===================================================================\n",
|
||||
@@ -35,14 +39,17 @@
|
||||
"# the user will be prompted to select a server.\n",
|
||||
"namespace = os.environ.get('POSTGRES_SERVER_NAMESPACE')\n",
|
||||
"name = os.environ.get('POSTGRES_SERVER_NAME')\n",
|
||||
"version = os.environ.get('POSTGRES_SERVER_VERSION')\n",
|
||||
"\n",
|
||||
"tail_lines = 50"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"### Common functions\n",
|
||||
"\n",
|
||||
@@ -63,7 +70,6 @@
|
||||
"import sys\n",
|
||||
"import os\n",
|
||||
"import re\n",
|
||||
"import json\n",
|
||||
"import platform\n",
|
||||
"import shlex\n",
|
||||
"import shutil\n",
|
||||
@@ -76,11 +82,7 @@
|
||||
"error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help\n",
|
||||
"install_hint = {} # The SOP to help install the executable if it cannot be found\n",
|
||||
"\n",
|
||||
"first_run = True\n",
|
||||
"rules = None\n",
|
||||
"debug_logging = False\n",
|
||||
"\n",
|
||||
"def run(cmd, return_output=False, no_output=False, retry_count=0):\n",
|
||||
"def run(cmd, return_output=False, no_output=False, retry_count=0, base64_decode=False, return_as_json=False):\n",
|
||||
" \"\"\"Run shell command, stream stdout, print stderr and optionally return output\n",
|
||||
"\n",
|
||||
" NOTES:\n",
|
||||
@@ -103,13 +105,6 @@
|
||||
" output = \"\"\n",
|
||||
" retry = False\n",
|
||||
"\n",
|
||||
" global first_run\n",
|
||||
" global rules\n",
|
||||
"\n",
|
||||
" if first_run:\n",
|
||||
" first_run = False\n",
|
||||
" rules = load_rules()\n",
|
||||
"\n",
|
||||
" # When running `azdata sql query` on Windows, replace any \\n in \"\"\" strings, with \" \", otherwise we see:\n",
|
||||
" #\n",
|
||||
" # ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')\n",
|
||||
@@ -172,7 +167,12 @@
|
||||
" if which_binary == None:\n",
|
||||
" which_binary = shutil.which(cmd_actual[0])\n",
|
||||
"\n",
|
||||
" # Display an install HINT, so the user can click on a SOP to install the missing binary\n",
|
||||
" #\n",
|
||||
" if which_binary == None:\n",
|
||||
" print(f\"The path used to search for '{cmd_actual[0]}' was:\")\n",
|
||||
" print(sys.path)\n",
|
||||
"\n",
|
||||
" if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:\n",
|
||||
" display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))\n",
|
||||
"\n",
|
||||
@@ -219,8 +219,6 @@
|
||||
" break # otherwise infinite hang, have not worked out why yet.\n",
|
||||
" else:\n",
|
||||
" print(line, end='')\n",
|
||||
" if rules is not None:\n",
|
||||
" apply_expert_rules(line)\n",
|
||||
"\n",
|
||||
" if wait:\n",
|
||||
" p.wait()\n",
|
||||
@@ -276,25 +274,22 @@
|
||||
" if line_decoded.find(error_hint[0]) != -1:\n",
|
||||
" display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))\n",
|
||||
"\n",
|
||||
" # apply expert rules (to run follow-on notebooks), based on output\n",
|
||||
" #\n",
|
||||
" if rules is not None:\n",
|
||||
" apply_expert_rules(line_decoded)\n",
|
||||
"\n",
|
||||
" # Verify if a transient error, if so automatically retry (recursive)\n",
|
||||
" #\n",
|
||||
" if user_provided_exe_name in retry_hints:\n",
|
||||
" for retry_hint in retry_hints[user_provided_exe_name]:\n",
|
||||
" if line_decoded.find(retry_hint) != -1:\n",
|
||||
" if retry_count < MAX_RETRIES:\n",
|
||||
" if retry_count \u003c MAX_RETRIES:\n",
|
||||
" print(f\"RETRY: {retry_count} (due to: {retry_hint})\")\n",
|
||||
" retry_count = retry_count + 1\n",
|
||||
" output = run(cmd, return_output=return_output, retry_count=retry_count)\n",
|
||||
"\n",
|
||||
" if return_output:\n",
|
||||
" return output\n",
|
||||
" else:\n",
|
||||
" return\n",
|
||||
" if base64_decode:\n",
|
||||
" import base64\n",
|
||||
" return base64.b64decode(output).decode('utf-8')\n",
|
||||
" else:\n",
|
||||
" return output\n",
|
||||
"\n",
|
||||
" elapsed = datetime.datetime.now().replace(microsecond=0) - start_time\n",
|
||||
"\n",
|
||||
@@ -311,78 +306,31 @@
|
||||
" print(f'\\nSUCCESS: {elapsed}s elapsed.\\n')\n",
|
||||
"\n",
|
||||
" if return_output:\n",
|
||||
" return output\n",
|
||||
"\n",
|
||||
"def load_json(filename):\n",
|
||||
" \"\"\"Load a json file from disk and return the contents\"\"\"\n",
|
||||
"\n",
|
||||
" with open(filename, encoding=\"utf8\") as json_file:\n",
|
||||
" return json.load(json_file)\n",
|
||||
"\n",
|
||||
"def load_rules():\n",
|
||||
" \"\"\"Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable\"\"\"\n",
|
||||
"\n",
|
||||
" # Load this notebook as json to get access to the expert rules in the notebook metadata.\n",
|
||||
" #\n",
|
||||
" try:\n",
|
||||
" j = load_json(\"tsg100-troubleshoot-postgres.ipynb\")\n",
|
||||
" except:\n",
|
||||
" pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename?\n",
|
||||
" else:\n",
|
||||
" if \"metadata\" in j and \\\n",
|
||||
" \"azdata\" in j[\"metadata\"] and \\\n",
|
||||
" \"expert\" in j[\"metadata\"][\"azdata\"] and \\\n",
|
||||
" \"expanded_rules\" in j[\"metadata\"][\"azdata\"][\"expert\"]:\n",
|
||||
"\n",
|
||||
" rules = j[\"metadata\"][\"azdata\"][\"expert\"][\"expanded_rules\"]\n",
|
||||
"\n",
|
||||
" rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first.\n",
|
||||
"\n",
|
||||
" # print (f\"EXPERT: There are {len(rules)} rules to evaluate.\")\n",
|
||||
"\n",
|
||||
" return rules\n",
|
||||
"\n",
|
||||
"def apply_expert_rules(line):\n",
|
||||
" \"\"\"Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so\n",
|
||||
" inject a 'HINT' to the follow-on SOP/TSG to run\"\"\"\n",
|
||||
"\n",
|
||||
" global rules\n",
|
||||
"\n",
|
||||
" for rule in rules:\n",
|
||||
" notebook = rule[1]\n",
|
||||
" cell_type = rule[2]\n",
|
||||
" output_type = rule[3] # i.e. stream or error\n",
|
||||
" output_type_name = rule[4] # i.e. ename or name \n",
|
||||
" output_type_value = rule[5] # i.e. SystemExit or stdout\n",
|
||||
" details_name = rule[6] # i.e. evalue or text \n",
|
||||
" expression = rule[7].replace(\"\\\\*\", \"*\") # Something escaped *, and put a \\ in front of it!\n",
|
||||
"\n",
|
||||
" if debug_logging:\n",
|
||||
" print(f\"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.\")\n",
|
||||
"\n",
|
||||
" if re.match(expression, line, re.DOTALL):\n",
|
||||
"\n",
|
||||
" if debug_logging:\n",
|
||||
" print(\"EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'\".format(output_type_name, output_type_value, expression, notebook))\n",
|
||||
"\n",
|
||||
" match_found = True\n",
|
||||
"\n",
|
||||
" display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))\n",
|
||||
" if base64_decode:\n",
|
||||
" import base64\n",
|
||||
" return base64.b64decode(output).decode('utf-8')\n",
|
||||
" else:\n",
|
||||
" return output\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"print('Common functions defined successfully.')\n",
|
||||
"\n",
|
||||
"# Hints for binary (transient fault) retry, (known) error and install guide\n",
|
||||
"# Hints for tool retry (on transient fault), known errors and install guide\n",
|
||||
"#\n",
|
||||
"retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond']}\n",
|
||||
"error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']]}\n",
|
||||
"install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb']}"
|
||||
"retry_hints = {}\n",
|
||||
"error_hints = {}\n",
|
||||
"install_hint = {}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"print('Common functions defined successfully.')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"### Get Postgres server"
|
||||
]
|
||||
@@ -400,10 +348,11 @@
|
||||
"# Sets the 'server' variable to the spec of the Postgres server\n",
|
||||
"\n",
|
||||
"import math\n",
|
||||
"import json\n",
|
||||
"\n",
|
||||
"# If a server was provided, get it\n",
|
||||
"if namespace and name and version:\n",
|
||||
" server = json.loads(run(f'kubectl get postgresql-{version} -n {namespace} {name} -o json', return_output=True))\n",
|
||||
"if namespace and name:\n",
|
||||
" server = json.loads(run(f'kubectl get postgresqls -n {namespace} {name} -o json', return_output=True))\n",
|
||||
"else:\n",
|
||||
" # Otherwise prompt the user to select a server\n",
|
||||
" servers = json.loads(run(f'kubectl get postgresqls --all-namespaces -o json', return_output=True))['items']\n",
|
||||
@@ -415,19 +364,18 @@
|
||||
"\n",
|
||||
" pad = math.floor(math.log10(len(servers)) + 1) + 3\n",
|
||||
" for i, s in enumerate(servers):\n",
|
||||
" print(f'{f\"[{i+1}]\":<{pad}}{full_name(s)}')\n",
|
||||
" print(f'{f\"[{i+1}]\":\u003c{pad}}{full_name(s)}')\n",
|
||||
"\n",
|
||||
" while True:\n",
|
||||
" try:\n",
|
||||
" i = int(input('Enter the index of a server to troubleshoot: '))\n",
|
||||
" i = int(input('Enter the index of a server'))\n",
|
||||
" except ValueError:\n",
|
||||
" continue\n",
|
||||
"\n",
|
||||
" if i >= 1 and i <= len(servers):\n",
|
||||
" if i \u003e= 1 and i \u003c= len(servers):\n",
|
||||
" server = servers[i-1]\n",
|
||||
" namespace = server['metadata']['namespace']\n",
|
||||
" name = server['metadata']['name']\n",
|
||||
" version = server['kind'][len('postgresql-'):]\n",
|
||||
" break\n",
|
||||
"\n",
|
||||
"display(Markdown(f'#### Got server {namespace}.{name}'))"
|
||||
@@ -435,7 +383,11 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"### Summarize all resources"
|
||||
]
|
||||
@@ -443,13 +395,15 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"uid = server['metadata']['uid']\n",
|
||||
"\n",
|
||||
"display(Markdown(f'#### Server summary'))\n",
|
||||
"run(f'kubectl get postgresql-{version} -n {namespace} {name}')\n",
|
||||
"run(f'kubectl get postgresqls -n {namespace} {name}')\n",
|
||||
"\n",
|
||||
"display(Markdown(f'#### Resource summary'))\n",
|
||||
"run(f'kubectl get sts,pods,pvc,svc,ep -n {namespace} -l postgresqls.arcdata.microsoft.com/cluster-id={uid}')"
|
||||
@@ -457,7 +411,11 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"### Troubleshoot the server"
|
||||
]
|
||||
@@ -465,16 +423,22 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"display(Markdown(f'#### Troubleshooting server {namespace}.{name}'))\n",
|
||||
"run(f'kubectl describe postgresql-{version} -n {namespace} {name}')"
|
||||
"run(f'kubectl describe postgresqls -n {namespace} {name}')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"### Troubleshoot the pods"
|
||||
]
|
||||
@@ -482,7 +446,9 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pods = json.loads(run(f'kubectl get pods -n {namespace} -l postgresqls.arcdata.microsoft.com/cluster-id={uid} -o json', return_output=True))['items']\n",
|
||||
@@ -505,7 +471,11 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"### Troubleshoot the containers"
|
||||
]
|
||||
@@ -513,7 +483,9 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Summarize and get logs from each container\n",
|
||||
@@ -521,7 +493,7 @@
|
||||
" pod_name = pod['metadata']['name']\n",
|
||||
" cons = pod['spec']['containers']\n",
|
||||
" con_statuses = pod['status'].get('containerStatuses', [])\n",
|
||||
" display(Markdown(f'#### Troubleshooting {len(cons)} container{\"\" if len(cons) < 2 else \"s\"} '\n",
|
||||
" display(Markdown(f'#### Troubleshooting {len(cons)} container{\"\" if len(cons) \u003c 2 else \"s\"} '\n",
|
||||
" f'containers for pod {namespace}.{pod_name}'))\n",
|
||||
"\n",
|
||||
" for i, con in enumerate(cons):\n",
|
||||
@@ -537,14 +509,18 @@
|
||||
" run(f'kubectl logs -n {namespace} {pod_name} {con_name} --tail {tail_lines}')\n",
|
||||
"\n",
|
||||
" # Get logs from the previous terminated container if one exists\n",
|
||||
" if con_restarts > 0:\n",
|
||||
" if con_restarts \u003e 0:\n",
|
||||
" display(Markdown(f'#### Logs from previous terminated container {namespace}.{pod_name}/{con_name}'))\n",
|
||||
" run(f'kubectl logs -n {namespace} {pod_name} {con_name} --tail {tail_lines} --previous')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"### Troubleshoot the PersistentVolumeClaims"
|
||||
]
|
||||
@@ -552,7 +528,9 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"display(Markdown(f'#### Troubleshooting PersistentVolumeClaims'))\n",
|
||||
@@ -562,10 +540,12 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print('Notebook execution complete.')"
|
||||
"print(\"Notebook execution is complete.\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -576,20 +556,36 @@
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"azdata": {
|
||||
"pansop": {
|
||||
"related": "",
|
||||
"test": {
|
||||
"ci": false,
|
||||
"gci": false
|
||||
},
|
||||
"contract": {
|
||||
"requires": {
|
||||
"kubectl": {
|
||||
"installed": true
|
||||
}
|
||||
"strategy": "",
|
||||
"types": null,
|
||||
"disable": {
|
||||
"reason": "",
|
||||
"workitems": null,
|
||||
"types": null
|
||||
}
|
||||
},
|
||||
"side_effects": false
|
||||
}
|
||||
"target": {
|
||||
"current": "public",
|
||||
"final": "public"
|
||||
},
|
||||
"internal": {
|
||||
"parameters": null,
|
||||
"symlink": false
|
||||
},
|
||||
"timeout": "0"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": "{ Name: \"\", Version: \"\"}",
|
||||
"file_extension": "",
|
||||
"mimetype": "",
|
||||
"name": "",
|
||||
"nbconvert_exporter": "",
|
||||
"pygments_lexer": "",
|
||||
"version": ""
|
||||
},
|
||||
"widgets": []
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +0,0 @@
# Azure Arc Data Services Jupyter Book

## Chapters

1. [Postgres](postgres/readme.md) - notebooks for troubleshooting Postgres on Azure Arc.

@@ -2,7 +2,7 @@
"name": "arc",
"displayName": "%arc.displayName%",
"description": "%arc.description%",
"version": "0.9.0",
"version": "0.9.2",
"publisher": "Microsoft",
"preview": true,
"license": "https://raw.githubusercontent.com/Microsoft/azuredatastudio/main/LICENSE.txt",

@@ -10,6 +10,7 @@ import * as loc from '../localizedConstants';
import { throwUnless } from './utils';
export interface KubeClusterContext {
name: string;
namespace?: string;
isCurrentContext: boolean;
}

@@ -18,7 +19,7 @@ export interface KubeClusterContext {
*
* @param configFile
*/
export function getKubeConfigClusterContexts(configFile: string): Promise<KubeClusterContext[]> {
export function getKubeConfigClusterContexts(configFile: string): KubeClusterContext[] {
const config: any = yamljs.load(configFile);
const rawContexts = <any[]>config['contexts'];
throwUnless(rawContexts && rawContexts.length, loc.noContextFound(configFile));
@@ -26,16 +27,16 @@ export function getKubeConfigClusterContexts(configFile: string): Promise<KubeCl
throwUnless(currentContext, loc.noCurrentContextFound(configFile));
const contexts: KubeClusterContext[] = [];
rawContexts.forEach(rawContext => {
const name = <string>rawContext['name'];
const name = rawContext.name as string;
const namespace = rawContext.context.namespace as string;
throwUnless(name, loc.noNameInContext(configFile));
if (name) {
contexts.push({
name: name,
isCurrentContext: name === currentContext
});
}
contexts.push({
name: name,
namespace: namespace,
isCurrentContext: name === currentContext
});
});
return Promise.resolve(contexts);
return contexts;
}

/**
@@ -47,22 +48,23 @@ export function getKubeConfigClusterContexts(configFile: string): Promise<KubeCl
*
*
* @param clusterContexts
* @param previousClusterContext
* @param previousClusterContextName
* @param throwIfNotFound
*/
export function getCurrentClusterContext(clusterContexts: KubeClusterContext[], previousClusterContext?: string, throwIfNotFound: boolean = false): string {
if (previousClusterContext) {
if (clusterContexts.find(c => c.name === previousClusterContext)) { // if previous cluster context value is found in clusters then return that value
export function getCurrentClusterContext(clusterContexts: KubeClusterContext[], previousClusterContextName?: string, throwIfNotFound: boolean = false): KubeClusterContext {
if (previousClusterContextName) {
const previousClusterContext = clusterContexts.find(c => c.name === previousClusterContextName);
if (previousClusterContext) { // if previous cluster context value is found in clusters then return that value
return previousClusterContext;
} else {
if (throwIfNotFound) {
throw new Error(loc.clusterContextNotFound(previousClusterContext));
throw new Error(loc.clusterContextNotFound(previousClusterContextName));
}
}
}

// if not previousClusterContext or throwIfNotFound was false when previousCLusterContext was not found in the clusterContexts
const currentClusterContext = clusterContexts.find(c => c.isCurrentContext)?.name;
const currentClusterContext = clusterContexts.find(c => c.isCurrentContext);
throwUnless(currentClusterContext !== undefined, loc.noCurrentClusterContext);
return currentClusterContext;
}

@@ -97,13 +97,18 @@ export function connectToMSSql(name: string): string { return localize('arc.conn
export function connectToPGSql(name: string): string { return localize('arc.connectToPGSql', "Connect to PostgreSQL Hyperscale - Azure Arc ({0})", name); }
export const passwordToController = localize('arc.passwordToController', "Provide Password to Controller");
export const controllerUrl = localize('arc.controllerUrl', "Controller URL");
export const controllerUrlPlaceholder = localize('arc.controllerUrlPlaceholder', "https://<IP or hostname>:<port>");
export const controllerUrlDescription = localize('arc.controllerUrlDescription', "The Controller URL is necessary if there are multiple clusters with the same namespace - this should generally not be necessary.");
export const serverEndpoint = localize('arc.serverEndpoint', "Server Endpoint");
export const controllerName = localize('arc.controllerName', "Name");
export const controllerNameDescription = localize('arc.controllerNameDescription', "The name to display in the tree view, this is not applied to the controller itself.");
export const controllerKubeConfig = localize('arc.controllerKubeConfig', "Kube Config File Path");
export const controllerClusterContext = localize('arc.controllerClusterContext', "Cluster Context");
export const defaultControllerName = localize('arc.defaultControllerName', "arc-dc");
export const postgresProviderName = localize('arc.postgresProviderName', "PGSQL");
export const miaaProviderName = localize('arc.miaaProviderName', "MSSQL");
export const controllerUsername = localize('arc.controllerUsername', "Controller Username");
export const controllerPassword = localize('arc.controllerPassword', "Controller Password");
export const username = localize('arc.username', "Username");
export const password = localize('arc.password', "Password");
export const rememberPassword = localize('arc.rememberPassword', "Remember Password");

@@ -46,6 +46,20 @@ export class ControllerModel {
return this._info;
}

/**
* Gets the controller context to use when executing azdata commands. This is in one of two forms :
*
* If no URL is specified for this controller then just the namespace is used (e.g. test-namespace)
* If a URL is specified then a 3-part name is used, combining the namespace, username and URL separated by
* / (e.g. test-namespace/admin/https://10.91.86.13:30080)
*/
public get controllerContext(): string {
if (this._info.endpoint) {
return `${this._info.namespace}/${this._info.username}/${this._info.endpoint}`;
}
return this._info.namespace;
}

public set info(value: ControllerInfo) {
this._info = value;
this._onInfoUpdated.fire(this._info);
@@ -63,10 +77,10 @@ export class ControllerModel {
* calls from changing the context while commands for this session are being executed.
* @param promptReconnect
*/
public async acquireAzdataSession(promptReconnect: boolean = false): Promise<azdataExt.AzdataSession> {
public async login(promptReconnect: boolean = false): Promise<void> {
let promptForValidClusterContext: boolean = false;
try {
const contexts = await getKubeConfigClusterContexts(this.info.kubeConfigFilePath);
const contexts = getKubeConfigClusterContexts(this.info.kubeConfigFilePath);
getCurrentClusterContext(contexts, this.info.kubeClusterContext, true); // this throws if this.info.kubeClusterContext is not found in 'contexts'
} catch (error) {
const response = await vscode.window.showErrorMessage(loc.clusterContextConfigNoLongerValid(this.info.kubeConfigFilePath, this.info.kubeClusterContext, error), loc.yes, loc.no);
@@ -100,8 +114,7 @@ export class ControllerModel {
}
}
}

return this._azdataApi.azdata.acquireSession(this.info.url, this.info.username, this._password, this.azdataAdditionalEnvVars);
await this._azdataApi.azdata.login({ endpoint: this.info.endpoint, namespace: this.info.namespace }, this.info.username, this._password, this.azdataAdditionalEnvVars);
}

/**
@@ -115,67 +128,64 @@ export class ControllerModel {
await this.refresh(false);
}
}
public async refresh(showErrors: boolean = true, promptReconnect: boolean = false): Promise<void> {
const session = await this.acquireAzdataSession(promptReconnect);
public async refresh(showErrors: boolean = true): Promise<void> {
// First need to log in to ensure that we're able to authenticate with the controller
await this.login(false);
const newRegistrations: Registration[] = [];
try {
await Promise.all([
this._azdataApi.azdata.arc.dc.config.show(this.azdataAdditionalEnvVars, session).then(result => {
this._controllerConfig = result.result;
this.configLastUpdated = new Date();
this._onConfigUpdated.fire(this._controllerConfig);
}).catch(err => {
// If an error occurs show a message so the user knows something failed but still
// fire the event so callers hooking into this can handle the error (e.g. so dashboards don't show the
// loading icon forever)
if (showErrors) {
vscode.window.showErrorMessage(loc.fetchConfigFailed(this.info.name, err));
}
this._onConfigUpdated.fire(this._controllerConfig);
throw err;
await Promise.all([
this._azdataApi.azdata.arc.dc.config.show(this.azdataAdditionalEnvVars, this.controllerContext).then(result => {
this._controllerConfig = result.result;
this.configLastUpdated = new Date();
this._onConfigUpdated.fire(this._controllerConfig);
}).catch(err => {
// If an error occurs show a message so the user knows something failed but still
// fire the event so callers hooking into this can handle the error (e.g. so dashboards don't show the
// loading icon forever)
if (showErrors) {
vscode.window.showErrorMessage(loc.fetchConfigFailed(this.info.name, err));
}
this._onConfigUpdated.fire(this._controllerConfig);
throw err;
}),
this._azdataApi.azdata.arc.dc.endpoint.list(this.azdataAdditionalEnvVars, this.controllerContext).then(result => {
this._endpoints = result.result;
this.endpointsLastUpdated = new Date();
this._onEndpointsUpdated.fire(this._endpoints);
}).catch(err => {
// If an error occurs show a message so the user knows something failed but still
// fire the event so callers can know to update (e.g. so dashboards don't show the
// loading icon forever)
if (showErrors) {
vscode.window.showErrorMessage(loc.fetchEndpointsFailed(this.info.name, err));
}
this._onEndpointsUpdated.fire(this._endpoints);
throw err;
}),
Promise.all([
this._azdataApi.azdata.arc.postgres.server.list(this.azdataAdditionalEnvVars, this.controllerContext).then(result => {
newRegistrations.push(...result.result.map(r => {
return {
instanceName: r.name,
state: r.state,
instanceType: ResourceType.postgresInstances
};
}));
}),
this._azdataApi.azdata.arc.dc.endpoint.list(this.azdataAdditionalEnvVars, session).then(result => {
this._endpoints = result.result;
this.endpointsLastUpdated = new Date();
this._onEndpointsUpdated.fire(this._endpoints);
}).catch(err => {
// If an error occurs show a message so the user knows something failed but still
// fire the event so callers can know to update (e.g. so dashboards don't show the
// loading icon forever)
if (showErrors) {
vscode.window.showErrorMessage(loc.fetchEndpointsFailed(this.info.name, err));
}
this._onEndpointsUpdated.fire(this._endpoints);
throw err;
}),
Promise.all([
this._azdataApi.azdata.arc.postgres.server.list(this.azdataAdditionalEnvVars, session).then(result => {
newRegistrations.push(...result.result.map(r => {
return {
instanceName: r.name,
state: r.state,
instanceType: ResourceType.postgresInstances
};
}));
}),
this._azdataApi.azdata.arc.sql.mi.list(this.azdataAdditionalEnvVars, session).then(result => {
newRegistrations.push(...result.result.map(r => {
return {
instanceName: r.name,
state: r.state,
instanceType: ResourceType.sqlManagedInstances
};
}));
})
]).then(() => {
this._registrations = newRegistrations;
this.registrationsLastUpdated = new Date();
this._onRegistrationsUpdated.fire(this._registrations);
this._azdataApi.azdata.arc.sql.mi.list(this.azdataAdditionalEnvVars, this.controllerContext).then(result => {
newRegistrations.push(...result.result.map(r => {
return {
instanceName: r.name,
state: r.state,
instanceType: ResourceType.sqlManagedInstances
};
}));
})
]);
} finally {
session.dispose();
}
]).then(() => {
this._registrations = newRegistrations;
this.registrationsLastUpdated = new Date();
this._onRegistrationsUpdated.fire(this._registrations);
})
]);
}

public get endpoints(): azdataExt.DcEndpointListResult[] {
@@ -204,6 +214,6 @@ export class ControllerModel {
* property to for use a display label for this controller
*/
public get label(): string {
return `${this.info.name} (${this.info.url})`;
return `${this.info.name} (${this.controllerContext})`;
}
}

@@ -71,11 +71,9 @@ export class MiaaModel extends ResourceModel {
return this._refreshPromise.promise;
}
this._refreshPromise = new Deferred();
let session: azdataExt.AzdataSession | undefined = undefined;
try {
session = await this.controllerModel.acquireAzdataSession();
try {
const result = await this._azdataApi.azdata.arc.sql.mi.show(this.info.name, this.controllerModel.azdataAdditionalEnvVars, session);
const result = await this._azdataApi.azdata.arc.sql.mi.show(this.info.name, this.controllerModel.azdataAdditionalEnvVars, this.controllerModel.controllerContext);
this._config = result.result;
this.configLastUpdated = new Date();
this._onConfigUpdated.fire(this._config);
@@ -109,7 +107,6 @@ export class MiaaModel extends ResourceModel {
this._refreshPromise.reject(err);
throw err;
} finally {
session?.dispose();
this._refreshPromise = undefined;
}
}

@@ -53,10 +53,7 @@ export class PostgresModel extends ResourceModel {

/** Returns the major version of Postgres */
public get engineVersion(): string | undefined {
const kind = this._config?.kind;
return kind
? kind.substring(kind.lastIndexOf('-') + 1)
: undefined;
return this._config?.spec.engine.version;
}

/** Returns the IP address and port of Postgres */
@@ -121,10 +118,8 @@ export class PostgresModel extends ResourceModel {
return this._refreshPromise.promise;
}
this._refreshPromise = new Deferred();
let session: azdataExt.AzdataSession | undefined = undefined;
try {
session = await this.controllerModel.acquireAzdataSession();
this._config = (await this._azdataApi.azdata.arc.postgres.server.show(this.info.name, this.controllerModel.azdataAdditionalEnvVars, session)).result;
this._config = (await this._azdataApi.azdata.arc.postgres.server.show(this.info.name, this.controllerModel.azdataAdditionalEnvVars, this.controllerModel.controllerContext)).result;
this.configLastUpdated = new Date();
this._onConfigUpdated.fire(this._config);
this._refreshPromise.resolve();
@@ -132,7 +127,6 @@ export class PostgresModel extends ResourceModel {
this._refreshPromise.reject(err);
throw err;
} finally {
session?.dispose();
this._refreshPromise = undefined;
}
}

@@ -30,7 +30,7 @@ export class ArcControllersOptionsSourceProvider implements rd.IOptionsSourcePro
const controller = (await getRegisteredDataControllers(this._treeProvider)).find(ci => ci.label === controllerLabel);
throwUnless(controller !== undefined, loc.noControllerInfoFound(controllerLabel));
switch (variableName) {
case 'endpoint': return controller.info.url;
case 'endpoint': return controller.info.endpoint || '';
case 'username': return controller.info.username;
case 'kubeConfig': return controller.info.kubeConfigFilePath;
case 'clusterContext': return controller.info.kubeClusterContext;

@@ -51,7 +51,7 @@ describe('KubeUtils', function (): void {
contexts[1].name.should.equal('kubernetes-admin@kubernetes', `test: ${testName} failed`);
contexts[1].isCurrentContext.should.be.false(`test: ${testName} failed`);
};
verifyContexts(await getKubeConfigClusterContexts(configFile), 'getKubeConfigClusterContexts');
verifyContexts(getKubeConfigClusterContexts(configFile), 'getKubeConfigClusterContexts');
});
it('throws error when unable to load config file', async () => {
const error = new Error('unknown error accessing file');

@@ -23,9 +23,9 @@ export class FakeAzdataApi implements azdataExt.IAzdataApi {
|
||||
},
|
||||
postgres: {
|
||||
server: {
|
||||
postgresInstances: [],
|
||||
postgresInstances: <azdataExt.PostgresServerListResult[]>[],
|
||||
delete(_name: string): Promise<azdataExt.AzdataOutput<void>> { throw new Error('Method not implemented.'); },
|
||||
async list(): Promise<azdataExt.AzdataOutput<azdataExt.PostgresServerListResult[]>> { return <any>{ result: this.postgresInstances }; },
|
||||
async list(): Promise<azdataExt.AzdataOutput<azdataExt.PostgresServerListResult[]>> { return { result: this.postgresInstances, logs: [], stdout: [], stderr: [] }; },
|
||||
show(_name: string): Promise<azdataExt.AzdataOutput<azdataExt.PostgresServerShowResult>> { throw new Error('Method not implemented.'); },
|
||||
edit(
|
||||
_name: string,
|
||||
@@ -42,16 +42,15 @@ export class FakeAzdataApi implements azdataExt.IAzdataApi {
|
||||
replaceEngineSettings?: boolean,
|
||||
workers?: number
|
||||
},
|
||||
_engineVersion?: string,
|
||||
_additionalEnvVars?: azdataExt.AdditionalEnvVars
|
||||
): Promise<azdataExt.AzdataOutput<void>> { throw new Error('Method not implemented.'); }
|
||||
}
|
||||
},
|
||||
sql: {
|
||||
mi: {
|
||||
miaaInstances: [],
|
||||
miaaInstances: <azdataExt.SqlMiListResult[]>[],
|
||||
delete(_name: string): Promise<azdataExt.AzdataOutput<void>> { throw new Error('Method not implemented.'); },
|
||||
async list(): Promise<azdataExt.AzdataOutput<azdataExt.SqlMiListResult[]>> { return <any>{ result: this.miaaInstances }; },
|
||||
async list(): Promise<azdataExt.AzdataOutput<azdataExt.SqlMiListResult[]>> { return { logs: [], stdout: [], stderr: [], result: this.miaaInstances }; },
|
||||
show(_name: string): Promise<azdataExt.AzdataOutput<azdataExt.SqlMiShowResult>> { throw new Error('Method not implemented.'); },
|
||||
edit(
|
||||
_name: string,
|
||||
@@ -66,17 +65,14 @@ export class FakeAzdataApi implements azdataExt.IAzdataApi {
|
||||
}
|
||||
};
|
||||
|
||||
// public postgresInstances: azdataExt.PostgresServerListResult[] = [];
|
||||
public set postgresInstances(instances: azdataExt.PostgresServerListResult[]) {
|
||||
this._arcApi.postgres.server.postgresInstances = <any>instances;
|
||||
this._arcApi.postgres.server.postgresInstances = instances;
|
||||
}
|
||||
|
||||
public set miaaInstances(instances: azdataExt.SqlMiListResult[]) {
|
||||
this._arcApi.sql.mi.miaaInstances = <any>instances;
|
||||
this._arcApi.sql.mi.miaaInstances = instances;
|
||||
}
|
||||
|
||||
// public miaaInstances: azdataExt.SqlMiListResult[] = [];
|
||||
|
||||
//
|
||||
// API Implementation
|
||||
//
|
||||
@@ -86,12 +82,9 @@ export class FakeAzdataApi implements azdataExt.IAzdataApi {
|
||||
getPath(): Promise<string> {
|
||||
throw new Error('Method not implemented.');
|
||||
}
|
||||
login(_endpoint: string, _username: string, _password: string): Promise<azdataExt.AzdataOutput<void>> {
|
||||
login(_endpointOrNamespace: azdataExt.EndpointOrNamespace, _username: string, _password: string, _additionalEnvVars: azdataExt.AdditionalEnvVars = {}, _azdataContext?: string): Promise<azdataExt.AzdataOutput<void>> {
|
||||
return <any>undefined;
|
||||
}
|
||||
acquireSession(_endpoint: string, _username: string, _password: string): Promise<azdataExt.AzdataSession> {
|
||||
return Promise.resolve({ dispose: () => { } });
|
||||
}
|
||||
version(): Promise<azdataExt.AzdataOutput<string>> {
|
||||
throw new Error('Method not implemented.');
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ import { AzureArcTreeDataProvider } from '../../ui/tree/azureArcTreeDataProvider
|
||||
export class FakeControllerModel extends ControllerModel {
|
||||
|
||||
constructor(treeDataProvider?: AzureArcTreeDataProvider, info?: Partial<ControllerInfo>, password?: string) {
|
||||
const _info: ControllerInfo = Object.assign({ id: uuid(), url: '', kubeConfigFilePath: '', kubeClusterContext: '', name: '', username: '', rememberPassword: false, resources: [] }, info);
|
||||
const _info: ControllerInfo = Object.assign({ id: uuid(), endpoint: '', kubeConfigFilePath: '', kubeClusterContext: '', name: '', namespace: '', username: '', rememberPassword: false, resources: [] }, info);
|
||||
super(treeDataProvider!, _info, password);
|
||||
}
|
||||
|
||||
|
||||
@@ -22,6 +22,20 @@ interface ExtensionGlobalMemento extends vscode.Memento {
|
||||
setKeysForSync(keys: string[]): void;
|
||||
}
|
||||
|
||||
function getDefaultControllerInfo(): ControllerInfo {
|
||||
return {
|
||||
id: uuid(),
|
||||
endpoint: '127.0.0.1',
|
||||
kubeConfigFilePath: '/path/to/.kube/config',
|
||||
kubeClusterContext: 'currentCluster',
|
||||
username: 'admin',
|
||||
name: 'arc',
|
||||
namespace: 'arc-ns',
|
||||
rememberPassword: true,
|
||||
resources: []
|
||||
};
|
||||
}
|
||||
|
||||
describe('ControllerModel', function (): void {
|
||||
afterEach(function (): void {
|
||||
sinon.restore();
|
||||
@@ -39,15 +53,15 @@ describe('ControllerModel', function (): void {
|
||||
|
||||
beforeEach(function (): void {
|
||||
sinon.stub(ConnectToControllerDialog.prototype, 'showDialog');
|
||||
sinon.stub(kubeUtils, 'getKubeConfigClusterContexts').resolves([{ name: 'currentCluster', isCurrentContext: true }]);
|
||||
sinon.stub(kubeUtils, 'getKubeConfigClusterContexts').returns([{ name: 'currentCluster', isCurrentContext: true }]);
|
||||
sinon.stub(vscode.window, 'showErrorMessage').resolves(<any>loc.yes);
|
||||
});
|
||||
|
||||
it('Rejected with expected error when user cancels', async function (): Promise<void> {
|
||||
// Returning an undefined model here indicates that the dialog closed without clicking "Ok" - usually through the user clicking "Cancel"
|
||||
sinon.stub(ConnectToControllerDialog.prototype, 'waitForClose').returns(Promise.resolve(undefined));
|
||||
const model = new ControllerModel(new AzureArcTreeDataProvider(mockExtensionContext.object), { id: uuid(), url: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', username: 'admin', name: 'arc', rememberPassword: true, resources: [] });
|
||||
await should(model.acquireAzdataSession()).be.rejectedWith(new UserCancelledError(loc.userCancelledError));
|
||||
const model = new ControllerModel(new AzureArcTreeDataProvider(mockExtensionContext.object), getDefaultControllerInfo());
|
||||
await should(model.login()).be.rejectedWith(new UserCancelledError(loc.userCancelledError));
|
||||
});
|
||||
|
||||
it('Reads password from cred store', async function (): Promise<void> {
|
||||
@@ -62,13 +76,13 @@ describe('ControllerModel', function (): void {
|
||||
|
||||
const azdataExtApiMock = TypeMoq.Mock.ofType<azdataExt.IExtension>();
|
||||
const azdataMock = TypeMoq.Mock.ofType<azdataExt.IAzdataApi>();
|
||||
azdataMock.setup(x => x.acquireSession(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => <any>Promise.resolve(undefined));
|
||||
azdataMock.setup(x => x.login(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => <any>Promise.resolve(undefined));
|
||||
azdataExtApiMock.setup(x => x.azdata).returns(() => azdataMock.object);
|
||||
sinon.stub(vscode.extensions, 'getExtension').returns(<any>{ exports: azdataExtApiMock.object });
|
||||
const model = new ControllerModel(new AzureArcTreeDataProvider(mockExtensionContext.object), { id: uuid(), url: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', username: 'admin', name: 'arc', rememberPassword: true, resources: [] });
|
||||
const model = new ControllerModel(new AzureArcTreeDataProvider(mockExtensionContext.object), getDefaultControllerInfo());
|
||||
|
||||
await model.acquireAzdataSession();
|
||||
azdataMock.verify(x => x.acquireSession(TypeMoq.It.isAny(), TypeMoq.It.isAny(), password, TypeMoq.It.isAny()), TypeMoq.Times.once());
|
||||
await model.login();
|
||||
azdataMock.verify(x => x.login(TypeMoq.It.isAny(), TypeMoq.It.isAny(), password, TypeMoq.It.isAny()), TypeMoq.Times.once());
|
||||
});
|
||||
|
||||
it('Prompt for password when not in cred store', async function (): Promise<void> {
|
||||
@@ -83,18 +97,18 @@ describe('ControllerModel', function (): void {
|
||||
|
||||
const azdataExtApiMock = TypeMoq.Mock.ofType<azdataExt.IExtension>();
|
||||
const azdataMock = TypeMoq.Mock.ofType<azdataExt.IAzdataApi>();
|
||||
azdataMock.setup(x => x.acquireSession(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => <any>Promise.resolve(undefined));
|
||||
azdataMock.setup(x => x.login(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => <any>Promise.resolve(undefined));
|
||||
azdataExtApiMock.setup(x => x.azdata).returns(() => azdataMock.object);
|
||||
sinon.stub(vscode.extensions, 'getExtension').returns(<any>{ exports: azdataExtApiMock.object });
|
||||
|
||||
// Set up dialog to return new model with our password
|
||||
const newModel = new ControllerModel(new AzureArcTreeDataProvider(mockExtensionContext.object), { id: uuid(), url: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', username: 'admin', name: 'arc', rememberPassword: true, resources: [] }, password);
|
||||
const newModel = new ControllerModel(new AzureArcTreeDataProvider(mockExtensionContext.object), getDefaultControllerInfo(), password);
|
||||
sinon.stub(ConnectToControllerDialog.prototype, 'waitForClose').returns(Promise.resolve({ controllerModel: newModel, password: password }));
|
||||
|
||||
const model = new ControllerModel(new AzureArcTreeDataProvider(mockExtensionContext.object), { id: uuid(), url: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', username: 'admin', name: 'arc', rememberPassword: true, resources: [] });
|
||||
const model = new ControllerModel(new AzureArcTreeDataProvider(mockExtensionContext.object), getDefaultControllerInfo());
|
||||
|
||||
await model.acquireAzdataSession();
|
||||
azdataMock.verify(x => x.acquireSession(TypeMoq.It.isAny(), TypeMoq.It.isAny(), password, TypeMoq.It.isAny()), TypeMoq.Times.once());
|
||||
await model.login();
|
||||
azdataMock.verify(x => x.login(TypeMoq.It.isAny(), TypeMoq.It.isAny(), password, TypeMoq.It.isAny()), TypeMoq.Times.once());
|
||||
});
|
||||
|
||||
it('Prompt for password when rememberPassword is true but prompt reconnect is true', async function (): Promise<void> {
|
||||
@@ -108,19 +122,19 @@ describe('ControllerModel', function (): void {
|
||||
|
||||
const azdataExtApiMock = TypeMoq.Mock.ofType<azdataExt.IExtension>();
|
||||
const azdataMock = TypeMoq.Mock.ofType<azdataExt.IAzdataApi>();
|
||||
azdataMock.setup(x => x.acquireSession(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => <any>Promise.resolve(undefined));
|
||||
azdataMock.setup(x => x.login(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => <any>Promise.resolve(undefined));
|
||||
azdataExtApiMock.setup(x => x.azdata).returns(() => azdataMock.object);
|
||||
sinon.stub(vscode.extensions, 'getExtension').returns(<any>{ exports: azdataExtApiMock.object });
|
||||
|
||||
// Set up dialog to return new model with our new password from the reprompt
|
||||
const newModel = new ControllerModel(new AzureArcTreeDataProvider(mockExtensionContext.object), { id: uuid(), url: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', username: 'admin', name: 'arc', rememberPassword: true, resources: [] }, password);
|
||||
const newModel = new ControllerModel(new AzureArcTreeDataProvider(mockExtensionContext.object), getDefaultControllerInfo(), password);
|
||||
const waitForCloseStub = sinon.stub(ConnectToControllerDialog.prototype, 'waitForClose').returns(Promise.resolve({ controllerModel: newModel, password: password }));
|
||||
|
||||
const model = new ControllerModel(new AzureArcTreeDataProvider(mockExtensionContext.object), { id: uuid(), url: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', username: 'admin', name: 'arc', rememberPassword: true, resources: [] });
|
||||
const model = new ControllerModel(new AzureArcTreeDataProvider(mockExtensionContext.object), getDefaultControllerInfo());
|
||||
|
||||
await model.acquireAzdataSession(true);
|
||||
await model.login(true);
|
||||
should(waitForCloseStub.called).be.true('waitForClose should have been called');
|
||||
azdataMock.verify(x => x.acquireSession(TypeMoq.It.isAny(), TypeMoq.It.isAny(), password, TypeMoq.It.isAny()), TypeMoq.Times.once());
|
||||
azdataMock.verify(x => x.login(TypeMoq.It.isAny(), TypeMoq.It.isAny(), password, TypeMoq.It.isAny()), TypeMoq.Times.once());
|
||||
});
|
||||
|
||||
it('Prompt for password when we already have a password but prompt reconnect is true', async function (): Promise<void> {
|
||||
@@ -134,20 +148,20 @@ describe('ControllerModel', function (): void {
|
||||
|
||||
const azdataExtApiMock = TypeMoq.Mock.ofType<azdataExt.IExtension>();
|
||||
const azdataMock = TypeMoq.Mock.ofType<azdataExt.IAzdataApi>();
|
||||
azdataMock.setup(x => x.acquireSession(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => <any>Promise.resolve(undefined));
|
||||
azdataMock.setup(x => x.login(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => <any>Promise.resolve(undefined));
|
||||
azdataExtApiMock.setup(x => x.azdata).returns(() => azdataMock.object);
|
||||
sinon.stub(vscode.extensions, 'getExtension').returns(<any>{ exports: azdataExtApiMock.object });
|
||||
|
||||
// Set up dialog to return new model with our new password from the reprompt
|
||||
const newModel = new ControllerModel(new AzureArcTreeDataProvider(mockExtensionContext.object), { id: uuid(), url: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', username: 'admin', name: 'arc', rememberPassword: true, resources: [] }, password);
|
||||
const newModel = new ControllerModel(new AzureArcTreeDataProvider(mockExtensionContext.object), getDefaultControllerInfo(), password);
|
||||
const waitForCloseStub = sinon.stub(ConnectToControllerDialog.prototype, 'waitForClose').returns(Promise.resolve({ controllerModel: newModel, password: password }));
|
||||
|
||||
// Set up original model with a password
|
||||
const model = new ControllerModel(new AzureArcTreeDataProvider(mockExtensionContext.object), { id: uuid(), url: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', username: 'admin', name: 'arc', rememberPassword: true, resources: [] }, 'originalPassword');
|
||||
const model = new ControllerModel(new AzureArcTreeDataProvider(mockExtensionContext.object), getDefaultControllerInfo(), 'originalPassword');
|
||||
|
||||
await model.acquireAzdataSession(true);
|
||||
await model.login(true);
|
||||
should(waitForCloseStub.called).be.true('waitForClose should have been called');
|
||||
azdataMock.verify(x => x.acquireSession(TypeMoq.It.isAny(), TypeMoq.It.isAny(), password, TypeMoq.It.isAny()), TypeMoq.Times.once());
|
||||
azdataMock.verify(x => x.login(TypeMoq.It.isAny(), TypeMoq.It.isAny(), password, TypeMoq.It.isAny()), TypeMoq.Times.once());
|
||||
});
|
||||
|
||||
it('Model values are updated correctly when modified during reconnect', async function (): Promise<void> {
|
||||
@@ -162,7 +176,7 @@ describe('ControllerModel', function (): void {
|
||||
|
||||
const azdataExtApiMock = TypeMoq.Mock.ofType<azdataExt.IExtension>();
|
||||
const azdataMock = TypeMoq.Mock.ofType<azdataExt.IAzdataApi>();
|
||||
azdataMock.setup(x => x.acquireSession(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => <any>Promise.resolve(undefined));
|
||||
azdataMock.setup(x => x.login(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => <any>Promise.resolve(undefined));
|
||||
azdataExtApiMock.setup(x => x.azdata).returns(() => azdataMock.object);
|
||||
sinon.stub(vscode.extensions, 'getExtension').returns(<any>{ exports: azdataExtApiMock.object });
|
||||
|
||||
@@ -170,27 +184,19 @@ describe('ControllerModel', function (): void {
|
||||
const originalPassword = 'originalPassword';
|
||||
const model = new ControllerModel(
|
||||
treeDataProvider,
|
||||
{
|
||||
id: uuid(),
|
||||
url: '127.0.0.1',
|
||||
kubeConfigFilePath: '/path/to/.kube/config',
|
||||
kubeClusterContext: 'currentCluster',
|
||||
username: 'admin',
|
||||
name: 'arc',
|
||||
rememberPassword: false,
|
||||
resources: []
|
||||
},
|
||||
getDefaultControllerInfo(),
|
||||
originalPassword
|
||||
);
|
||||
await treeDataProvider.addOrUpdateController(model, originalPassword);
|
||||
|
||||
const newInfo: ControllerInfo = {
|
||||
id: model.info.id, // The ID stays the same since we're just re-entering information for the same model
|
||||
url: 'newUrl',
|
||||
endpoint: 'newUrl',
|
||||
kubeConfigFilePath: '/path/to/.kube/config',
|
||||
kubeClusterContext: 'currentCluster',
|
||||
username: 'newUser',
|
||||
name: 'newName',
|
||||
namespace: 'newNamespace',
|
||||
rememberPassword: true,
|
||||
resources: []
|
||||
};
|
||||
@@ -203,7 +209,7 @@ describe('ControllerModel', function (): void {
|
||||
const waitForCloseStub = sinon.stub(ConnectToControllerDialog.prototype, 'waitForClose').returns(Promise.resolve(
|
||||
{ controllerModel: newModel, password: newPassword }));
|
||||
|
||||
await model.acquireAzdataSession(true);
|
||||
await model.login(true);
|
||||
should(waitForCloseStub.called).be.true('waitForClose should have been called');
|
||||
should((await treeDataProvider.getChildren()).length).equal(1, 'Tree Data provider should still only have 1 node');
|
||||
should(model.info).deepEqual(newInfo, 'Model info should have been updated');
|
||||
|
||||
@@ -40,7 +40,8 @@ export const FakePostgresServerShowOutput: azdataExt.AzdataOutput<azdataExt.Post
extensions: [{ name: '' }],
settings: {
default: { ['']: '' }
}
},
version: ''
},
scale: {
shards: 0,
@@ -114,7 +115,7 @@ describe('PostgresModel', function (): void {
controllerModel = new FakeControllerModel();
//Stub calling azdata login and acquiring session
sinon.stub(controllerModel, 'acquireAzdataSession').returns(Promise.resolve(vscode.Disposable.from()));
sinon.stub(controllerModel, 'login').returns(Promise.resolve());
// Stub the azdata CLI API
azdataApi = new FakeAzdataApi();
@@ -38,7 +38,8 @@ export const FakePostgresServerShowOutput: azdataExt.AzdataOutput<azdataExt.Post
extensions: [{ name: '' }],
settings: {
default: { ['']: '' }
}
},
version: '12'
},
scale: {
shards: 0,
@@ -121,7 +122,7 @@ describe('postgresConnectionStringsPage', function (): void {
controllerModel = new FakeControllerModel();
//Stub calling azdata login and acquiring session
sinon.stub(controllerModel, 'acquireAzdataSession').returns(Promise.resolve(vscode.Disposable.from()));
sinon.stub(controllerModel, 'login').returns(Promise.resolve());
// Setup PostgresModel
const postgresResource: PGResourceInfo = { name: 'pgt', resourceType: '' };
@@ -78,7 +78,7 @@ describe('postgresOverviewPage', () => {
beforeEach(() => {
sinon.stub(utils, 'promptForInstanceDeletion').returns(Promise.resolve(true));
sinon.stub(controllerModel, 'acquireAzdataSession').returns(Promise.resolve(vscode.Disposable.from()));
sinon.stub(controllerModel, 'login').returns(Promise.resolve());
refreshTreeNode = sinon.stub(controllerModel, 'refreshTreeNode');
});
@@ -18,8 +18,8 @@ describe('ConnectControllerDialog', function (): void {
(<{ info: ControllerInfo | undefined, description: string }[]>[
{ info: undefined, description: 'all input' },
{ info: { url: '127.0.0.1' }, description: 'all but URL' },
{ info: { url: '127.0.0.1', username: 'sa' }, description: 'all but URL and password' }]).forEach(test => {
{ info: { endpoint: '127.0.0.1' }, description: 'all but URL' },
{ info: { endpoint: '127.0.0.1', username: 'sa' }, description: 'all but URL and password' }]).forEach(test => {
it(`Validate returns false when ${test.description} is empty`, async function (): Promise<void> {
const connectControllerDialog = new ConnectToControllerDialog(undefined!);
connectControllerDialog.showDialog(test.info, undefined);
@@ -32,7 +32,7 @@ describe('ConnectControllerDialog', function (): void {
it('validate returns false if controller refresh fails', async function (): Promise<void> {
sinon.stub(ControllerModel.prototype, 'refresh').returns(Promise.reject('Controller refresh failed'));
const connectControllerDialog = new ConnectToControllerDialog(undefined!);
const info = { id: uuid(), url: 'https://127.0.0.1:30080', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', username: 'sa', rememberPassword: true, resources: [] };
const info: ControllerInfo = { id: uuid(), endpoint: 'https://127.0.0.1:30080', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', namespace: 'arc-ns', username: 'sa', rememberPassword: true, resources: [] };
connectControllerDialog.showDialog(info, 'pwd');
await connectControllerDialog.isInitialized;
const validateResult = await connectControllerDialog.validate();
@@ -41,36 +41,36 @@ describe('ConnectControllerDialog', function (): void {
it('validate replaces http with https', async function (): Promise<void> {
await validateConnectControllerDialog(
{ id: uuid(), url: 'http://127.0.0.1:30081', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', username: 'sa', rememberPassword: true, resources: [] },
{ id: uuid(), endpoint: 'http://127.0.0.1:30081', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', namespace: 'arc-ns', username: 'sa', rememberPassword: true, resources: [] },
'https://127.0.0.1:30081');
});
it('validate appends https if missing', async function (): Promise<void> {
await validateConnectControllerDialog({ id: uuid(), url: '127.0.0.1:30080', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', username: 'sa', rememberPassword: true, resources: [] },
await validateConnectControllerDialog({ id: uuid(), endpoint: '127.0.0.1:30080', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', namespace: 'arc-ns', username: 'sa', rememberPassword: true, resources: [] },
'https://127.0.0.1:30080');
});
it('validate appends default port if missing', async function (): Promise<void> {
await validateConnectControllerDialog({ id: uuid(), url: 'https://127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', username: 'sa', rememberPassword: true, resources: [] },
await validateConnectControllerDialog({ id: uuid(), endpoint: 'https://127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', namespace: 'arc-ns', username: 'sa', rememberPassword: true, resources: [] },
'https://127.0.0.1:30080');
});
it('validate appends both port and https if missing', async function (): Promise<void> {
await validateConnectControllerDialog({ id: uuid(), url: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', username: 'sa', rememberPassword: true, resources: [] },
await validateConnectControllerDialog({ id: uuid(), endpoint: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', namespace: 'arc-ns', username: 'sa', rememberPassword: true, resources: [] },
'https://127.0.0.1:30080');
});
for (const name of ['', undefined]) {
it.skip(`validate display name gets set to arc instance name for user chosen name of:${name}`, async function (): Promise<void> {
await validateConnectControllerDialog(
{ id: uuid(), url: 'http://127.0.0.1:30081', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: name!, username: 'sa', rememberPassword: true, resources: [] },
{ id: uuid(), endpoint: 'http://127.0.0.1:30081', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: name!, namespace: 'arc-ns', username: 'sa', rememberPassword: true, resources: [] },
'https://127.0.0.1:30081');
});
}
it.skip(`validate display name gets set to default data controller name for user chosen name of:'' and instanceName inexplicably returned as undefined from the controller endpoint`, async function (): Promise<void> {
await validateConnectControllerDialog(
{ id: uuid(), url: 'http://127.0.0.1:30081', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: '', username: 'sa', rememberPassword: true, resources: [] },
{ id: uuid(), endpoint: 'http://127.0.0.1:30081', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: '', namespace: 'arc-ns', username: 'sa', rememberPassword: true, resources: [] },
'https://127.0.0.1:30081',
undefined);
});
@@ -92,6 +92,6 @@ async function validateConnectControllerDialog(info: ControllerInfo, expectedUrl
const validateResult = await connectControllerDialog.validate();
should(validateResult).be.true('Validation should have returned true');
const model = await connectControllerDialog.waitForClose();
should(model?.controllerModel.info.url).equal(expectedUrl);
should(model?.controllerModel.info.endpoint).equal(expectedUrl);
should(model?.controllerModel.info.name).equal(expectedControllerInfoName);
}
@@ -24,6 +24,20 @@ interface ExtensionGlobalMemento extends vscode.Memento {
setKeysForSync(keys: string[]): void;
}
function getDefaultControllerInfo(): ControllerInfo {
return {
id: uuid(),
endpoint: '127.0.0.1',
kubeConfigFilePath: '/path/to/.kube/config',
kubeClusterContext: 'currentCluster',
username: 'sa',
name: 'my-arc',
namespace: 'arc-ns',
rememberPassword: true,
resources: []
};
}
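// Note (illustrative, not part of this diff): the getDefaultControllerInfo() fixture above appears to be the
// shared starting point for the tests that follow; individual fields are presumably overridden per test, as the
// removeController test later does with the username. A hypothetical second-controller fixture could be built as:
// const info2: ControllerInfo = { ...getDefaultControllerInfo(), endpoint: '127.0.0.2', username: 'cloudsa' };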
describe('AzureArcTreeDataProvider tests', function (): void {
let treeDataProvider: AzureArcTreeDataProvider;
beforeEach(function (): void {
@@ -58,7 +72,7 @@ describe('AzureArcTreeDataProvider tests', function (): void {
treeDataProvider['_loading'] = false;
let children = await treeDataProvider.getChildren();
should(children.length).equal(0, 'There initially shouldn\'t be any children');
const controllerModel = new ControllerModel(treeDataProvider, { id: uuid(), url: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', username: 'sa', rememberPassword: true, resources: [] });
const controllerModel = new ControllerModel(treeDataProvider, getDefaultControllerInfo());
await treeDataProvider.addOrUpdateController(controllerModel, '');
should(children.length).equal(1, 'Controller node should be added correctly');
await treeDataProvider.addOrUpdateController(controllerModel, '');
@@ -69,12 +83,12 @@ describe('AzureArcTreeDataProvider tests', function (): void {
treeDataProvider['_loading'] = false;
let children = await treeDataProvider.getChildren();
should(children.length).equal(0, 'There initially shouldn\'t be any children');
const originalInfo: ControllerInfo = { id: uuid(), url: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', username: 'sa', rememberPassword: true, resources: [] };
const originalInfo: ControllerInfo = getDefaultControllerInfo();
const controllerModel = new ControllerModel(treeDataProvider, originalInfo);
await treeDataProvider.addOrUpdateController(controllerModel, '');
should(children.length).equal(1, 'Controller node should be added correctly');
should((<ControllerTreeNode>children[0]).model.info).deepEqual(originalInfo);
const newInfo = { id: originalInfo.id, url: '1.1.1.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'new-name', username: 'admin', rememberPassword: false, resources: [] };
const newInfo: ControllerInfo = { id: originalInfo.id, endpoint: '1.1.1.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'new-name', namespace: 'new-namespace', username: 'admin', rememberPassword: false, resources: [] };
const controllerModel2 = new ControllerModel(treeDataProvider, newInfo);
await treeDataProvider.addOrUpdateController(controllerModel2, '');
should(children.length).equal(1, 'Shouldn\'t add duplicate controller node');
@@ -109,8 +123,8 @@ describe('AzureArcTreeDataProvider tests', function (): void {
mockArcApi.setup(x => x.azdata).returns(() => fakeAzdataApi);
sinon.stub(vscode.extensions, 'getExtension').returns(mockArcExtension.object);
sinon.stub(kubeUtils, 'getKubeConfigClusterContexts').resolves([{ name: 'currentCluster', isCurrentContext: true }]);
const controllerModel = new ControllerModel(treeDataProvider, { id: uuid(), url: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', username: 'sa', rememberPassword: true, resources: [] }, 'mypassword');
sinon.stub(kubeUtils, 'getKubeConfigClusterContexts').returns([{ name: 'currentCluster', isCurrentContext: true }]);
const controllerModel = new ControllerModel(treeDataProvider, getDefaultControllerInfo(), 'mypassword');
await treeDataProvider.addOrUpdateController(controllerModel, '');
const controllerNode = treeDataProvider.getControllerNode(controllerModel);
const children = await treeDataProvider.getChildren(controllerNode);
@@ -123,8 +137,10 @@ describe('AzureArcTreeDataProvider tests', function (): void {
describe('removeController', function (): void {
it('removing a controller should work as expected', async function (): Promise<void> {
treeDataProvider['_loading'] = false;
const controllerModel = new ControllerModel(treeDataProvider, { id: uuid(), url: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', username: 'sa', rememberPassword: true, resources: [] });
const controllerModel2 = new ControllerModel(treeDataProvider, { id: uuid(), url: '127.0.0.2', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', username: 'cloudsa', rememberPassword: true, resources: [] });
const controllerModel = new ControllerModel(treeDataProvider, getDefaultControllerInfo());
const info2 = getDefaultControllerInfo();
info2.username = 'cloudsa';
const controllerModel2 = new ControllerModel(treeDataProvider, info2);
await treeDataProvider.addOrUpdateController(controllerModel, '');
await treeDataProvider.addOrUpdateController(controllerModel2, '');
const children = <ControllerTreeNode[]>(await treeDataProvider.getChildren());
@@ -141,20 +157,20 @@ describe('AzureArcTreeDataProvider tests', function (): void {
describe('openResourceDashboard', function (): void {
it('Opening dashboard for nonexistent controller node throws', async function (): Promise<void> {
const controllerModel = new ControllerModel(treeDataProvider, { id: uuid(), url: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', username: 'sa', rememberPassword: true, resources: [] });
const controllerModel = new ControllerModel(treeDataProvider, getDefaultControllerInfo());
const openDashboardPromise = treeDataProvider.openResourceDashboard(controllerModel, ResourceType.sqlManagedInstances, '');
await should(openDashboardPromise).be.rejected();
});
it('Opening dashboard for nonexistent resource throws', async function (): Promise<void> {
const controllerModel = new ControllerModel(treeDataProvider, { id: uuid(), url: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', username: 'sa', rememberPassword: true, resources: [] });
const controllerModel = new ControllerModel(treeDataProvider, getDefaultControllerInfo());
await treeDataProvider.addOrUpdateController(controllerModel, '');
const openDashboardPromise = treeDataProvider.openResourceDashboard(controllerModel, ResourceType.sqlManagedInstances, '');
await should(openDashboardPromise).be.rejected();
});
it('Opening dashboard for existing resource node succeeds', async function (): Promise<void> {
const controllerModel = new ControllerModel(treeDataProvider, { id: uuid(), url: '127.0.0.1', kubeConfigFilePath: '/path/to/.kube/config', kubeClusterContext: 'currentCluster', name: 'my-arc', username: 'sa', rememberPassword: true, resources: [] });
const controllerModel = new ControllerModel(treeDataProvider, getDefaultControllerInfo());
const miaaModel = new MiaaModel(controllerModel, { name: 'miaa-1', resourceType: ResourceType.sqlManagedInstances }, undefined!, treeDataProvider);
await treeDataProvider.addOrUpdateController(controllerModel, '');
const controllerNode = treeDataProvider.getControllerNode(controllerModel)!;
extensions/arc/src/typings/arc.d.ts
@@ -37,7 +37,8 @@ declare module 'arc' {
id: string,
kubeConfigFilePath: string,
kubeClusterContext: string
url: string,
endpoint: string | undefined,
namespace: string,
name: string,
username: string,
rememberPassword: boolean,
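// Note (illustrative sketch, not part of this diff): with the field change above, a ControllerInfo value is
// presumably keyed by Kubernetes namespace while the controller endpoint becomes optional, roughly:
// const example: ControllerInfo = {
//   id: '...', kubeConfigFilePath: '~/.kube/config', kubeClusterContext: 'my-cluster',
//   endpoint: undefined, namespace: 'arc-ns', name: 'my-arc', username: 'admin',
//   rememberPassword: false, resources: []
// };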
@@ -17,6 +17,9 @@ export class RadioOptionsGroup {
private _loadingBuilder: azdata.LoadingComponentBuilder;
private _currentRadioOption!: azdata.RadioButtonComponent;
private _onRadioOptionChanged: vscode.EventEmitter<string | undefined> = new vscode.EventEmitter<string | undefined>();
public onRadioOptionChanged: vscode.Event<string | undefined> = this._onRadioOptionChanged.event;
constructor(private _modelBuilder: azdata.ModelBuilder, private _onNewDisposableCreated: (disposable: vscode.Disposable) => void, private _groupName: string = `RadioOptionsGroup${RadioOptionsGroup.id++}`) {
this._divContainer = this._modelBuilder.divContainer().withProperties<azdata.DivContainerProperties>({ clickable: false }).component();
this._loadingBuilder = this._modelBuilder.loadingComponent().withItem(this._divContainer);
@@ -26,7 +29,7 @@ export class RadioOptionsGroup {
return this._loadingBuilder.component();
}
async load(optionsInfoGetter: () => Promise<RadioOptionsInfo>): Promise<void> {
async load(optionsInfoGetter: () => RadioOptionsInfo | Promise<RadioOptionsInfo>): Promise<void> {
this.component().loading = true;
this._divContainer.clearItems();
try {
@@ -51,6 +54,7 @@ export class RadioOptionsGroup {
// it is just better to keep things clean.
this._currentRadioOption.checked = false;
this._currentRadioOption = radioOption;
this._onRadioOptionChanged.fire(this.value);
}
}));
this._divContainer.addItem(radioOption);
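// Note (illustrative usage, not part of this diff): load() now accepts either a synchronous or an async getter,
// so a caller that already has its options in hand need not wrap them in a Promise:
// radioGroup.load(() => ({ values: ['ctx-1', 'ctx-2'], defaultValue: 'ctx-1' })); // synchronous
// radioGroup.load(async () => fetchOptionsInfo()); // async still supported; fetchOptionsInfo is hypothetical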
@@ -129,16 +129,12 @@ export class MiaaComputeAndStoragePage extends DashboardPage {
cancellable: false
},
async (_progress, _token): Promise<void> => {
let session: azdataExt.AzdataSession | undefined = undefined;
try {
session = await this._miaaModel.controllerModel.acquireAzdataSession();
await this._azdataApi.azdata.arc.sql.mi.edit(
this._miaaModel.info.name, this.saveArgs, this._miaaModel.controllerModel.azdataAdditionalEnvVars, session);
this._miaaModel.info.name, this.saveArgs, this._miaaModel.controllerModel.azdataAdditionalEnvVars, this._miaaModel.controllerModel.controllerContext);
} catch (err) {
this.saveButton!.enabled = true;
throw err;
} finally {
session?.dispose();
}
try {
await this._miaaModel.refresh();
@@ -244,12 +244,7 @@ export class MiaaDashboardOverviewPage extends DashboardPage {
cancellable: false
},
async (_progress, _token) => {
const session = await this._controllerModel.acquireAzdataSession();
try {
return await this._azdataApi.azdata.arc.sql.mi.delete(this._miaaModel.info.name, this._controllerModel.azdataAdditionalEnvVars, session);
} finally {
session.dispose();
}
return await this._azdataApi.azdata.arc.sql.mi.delete(this._miaaModel.info.name, this._controllerModel.azdataAdditionalEnvVars, this._controllerModel.controllerContext);
}
);
await this._controllerModel.refreshTreeNode();
@@ -179,9 +179,7 @@ export class PostgresComputeAndStoragePage extends DashboardPage {
cancellable: false
},
async (_progress, _token): Promise<void> => {
let session: azdataExt.AzdataSession | undefined = undefined;
try {
session = await this._postgresModel.controllerModel.acquireAzdataSession();
await this._azdataApi.azdata.arc.postgres.server.edit(
this._postgresModel.info.name,
{
@@ -191,10 +189,7 @@ export class PostgresComputeAndStoragePage extends DashboardPage {
memoryRequest: this.saveArgs.workerMemoryRequest,
memoryLimit: this.saveArgs.workerMemoryLimit
},
this._postgresModel.engineVersion,
this._postgresModel.controllerModel.azdataAdditionalEnvVars,
session
);
this._postgresModel.controllerModel.azdataAdditionalEnvVars);
/* TODO add second edit call for coordinator configuration
await this._azdataApi.azdata.arc.postgres.server.edit(
this._postgresModel.info.name,
@@ -204,7 +199,6 @@ export class PostgresComputeAndStoragePage extends DashboardPage {
memoryRequest: this.saveArgs.coordinatorMemoryRequest,
memoryLimit: this.saveArgs.coordinatorMemoryLimit
},
this._postgresModel.engineVersion,
this._postgresModel.controllerModel.azdataAdditionalEnvVars,
session
);
@@ -214,8 +208,6 @@ export class PostgresComputeAndStoragePage extends DashboardPage {
// the edit wasn't successfully applied
this.saveButton.enabled = true;
throw err;
} finally {
session?.dispose();
}
try {
await this._postgresModel.refresh();
@@ -39,8 +39,7 @@ export class PostgresCoordinatorNodeParametersPage extends PostgresParametersPag
/* TODO add correct azdata call for editing coordinator parameters
await this._azdataApi.azdata.arc.postgres.server.edit(
this._postgresModel.info.name,
{ engineSettings: engineSettings },
this._postgresModel.engineVersion,
{ engineSettings: engineSettings.toString() },
this._postgresModel.controllerModel.azdataAdditionalEnvVars,
session);
*/
@@ -51,7 +50,6 @@ export class PostgresCoordinatorNodeParametersPage extends PostgresParametersPag
await this._azdataApi.azdata.arc.postgres.server.edit(
this._postgresModel.info.name,
{ engineSettings: `''`, replaceEngineSettings: true },
this._postgresModel.engineVersion,
this._postgresModel.controllerModel.azdataAdditionalEnvVars,
session);
*/
@@ -62,7 +60,6 @@ export class PostgresCoordinatorNodeParametersPage extends PostgresParametersPag
await this._azdataApi.azdata.arc.postgres.server.edit(
this._postgresModel.info.name,
{ engineSettings: parameterName + '=' },
this._postgresModel.engineVersion,
this._postgresModel.controllerModel.azdataAdditionalEnvVars,
session);
*/
@@ -39,7 +39,7 @@ export class PostgresDashboard extends Dashboard {
// TODO Add dashboard once backend is able to be connected for per role server parameter edits.
// const coordinatorNodeParametersPage = new PostgresCoordinatorNodeParametersPage(modelView, this._postgresModel);
const workerNodeParametersPage = new PostgresWorkerNodeParametersPage(modelView, this.dashboard, this._postgresModel);
const diagnoseAndSolveProblemsPage = new PostgresDiagnoseAndSolveProblemsPage(modelView, this.dashboard, this._context, this._postgresModel);
const diagnoseAndSolveProblemsPage = new PostgresDiagnoseAndSolveProblemsPage(modelView, this.dashboard, this._context, this._controllerModel, this._postgresModel);
const supportRequestPage = new PostgresSupportRequestPage(modelView, this.dashboard, this._controllerModel, this._postgresModel);
const resourceHealthPage = new PostgresResourceHealthPage(modelView, this.dashboard, this._postgresModel);
@@ -9,9 +9,10 @@ import * as loc from '../../../localizedConstants';
import { IconPathHelper, cssStyles } from '../../../constants';
import { DashboardPage } from '../../components/dashboardPage';
import { PostgresModel } from '../../../models/postgresModel';
import { ControllerModel } from '../../../models/controllerModel';
export class PostgresDiagnoseAndSolveProblemsPage extends DashboardPage {
constructor(protected modelView: azdata.ModelView, dashboard: azdata.window.ModelViewDashboard, private _context: vscode.ExtensionContext, private _postgresModel: PostgresModel) {
constructor(modelView: azdata.ModelView, dashboard: azdata.window.ModelViewDashboard, private _context: vscode.ExtensionContext, private _controllerModel: ControllerModel, private _postgresModel: PostgresModel) {
super(modelView, dashboard);
}
@@ -50,9 +51,8 @@ export class PostgresDiagnoseAndSolveProblemsPage extends DashboardPage {
this.disposables.push(
troubleshootButton.onDidClick(() => {
process.env['POSTGRES_SERVER_NAMESPACE'] = this._postgresModel.config?.metadata.namespace;
process.env['POSTGRES_SERVER_NAMESPACE'] = this._controllerModel.controllerConfig?.metadata.namespace ?? '';
process.env['POSTGRES_SERVER_NAME'] = this._postgresModel.info.name;
process.env['POSTGRES_SERVER_VERSION'] = this._postgresModel.engineVersion;
vscode.commands.executeCommand('bookTreeView.openBook', this._context.asAbsolutePath('notebooks/arcDataServices'), true, 'postgres/tsg100-troubleshoot-postgres');
}));
@@ -217,21 +217,13 @@ export class PostgresOverviewPage extends DashboardPage {
try {
const password = await promptAndConfirmPassword(input => !input ? loc.enterANonEmptyPassword : '');
if (password) {
const session = await this._postgresModel.controllerModel.acquireAzdataSession();
try {
await this._azdataApi.azdata.arc.postgres.server.edit(
this._postgresModel.info.name,
{
adminPassword: true,
noWait: true
},
this._postgresModel.engineVersion,
Object.assign({ 'AZDATA_PASSWORD': password }, this._controllerModel.azdataAdditionalEnvVars),
session
);
} finally {
session.dispose();
}
await this._azdataApi.azdata.arc.postgres.server.edit(
this._postgresModel.info.name,
{
adminPassword: true,
noWait: true
},
Object.assign({ 'AZDATA_PASSWORD': password }, this._controllerModel.azdataAdditionalEnvVars));
vscode.window.showInformationMessage(loc.passwordReset);
}
} catch (error) {
@@ -259,13 +251,7 @@ export class PostgresOverviewPage extends DashboardPage {
cancellable: false
},
async (_progress, _token) => {
const session = await this._postgresModel.controllerModel.acquireAzdataSession();
try {
return await this._azdataApi.azdata.arc.postgres.server.delete(this._postgresModel.info.name, this._controllerModel.azdataAdditionalEnvVars, session);
} finally {
session.dispose();
}
return await this._azdataApi.azdata.arc.postgres.server.delete(this._postgresModel.info.name, this._controllerModel.azdataAdditionalEnvVars, this._controllerModel.controllerContext);
}
);
await this._controllerModel.refreshTreeNode();
@@ -152,12 +152,7 @@ export abstract class PostgresParametersPage extends DashboardPage {
this.parameterUpdates.forEach((value, key) => {
engineSettings.push(`${key}="${value}"`);
});
const session = await this._postgresModel.controllerModel.acquireAzdataSession();
try {
await this.saveParameterEdits(engineSettings.toString(), session);
} finally {
session.dispose();
}
await this.saveParameterEdits(engineSettings.toString());
} catch (err) {
// If an error occurs while editing the instance then re-enable the save button since
// the edit wasn't successfully applied
@@ -230,12 +225,7 @@ export abstract class PostgresParametersPage extends DashboardPage {
},
async (_progress, _token): Promise<void> => {
try {
const session = await this._postgresModel.controllerModel.acquireAzdataSession();
try {
await this.resetAllParameters(session);
} finally {
session.dispose();
}
await this.resetAllParameters();
} catch (err) {
// If an error occurs while resetting the instance then re-enable the reset button since
// the edit wasn't successfully applied
@@ -423,12 +413,7 @@ export abstract class PostgresParametersPage extends DashboardPage {
cancellable: false
},
async (_progress, _token): Promise<void> => {
const session = await this._postgresModel.controllerModel.acquireAzdataSession();
try {
await this.resetParameter(engineSetting.parameterName!, session);
} finally {
session.dispose();
}
await this.resetParameter(engineSetting.parameterName!);
try {
await this._postgresModel.refresh();
} catch (error) {
@@ -633,9 +618,9 @@ export abstract class PostgresParametersPage extends DashboardPage {
}
}
protected abstract saveParameterEdits(engineSettings: string, session: azdataExt.AzdataSession): Promise<void>;
protected abstract saveParameterEdits(engineSettings: string): Promise<void>;
protected abstract resetAllParameters(session: azdataExt.AzdataSession): Promise<void>;
protected abstract resetAllParameters(): Promise<void>;
protected abstract resetParameter(parameterName: string, session: azdataExt.AzdataSession): Promise<void>;
protected abstract resetParameter(parameterName: string): Promise<void>;
}
@@ -4,7 +4,6 @@
*--------------------------------------------------------------------------------------------*/
import * as azdata from 'azdata';
import * as azdataExt from 'azdata-ext';
import * as loc from '../../../localizedConstants';
import { IconPathHelper } from '../../../constants';
import { PostgresParametersPage } from './postgresParameters';
@@ -35,34 +34,32 @@ export class PostgresWorkerNodeParametersPage extends PostgresParametersPage {
return loc.nodeParametersDescription;
}
protected get engineSettings(): EngineSettingsModel[] {
return this._postgresModel.workerNodesEngineSettings;
}
protected async saveParameterEdits(engineSettings: string, session: azdataExt.AzdataSession): Promise<void> {
protected async saveParameterEdits(engineSettings: string): Promise<void> {
await this._azdataApi.azdata.arc.postgres.server.edit(
this._postgresModel.info.name,
{ engineSettings: engineSettings },
this._postgresModel.engineVersion,
this._postgresModel.controllerModel.azdataAdditionalEnvVars,
session);
this._postgresModel.controllerModel.controllerContext);
}
protected async resetAllParameters(session: azdataExt.AzdataSession): Promise<void> {
protected async resetAllParameters(): Promise<void> {
await this._azdataApi.azdata.arc.postgres.server.edit(
this._postgresModel.info.name,
{ engineSettings: `''`, replaceEngineSettings: true },
this._postgresModel.engineVersion,
this._postgresModel.controllerModel.azdataAdditionalEnvVars,
session);
this._postgresModel.controllerModel.controllerContext);
}
protected async resetParameter(parameterName: string, session: azdataExt.AzdataSession): Promise<void> {
protected async resetParameter(parameterName: string): Promise<void> {
await this._azdataApi.azdata.arc.postgres.server.edit(
this._postgresModel.info.name,
{ engineSettings: parameterName + '=' },
this._postgresModel.engineVersion,
this._postgresModel.controllerModel.azdataAdditionalEnvVars,
session);
this._postgresModel.controllerModel.controllerContext);
}
}
@@ -15,7 +15,7 @@ import { InitializingComponent } from '../components/initializingComponent';
import { AzureArcTreeDataProvider } from '../tree/azureArcTreeDataProvider';
import { getErrorMessage } from '../../common/utils';
import { RadioOptionsGroup } from '../components/radioOptionsGroup';
import { getCurrentClusterContext, getDefaultKubeConfigPath, getKubeConfigClusterContexts } from '../../common/kubeUtils';
import { getCurrentClusterContext, getDefaultKubeConfigPath, getKubeConfigClusterContexts, KubeClusterContext } from '../../common/kubeUtils';
import { FilePicker } from '../components/filePicker';
export type ConnectToControllerDialogModel = { controllerModel: ControllerModel, password: string };
@@ -25,24 +25,34 @@ abstract class ControllerDialogBase extends InitializingComponent {
protected modelBuilder!: azdata.ModelBuilder;
protected dialog: azdata.window.Dialog;
protected urlInputBox!: azdata.InputBoxComponent;
protected namespaceInputBox!: azdata.InputBoxComponent;
protected kubeConfigInputBox!: FilePicker;
protected clusterContextRadioGroup!: RadioOptionsGroup;
protected nameInputBox!: azdata.InputBoxComponent;
protected usernameInputBox!: azdata.InputBoxComponent;
protected passwordInputBox!: azdata.InputBoxComponent;
protected urlInputBox!: azdata.InputBoxComponent;
private _kubeClusters: KubeClusterContext[] = [];
protected dispose(): void {
this._toDispose.forEach(disposable => disposable.dispose());
this._toDispose.length = 0; // clear the _toDispose array
this._toDispose.length = 0;
}
protected getComponents(): (azdata.FormComponent<azdata.Component> & { layout?: azdata.FormItemLayout | undefined; })[] {
return [
{
component: this.namespaceInputBox,
title: loc.namespace,
required: true
},
{
component: this.urlInputBox,
title: loc.controllerUrl,
required: true
layout: {
info: loc.controllerUrlDescription
}
}, {
component: this.kubeConfigInputBox.component(),
title: loc.controllerKubeConfig,
@@ -54,14 +64,17 @@ abstract class ControllerDialogBase extends InitializingComponent {
}, {
component: this.nameInputBox,
title: loc.controllerName,
required: false
required: false,
layout: {
info: loc.controllerNameDescription
}
}, {
component: this.usernameInputBox,
title: loc.username,
title: loc.controllerUsername,
required: true
}, {
component: this.passwordInputBox,
title: loc.password,
title: loc.controllerPassword,
required: true
}
];
@@ -71,11 +84,14 @@ abstract class ControllerDialogBase extends InitializingComponent {
protected readonlyFields(): azdata.Component[] { return []; }
protected initializeFields(controllerInfo: ControllerInfo | undefined, password: string | undefined) {
this.namespaceInputBox = this.modelBuilder.inputBox()
.withProps({
value: controllerInfo?.namespace,
}).component();
this.urlInputBox = this.modelBuilder.inputBox()
.withProperties<azdata.InputBoxProperties>({
value: controllerInfo?.url,
// If we have a model then we're editing an existing connection so don't let them modify the URL
readOnly: !!controllerInfo
.withProps({
value: controllerInfo?.endpoint,
placeHolder: loc.controllerUrlPlaceholder,
}).component();
this.kubeConfigInputBox = new FilePicker(
this.modelBuilder,
@@ -83,22 +99,23 @@ abstract class ControllerDialogBase extends InitializingComponent {
(disposable) => this._toDispose.push(disposable)
);
this.modelBuilder.inputBox()
.withProperties<azdata.InputBoxProperties>({
.withProps({
value: controllerInfo?.kubeConfigFilePath || getDefaultKubeConfigPath()
}).component();
this.clusterContextRadioGroup = new RadioOptionsGroup(this.modelBuilder, (disposable) => this._toDispose.push(disposable));
this.loadRadioGroup(controllerInfo?.kubeClusterContext);
this._toDispose.push(this.clusterContextRadioGroup.onRadioOptionChanged(newContext => this.updateNamespace(newContext)));
this._toDispose.push(this.kubeConfigInputBox.onTextChanged(() => this.loadRadioGroup(controllerInfo?.kubeClusterContext)));
this.nameInputBox = this.modelBuilder.inputBox()
.withProperties<azdata.InputBoxProperties>({
.withProps({
value: controllerInfo?.name
}).component();
this.usernameInputBox = this.modelBuilder.inputBox()
.withProperties<azdata.InputBoxProperties>({
.withProps({
value: controllerInfo?.username
}).component();
this.passwordInputBox = this.modelBuilder.inputBox()
.withProperties<azdata.InputBoxProperties>({
.withProps({
inputType: 'password',
value: password
}).component();
@@ -114,15 +131,22 @@ abstract class ControllerDialogBase extends InitializingComponent {
}
private loadRadioGroup(previousClusterContext?: string): void {
this.clusterContextRadioGroup.load(async () => {
const clusters = await getKubeConfigClusterContexts(this.kubeConfigInputBox.value!);
this.clusterContextRadioGroup.load(() => {
this._kubeClusters = getKubeConfigClusterContexts(this.kubeConfigInputBox.value!);
const currentClusterContext = getCurrentClusterContext(this._kubeClusters, previousClusterContext, false);
this.namespaceInputBox.value = currentClusterContext.namespace || this.namespaceInputBox.value;
return {
values: clusters.map(c => c.name),
defaultValue: getCurrentClusterContext(clusters, previousClusterContext, false),
values: this._kubeClusters.map(c => c.name),
defaultValue: currentClusterContext.name
};
});
}
private updateNamespace(currentContextName: string | undefined): void {
const currentContext = this._kubeClusters.find(cluster => cluster.name === currentContextName);
this.namespaceInputBox.value = currentContext?.namespace;
}
public showDialog(controllerInfo?: ControllerInfo, password: string | undefined = undefined): azdata.window.Dialog {
this.id = controllerInfo?.id ?? uuid();
this.resources = controllerInfo?.resources ?? [];
@@ -168,7 +192,8 @@ abstract class ControllerDialogBase extends InitializingComponent {
protected getControllerInfo(url: string, rememberPassword: boolean = false): ControllerInfo {
return {
id: this.id,
url: url,
endpoint: url || undefined,
namespace: this.namespaceInputBox.value!,
kubeConfigFilePath: this.kubeConfigInputBox.value!,
kubeClusterContext: this.clusterContextRadioGroup.value!,
name: this.nameInputBox.value ?? '',
@@ -183,7 +208,7 @@ export class ConnectToControllerDialog extends ControllerDialogBase {
protected rememberPwCheckBox!: azdata.CheckBoxComponent;
protected fieldToFocusOn() {
return this.urlInputBox;
return this.namespaceInputBox;
}
protected getComponents() {
@@ -209,22 +234,25 @@ export class ConnectToControllerDialog extends ControllerDialogBase {
}
public async validate(): Promise<boolean> {
if (!this.urlInputBox.value || !this.usernameInputBox.value || !this.passwordInputBox.value) {
if (!this.namespaceInputBox.value || !this.usernameInputBox.value || !this.passwordInputBox.value) {
return false;
}
let url = this.urlInputBox.value;
// Only support https connections
if (url.toLowerCase().startsWith('http://')) {
url = url.replace('http', 'https');
}
// Append https if they didn't type it in
if (!url.toLowerCase().startsWith('https://')) {
url = `https://${url}`;
}
// Append default port if one wasn't specified
if (!/.*:\d*$/.test(url)) {
url = `${url}:30080`;
let url = this.urlInputBox.value || '';
if (url) {
// Only support https connections
if (url.toLowerCase().startsWith('http://')) {
url = url.replace('http', 'https');
}
// Append https if they didn't type it in
if (!url.toLowerCase().startsWith('https://')) {
url = `https://${url}`;
}
// Append default port if one wasn't specified
if (!/.*:\d*$/.test(url)) {
url = `${url}:30080`;
}
}
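// Worked example (illustrative, not part of this diff): with the normalization above, a user-entered
// '127.0.0.1' presumably becomes 'https://127.0.0.1:30080', 'http://host:30081' becomes 'https://host:30081',
// and an empty URL now passes through untouched since the namespace, not the URL, is the required field.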
const controllerInfo: ControllerInfo = this.getControllerInfo(url, !!this.rememberPwCheckBox.checked);
const controllerModel = new ControllerModel(this.treeDataProvider, controllerInfo, this.passwordInputBox.value);
try {
@@ -234,7 +262,7 @@ export class ConnectToControllerDialog extends ControllerDialogBase {
controllerModel.info.name = controllerModel.info.name || controllerModel.controllerConfig?.metadata.name || loc.defaultControllerName;
} catch (err) {
this.dialog.message = {
text: loc.connectToControllerFailed(this.urlInputBox.value, err),
text: loc.connectToControllerFailed(this.namespaceInputBox.value, err),
level: azdata.window.MessageLevel.Error
};
return false;
@@ -267,11 +295,16 @@ export class PasswordToControllerDialog extends ControllerDialogBase {
if (!this.passwordInputBox.value) {
return false;
}
const controllerInfo: ControllerInfo = this.getControllerInfo(this.urlInputBox.value!, false);
const controllerModel = new ControllerModel(this.treeDataProvider, controllerInfo, this.passwordInputBox.value);
const azdataApi = <azdataExt.IExtension>vscode.extensions.getExtension(azdataExt.extension.name)?.exports;
try {
await azdataApi.azdata.login(
this.urlInputBox.value!,
this.usernameInputBox.value!,
{
endpoint: controllerInfo.endpoint,
namespace: controllerInfo.namespace
},
controllerInfo.username,
this.passwordInputBox.value,
{
'KUBECONFIG': this.kubeConfigInputBox.value!,
@@ -293,8 +326,6 @@ export class PasswordToControllerDialog extends ControllerDialogBase {
return false;
}
}
const controllerInfo: ControllerInfo = this.getControllerInfo(this.urlInputBox.value!, false);
const controllerModel = new ControllerModel(this.treeDataProvider, controllerInfo, this.passwordInputBox.value);
this.completionPromise.resolve({ controllerModel: controllerModel, password: this.passwordInputBox.value });
return true;
}
@@ -10,7 +10,7 @@ import { ControllerModel } from '../../models/controllerModel';
import { ControllerTreeNode } from './controllerTreeNode';
import { TreeNode } from './treeNode';
const mementoToken = 'arcDataControllers';
const mementoToken = 'arcDataControllers.v2';
/**
* The TreeDataProvider for the Azure Arc view, which displays a list of registered
@@ -44,7 +44,7 @@ export class ControllerTreeNode extends TreeNode {
} catch (err) {
vscode.window.showErrorMessage(loc.errorConnectingToController(err));
try {
await this.model.refresh(false, true);
await this.model.refresh(false);
this.updateChildren(this.model.registrations);
} catch (err) {
if (!(err instanceof UserCancelledError)) {
@@ -2,7 +2,7 @@
"name": "azdata",
"displayName": "%azdata.displayName%",
"description": "%azdata.description%",
"version": "0.6.0",
"version": "0.6.2",
"publisher": "Microsoft",
"preview": true,
"license": "https://raw.githubusercontent.com/Microsoft/azuredatastudio/main/LICENSE.txt",
@@ -55,47 +55,47 @@ export function getAzdataApi(localAzdataDiscovered: Promise<IAzdataTool | undefi
profileName?: string,
storageClass?: string,
additionalEnvVars?: azdataExt.AdditionalEnvVars,
session?: azdataExt.AzdataSession) => {
azdataContext?: string) => {
await localAzdataDiscovered;
throwIfNoAzdataOrEulaNotAccepted(azdataToolService.localAzdata, isEulaAccepted(memento));
return azdataToolService.localAzdata.arc.dc.create(namespace, name, connectivityMode, resourceGroup, location, subscription, profileName, storageClass, additionalEnvVars, session);
return azdataToolService.localAzdata.arc.dc.create(namespace, name, connectivityMode, resourceGroup, location, subscription, profileName, storageClass, additionalEnvVars, azdataContext);
},
endpoint: {
list: async (additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession) => {
list: async (additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string) => {
await localAzdataDiscovered;
throwIfNoAzdataOrEulaNotAccepted(azdataToolService.localAzdata, isEulaAccepted(memento));
return azdataToolService.localAzdata.arc.dc.endpoint.list(additionalEnvVars, session);
return azdataToolService.localAzdata.arc.dc.endpoint.list(additionalEnvVars, azdataContext);
}
},
config: {
list: async (additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession) => {
list: async (additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string) => {
await localAzdataDiscovered;
throwIfNoAzdataOrEulaNotAccepted(azdataToolService.localAzdata, isEulaAccepted(memento));
return azdataToolService.localAzdata.arc.dc.config.list(additionalEnvVars, session);
return azdataToolService.localAzdata.arc.dc.config.list(additionalEnvVars, azdataContext);
},
show: async (additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession) => {
show: async (additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string) => {
await localAzdataDiscovered;
throwIfNoAzdataOrEulaNotAccepted(azdataToolService.localAzdata, isEulaAccepted(memento));
return azdataToolService.localAzdata.arc.dc.config.show(additionalEnvVars, session);
return azdataToolService.localAzdata.arc.dc.config.show(additionalEnvVars, azdataContext);
}
}
},
postgres: {
server: {
delete: async (name: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession) => {
delete: async (name: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string) => {
await localAzdataDiscovered;
throwIfNoAzdataOrEulaNotAccepted(azdataToolService.localAzdata, isEulaAccepted(memento));
return azdataToolService.localAzdata.arc.postgres.server.delete(name, additionalEnvVars, session);
return azdataToolService.localAzdata.arc.postgres.server.delete(name, additionalEnvVars, azdataContext);
},
list: async (additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession) => {
list: async (additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string) => {
await localAzdataDiscovered;
throwIfNoAzdataOrEulaNotAccepted(azdataToolService.localAzdata, isEulaAccepted(memento));
return azdataToolService.localAzdata.arc.postgres.server.list(additionalEnvVars, session);
return azdataToolService.localAzdata.arc.postgres.server.list(additionalEnvVars, azdataContext);
},
show: async (name: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession) => {
show: async (name: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string) => {
await localAzdataDiscovered;
throwIfNoAzdataOrEulaNotAccepted(azdataToolService.localAzdata, isEulaAccepted(memento));
return azdataToolService.localAzdata.arc.postgres.server.show(name, additionalEnvVars, session);
return azdataToolService.localAzdata.arc.postgres.server.show(name, additionalEnvVars, azdataContext);
},
edit: async (
name: string,
@@ -112,31 +112,30 @@ export function getAzdataApi(localAzdataDiscovered: Promise<IAzdataTool | undefi
replaceEngineSettings?: boolean;
workers?: number;
},
engineVersion?: string,
additionalEnvVars?: azdataExt.AdditionalEnvVars,
session?: azdataExt.AzdataSession) => {
azdataContext?: string) => {
await localAzdataDiscovered;
throwIfNoAzdataOrEulaNotAccepted(azdataToolService.localAzdata, isEulaAccepted(memento));
return azdataToolService.localAzdata.arc.postgres.server.edit(name, args, engineVersion, additionalEnvVars, session);
return azdataToolService.localAzdata.arc.postgres.server.edit(name, args, additionalEnvVars, azdataContext);
}
}
},
sql: {
mi: {
delete: async (name: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession) => {
delete: async (name: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string) => {
await localAzdataDiscovered;
throwIfNoAzdataOrEulaNotAccepted(azdataToolService.localAzdata, isEulaAccepted(memento));
return azdataToolService.localAzdata.arc.sql.mi.delete(name, additionalEnvVars, session);
return azdataToolService.localAzdata.arc.sql.mi.delete(name, additionalEnvVars, azdataContext);
},
list: async (additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession) => {
list: async (additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string) => {
await localAzdataDiscovered;
throwIfNoAzdataOrEulaNotAccepted(azdataToolService.localAzdata, isEulaAccepted(memento));
return azdataToolService.localAzdata.arc.sql.mi.list(additionalEnvVars, session);
return azdataToolService.localAzdata.arc.sql.mi.list(additionalEnvVars, azdataContext);
},
show: async (name: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession) => {
show: async (name: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string) => {
await localAzdataDiscovered;
throwIfNoAzdataOrEulaNotAccepted(azdataToolService.localAzdata, isEulaAccepted(memento));
return azdataToolService.localAzdata.arc.sql.mi.show(name, additionalEnvVars, session);
return azdataToolService.localAzdata.arc.sql.mi.show(name, additionalEnvVars, azdataContext);
},
edit: async (
name: string,
@@ -148,11 +147,11 @@ export function getAzdataApi(localAzdataDiscovered: Promise<IAzdataTool | undefi
noWait?: boolean;
},
additionalEnvVars?: azdataExt.AdditionalEnvVars,
session?: azdataExt.AzdataSession
azdataContext?: string
) => {
await localAzdataDiscovered;
throwIfNoAzdataOrEulaNotAccepted(azdataToolService.localAzdata, isEulaAccepted(memento));
return azdataToolService.localAzdata.arc.sql.mi.edit(name, args, additionalEnvVars, session);
return azdataToolService.localAzdata.arc.sql.mi.edit(name, args, additionalEnvVars, azdataContext);
}
}
}
@@ -162,13 +161,9 @@ export function getAzdataApi(localAzdataDiscovered: Promise<IAzdataTool | undefi
throwIfNoAzdata(azdataToolService.localAzdata);
return azdataToolService.localAzdata.getPath();
},
login: async (endpoint: string, username: string, password: string, additionalEnvVars?: azdataExt.AdditionalEnvVars) => {
login: async (endpointOrNamespace: azdataExt.EndpointOrNamespace, username: string, password: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string) => {
throwIfNoAzdataOrEulaNotAccepted(azdataToolService.localAzdata, isEulaAccepted(memento));
return azdataToolService.localAzdata.login(endpoint, username, password, additionalEnvVars);
},
acquireSession: async (endpoint: string, username: string, password: string, additionEnvVars?: azdataExt.AdditionalEnvVars) => {
throwIfNoAzdataOrEulaNotAccepted(azdataToolService.localAzdata, isEulaAccepted(memento));
return azdataToolService.localAzdata?.acquireSession(endpoint, username, password, additionEnvVars);
return azdataToolService.localAzdata.login(endpointOrNamespace, username, password, additionalEnvVars, azdataContext);
},
getSemVersion: async () => {
await localAzdataDiscovered;
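// Note (illustrative sketch, assuming EndpointOrNamespace accepts an object with endpoint and/or namespace, as the
// dialog code above suggests): a caller would presumably log in with something like
// await azdataApi.azdata.login({ endpoint: 'https://127.0.0.1:30080', namespace: 'arc-ns' }, 'admin', password);
// replacing the old acquireSession() flow, with an optional azdataContext string selecting the controller context.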
@@ -13,7 +13,6 @@ import { getPlatformDownloadLink, getPlatformReleaseVersion } from './azdataRele
import { executeCommand, executeSudoCommand, ExitCodeError, ProcessOutput } from './common/childProcess';
import { HttpClient } from './common/httpClient';
import Logger from './common/logger';
import { Deferred } from './common/promise';
import { getErrorMessage, NoAzdataError, searchForCmd } from './common/utils';
import { azdataAcceptEulaKey, azdataConfigSection, azdataFound, azdataInstallKey, azdataUpdateKey, debugConfigKey, eulaAccepted, eulaUrl, microsoftPrivacyStatementUrl } from './constants';
import * as loc from './localizedConstants';
@@ -32,20 +31,7 @@ export interface IAzdataTool extends azdataExt.IAzdataApi {
* @param args The args to pass to azdata
* @param parseResult A function used to parse out the raw result into the desired shape
*/
executeCommand<R>(args: string[], additionalEnvVars?: azdataExt.AdditionalEnvVars): Promise<azdataExt.AzdataOutput<R>>
}
class AzdataSession implements azdataExt.AzdataSession {
private _session = new Deferred<void>();
public sessionEnded(): Promise<void> {
return this._session.promise;
}
public dispose(): void {
this._session.resolve();
}
executeCommand<R>(args: string[], additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string): Promise<azdataExt.AzdataOutput<R>>
}
/**
@@ -54,9 +40,6 @@ class AzdataSession implements azdataExt.AzdataSession {
export class AzdataTool implements azdataExt.IAzdataApi {
private _semVersion: SemVer;
private _currentSession: azdataExt.AzdataSession | undefined = undefined;
private _currentlyExecutingCommands: Deferred<void>[] = [];
private _queuedCommands: { deferred: Deferred<void>, session?: azdataExt.AzdataSession }[] = [];
constructor(private _path: string, version: string) {
this._semVersion = new SemVer(version);
@@ -90,7 +73,7 @@ export class AzdataTool implements azdataExt.IAzdataApi {
profileName?: string,
storageClass?: string,
additionalEnvVars?: azdataExt.AdditionalEnvVars,
session?: azdataExt.AzdataSession): Promise<azdataExt.AzdataOutput<void>> => {
azdataContext?: string): Promise<azdataExt.AzdataOutput<void>> => {
const args = ['arc', 'dc', 'create',
'--namespace', namespace,
'--name', name,
@@ -104,32 +87,32 @@ export class AzdataTool implements azdataExt.IAzdataApi {
if (storageClass) {
args.push('--storage-class', storageClass);
}
return this.executeCommand<void>(args, additionalEnvVars, session);
return this.executeCommand<void>(args, additionalEnvVars, azdataContext);
},
endpoint: {
list: (additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession): Promise<azdataExt.AzdataOutput<azdataExt.DcEndpointListResult[]>> => {
return this.executeCommand<azdataExt.DcEndpointListResult[]>(['arc', 'dc', 'endpoint', 'list'], additionalEnvVars, session);
list: (additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string): Promise<azdataExt.AzdataOutput<azdataExt.DcEndpointListResult[]>> => {
return this.executeCommand<azdataExt.DcEndpointListResult[]>(['arc', 'dc', 'endpoint', 'list'], additionalEnvVars, azdataContext);
}
},
config: {
list: (additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession): Promise<azdataExt.AzdataOutput<azdataExt.DcConfigListResult[]>> => {
return this.executeCommand<azdataExt.DcConfigListResult[]>(['arc', 'dc', 'config', 'list'], additionalEnvVars, session);
list: (additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string): Promise<azdataExt.AzdataOutput<azdataExt.DcConfigListResult[]>> => {
return this.executeCommand<azdataExt.DcConfigListResult[]>(['arc', 'dc', 'config', 'list'], additionalEnvVars, azdataContext);
},
show: (additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession): Promise<azdataExt.AzdataOutput<azdataExt.DcConfigShowResult>> => {
return this.executeCommand<azdataExt.DcConfigShowResult>(['arc', 'dc', 'config', 'show'], additionalEnvVars, session);
show: (additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string): Promise<azdataExt.AzdataOutput<azdataExt.DcConfigShowResult>> => {
return this.executeCommand<azdataExt.DcConfigShowResult>(['arc', 'dc', 'config', 'show'], additionalEnvVars, azdataContext);
}
}
},
postgres: {
server: {
delete: (name: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession): Promise<azdataExt.AzdataOutput<void>> => {
return this.executeCommand<void>(['arc', 'postgres', 'server', 'delete', '-n', name, '--force'], additionalEnvVars, session);
delete: (name: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string): Promise<azdataExt.AzdataOutput<void>> => {
return this.executeCommand<void>(['arc', 'postgres', 'server', 'delete', '-n', name, '--force'], additionalEnvVars, azdataContext);
},
list: (additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession): Promise<azdataExt.AzdataOutput<azdataExt.PostgresServerListResult[]>> => {
return this.executeCommand<azdataExt.PostgresServerListResult[]>(['arc', 'postgres', 'server', 'list'], additionalEnvVars, session);
list: (additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string): Promise<azdataExt.AzdataOutput<azdataExt.PostgresServerListResult[]>> => {
return this.executeCommand<azdataExt.PostgresServerListResult[]>(['arc', 'postgres', 'server', 'list'], additionalEnvVars, azdataContext);
},
show: (name: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession): Promise<azdataExt.AzdataOutput<azdataExt.PostgresServerShowResult>> => {
return this.executeCommand<azdataExt.PostgresServerShowResult>(['arc', 'postgres', 'server', 'show', '-n', name], additionalEnvVars, session);
show: (name: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string): Promise<azdataExt.AzdataOutput<azdataExt.PostgresServerShowResult>> => {
return this.executeCommand<azdataExt.PostgresServerShowResult>(['arc', 'postgres', 'server', 'show', '-n', name], additionalEnvVars, azdataContext);
},
edit: (
name: string,
@@ -146,9 +129,8 @@ export class AzdataTool implements azdataExt.IAzdataApi {
|
||||
replaceEngineSettings?: boolean,
|
||||
workers?: number
|
||||
},
|
||||
engineVersion?: string,
|
||||
additionalEnvVars?: azdataExt.AdditionalEnvVars,
|
||||
session?: azdataExt.AzdataSession): Promise<azdataExt.AzdataOutput<void>> => {
|
||||
azdataContext?: string): Promise<azdataExt.AzdataOutput<void>> => {
|
||||
const argsArray = ['arc', 'postgres', 'server', 'edit', '-n', name];
|
||||
if (args.adminPassword) { argsArray.push('--admin-password'); }
|
||||
if (args.coresLimit) { argsArray.push('--cores-limit', args.coresLimit); }
|
||||
@@ -161,21 +143,20 @@ export class AzdataTool implements azdataExt.IAzdataApi {
|
||||
if (args.port) { argsArray.push('--port', args.port.toString()); }
|
||||
if (args.replaceEngineSettings) { argsArray.push('--replace-engine-settings'); }
|
||||
if (args.workers) { argsArray.push('--workers', args.workers.toString()); }
|
||||
if (engineVersion) { argsArray.push('--engine-version', engineVersion); }
|
||||
return this.executeCommand<void>(argsArray, additionalEnvVars, session);
|
||||
return this.executeCommand<void>(argsArray, additionalEnvVars, azdataContext);
|
||||
}
|
||||
}
|
||||
},
|
||||
sql: {
|
||||
mi: {
|
||||
delete: (name: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession): Promise<azdataExt.AzdataOutput<void>> => {
|
||||
return this.executeCommand<void>(['arc', 'sql', 'mi', 'delete', '-n', name], additionalEnvVars, session);
|
||||
delete: (name: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string): Promise<azdataExt.AzdataOutput<void>> => {
|
||||
return this.executeCommand<void>(['arc', 'sql', 'mi', 'delete', '-n', name], additionalEnvVars, azdataContext);
|
||||
},
|
||||
list: (additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession): Promise<azdataExt.AzdataOutput<azdataExt.SqlMiListResult[]>> => {
|
||||
return this.executeCommand<azdataExt.SqlMiListResult[]>(['arc', 'sql', 'mi', 'list'], additionalEnvVars, session);
|
||||
list: (additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string): Promise<azdataExt.AzdataOutput<azdataExt.SqlMiListResult[]>> => {
|
||||
return this.executeCommand<azdataExt.SqlMiListResult[]>(['arc', 'sql', 'mi', 'list'], additionalEnvVars, azdataContext);
|
||||
},
|
||||
show: (name: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession): Promise<azdataExt.AzdataOutput<azdataExt.SqlMiShowResult>> => {
|
||||
return this.executeCommand<azdataExt.SqlMiShowResult>(['arc', 'sql', 'mi', 'show', '-n', name], additionalEnvVars, session);
|
||||
show: (name: string, additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string): Promise<azdataExt.AzdataOutput<azdataExt.SqlMiShowResult>> => {
|
||||
return this.executeCommand<azdataExt.SqlMiShowResult>(['arc', 'sql', 'mi', 'show', '-n', name], additionalEnvVars, azdataContext);
|
||||
},
|
||||
edit: (
|
||||
name: string,
|
||||
@@ -186,8 +167,7 @@ export class AzdataTool implements azdataExt.IAzdataApi {
|
||||
memoryRequest?: string,
|
||||
noWait?: boolean,
|
||||
},
|
||||
additionalEnvVars?: azdataExt.AdditionalEnvVars,
|
||||
session?: azdataExt.AzdataSession
|
||||
additionalEnvVars?: azdataExt.AdditionalEnvVars
|
||||
): Promise<azdataExt.AzdataOutput<void>> => {
|
||||
const argsArray = ['arc', 'sql', 'mi', 'edit', '-n', name];
|
||||
if (args.coresLimit) { argsArray.push('--cores-limit', args.coresLimit); }
|
||||
@@ -195,59 +175,22 @@ export class AzdataTool implements azdataExt.IAzdataApi {
|
||||
if (args.memoryLimit) { argsArray.push('--memory-limit', args.memoryLimit); }
|
||||
if (args.memoryRequest) { argsArray.push('--memory-request', args.memoryRequest); }
|
||||
if (args.noWait) { argsArray.push('--no-wait'); }
|
||||
return this.executeCommand<void>(argsArray, additionalEnvVars, session);
|
||||
return this.executeCommand<void>(argsArray, additionalEnvVars);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
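
With this change, every command in the tree above takes an optional controller-context string where it previously took a session object. As a rough illustration only (not code from this change), assuming a variable `azdataApi` already holds an `IAzdataApi` implementation and a controller context named 'my-controller' was registered beforehand with azdata login, a caller might do something like:

import * as azdataExt from 'azdata-ext';

// Sketch only: `azdataApi` and 'my-controller' are placeholder names.
async function listPostgresServers(azdataApi: azdataExt.IAzdataApi): Promise<void> {
	// No extra environment variables; route the call at the 'my-controller' context.
	const output = await azdataApi.arc.postgres.server.list(undefined, 'my-controller');
	// `output` is an AzdataOutput<PostgresServerListResult[]> per the typings changed below.
	console.log(output.logs);
}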

public async login(endpoint: string, username: string, password: string, additionalEnvVars: azdataExt.AdditionalEnvVars = {}): Promise<azdataExt.AzdataOutput<void>> {
// Since login changes the context we want to wait until all currently executing commands are finished before this is executed
while (this._currentlyExecutingCommands.length > 0) {
await this._currentlyExecutingCommands[0];
}
// Logins need to be done outside the session aware logic so call impl directly
return this.executeCommandImpl<void>(['login', '-e', endpoint, '-u', username], Object.assign({}, additionalEnvVars, { 'AZDATA_PASSWORD': password }));
}

public async acquireSession(endpoint: string, username: string, password: string, additionalEnvVars?: azdataExt.AdditionalEnvVars): Promise<azdataExt.AzdataSession> {
const session = new AzdataSession();
session.sessionEnded().then(async () => {
// Wait for all commands running for this session to end
while (this._currentlyExecutingCommands.length > 0) {
await this._currentlyExecutingCommands[0].promise;
}
this._currentSession = undefined;
// Start our next command now that we're all done with this session
// TODO: Should we check if the command has a session that hasn't started? That should never happen..
// TODO: Look into kicking off multiple commands
this._queuedCommands.shift()?.deferred.resolve();
});

// We're not in a session or waiting on anything so just set the current session right now
if (!this._currentSession && this._queuedCommands.length === 0) {
this._currentSession = session;
public async login(endpointOrNamespace: azdataExt.EndpointOrNamespace, username: string, password: string, additionalEnvVars: azdataExt.AdditionalEnvVars = {}, azdataContext?: string): Promise<azdataExt.AzdataOutput<void>> {
const args = ['login', '-u', username];
if (endpointOrNamespace.endpoint) {
args.push('-e', endpointOrNamespace.endpoint);
} else if (endpointOrNamespace.namespace) {
args.push('--namespace', endpointOrNamespace.namespace);
} else {
// We're in a session or another command is executing so add this to the end of the queued commands and wait our turn
const deferred = new Deferred<void>();
deferred.promise.then(() => {
this._currentSession = session;
// We've started a new session so look at all our queued commands and start
// the ones for this session now.
this._queuedCommands = this._queuedCommands.filter(c => {
if (c.session === this._currentSession) {
c.deferred.resolve();
return false;
}
return true;
});
});
this._queuedCommands.push({ deferred, session: undefined });
await deferred.promise;
throw new Error(loc.endpointOrNamespaceRequired);
}

await this.login(endpoint, username, password, additionalEnvVars);
return session;
return this.executeCommand<void>(args, Object.assign({}, additionalEnvVars, { 'AZDATA_PASSWORD': password }), azdataContext);
}
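
For reference, a minimal sketch of how the reworked login might be called, assuming an AzdataTool instance named `tool`; the endpoint, namespace, and credential values below are placeholders:

// Sketch only: `tool`, the URL, the namespace, and the credentials are hypothetical.
// Log in with a direct controller endpoint...
await tool.login({ endpoint: 'https://10.0.0.4:30080' }, 'admin', 'myPassword');
// ...or with just the Kubernetes namespace the data controller lives in.
await tool.login({ namespace: 'arc' }, 'admin', 'myPassword');
// Passing neither field rejects with loc.endpointOrNamespaceRequired
// ("Either an endpoint or a namespace must be specified").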

/**
@@ -265,34 +208,16 @@ export class AzdataTool implements azdataExt.IAzdataApi {
};
}

public async executeCommand<R>(args: string[], additionalEnvVars?: azdataExt.AdditionalEnvVars, session?: azdataExt.AzdataSession): Promise<azdataExt.AzdataOutput<R>> {
if (this._currentSession && this._currentSession !== session) {
const deferred = new Deferred<void>();
this._queuedCommands.push({ deferred, session: session });
await deferred.promise;
}
const executingDeferred = new Deferred<void>();
this._currentlyExecutingCommands.push(executingDeferred);
try {
return await this.executeCommandImpl<R>(args, additionalEnvVars);
}
finally {
this._currentlyExecutingCommands = this._currentlyExecutingCommands.filter(c => c !== executingDeferred);
executingDeferred.resolve();
// If there isn't an active session and we still have queued commands then we have to manually kick off the next one
if (this._queuedCommands.length > 0 && !this._currentSession) {
this._queuedCommands.shift()?.deferred.resolve();
}
}
}

/**
* Executes the specified azdata command. This is NOT session-aware so should only be used for calls that don't care about a session
* Executes the specified azdata command.
* @param args The args to pass to azdata
* @param additionalEnvVars Additional environment variables to set for this execution
*/
private async executeCommandImpl<R>(args: string[], additionalEnvVars?: azdataExt.AdditionalEnvVars): Promise<azdataExt.AzdataOutput<R>> {
public async executeCommand<R>(args: string[], additionalEnvVars?: azdataExt.AdditionalEnvVars, azdataContext?: string): Promise<azdataExt.AzdataOutput<R>> {
try {
if (azdataContext) {
args = args.concat('--controller-context', azdataContext);
}
const output = JSON.parse((await executeAzdataCommand(`"${this._path}"`, args.concat(['--output', 'json']), additionalEnvVars)).stdout);
return {
logs: <string[]>output.log,
logs: <string[]>output.log,
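
Every command now funnels through this single executeCommand path: when a context is supplied it is appended as --controller-context before the trailing --output json. A small standalone sketch of that argument handling (not the extension's code, names are illustrative):

// Minimal sketch of the argument building shown above.
function buildAzdataArgs(args: string[], azdataContext?: string): string[] {
	const finalArgs = [...args];
	if (azdataContext) {
		// Route the command at a previously registered controller context.
		finalArgs.push('--controller-context', azdataContext);
	}
	finalArgs.push('--output', 'json');
	return finalArgs;
}

// buildAzdataArgs(['arc', 'dc', 'config', 'show'], 'my-controller')
// => ['arc', 'dc', 'config', 'show', '--controller-context', 'my-controller', '--output', 'json']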

@@ -66,3 +66,4 @@ export const promptForEula = (privacyStatementUrl: string, eulaUrl: string) => l
export const promptForEulaLog = (privacyStatementUrl: string, eulaUrl: string) => promptLog(promptForEula(privacyStatementUrl, eulaUrl));
export const userResponseToEulaPrompt = (response: string | undefined) => localize('azdata.promptForEulaResponse', "User response to EULA prompt: {0}", response);
export const eulaAcceptedStateOnStartup = (eulaAccepted: boolean) => localize('azdata.eulaAcceptedStateOnStartup', "'EULA Accepted' state on startup: {0}", eulaAccepted);
export const endpointOrNamespaceRequired = localize('azdata.endpointOrNamespaceRequired', "Either an endpoint or a namespace must be specified");

@@ -96,15 +96,8 @@ describe('api', function (): void {
async function assertApiCalls(api: azdataExt.IExtension, assertCallback: (promise: Promise<any>, message: string) => Promise<void>): Promise<void> {
await assertCallback(api.azdata.getPath(), 'getPath');
await assertCallback(api.azdata.getSemVersion(), 'getSemVersion');
await assertCallback(api.azdata.login('', '', ''), 'login');
await assertCallback((async () => {
let session: azdataExt.AzdataSession | undefined;
try {
session = await api.azdata.acquireSession('', '', '');
} finally {
session?.dispose();
}
})(), 'acquireSession');
await assertCallback(api.azdata.login({ endpoint: 'https://127.0.0.1' }, '', ''), 'login');
await assertCallback(api.azdata.login({ namespace: 'namespace' }, '', ''), 'login');
await assertCallback(api.azdata.version(), 'version');

await assertCallback(api.azdata.arc.dc.create('', '', '', '', '', ''), 'arc dc create');
@@ -117,7 +110,7 @@ describe('api', function (): void {
await assertCallback(api.azdata.arc.sql.mi.list(), 'arc sql mi list');
await assertCallback(api.azdata.arc.sql.mi.delete(''), 'arc sql mi delete');
await assertCallback(api.azdata.arc.sql.mi.show(''), 'arc sql mi show');
await assertCallback(api.azdata.arc.sql.mi.edit('', { }), 'arc sql mi edit');
await assertCallback(api.azdata.arc.sql.mi.edit('', {}), 'arc sql mi edit');
await assertCallback(api.azdata.arc.postgres.server.list(), 'arc sql postgres server list');
await assertCallback(api.azdata.arc.postgres.server.delete(''), 'arc sql postgres server delete');
await assertCallback(api.azdata.arc.postgres.server.show(''), 'arc sql postgres server show');

@@ -3,7 +3,6 @@
* Licensed under the Source EULA. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/

import * as azdataExt from 'azdata-ext';
import * as should from 'should';
import * as sinon from 'sinon';
import * as vscode from 'vscode';
@@ -17,7 +16,6 @@ import * as fs from 'fs';
import { AzdataReleaseInfo } from '../azdataReleaseInfo';
import * as TypeMoq from 'typemoq';
import { eulaAccepted } from '../constants';
import { sleep } from './testUtils';

const oldAzdataMock = new azdata.AzdataTool('/path/to/azdata', '0.0.0');
const currentAzdataMock = new azdata.AzdataTool('/path/to/azdata', '9999.999.999');
@@ -222,120 +220,10 @@ describe('azdata', function () {
const endpoint = 'myEndpoint';
const username = 'myUsername';
const password = 'myPassword';
await azdataTool.login(endpoint, username, password);
await azdataTool.login({ endpoint: endpoint }, username, password);
verifyExecuteCommandCalledWithArgs(['login', endpoint, username]);
});

describe('acquireSession', function (): void {
it('calls login', async function (): Promise<void> {
const endpoint = 'myEndpoint';
const username = 'myUsername';
const password = 'myPassword';
const session = await azdataTool.acquireSession(endpoint, username, password);
session.dispose();
verifyExecuteCommandCalledWithArgs(['login', endpoint, username]);
});

it('command executed under current session completes', async function (): Promise<void> {
const session = await azdataTool.acquireSession('', '', '');
try {
await azdataTool.arc.dc.config.show(undefined, session);
} finally {
session.dispose();
}
verifyExecuteCommandCalledWithArgs(['login'], 0);
verifyExecuteCommandCalledWithArgs(['arc', 'dc', 'config', 'show'], 1);
});
it('multiple commands executed under current session completes', async function (): Promise<void> {
const session = await azdataTool.acquireSession('', '', '');
try {
// Kick off multiple commands at the same time and then ensure that they both complete
await Promise.all([
azdataTool.arc.dc.config.show(undefined, session),
azdataTool.arc.sql.mi.list(undefined, session)
]);
} finally {
session.dispose();
}
verifyExecuteCommandCalledWithArgs(['login'], 0);
verifyExecuteCommandCalledWithArgs(['arc', 'dc', 'config', 'show'], 1);
verifyExecuteCommandCalledWithArgs(['arc', 'sql', 'mi', 'list'], 2);
});
it('command executed without session context is queued up until session is closed', async function (): Promise<void> {
const session = await azdataTool.acquireSession('', '', '');
let nonSessionCommand: Promise<any> | undefined = undefined;
try {
// Start one command in the current session
await azdataTool.arc.dc.config.show(undefined, session);
// Verify that the command isn't executed until after the session is disposed
let isFulfilled = false;
nonSessionCommand = azdataTool.arc.sql.mi.list().then(() => isFulfilled = true);
await sleep(2000);
should(isFulfilled).equal(false, 'The command should not be completed yet');
} finally {
session.dispose();
}
await nonSessionCommand;
verifyExecuteCommandCalledWithArgs(['login'], 0);
verifyExecuteCommandCalledWithArgs(['arc', 'dc', 'config', 'show'], 1);
verifyExecuteCommandCalledWithArgs(['arc', 'sql', 'mi', 'list'], 2);
});
it('multiple commands executed without session context are queued up until session is closed', async function (): Promise<void> {
const session = await azdataTool.acquireSession('', '', '');
let nonSessionCommand1: Promise<any> | undefined = undefined;
let nonSessionCommand2: Promise<any> | undefined = undefined;
try {
// Start one command in the current session
await azdataTool.arc.dc.config.show(undefined, session);
// Verify that neither command is completed until the session is closed
let isFulfilled = false;
nonSessionCommand1 = azdataTool.arc.sql.mi.list().then(() => isFulfilled = true);
nonSessionCommand2 = azdataTool.arc.postgres.server.list().then(() => isFulfilled = true);
await sleep(2000);
should(isFulfilled).equal(false, 'The commands should not be completed yet');
} finally {
session.dispose();
}
await Promise.all([nonSessionCommand1, nonSessionCommand2]);
verifyExecuteCommandCalledWithArgs(['login'], 0);
verifyExecuteCommandCalledWithArgs(['arc', 'dc', 'config', 'show'], 1);
verifyExecuteCommandCalledWithArgs(['arc', 'sql', 'mi', 'list'], 2);
verifyExecuteCommandCalledWithArgs(['arc', 'postgres', 'server', 'list'], 3);
});
it('attempting to acquire a second session while a first is still active queues the second session', async function (): Promise<void> {
const firstSession = await azdataTool.acquireSession('', '', '');
let sessionPromise: Promise<azdataExt.AzdataSession> | undefined = undefined;
let secondSessionCommand: Promise<any> | undefined = undefined;
try {
try {
// Start one command in the current session
await azdataTool.arc.dc.config.show(undefined, firstSession);
// Verify that none of the commands for the second session are completed before the first is disposed
let isFulfilled = false;
sessionPromise = azdataTool.acquireSession('', '', '');
sessionPromise.then(session => {
isFulfilled = true;
secondSessionCommand = azdataTool.arc.sql.mi.list(undefined, session).then(() => isFulfilled = true);
});
await sleep(2000);
should(isFulfilled).equal(false, 'The commands should not be completed yet');
} finally {
firstSession.dispose();
}
} finally {
(await sessionPromise)?.dispose();
}
should(secondSessionCommand).not.equal(undefined, 'The second command should have been queued already');
await secondSessionCommand!;

verifyExecuteCommandCalledWithArgs(['login'], 0);
verifyExecuteCommandCalledWithArgs(['arc', 'dc', 'config', 'show'], 1);
verifyExecuteCommandCalledWithArgs(['login'], 2);
verifyExecuteCommandCalledWithArgs(['arc', 'sql', 'mi', 'list'], 3);
});
});

it('version', async function (): Promise<void> {
executeCommandStub.resolves({ stdout: '1.0.0', stderr: '' });
await azdataTool.version();

@@ -18,7 +18,3 @@ export async function assertRejected(promise: Promise<any>, message: string): Pr
throw new Error(message);
}

export async function sleep(ms: number): Promise<void> {
return new Promise(resolve => setTimeout(resolve, ms));
}

47 extensions/azdata/src/typings/azdata-ext.d.ts vendored
@@ -160,7 +160,7 @@ declare module 'azdata-ext' {

export interface PostgresServerShowResult {
apiVersion: string, // "arcdata.microsoft.com/v1alpha1"
kind: string, // "postgresql-12"
kind: string, // "postgresql"
metadata: {
creationTimestamp: string, // "2020-08-19T20:25:11Z"
generation: number, // 1
@@ -177,7 +177,8 @@ declare module 'azdata-ext' {
}[],
settings: {
default: { [key: string]: string } // { "max_connections": "101", "work_mem": "4MB" }
}
},
version: string // "12"
},
scale: {
shards: number, // 1 (shards was renamed to workers, kept here for backwards compatibility)
@@ -244,25 +245,27 @@ declare module 'azdata-ext' {
code?: number
}

export interface AzdataSession extends vscode.Disposable { }

export interface EndpointOrNamespace {
endpoint?: string,
namespace?: string
}
export interface IAzdataApi {
arc: {
dc: {
create(namespace: string, name: string, connectivityMode: string, resourceGroup: string, location: string, subscription: string, profileName?: string, storageClass?: string, additionalEnvVars?: AdditionalEnvVars, session?: AzdataSession): Promise<AzdataOutput<void>>,
create(namespace: string, name: string, connectivityMode: string, resourceGroup: string, location: string, subscription: string, profileName?: string, storageClass?: string, additionalEnvVars?: AdditionalEnvVars, azdataContext?: string): Promise<AzdataOutput<void>>,
endpoint: {
list(additionalEnvVars?: AdditionalEnvVars, session?: AzdataSession): Promise<AzdataOutput<DcEndpointListResult[]>>
list(additionalEnvVars?: AdditionalEnvVars, azdataContext?: string): Promise<AzdataOutput<DcEndpointListResult[]>>
},
config: {
list(additionalEnvVars?: AdditionalEnvVars, session?: AzdataSession): Promise<AzdataOutput<DcConfigListResult[]>>,
show(additionalEnvVars?: AdditionalEnvVars, session?: AzdataSession): Promise<AzdataOutput<DcConfigShowResult>>
list(additionalEnvVars?: AdditionalEnvVars, azdataContext?: string): Promise<AzdataOutput<DcConfigListResult[]>>,
show(additionalEnvVars?: AdditionalEnvVars, azdataContext?: string): Promise<AzdataOutput<DcConfigShowResult>>
}
},
postgres: {
server: {
delete(name: string, additionalEnvVars?: AdditionalEnvVars, session?: AzdataSession): Promise<AzdataOutput<void>>,
list(additionalEnvVars?: AdditionalEnvVars, session?: AzdataSession): Promise<AzdataOutput<PostgresServerListResult[]>>,
show(name: string, additionalEnvVars?: AdditionalEnvVars, session?: AzdataSession): Promise<AzdataOutput<PostgresServerShowResult>>,
delete(name: string, additionalEnvVars?: AdditionalEnvVars, azdataContext?: string): Promise<AzdataOutput<void>>,
list(additionalEnvVars?: AdditionalEnvVars, azdataContext?: string): Promise<AzdataOutput<PostgresServerListResult[]>>,
show(name: string, additionalEnvVars?: AdditionalEnvVars, azdataContext?: string): Promise<AzdataOutput<PostgresServerShowResult>>,
edit(
name: string,
args: {
@@ -278,17 +281,16 @@ declare module 'azdata-ext' {
replaceEngineSettings?: boolean,
workers?: number
},
engineVersion?: string,
additionalEnvVars?: AdditionalEnvVars,
session?: AzdataSession
azdataContext?: string
): Promise<AzdataOutput<void>>
}
},
sql: {
mi: {
delete(name: string, additionalEnvVars?: AdditionalEnvVars, session?: AzdataSession): Promise<AzdataOutput<void>>,
list(additionalEnvVars?: AdditionalEnvVars, session?: AzdataSession): Promise<AzdataOutput<SqlMiListResult[]>>,
show(name: string, additionalEnvVars?: AdditionalEnvVars, session?: AzdataSession): Promise<AzdataOutput<SqlMiShowResult>>,
delete(name: string, additionalEnvVars?: AdditionalEnvVars, azdataContext?: string): Promise<AzdataOutput<void>>,
list(additionalEnvVars?: AdditionalEnvVars, azdataContext?: string): Promise<AzdataOutput<SqlMiListResult[]>>,
show(name: string, additionalEnvVars?: AdditionalEnvVars, azdataContext?: string): Promise<AzdataOutput<SqlMiShowResult>>,
edit(
name: string,
args: {
@@ -299,22 +301,13 @@ declare module 'azdata-ext' {
noWait?: boolean,
},
additionalEnvVars?: AdditionalEnvVars,
session?: AzdataSession
azdataContext?: string
): Promise<AzdataOutput<void>>
}
}
},
getPath(): Promise<string>,
login(endpoint: string, username: string, password: string, additionalEnvVars?: AdditionalEnvVars): Promise<AzdataOutput<void>>,
/**
* Acquires a session for the specified controller, which will log in to the specified controller and then block all other commands
* that are not part of the original session from executing until the session is released (disposed).
* @param endpoint
* @param username
* @param password
* @param additionalEnvVars
*/
acquireSession(endpoint: string, username: string, password: string, additionalEnvVars?: AdditionalEnvVars): Promise<AzdataSession>,
login(endpointOrNamespace: EndpointOrNamespace, username: string, password: string, additionalEnvVars?: AdditionalEnvVars, azdataContext?: string): Promise<AzdataOutput<void>>,
/**
* The semVersion corresponding to this installation of azdata. version() method should have been run
* before fetching this value to ensure that correct value is returned. This is almost always correct unless